 Documentation/DocBook/80211.tmpl | 1
 Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt | 59
 Documentation/devicetree/bindings/net/can/rcar_canfd.txt | 96
 Documentation/devicetree/bindings/net/cirrus,cs89x0.txt | 13
 Documentation/devicetree/bindings/net/cpsw.txt | 1
 Documentation/devicetree/bindings/net/davinci-mdio.txt | 5
 Documentation/devicetree/bindings/net/dsa/b53.txt | 88
 Documentation/devicetree/bindings/net/dsa/dsa.txt | 278
 Documentation/devicetree/bindings/net/mdio-mux.txt | 3
 Documentation/devicetree/bindings/net/rockchip-dwmac.txt | 3
 Documentation/devicetree/bindings/net/stmmac.txt | 3
 Documentation/devicetree/bindings/phy/brcm,mdio-mux-bus-pci.txt | 27
 Documentation/networking/can.txt | 25
 Documentation/networking/gen_stats.txt | 2
 Documentation/networking/stmmac.txt | 1
 MAINTAINERS | 39
 arch/arm/boot/dts/am33xx.dtsi | 3
 arch/arm/boot/dts/am4372.dtsi | 3
 arch/arm/boot/dts/dm814x.dtsi | 1
 arch/arm/boot/dts/dra7.dtsi | 3
 arch/arm/boot/dts/rk3288.dtsi | 5
 arch/arm/boot/dts/vf610-zii-dev-rev-b.dts | 328
 arch/arm64/boot/dts/broadcom/ns2-svk.dts | 16
 arch/arm64/boot/dts/broadcom/ns2.dtsi | 39
 arch/arm64/net/bpf_jit.h | 3
 arch/arm64/net/bpf_jit_comp.c | 111
 drivers/isdn/hardware/eicon/divasmain.c | 12
 drivers/isdn/hardware/eicon/platform.h | 6
 drivers/net/bonding/bond_main.c | 22
 drivers/net/can/Kconfig | 11
 drivers/net/can/Makefile | 2
 drivers/net/can/dev.c | 140
 drivers/net/can/rcar/Kconfig | 21
 drivers/net/can/rcar/Makefile | 6
 drivers/net/can/rcar/rcar_can.c (renamed from drivers/net/can/rcar_can.c) | 0
 drivers/net/can/rcar/rcar_canfd.c | 1858
 drivers/net/can/sja1000/tscan1.c | 12
 drivers/net/can/slcan.c | 4
 drivers/net/can/spi/mcp251x.c | 7
 drivers/net/can/usb/gs_usb.c | 141
 drivers/net/dsa/Kconfig | 12
 drivers/net/dsa/Makefile | 4
 drivers/net/dsa/b53/Kconfig | 33
 drivers/net/dsa/b53/Makefile | 6
 drivers/net/dsa/b53/b53_common.c | 1787
 drivers/net/dsa/b53/b53_mdio.c | 392
 drivers/net/dsa/b53/b53_mmap.c | 274
 drivers/net/dsa/b53/b53_priv.h | 387
 drivers/net/dsa/b53/b53_regs.h | 434
 drivers/net/dsa/b53/b53_spi.c | 331
 drivers/net/dsa/b53/b53_srab.c | 415
 drivers/net/dsa/bcm_sf2.c | 701
 drivers/net/dsa/bcm_sf2.h | 16
 drivers/net/dsa/bcm_sf2_regs.h | 70
 drivers/net/dsa/mv88e6xxx/Kconfig | 7
 drivers/net/dsa/mv88e6xxx/Makefile | 1
 drivers/net/dsa/mv88e6xxx/chip.c (renamed from drivers/net/dsa/mv88e6xxx.c) | 1838
 drivers/net/dsa/mv88e6xxx/mv88e6xxx.h (renamed from drivers/net/dsa/mv88e6xxx.h) | 47
 drivers/net/ethernet/8390/ax88796.c | 40
 drivers/net/ethernet/agere/et131x.c | 60
 drivers/net/ethernet/allwinner/sun4i-emac.c | 54
 drivers/net/ethernet/altera/altera_tse.h | 1
 drivers/net/ethernet/altera/altera_tse_ethtool.c | 26
 drivers/net/ethernet/altera/altera_tse_main.c | 16
 drivers/net/ethernet/aurora/nb8800.c | 69
 drivers/net/ethernet/aurora/nb8800.h | 1
 drivers/net/ethernet/broadcom/Kconfig | 21
 drivers/net/ethernet/broadcom/bcmsysport.c | 49
 drivers/net/ethernet/broadcom/bcmsysport.h | 1
 drivers/net/ethernet/broadcom/bgmac.c | 152
 drivers/net/ethernet/broadcom/bgmac.h | 5
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 94
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 392
 drivers/net/ethernet/broadcom/bnxt/bnxt.h | 65
 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 199
 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 3
 drivers/net/ethernet/cadence/macb.c | 54
 drivers/net/ethernet/cadence/macb.h | 1
 drivers/net/ethernet/cavium/liquidio/cn66xx_device.c | 39
 drivers/net/ethernet/cavium/liquidio/cn66xx_device.h | 3
 drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 938
 drivers/net/ethernet/cavium/liquidio/lio_main.c | 1105
 drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 383
 drivers/net/ethernet/cavium/liquidio/octeon_config.h | 12
 drivers/net/ethernet/cavium/liquidio/octeon_device.c | 171
 drivers/net/ethernet/cavium/liquidio/octeon_device.h | 45
 drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 171
 drivers/net/ethernet/cavium/liquidio/octeon_droq.h | 21
 drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 82
 drivers/net/ethernet/cavium/liquidio/octeon_main.h | 23
 drivers/net/ethernet/cavium/liquidio/octeon_network.h | 240
 drivers/net/ethernet/cavium/liquidio/octeon_nic.c | 55
 drivers/net/ethernet/cavium/liquidio/octeon_nic.h | 154
 drivers/net/ethernet/cavium/liquidio/request_manager.c | 217
 drivers/net/ethernet/cavium/liquidio/response_manager.c | 11
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 82
 drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 8
 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 10
 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 55
 drivers/net/ethernet/cirrus/cs89x0.c | 12
 drivers/net/ethernet/cisco/enic/enic_ethtool.c | 28
 drivers/net/ethernet/dnet.c | 48
 drivers/net/ethernet/dnet.h | 1
 drivers/net/ethernet/emulex/benet/Kconfig | 8
 drivers/net/ethernet/emulex/benet/be.h | 58
 drivers/net/ethernet/emulex/benet/be_cmds.c | 160
 drivers/net/ethernet/emulex/benet/be_cmds.h | 16
 drivers/net/ethernet/emulex/benet/be_ethtool.c | 66
 drivers/net/ethernet/emulex/benet/be_main.c | 325
 drivers/net/ethernet/emulex/benet/be_roce.c | 2
 drivers/net/ethernet/emulex/benet/be_roce.h | 2
 drivers/net/ethernet/freescale/fec.h | 2
 drivers/net/ethernet/freescale/fec_main.c | 34
 drivers/net/ethernet/freescale/gianfar.c | 2
 drivers/net/ethernet/hisilicon/Kconfig | 2
 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 44
 drivers/net/ethernet/hisilicon/hns/hnae.c | 19
 drivers/net/ethernet/hisilicon/hns/hnae.h | 11
 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 59
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 6
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 247
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 4
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 295
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 45
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 294
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h | 7
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 15
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 7
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 2
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 21
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 10
 drivers/net/ethernet/hisilicon/hns/hns_enet.c | 158
 drivers/net/ethernet/hisilicon/hns/hns_enet.h | 2
 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 49
 drivers/net/ethernet/hisilicon/hns_mdio.c | 150
 drivers/net/ethernet/intel/Kconfig | 43
 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 36
 drivers/net/ethernet/intel/i40e/i40e.h | 1
 drivers/net/ethernet/intel/i40e/i40e_common.c | 56
 drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 4
 drivers/net/ethernet/intel/i40e/i40e_main.c | 359
 drivers/net/ethernet/intel/i40e/i40e_prototype.h | 2
 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 6
 drivers/net/ethernet/intel/i40evf/i40evf_main.c | 6
 drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 8
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 57
 drivers/net/ethernet/lantiq_etop.c | 1
 drivers/net/ethernet/marvell/mvpp2.c | 48
 drivers/net/ethernet/mellanox/mlx4/Kconfig | 7
 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 277
 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 25
 drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 70
 drivers/net/ethernet/mellanox/mlx4/fw.c | 1
 drivers/net/ethernet/mellanox/mlx4/fw.h | 1
 drivers/net/ethernet/mellanox/mlx4/main.c | 1
 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 28
 drivers/net/ethernet/mellanox/mlx4/port.c | 12
 drivers/net/ethernet/mellanox/mlx5/core/Makefile | 8
 drivers/net/ethernet/mellanox/mlx5/core/en.h | 73
 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 9
 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 476
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 201
 drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 335
 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 5
 drivers/net/ethernet/mellanox/mlx5/core/fw.c | 6
 drivers/net/ethernet/mellanox/mlx5/core/main.c | 10
 drivers/net/ethernet/mellanox/mlx5/core/port.c | 48
 drivers/net/ethernet/mellanox/mlx5/core/rl.c | 209
 drivers/net/ethernet/mellanox/mlxsw/core.c | 2
 drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 852
 drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 62
 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 362
 drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 22
 drivers/net/ethernet/nxp/lpc_eth.c | 44
 drivers/net/ethernet/qlogic/Kconfig | 30
 drivers/net/ethernet/qlogic/qed/qed.h | 17
 drivers/net/ethernet/qlogic/qed/qed_cxt.c | 1347
 drivers/net/ethernet/qlogic/qed/qed_cxt.h | 24
 drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 1623
 drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 28
 drivers/net/ethernet/qlogic/qed/qed_dev.c | 646
 drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 55
 drivers/net/ethernet/qlogic/qed/qed_hsi.h | 10948
 drivers/net/ethernet/qlogic/qed/qed_hw.c | 55
 drivers/net/ethernet/qlogic/qed/qed_hw.h | 12
 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 184
 drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 9
 drivers/net/ethernet/qlogic/qed/qed_int.c | 75
 drivers/net/ethernet/qlogic/qed/qed_int.h | 3
 drivers/net/ethernet/qlogic/qed/qed_l2.c | 115
 drivers/net/ethernet/qlogic/qed/qed_main.c | 61
 drivers/net/ethernet/qlogic/qed/qed_mcp.c | 57
 drivers/net/ethernet/qlogic/qed/qed_mcp.h | 3
 drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 43
 drivers/net/ethernet/qlogic/qed/qed_sp.h | 26
 drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 26
 drivers/net/ethernet/qlogic/qed/qed_spq.c | 40
 drivers/net/ethernet/qlogic/qed/qed_sriov.c | 506
 drivers/net/ethernet/qlogic/qed/qed_sriov.h | 9
 drivers/net/ethernet/qlogic/qed/qed_vf.c | 95
 drivers/net/ethernet/qlogic/qed/qed_vf.h | 13
 drivers/net/ethernet/qlogic/qede/Makefile | 1
 drivers/net/ethernet/qlogic/qede/qede.h | 3
 drivers/net/ethernet/qlogic/qede/qede_dcbnl.c | 348
 drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 57
 drivers/net/ethernet/qlogic/qede/qede_main.c | 141
 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 2
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 1
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 4
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 30
 drivers/net/ethernet/rdc/r6040.c | 32
 drivers/net/ethernet/renesas/ravb_main.c | 8
 drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h | 1
 drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c | 31
 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 32
 drivers/net/ethernet/sfc/ef10.c | 744
 drivers/net/ethernet/sfc/ef10_sriov.c | 44
 drivers/net/ethernet/sfc/ef10_sriov.h | 3
 drivers/net/ethernet/sfc/efx.c | 66
 drivers/net/ethernet/sfc/efx.h | 9
 drivers/net/ethernet/sfc/mcdi_pcol.h | 1327
 drivers/net/ethernet/sfc/net_driver.h | 19
 drivers/net/ethernet/sfc/nic.h | 5
 drivers/net/ethernet/stmicro/stmmac/Kconfig | 14
 drivers/net/ethernet/stmicro/stmmac/common.h | 19
 drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 165
 drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 86
 drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 147
 drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 43
 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 96
 drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1
 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 60
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 50
 drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h | 159
 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 10
 drivers/net/ethernet/synopsys/dwc_eth_qos.c | 69
 drivers/net/ethernet/ti/cpsw.c | 97
 drivers/net/ethernet/ti/cpsw.h | 1
 drivers/net/ethernet/ti/davinci_cpdma.c | 137
 drivers/net/ethernet/ti/davinci_cpdma.h | 1
 drivers/net/ethernet/ti/davinci_mdio.c | 169
 drivers/net/ethernet/ti/tlan.c | 1
 drivers/net/ethernet/toshiba/tc35815.c | 10
 drivers/net/ethernet/wiznet/w5100.c | 3
 drivers/net/ethernet/xircom/xirc2ps_cs.c | 4
 drivers/net/fjes/fjes_main.c | 5
 drivers/net/geneve.c | 61
 drivers/net/gtp.c | 1
 drivers/net/hamradio/baycom_par.c | 6
 drivers/net/hyperv/hyperv_net.h | 19
 drivers/net/hyperv/netvsc.c | 25
 drivers/net/hyperv/netvsc_drv.c | 22
 drivers/net/hyperv/rndis_filter.c | 159
 drivers/net/ipvlan/ipvlan_main.c | 22
 drivers/net/loopback.c | 5
 drivers/net/macvlan.c | 59
 drivers/net/macvtap.c | 95
 drivers/net/phy/Kconfig | 23
 drivers/net/phy/Makefile | 5
 drivers/net/phy/fixed_phy.c | 153
 drivers/net/phy/intel-xway.c | 376
 drivers/net/phy/mdio-mux-bcm-iproc.c | 248
 drivers/net/phy/mdio-mux-gpio.c | 2
 drivers/net/phy/mdio-mux-mmioreg.c | 2
 drivers/net/phy/mdio-mux.c | 26
 drivers/net/phy/swphy.c | 179
 drivers/net/phy/swphy.h | 9
 drivers/net/ppp/ppp_generic.c | 3
 drivers/net/team/team.c | 19
 drivers/net/tun.c | 97
 drivers/net/usb/r8152.c | 59
 drivers/net/virtio_net.c | 90
 drivers/net/vmxnet3/Makefile | 4
 drivers/net/vmxnet3/upt1_defs.h | 4
 drivers/net/vmxnet3/vmxnet3_defs.h | 105
 drivers/net/vmxnet3/vmxnet3_drv.c | 285
 drivers/net/vmxnet3/vmxnet3_ethtool.c | 215
 drivers/net/vmxnet3/vmxnet3_int.h | 54
 drivers/net/vrf.c | 561
 drivers/net/vxlan.c | 85
 drivers/net/wan/Kconfig | 22
 drivers/net/wan/Makefile | 2
 drivers/net/wan/fsl_ucc_hdlc.c | 1192
 drivers/net/wan/fsl_ucc_hdlc.h | 147
 drivers/net/wan/slic_ds26522.c | 255
 drivers/net/wan/slic_ds26522.h | 134
 drivers/net/wireless/ath/ath10k/ahb.c | 11
 drivers/net/wireless/ath/ath10k/core.c | 105
 drivers/net/wireless/ath/ath10k/core.h | 24
 drivers/net/wireless/ath/ath10k/debug.c | 32
 drivers/net/wireless/ath/ath10k/hif.h | 14
 drivers/net/wireless/ath/ath10k/htt.h | 6
 drivers/net/wireless/ath/ath10k/htt_rx.c | 36
 drivers/net/wireless/ath/ath10k/hw.c | 26
 drivers/net/wireless/ath/ath10k/hw.h | 73
 drivers/net/wireless/ath/ath10k/mac.c | 59
 drivers/net/wireless/ath/ath10k/pci.c | 245
 drivers/net/wireless/ath/ath10k/pci.h | 6
 drivers/net/wireless/ath/ath10k/rx_desc.h | 87
 drivers/net/wireless/ath/ath10k/targaddrs.h | 3
 drivers/net/wireless/ath/ath10k/txrx.c | 3
 drivers/net/wireless/ath/ath10k/wmi.c | 4
 drivers/net/wireless/ath/ath10k/wmi.h | 4
 drivers/net/wireless/ath/ath5k/pcu.c | 2
 drivers/net/wireless/ath/ath6kl/core.h | 2
 drivers/net/wireless/ath/ath6kl/wmi.c | 3
 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 3
 drivers/net/wireless/ath/ath9k/tx99.c | 3
 drivers/net/wireless/ath/carl9170/Kconfig | 8
 drivers/net/wireless/ath/wil6210/cfg80211.c | 16
 drivers/net/wireless/ath/wil6210/debug.c | 46
 drivers/net/wireless/ath/wil6210/p2p.c | 6
 drivers/net/wireless/ath/wil6210/pcie_bus.c | 68
 drivers/net/wireless/ath/wil6210/pm.c | 25
 drivers/net/wireless/ath/wil6210/txrx.c | 42
 drivers/net/wireless/ath/wil6210/wil6210.h | 6
 drivers/net/wireless/ath/wil6210/wil_platform.h | 4
 drivers/net/wireless/broadcom/b43/Makefile | 2
 drivers/net/wireless/broadcom/b43/leds.c | 8
 drivers/net/wireless/broadcom/b43/main.c | 31
 drivers/net/wireless/broadcom/b43/phy_a.c | 595
 drivers/net/wireless/broadcom/b43/phy_a.h | 22
 drivers/net/wireless/broadcom/b43/phy_common.h | 3
 drivers/net/wireless/broadcom/b43/phy_g.c | 25
 drivers/net/wireless/broadcom/b43/wa.c | 283
 drivers/net/wireless/broadcom/b43/xmit.c | 30
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 47
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 164
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h | 8
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c | 2
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 26
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h | 1
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | 2
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 17
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 6
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 5
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h | 3
 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c | 2
 drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c | 18
 drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h | 2
 drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h | 22
 drivers/net/wireless/intel/iwlegacy/3945.c | 3
 drivers/net/wireless/mac80211_hwsim.c | 97
 drivers/net/wireless/marvell/libertas/if_sdio.c | 3
 drivers/net/wireless/marvell/libertas/if_spi.c | 4
 drivers/net/wireless/marvell/libertas_tf/main.c | 7
 drivers/net/wireless/marvell/mwifiex/11n_aggr.c | 2
 drivers/net/wireless/marvell/mwifiex/init.c | 1
 drivers/net/wireless/marvell/mwifiex/join.c | 2
 drivers/net/wireless/marvell/mwifiex/main.c | 6
 drivers/net/wireless/marvell/mwifiex/main.h | 3
 drivers/net/wireless/marvell/mwifiex/pcie.c | 9
 drivers/net/wireless/marvell/mwifiex/sdio.c | 64
 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c | 5
 drivers/net/wireless/marvell/mwifiex/sta_event.c | 12
 drivers/net/wireless/marvell/mwifiex/uap_txrx.c | 2
 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c | 12
 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c | 17
 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c | 16
 drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c | 16
 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c | 12
 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c | 13
 drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c | 20
 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c | 13
 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 16
 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c | 2
 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 15
 drivers/net/wireless/rsi/rsi_91x_mgmt.c | 2
 drivers/net/wireless/wl3501_cs.c | 31
 drivers/of/Kconfig | 1
 drivers/of/of_mdio.c | 2
 drivers/phy/Kconfig | 8
 drivers/phy/Makefile | 2
 drivers/phy/phy-bcm-ns2-pcie.c | 115
 drivers/s390/net/qeth_core.h | 45
 drivers/s390/net/qeth_core_main.c | 231
 drivers/s390/net/qeth_core_sys.c | 4
 drivers/s390/net/qeth_l2.h | 7
 drivers/s390/net/qeth_l2_main.c | 105
 drivers/s390/net/qeth_l3.h | 31
 drivers/s390/net/qeth_l3_main.c | 1024
 drivers/s390/net/qeth_l3_sys.c | 78
 drivers/soc/fsl/qe/Kconfig | 6
 drivers/soc/fsl/qe/Makefile | 1
 drivers/soc/fsl/qe/qe.c | 6
 drivers/soc/fsl/qe/qe_tdm.c | 276
 drivers/soc/fsl/qe/ucc.c | 450
 drivers/soc/fsl/qe/ucc_fast.c | 36
 drivers/vhost/net.c | 64
 fs/afs/rxrpc.c | 34
 include/linux/acpi.h | 13
 include/linux/bpf.h | 25
 include/linux/icmpv6.h | 5
 include/linux/ipv6.h | 7
 include/linux/mdio-mux.h | 4
 include/linux/mlx4/device.h | 2
 include/linux/mlx5/device.h | 4
 include/linux/mlx5/driver.h | 27
 include/linux/mlx5/mlx5_ifc.h | 275
 include/linux/mlx5/port.h | 16
 include/linux/netdev_features.h | 7
 include/linux/netdevice.h | 91
 include/linux/of_mdio.h | 8
 include/linux/platform_data/b53.h | 33
 include/linux/ptr_ring.h | 393
 include/linux/qed/common_hsi.h | 397
 include/linux/qed/eth_common.h | 124
 include/linux/qed/iscsi_common.h | 1439
 include/linux/qed/qed_chain.h | 556
 include/linux/qed/qed_eth_if.h | 63
 include/linux/qed/qed_if.h | 182
 include/linux/qed/rdma_common.h | 44
 include/linux/qed/roce_common.h | 17
 include/linux/qed/storage_common.h | 91
 include/linux/qed/tcp_common.h | 226
 include/linux/rtnetlink.h | 5
 include/linux/rxrpc.h | 21
 include/linux/skb_array.h | 169
 include/linux/skbuff.h | 8
 include/linux/stmmac.h | 3
 include/linux/virtio_net.h | 101
 include/net/6lowpan.h | 16
 include/net/act_api.h | 28
 include/net/addrconf.h | 10
 include/net/cfg80211.h | 81
 include/net/codel_qdisc.h | 1
 include/net/dsa.h | 56
 include/net/fib_rules.h | 24
 include/net/gen_stats.h | 12
 include/net/geneve.h | 9
 include/net/ip6_route.h | 23
 include/net/ip_tunnels.h | 2
 include/net/l3mdev.h | 60
 include/net/mac80211.h | 18
 include/net/ndisc.h | 248
 include/net/pkt_sched.h | 4
 include/net/sch_generic.h | 158
 include/net/sctp/sctp.h | 4
 include/net/sctp/structs.h | 5
 include/net/tc_act/tc_defact.h | 4
 include/net/tcp.h | 2
 include/net/udp.h | 4
 include/net/udp_tunnel.h | 42
 include/net/vxlan.h | 13
 include/soc/fsl/qe/immap_qe.h | 5
 include/soc/fsl/qe/qe.h | 19
 include/soc/fsl/qe/qe_tdm.h | 94
 include/soc/fsl/qe/ucc.h | 4
 include/soc/fsl/qe/ucc_fast.h | 27
 include/uapi/linux/can/bcm.h | 1
 include/uapi/linux/ethtool.h | 3
 include/uapi/linux/fib_rules.h | 1
 include/uapi/linux/icmp.h | 1
 include/uapi/linux/if_tunnel.h | 1
 include/uapi/linux/inet_diag.h | 1
 include/uapi/linux/netlink_diag.h | 1
 include/uapi/linux/nl80211.h | 14
 include/uapi/linux/openvswitch.h | 8
 include/uapi/linux/pkt_cls.h | 7
 include/uapi/linux/virtio_net.h | 9
 include/uapi/linux/wireless.h | 63
 kernel/bpf/arraymap.c | 116
 kernel/bpf/syscall.c | 13
 kernel/trace/bpf_trace.c | 18
 net/6lowpan/6lowpan_i.h | 4
 net/6lowpan/Makefile | 2
 net/6lowpan/core.c | 50
 net/6lowpan/debugfs.c | 39
 net/6lowpan/iphc.c | 167
 net/6lowpan/ndisc.c | 234
 net/batman-adv/routing.c | 2
 net/batman-adv/send.c | 4
 net/bluetooth/6lowpan.c | 13
 net/caif/chnl_net.c | 1
 net/can/Makefile | 3
 net/can/af_can.c | 22
 net/can/bcm.c | 309
 net/can/proc.c | 3
 net/core/dev.c | 59
 net/core/ethtool.c | 1
 net/core/fib_rules.c | 33
 net/core/filter.c | 78
 net/core/gen_estimator.c | 24
 net/core/gen_stats.c | 35
 net/core/pktgen.c | 1
 net/core/rtnetlink.c | 22
 net/core/skbuff.c | 46
 net/core/utils.c | 8
 net/dsa/Makefile | 2
 net/dsa/dsa.c | 253
 net/dsa/dsa2.c | 690
 net/dsa/dsa_priv.h | 9
 net/dsa/slave.c | 80
 net/dsa/tag_brcm.c | 4
 net/dsa/tag_dsa.c | 10
 net/dsa/tag_edsa.c | 10
 net/dsa/tag_trailer.c | 4
 net/ieee802154/6lowpan/core.c | 27
 net/ieee802154/6lowpan/tx.c | 113
 net/ipv4/Kconfig | 16
 net/ipv4/Makefile | 1
 net/ipv4/fib_rules.c | 6
 net/ipv4/fou.c | 81
 net/ipv4/gre_demux.c | 1
 net/ipv4/inet_diag.c | 25
 net/ipv4/inet_fragment.c | 2
 net/ipv4/ip_forward.c | 2
 net/ipv4/ip_gre.c | 51
 net/ipv4/ip_output.c | 2
 net/ipv4/ip_tunnel.c | 2
 net/ipv4/tcp_dctcp.c | 4
 net/ipv4/tcp_input.c | 31
 net/ipv4/tcp_nv.c | 476
 net/ipv4/tcp_output.c | 4
 net/ipv4/udp_tunnel.c | 61
 net/ipv4/xfrm4_policy.c | 8
 net/ipv6/addrconf.c | 268
 net/ipv6/af_inet6.c | 6
 net/ipv6/fib6_rules.c | 6
 net/ipv6/icmp.c | 78
 net/ipv6/ila/ila.h | 3
 net/ipv6/ila/ila_common.c | 16
 net/ipv6/ila/ila_lwt.c | 4
 net/ipv6/ila/ila_xlat.c | 8
 net/ipv6/ip6_icmp.c | 2
 net/ipv6/ip6_input.c | 1
 net/ipv6/ip6_output.c | 14
 net/ipv6/ndisc.c | 123
 net/ipv6/ping.c | 4
 net/ipv6/raw.c | 8
 net/ipv6/route.c | 32
 net/ipv6/sit.c | 47
 net/ipv6/udp.c | 8
 net/ipv6/xfrm6_policy.c | 4
 net/iucv/af_iucv.c | 223
 net/l2tp/l2tp_eth.c | 4
 net/l2tp/l2tp_ip6.c | 8
 net/l3mdev/l3mdev.c | 64
 net/mac80211/agg-tx.c | 8
 net/mac80211/debugfs.c | 173
 net/mac80211/debugfs_sta.c | 78
 net/mac80211/ieee80211_i.h | 31
 net/mac80211/iface.c | 26
 net/mac80211/main.c | 10
 net/mac80211/rx.c | 2
 net/mac80211/sta_info.c | 14
 net/mac80211/tx.c | 292
 net/mac80211/util.c | 34
 net/mpls/af_mpls.c | 9
 net/netfilter/xt_RATEEST.c | 2
 net/netlink/af_netlink.h | 14
 net/openvswitch/actions.c | 40
 net/openvswitch/conntrack.c | 69
 net/openvswitch/datapath.c | 42
 net/openvswitch/datapath.h | 5
 net/openvswitch/flow_netlink.c | 9
 net/openvswitch/vport-internal_dev.c | 2
 net/openvswitch/vport.c | 1
 net/packet/af_packet.c | 36
 net/rds/cong.c | 3
 net/rds/connection.c | 329
 net/rds/ib.c | 1
 net/rds/ib_cm.c | 3
 net/rds/ib_rdma.c | 3
 net/rds/ib_recv.c | 1
 net/rds/ib_send.c | 1
 net/rds/loop.c | 1
 net/rds/rdma_transport.c | 1
 net/rds/rds.h | 152
 net/rds/rds_single_path.h | 30
 net/rds/recv.c | 27
 net/rds/send.c | 293
 net/rds/tcp.c | 7
 net/rds/tcp_connect.c | 4
 net/rds/tcp_listen.c | 11
 net/rds/tcp_recv.c | 1
 net/rds/tcp_send.c | 1
 net/rds/threads.c | 95
 net/rxrpc/Makefile | 36
 net/rxrpc/af_rxrpc.c | 297
 net/rxrpc/ar-connection.c | 927
 net/rxrpc/ar-error.c | 230
 net/rxrpc/ar-internal.h | 423
 net/rxrpc/ar-local.c | 415
 net/rxrpc/ar-peer.c | 303
 net/rxrpc/ar-transport.c | 284
 net/rxrpc/call_accept.c (renamed from net/rxrpc/ar-accept.c) | 46
 net/rxrpc/call_event.c (renamed from net/rxrpc/ar-ack.c) | 37
 net/rxrpc/call_object.c (renamed from net/rxrpc/ar-call.c) | 367
 net/rxrpc/conn_client.c | 94
 net/rxrpc/conn_event.c (renamed from net/rxrpc/ar-connevent.c) | 33
 net/rxrpc/conn_object.c | 686
 net/rxrpc/input.c (renamed from net/rxrpc/ar-input.c) | 69
 net/rxrpc/key.c (renamed from net/rxrpc/ar-key.c) | 6
 net/rxrpc/local_event.c | 116
 net/rxrpc/local_object.c | 387
 net/rxrpc/misc.c | 6
 net/rxrpc/output.c (renamed from net/rxrpc/ar-output.c) | 233
 net/rxrpc/peer_event.c | 281
 net/rxrpc/peer_object.c | 315
 net/rxrpc/proc.c (renamed from net/rxrpc/ar-proc.c) | 39
 net/rxrpc/recvmsg.c (renamed from net/rxrpc/ar-recvmsg.c) | 10
 net/rxrpc/rxkad.c | 76
 net/rxrpc/security.c (renamed from net/rxrpc/ar-security.c) | 8
 net/rxrpc/skbuff.c (renamed from net/rxrpc/ar-skbuff.c) | 2
 net/rxrpc/sysctl.c | 12
 net/rxrpc/utils.c | 41
 net/sched/act_api.c | 32
 net/sched/act_bpf.c | 8
 net/sched/act_connmark.c | 6
 net/sched/act_csum.c | 7
 net/sched/act_gact.c | 7
 net/sched/act_ife.c | 13
 net/sched/act_ipt.c | 19
 net/sched/act_mirred.c | 9
 net/sched/act_nat.c | 7
 net/sched/act_pedit.c | 8
 net/sched/act_police.c | 12
 net/sched/act_simple.c | 10
 net/sched/act_skbedit.c | 10
 net/sched/act_vlan.c | 13
 net/sched/cls_api.c | 48
 net/sched/cls_flower.c | 65
 net/sched/sch_api.c | 30
 net/sched/sch_atm.c | 33
 net/sched/sch_blackhole.c | 5
 net/sched/sch_cbq.c | 305
 net/sched/sch_choke.c | 41
 net/sched/sch_codel.c | 10
 net/sched/sch_drr.c | 38
 net/sched/sch_dsmark.c | 27
 net/sched/sch_fifo.c | 18
 net/sched/sch_fq.c | 29
 net/sched/sch_fq_codel.c | 64
 net/sched/sch_generic.c | 90
 net/sched/sch_gred.c | 42
 net/sched/sch_hfsc.c | 44
 net/sched/sch_hhf.c | 24
 net/sched/sch_htb.c | 68
 net/sched/sch_mq.c | 2
 net/sched/sch_mqprio.c | 11
 net/sched/sch_multiq.c | 32
 net/sched/sch_netem.c | 73
 net/sched/sch_pie.c | 7
 net/sched/sch_plug.c | 19
 net/sched/sch_prio.c | 27
 net/sched/sch_qfq.c | 63
 net/sched/sch_red.c | 28
 net/sched/sch_sfb.c | 7
 net/sched/sch_sfq.c | 11
 net/sched/sch_tbf.c | 34
 net/sched/sch_teql.c | 4
 net/sctp/Makefile | 3
 net/sctp/input.c | 57
 net/sctp/inqueue.c | 78
 net/sctp/offload.c | 98
 net/sctp/output.c | 366
 net/sctp/protocol.c | 6
 net/sctp/sm_sideeffect.c | 4
 net/sctp/socket.c | 10
 net/tipc/Makefile | 2
 net/tipc/addr.h | 1
 net/tipc/bearer.c | 8
 net/tipc/bearer.h | 2
 net/tipc/core.c | 1
 net/tipc/core.h | 15
 net/tipc/discover.c | 5
 net/tipc/link.c | 51
 net/tipc/monitor.c | 651
 net/tipc/monitor.h | 73
 net/tipc/node.c | 48
 net/tipc/server.c | 3
 net/tipc/udp_media.c | 24
 net/wireless/core.c | 30
 net/wireless/core.h | 4
 net/wireless/nl80211.c | 232
 net/wireless/nl80211.h | 2
 net/wireless/sme.c | 8
 samples/bpf/sockex2_user.c | 3
 samples/bpf/sockex3_user.c | 3
 tools/virtio/ringtest/Makefile | 5
 tools/virtio/ringtest/ptr_ring.c | 192
682 files changed, 52133 insertions, 20231 deletions
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index 5f7c55999c77..800fe7a9024c 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -136,6 +136,7 @@
 !Finclude/net/cfg80211.h cfg80211_ibss_joined
 !Finclude/net/cfg80211.h cfg80211_connect_result
 !Finclude/net/cfg80211.h cfg80211_connect_bss
+!Finclude/net/cfg80211.h cfg80211_connect_timeout
 !Finclude/net/cfg80211.h cfg80211_roamed
 !Finclude/net/cfg80211.h cfg80211_disconnected
 !Finclude/net/cfg80211.h cfg80211_ready_on_channel
diff --git a/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt b/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt
new file mode 100644
index 000000000000..dfe287a5d6f2
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt
@@ -0,0 +1,59 @@
+Properties for an MDIO bus multiplexer found in Broadcom iProc based SoCs.
+
+This MDIO bus multiplexer defines buses that could be internal as well as
+external to SoCs and can accept MDIO transactions compliant with Clause 22
+or Clause 45. When a child bus is selected, these two properties must also
+be selected to generate the desired MDIO transaction on the appropriate bus.
+
+Required properties in addition to the generic multiplexer properties:
+
+MDIO multiplexer node:
+- compatible: brcm,mdio-mux-iproc.
+
+Every non-ethernet PHY requires a compatible string so that it can be probed
+based on this compatible string.
+
+Additional information regarding generic multiplexer properties can be found
+at: Documentation/devicetree/bindings/net/mdio-mux.txt
+
+
+for example:
+        mdio_mux_iproc: mdio-mux@6602023c {
+                compatible = "brcm,mdio-mux-iproc";
+                reg = <0x6602023c 0x14>;
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                mdio@0 {
+                        reg = <0x0>;
+                        #address-cells = <1>;
+                        #size-cells = <0>;
+
+                        pci_phy0: pci-phy@0 {
+                                compatible = "brcm,ns2-pcie-phy";
+                                reg = <0x0>;
+                                #phy-cells = <0>;
+                        };
+                };
+
+                mdio@7 {
+                        reg = <0x7>;
+                        #address-cells = <1>;
+                        #size-cells = <0>;
+
+                        pci_phy1: pci-phy@0 {
+                                compatible = "brcm,ns2-pcie-phy";
+                                reg = <0x0>;
+                                #phy-cells = <0>;
+                        };
+                };
+                mdio@10 {
+                        reg = <0x10>;
+                        #address-cells = <1>;
+                        #size-cells = <0>;
+
+                        gphy0: eth-phy@10 {
+                                reg = <0x10>;
+                        };
+                };
+        };
diff --git a/Documentation/devicetree/bindings/net/can/rcar_canfd.txt b/Documentation/devicetree/bindings/net/can/rcar_canfd.txt
new file mode 100644
index 000000000000..22a6f10bab05
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/rcar_canfd.txt
@@ -0,0 +1,96 @@
+Renesas R-Car CAN FD controller Device Tree Bindings
+----------------------------------------------------
+
+Required properties:
+- compatible: Must contain one or more of the following:
+  - "renesas,rcar-gen3-canfd" for R-Car Gen3 compatible controller.
+  - "renesas,r8a7795-canfd" for R8A7795 (R-Car H3) compatible controller.
+
+  When compatible with the generic version, nodes must list the
+  SoC-specific version corresponding to the platform first, followed by the
+  family-specific and/or generic versions.
+
+- reg: physical base address and size of the R-Car CAN FD register map.
+- interrupts: interrupt specifier for the Global & Channel interrupts
+- clocks: phandles and clock specifiers for 3 clock inputs.
+- clock-names: 3 clock input name strings: "fck", "canfd", "can_clk".
+- pinctrl-0: pin control group to be used for this controller.
+- pinctrl-names: must be "default".
+
+Required child nodes:
+The controller supports two channels and each is represented as a child node.
+The names of the child nodes are "channel0" and "channel1" respectively. Each
+child node supports the "status" property only, which is used to
+enable/disable the respective channel.
+
+Required properties for "renesas,r8a7795-canfd" compatible:
+In the R8A7795 SoC, the canfd clock is a div6 clock and can be used by both
+the CAN and CAN FD controllers at the same time. It needs to be scaled to
+the maximum frequency if any of these controllers use it. This is done using
+the below properties.
+
+- assigned-clocks: phandle of canfd clock.
+- assigned-clock-rates: maximum frequency of this clock.
+
+Optional property:
+The controller can operate in either CAN FD only mode (default) or
+Classical CAN only mode. The mode is global to both channels. In order to
+enable the latter, define the following optional property.
+ - renesas,no-can-fd: puts the controller in Classical CAN only mode.
+
+Example
+-------
+
+SoC common .dtsi file:
+
+        canfd: can@e66c0000 {
+                compatible = "renesas,r8a7795-canfd",
+                             "renesas,rcar-gen3-canfd";
+                reg = <0 0xe66c0000 0 0x8000>;
+                interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+                             <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+                clocks = <&cpg CPG_MOD 914>,
+                         <&cpg CPG_CORE R8A7795_CLK_CANFD>,
+                         <&can_clk>;
+                clock-names = "fck", "canfd", "can_clk";
+                assigned-clocks = <&cpg CPG_CORE R8A7795_CLK_CANFD>;
+                assigned-clock-rates = <40000000>;
+                power-domains = <&cpg>;
+                status = "disabled";
+
+                channel0 {
+                        status = "disabled";
+                };
+
+                channel1 {
+                        status = "disabled";
+                };
+        };
+
+Board specific .dts file:
+
+E.g. below enables Channel 1 alone in the board in Classical CAN only mode.
+
+&canfd {
+        pinctrl-0 = <&canfd1_pins>;
+        pinctrl-names = "default";
+        renesas,no-can-fd;
+        status = "okay";
+
+        channel1 {
+                status = "okay";
+        };
+};
+
+E.g. below enables Channel 0 alone in the board using the external clock
+as the fCAN clock.
+
+&canfd {
+        pinctrl-0 = <&canfd0_pins &can_clk_pins>;
+        pinctrl-names = "default";
+        status = "okay";
+
+        channel0 {
+                status = "okay";
+        };
+};
diff --git a/Documentation/devicetree/bindings/net/cirrus,cs89x0.txt b/Documentation/devicetree/bindings/net/cirrus,cs89x0.txt
new file mode 100644
index 000000000000..c070076bacb9
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/cirrus,cs89x0.txt
@@ -0,0 +1,13 @@
+* Cirrus Logic CS8900/CS8920 Network Controller
+
+Required properties:
+- compatible : Should be "cirrus,cs8900" or "cirrus,cs8920".
+- reg : Address and length of the IO space.
+- interrupts : Should contain the controller interrupt line.
+
+Examples:
+        eth0: eth@10000000 {
+                compatible = "cirrus,cs8900";
+                reg = <0x10000000 0x400>;
+                interrupts = <10>;
+        };
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index 0ae06491b430..5ad439f30135 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -15,7 +15,6 @@ Required properties:
 - cpdma_channels : Specifies number of channels in CPDMA
 - ale_entries : Specifies No of entries ALE can hold
 - bd_ram_size : Specifies internal descriptor RAM size
-- rx_descs : Specifies number of Rx descriptors
 - mac_control : Specifies Default MAC control register content
   for the specific platform
 - slaves : Specifies number for slaves
diff --git a/Documentation/devicetree/bindings/net/davinci-mdio.txt b/Documentation/devicetree/bindings/net/davinci-mdio.txt
index 0369e25aabd2..621156ca4ffd 100644
--- a/Documentation/devicetree/bindings/net/davinci-mdio.txt
+++ b/Documentation/devicetree/bindings/net/davinci-mdio.txt
@@ -2,7 +2,10 @@ TI SoC Davinci/Keystone2 MDIO Controller Device Tree Bindings
 ---------------------------------------------------
 
 Required properties:
-- compatible : Should be "ti,davinci_mdio" or "ti,keystone_mdio"
+- compatible : Should be "ti,davinci_mdio"
+  and "ti,keystone_mdio" for Keystone 2 SoCs
+  and "ti,cpsw-mdio" for am335x, am472x, am57xx/dra7, dm814x SoCs
+  and "ti,am4372-mdio" for am472x SoC
 - reg : physical base address and size of the davinci mdio
   registers map
 - bus_freq : Mdio Bus frequency
diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt
new file mode 100644
index 000000000000..ca752db14dff
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/b53.txt
@@ -0,0 +1,88 @@
+Broadcom BCM53xx Ethernet switches
+==================================
+
+Required properties:
+
+- compatible: For external switch chips, compatible string must be exactly one
+  of: "brcm,bcm5325"
+      "brcm,bcm53115"
+      "brcm,bcm53125"
+      "brcm,bcm53128"
+      "brcm,bcm5365"
+      "brcm,bcm5395"
+      "brcm,bcm5397"
+      "brcm,bcm5398"
+
+  For the BCM5310x SoCs with an integrated switch, must be one of:
+      "brcm,bcm53010-srab"
+      "brcm,bcm53011-srab"
+      "brcm,bcm53012-srab"
+      "brcm,bcm53018-srab"
+      "brcm,bcm53019-srab" and the mandatory "brcm,bcm5301x-srab" string
+
+  For the BCM63xx/33xx SoCs with an integrated switch, must be one of:
+      "brcm,bcm3384-switch"
+      "brcm,bcm6328-switch"
+      "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch"
+
+See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional
+required and optional properties.
+
+Examples:
+
+Ethernet switch connected via MDIO to the host, CPU port wired to eth0:
+
+        eth0: ethernet@10001000 {
+                compatible = "brcm,unimac";
+                reg = <0x10001000 0x1000>;
+
+                fixed-link {
+                        speed = <1000>;
+                        full-duplex;
+                };
+        };
+
+        mdio0: mdio@10000000 {
+                compatible = "brcm,unimac-mdio";
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                switch0: ethernet-switch@30 {
+                        compatible = "brcm,bcm53125";
+                        #address-cells = <1>;
+                        #size-cells = <0>;
+
+                        ports {
+                                port0@0 {
+                                        reg = <0>;
+                                        label = "lan1";
+                                };
+
+                                port1@1 {
+                                        reg = <1>;
+                                        label = "lan2";
+                                };
+
+                                port5@5 {
+                                        reg = <5>;
+                                        label = "cable-modem";
+                                        fixed-link {
+                                                speed = <1000>;
+                                                full-duplex;
+                                        };
+                                        phy-mode = "rgmii-txid";
+                                };
+
+                                port8@8 {
+                                        reg = <8>;
+                                        label = "cpu";
+                                        fixed-link {
+                                                speed = <1000>;
+                                                full-duplex;
+                                        };
+                                        phy-mode = "rgmii-txid";
+                                        ethernet = <&eth0>;
+                                };
+                        };
+                };
+        };
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index 9f4807f90c31..9bbbe7f87d67 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -1,5 +1,279 @@
-Marvell Distributed Switch Architecture Device Tree Bindings
-------------------------------------------------------------
+Distributed Switch Architecture Device Tree Bindings
+----------------------------------------------------
+
+Two bindings exist, one of which has been deprecated due to
+limitations.
+
+Current Binding
+---------------
+
+Switches are true Linux devices and can be probed by any means. Once
+probed, they register with the DSA framework, passing a node
+pointer. This node is expected to fulfil the following binding, and
+may contain additional properties as required by the device it is
+embedded within.
+
+Required properties:
+
+- ports : A container for child nodes representing switch ports.
+
+Optional properties:
+
+- dsa,member : A two element list indicating which DSA cluster, and position
+               within the cluster, a switch takes. <0 0> is cluster 0,
+               switch 0. <0 1> is cluster 0, switch 1. <1 0> is cluster 1,
+               switch 0. A switch not part of any cluster (single device
+               hanging off a CPU port) must not specify this property
+
+The ports container has the following properties
+
+Required properties:
+
+- #address-cells : Must be 1
+- #size-cells : Must be 0
+
+Each port child node must have the following mandatory properties:
+- reg : Describes the port address in the switch
+- label : Describes the label associated with this port, which
+          will become the netdev name. Special labels are
+          "cpu" to indicate a CPU port and "dsa" to
+          indicate an uplink/downlink port between switches in
+          the cluster.
+
+A port labelled "dsa" has the following mandatory property:
+
+- link : Should be a list of phandles to other switches' DSA
+         ports. This port is used as the outgoing port
+         towards the phandle ports. The full routing
+         information must be given, not just the one hop
+         routes to neighbouring switches.
+
+A port labelled "cpu" has the following mandatory property:
+
+- ethernet : Should be a phandle to a valid Ethernet device node.
+             This host device is what the switch port is
+             connected to.
+
+Port child nodes may also contain the following optional standardised
+properties, described in binding documents:
+
+- phy-handle : Phandle to a PHY on an MDIO bus. See
+               Documentation/devicetree/bindings/net/ethernet.txt
+               for details.
+
+- phy-mode : See
+             Documentation/devicetree/bindings/net/ethernet.txt
+             for details.
+
+- fixed-link : Fixed-link subnode describing a link to a non-MDIO
+               managed entity. See
+               Documentation/devicetree/bindings/net/fixed-link.txt
+               for details.
+
+Example
+
+The following example shows three switches on three MDIO busses,
+linked into one DSA cluster.
+
+&mdio1 {
+    #address-cells = <1>;
+    #size-cells = <0>;
+
+    switch0: switch0@0 {
+        compatible = "marvell,mv88e6085";
+        #address-cells = <1>;
+        #size-cells = <0>;
+        reg = <0>;
+
+        dsa,member = <0 0>;
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+            port@0 {
+                reg = <0>;
+                label = "lan0";
+            };
+
+            port@1 {
+                reg = <1>;
+                label = "lan1";
+            };
+
+            port@2 {
+                reg = <2>;
+                label = "lan2";
+            };
+
+            switch0port5: port@5 {
+                reg = <5>;
+                label = "dsa";
+                phy-mode = "rgmii-txid";
+                link = <&switch1port6
+                        &switch2port9>;
+                fixed-link {
+                    speed = <1000>;
+                    full-duplex;
+                };
+            };
+
+            port@6 {
+                reg = <6>;
+                label = "cpu";
+                ethernet = <&fec1>;
+                fixed-link {
+                    speed = <100>;
+                    full-duplex;
+                };
+            };
+        };
+    };
+};
+
+&mdio2 {
+    #address-cells = <1>;
+    #size-cells = <0>;
+
+    switch1: switch1@0 {
+        compatible = "marvell,mv88e6085";
+        #address-cells = <1>;
+        #size-cells = <0>;
+        reg = <0>;
+
+        dsa,member = <0 1>;
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+            port@0 {
+                reg = <0>;
+                label = "lan3";
+                phy-handle = <&switch1phy0>;
+            };
+
+            port@1 {
+                reg = <1>;
+                label = "lan4";
+                phy-handle = <&switch1phy1>;
+            };
+
+            port@2 {
+                reg = <2>;
+                label = "lan5";
+                phy-handle = <&switch1phy2>;
+            };
+
+            switch1port5: port@5 {
+                reg = <5>;
+                label = "dsa";
+                link = <&switch2port9>;
+                phy-mode = "rgmii-txid";
+                fixed-link {
+                    speed = <1000>;
+                    full-duplex;
+                };
+            };
+
+            switch1port6: port@6 {
+                reg = <6>;
+                label = "dsa";
+                phy-mode = "rgmii-txid";
+                link = <&switch0port5>;
+                fixed-link {
+                    speed = <1000>;
+                    full-duplex;
+                };
+            };
+        };
+        mdio-bus {
+            #address-cells = <1>;
+            #size-cells = <0>;
+            switch1phy0: switch1phy0@0 {
+                reg = <0>;
+            };
+            switch1phy1: switch1phy0@1 {
+                reg = <1>;
+            };
+            switch1phy2: switch1phy0@2 {
+                reg = <2>;
+            };
+        };
+    };
+};
+
+&mdio4 {
+    #address-cells = <1>;
+    #size-cells = <0>;
+
+    switch2: switch2@0 {
+        compatible = "marvell,mv88e6085";
+        #address-cells = <1>;
+        #size-cells = <0>;
+        reg = <0>;
+
+        dsa,member = <0 2>;
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+            port@0 {
+                reg = <0>;
+                label = "lan6";
+            };
+
+            port@1 {
+                reg = <1>;
+                label = "lan7";
+            };
+
+            port@2 {
+                reg = <2>;
+                label = "lan8";
+            };
+
+            port@3 {
+                reg = <3>;
+                label = "optical3";
+                fixed-link {
+                    speed = <1000>;
+                    full-duplex;
+                    link-gpios = <&gpio6 2
+                                  GPIO_ACTIVE_HIGH>;
+                };
+            };
+
+            port@4 {
+                reg = <4>;
+                label = "optical4";
+                fixed-link {
+                    speed = <1000>;
+                    full-duplex;
+                    link-gpios = <&gpio6 3
+                                  GPIO_ACTIVE_HIGH>;
+                };
+            };
+
+            switch2port9: port@9 {
+                reg = <9>;
+                label = "dsa";
+                phy-mode = "rgmii-txid";
+                link = <&switch1port5
+                        &switch0port5>;
+                fixed-link {
+                    speed = <1000>;
+                    full-duplex;
+                };
+            };
+        };
+    };
+};
+
+Deprecated Binding
+------------------
+
+The deprecated binding makes use of a platform device to represent the
+switches. The switches themselves are not Linux devices, and make use
+of an MDIO bus for management.
 
 Required properties:
 - compatible : Should be "marvell,dsa"
diff --git a/Documentation/devicetree/bindings/net/mdio-mux.txt b/Documentation/devicetree/bindings/net/mdio-mux.txt
index 491f5bd55203..f58571f36570 100644
--- a/Documentation/devicetree/bindings/net/mdio-mux.txt
+++ b/Documentation/devicetree/bindings/net/mdio-mux.txt
@@ -5,11 +5,12 @@ numbered uniquely in a device dependent manner. The nodes for an MDIO
 bus multiplexer/switch will have one child node for each child bus.
 
 Required properties:
-- mdio-parent-bus : phandle to the parent MDIO bus.
 - #address-cells = <1>;
 - #size-cells = <0>;
 
 Optional properties:
+- mdio-parent-bus : phandle to the parent MDIO bus.
+
 - Other properties specific to the multiplexer/switch hardware.
 
 Required properties for child nodes:
diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
index 93eac7ce1446..cccd945fc45b 100644
--- a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
@@ -3,7 +3,8 @@ Rockchip SoC RK3288 10/100/1000 Ethernet driver(GMAC)
 The device node has following properties.
 
 Required properties:
- - compatible: Can be one of "rockchip,rk3288-gmac", "rockchip,rk3368-gmac"
+ - compatible: Can be one of "rockchip,rk3228-gmac", "rockchip,rk3288-gmac",
+   "rockchip,rk3368-gmac"
  - reg: addresses and length of the register sets for the device.
  - interrupts: Should contain the GMAC interrupts.
  - interrupt-names: Should contain the interrupt names "macirq".
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 95816c5fc589..41b49e6075f5 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -47,6 +47,9 @@ Optional properties:
   supported by this device instance
 - snps,perfect-filter-entries: Number of perfect filter entries supported
   by this device instance
+- snps,ps-speed: port selection speed that can be passed to the core when
+  PCS is supported. For example, this is used in case of SGMII
+  and MAC2MAC connection.
 - AXI BUS Mode parameters: below the list of all the parameters to program the
   AXI register inside the DMA module:
   - snps,lpi_en: enable Low Power Interface
diff --git a/Documentation/devicetree/bindings/phy/brcm,mdio-mux-bus-pci.txt b/Documentation/devicetree/bindings/phy/brcm,mdio-mux-bus-pci.txt
new file mode 100644
index 000000000000..5b51007c6f24
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/brcm,mdio-mux-bus-pci.txt
@@ -0,0 +1,27 @@
+* Broadcom NS2 PCIe PHY binding document
+
+Required bus properties:
+- reg: MDIO Bus number for the MDIO interface
+- #address-cells: must be 1
+- #size-cells: must be 0
+
+Required PHY properties:
+- compatible: should be "brcm,ns2-pcie-phy"
+- reg: MDIO Phy ID for the MDIO interface
+- #phy-cells: must be 0
+
+This is a child bus node of the "brcm,mdio-mux-iproc" node.
+
+Example:
+
+mdio@0 {
+        reg = <0x0>;
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        pci_phy0: pci-phy@0 {
+                compatible = "brcm,ns2-pcie-phy";
+                reg = <0x0>;
+                #phy-cells = <0>;
+        };
+};
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index d58ff8467953..aa15b9ee2e70 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -31,6 +31,7 @@ This file contains
     4.2.4 Broadcast Manager message sequence transmission
     4.2.5 Broadcast Manager receive filter timers
     4.2.6 Broadcast Manager multiplex message receive filter
+    4.2.7 Broadcast Manager CAN FD support
   4.3 connected transport protocols (SOCK_SEQPACKET)
   4.4 unconnected transport protocols (SOCK_DGRAM)
 
@@ -799,7 +800,7 @@ solution for a couple of reasons:
     } mytxmsg;
 
     (..)
-    mytxmsg.nframes = 4;
+    mytxmsg.msg_head.nframes = 4;
     (..)
 
     write(s, &mytxmsg, sizeof(mytxmsg));
@@ -852,6 +853,28 @@ solution for a couple of reasons:
 
     write(s, &msg, sizeof(msg));
 
+  4.2.7 Broadcast Manager CAN FD support
+
+  The programming API of the CAN_BCM depends on struct can_frame which is
+  given as an array directly behind the bcm_msg_head structure. To follow
+  this schema for CAN FD frames, a new flag 'CAN_FD_FRAME' in the
+  bcm_msg_head flags indicates that the concatenated CAN frame structures
+  behind the bcm_msg_head are defined as struct canfd_frame.
+
+    struct {
+            struct bcm_msg_head msg_head;
+            struct canfd_frame frame[5];
+    } msg;
+
+    msg.msg_head.opcode  = RX_SETUP;
+    msg.msg_head.can_id  = 0x42;
+    msg.msg_head.flags   = CAN_FD_FRAME;
+    msg.msg_head.nframes = 5;
+    (..)
+
+  When using CAN FD frames for multiplex filtering the MUX mask is still
+  expected in the first 64 bits of the struct canfd_frame data section.
+
   4.3 connected transport protocols (SOCK_SEQPACKET)
   4.4 unconnected transport protocols (SOCK_DGRAM)
 
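For illustration only (this sketch is not part of the patch above): a minimal
user-space RX_SETUP for the CAN FD broadcast-manager usage described in 4.2.7,
assuming a kernel with the CAN_FD_FRAME flag from this series. The interface
name "can0" and CAN ID 0x42 are placeholders, and error handling is omitted.

    #include <string.h>
    #include <unistd.h>
    #include <net/if.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/can.h>
    #include <linux/can/bcm.h>

    struct {
            struct bcm_msg_head msg_head;
            struct canfd_frame frame;   /* MUX mask lives in the first 64 data bits */
    } fdmsg;

    int main(void)
    {
            struct sockaddr_can addr = { .can_family = AF_CAN };
            struct ifreq ifr;
            int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);

            strcpy(ifr.ifr_name, "can0");           /* placeholder interface */
            ioctl(s, SIOCGIFINDEX, &ifr);
            addr.can_ifindex = ifr.ifr_ifindex;
            connect(s, (struct sockaddr *)&addr, sizeof(addr));

            fdmsg.msg_head.opcode  = RX_SETUP;
            fdmsg.msg_head.can_id  = 0x42;          /* placeholder CAN ID */
            fdmsg.msg_head.flags   = CAN_FD_FRAME;  /* frames behind the head are canfd_frame */
            fdmsg.msg_head.nframes = 1;
            fdmsg.frame.len = 8;
            memset(fdmsg.frame.data, 0xff, 8);      /* MUX mask: compare all 64 bits */

            write(s, &fdmsg, sizeof(fdmsg));
            return 0;
    }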
diff --git a/Documentation/networking/gen_stats.txt b/Documentation/networking/gen_stats.txt
index ff630a87b511..179b18ce45ff 100644
--- a/Documentation/networking/gen_stats.txt
+++ b/Documentation/networking/gen_stats.txt
@@ -21,7 +21,7 @@ struct mystruct {
 	...
 };
 
-Update statistics:
+Update statistics, in dequeue() methods only (while owning qdisc->running):
 mystruct->tstats.packet++;
 mystruct->qstats.backlog += skb->pkt_len;
 
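For illustration only (not part of the patch): one common shape of that rule
in a dequeue() method, for a hypothetical "mystruct" qdisc that keeps an
sk_buff queue in its private area. qdisc_bstats_update() and
qdisc_qstats_backlog_dec() are the in-kernel helpers that touch these
counters.

    /* Called while this CPU owns qdisc->running, so the statistics
     * updates below need no extra synchronization.
     */
    static struct sk_buff *mystruct_dequeue(struct Qdisc *sch)
    {
            struct mystruct *q = qdisc_priv(sch);        /* hypothetical priv */
            struct sk_buff *skb = __skb_dequeue(&q->queue);

            if (skb) {
                    qdisc_qstats_backlog_dec(sch, skb);  /* qstats.backlog -= pkt len */
                    qdisc_bstats_update(sch, skb);       /* bstats bytes/packets */
            }
            return skb;
    }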
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index 671fe3dd56d3..e226f8925c9e 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -285,6 +285,7 @@ Please see the following document:
  o mmc_core.c/mmc.h: Management MAC Counters;
  o stmmac_hwtstamp.c: HW timestamp support for PTP;
  o stmmac_ptp.c: PTP 1588 clock;
+ o stmmac_pcs.h: Physical Coding Sublayer common implementation;
  o dwmac-<XXX>.c: these are for the platform glue-logic file; e.g. dwmac-sti.c
    for STMicroelectronics SoCs.
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 1209323b7e43..d8c078491e55 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2460,6 +2460,14 @@ L: netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/broadcom/b44.*
 
+BROADCOM B53 ETHERNET SWITCH DRIVER
+M:	Florian Fainelli <f.fainelli@gmail.com>
+L:	netdev@vger.kernel.org
+L:	openwrt-devel@lists.openwrt.org (subscribers-only)
+S:	Supported
+F:	drivers/net/dsa/b53/*
+F:	include/linux/platform_data/b53.h
+
 BROADCOM GENET ETHERNET DRIVER
 M:	Florian Fainelli <f.fainelli@gmail.com>
 L:	netdev@vger.kernel.org
@@ -2576,12 +2584,11 @@ S: Supported
 F:	drivers/net/ethernet/broadcom/tg3.*
 
 BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
-M:	Brett Rudley <brudley@broadcom.com>
-M:	Arend van Spriel <arend@broadcom.com>
-M:	Franky (Zhenhui) Lin <frankyl@broadcom.com>
-M:	Hante Meuleman <meuleman@broadcom.com>
+M:	Arend van Spriel <arend.vanspriel@broadcom.com>
+M:	Franky Lin <franky.lin@broadcom.com>
+M:	Hante Meuleman <hante.meuleman@broadcom.com>
 L:	linux-wireless@vger.kernel.org
-L:	brcm80211-dev-list@broadcom.com
+L:	brcm80211-dev-list.pdl@broadcom.com
 S:	Supported
 F:	drivers/net/wireless/broadcom/brcm80211/
 
@@ -2813,6 +2820,7 @@ W: https://github.com/linux-can
2813T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git 2820T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
2814T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git 2821T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
2815S: Maintained 2822S: Maintained
2823F: Documentation/devicetree/bindings/net/can/
2816F: drivers/net/can/ 2824F: drivers/net/can/
2817F: include/linux/can/dev.h 2825F: include/linux/can/dev.h
2818F: include/linux/can/platform/ 2826F: include/linux/can/platform/
@@ -4887,6 +4895,13 @@ F: drivers/net/ethernet/freescale/gianfar*
4887X: drivers/net/ethernet/freescale/gianfar_ptp.c 4895X: drivers/net/ethernet/freescale/gianfar_ptp.c
4888F: Documentation/devicetree/bindings/net/fsl-tsec-phy.txt 4896F: Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
4889 4897
4898FREESCALE QUICC ENGINE UCC HDLC DRIVER
4899M: Zhao Qiang <qiang.zhao@nxp.com>
4900L: netdev@vger.kernel.org
4901L: linuxppc-dev@lists.ozlabs.org
4902S: Maintained
4903F: drivers/net/wan/fsl_ucc_hdlc*
4904
4890FREESCALE QUICC ENGINE UCC UART DRIVER 4905FREESCALE QUICC ENGINE UCC UART DRIVER
4891M: Timur Tabi <timur@tabi.org> 4906M: Timur Tabi <timur@tabi.org>
4892L: linuxppc-dev@lists.ozlabs.org 4907L: linuxppc-dev@lists.ozlabs.org
@@ -7173,6 +7188,12 @@ W: http://www.kernel.org/doc/man-pages
7173L: linux-man@vger.kernel.org 7188L: linux-man@vger.kernel.org
7174S: Maintained 7189S: Maintained
7175 7190
7191MARVELL 88E6XXX ETHERNET SWITCH FABRIC DRIVER
7192M: Andrew Lunn <andrew@lunn.ch>
7193M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
7194S: Maintained
7195F: drivers/net/dsa/mv88e6xxx/
7196
7176MARVELL ARMADA DRM SUPPORT 7197MARVELL ARMADA DRM SUPPORT
7177M: Russell King <rmk+kernel@armlinux.org.uk> 7198M: Russell King <rmk+kernel@armlinux.org.uk>
7178S: Maintained 7199S: Maintained
@@ -7180,11 +7201,6 @@ F: drivers/gpu/drm/armada/
7180F: include/uapi/drm/armada_drm.h 7201F: include/uapi/drm/armada_drm.h
7181F: Documentation/devicetree/bindings/display/armada/ 7202F: Documentation/devicetree/bindings/display/armada/
7182 7203
7183MARVELL 88E6352 DSA support
7184M: Guenter Roeck <linux@roeck-us.net>
7185S: Maintained
7186F: drivers/net/dsa/mv88e6352.c
7187
7188MARVELL CRYPTO DRIVER 7204MARVELL CRYPTO DRIVER
7189M: Boris Brezillon <boris.brezillon@free-electrons.com> 7205M: Boris Brezillon <boris.brezillon@free-electrons.com>
7190M: Arnaud Ebalard <arno@natisbad.org> 7206M: Arnaud Ebalard <arno@natisbad.org>
@@ -10281,10 +10297,9 @@ W: http://www.avagotech.com
10281S: Supported 10297S: Supported
10282F: drivers/scsi/be2iscsi/ 10298F: drivers/scsi/be2iscsi/
10283 10299
10284Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER 10300Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net)
10285M: Sathya Perla <sathya.perla@broadcom.com> 10301M: Sathya Perla <sathya.perla@broadcom.com>
10286M: Ajit Khaparde <ajit.khaparde@broadcom.com> 10302M: Ajit Khaparde <ajit.khaparde@broadcom.com>
10287M: Padmanabh Ratnakar <padmanabh.ratnakar@broadcom.com>
10288M: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com> 10303M: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
10289M: Somnath Kotur <somnath.kotur@broadcom.com> 10304M: Somnath Kotur <somnath.kotur@broadcom.com>
10290L: netdev@vger.kernel.org 10305L: netdev@vger.kernel.org
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 52be48bbd2dd..7fa295155543 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -766,7 +766,6 @@
766 ale_entries = <1024>; 766 ale_entries = <1024>;
767 bd_ram_size = <0x2000>; 767 bd_ram_size = <0x2000>;
768 no_bd_ram = <0>; 768 no_bd_ram = <0>;
769 rx_descs = <64>;
770 mac_control = <0x20>; 769 mac_control = <0x20>;
771 slaves = <2>; 770 slaves = <2>;
772 active_slave = <0>; 771 active_slave = <0>;
@@ -789,7 +788,7 @@
789 status = "disabled"; 788 status = "disabled";
790 789
791 davinci_mdio: mdio@4a101000 { 790 davinci_mdio: mdio@4a101000 {
792 compatible = "ti,davinci_mdio"; 791 compatible = "ti,cpsw-mdio","ti,davinci_mdio";
793 #address-cells = <1>; 792 #address-cells = <1>;
794 #size-cells = <0>; 793 #size-cells = <0>;
795 ti,hwmods = "davinci_mdio"; 794 ti,hwmods = "davinci_mdio";
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 12fcde4d4d2e..cd81ecf12731 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -626,7 +626,6 @@
626 ale_entries = <1024>; 626 ale_entries = <1024>;
627 bd_ram_size = <0x2000>; 627 bd_ram_size = <0x2000>;
628 no_bd_ram = <0>; 628 no_bd_ram = <0>;
629 rx_descs = <64>;
630 mac_control = <0x20>; 629 mac_control = <0x20>;
631 slaves = <2>; 630 slaves = <2>;
632 active_slave = <0>; 631 active_slave = <0>;
@@ -636,7 +635,7 @@
636 syscon = <&scm_conf>; 635 syscon = <&scm_conf>;
637 636
638 davinci_mdio: mdio@4a101000 { 637 davinci_mdio: mdio@4a101000 {
639 compatible = "ti,am4372-mdio","ti,davinci_mdio"; 638 compatible = "ti,am4372-mdio","ti,cpsw-mdio","ti,davinci_mdio";
640 reg = <0x4a101000 0x100>; 639 reg = <0x4a101000 0x100>;
641 #address-cells = <1>; 640 #address-cells = <1>;
642 #size-cells = <0>; 641 #size-cells = <0>;
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
index d4537dc61497..f23cae0c2179 100644
--- a/arch/arm/boot/dts/dm814x.dtsi
+++ b/arch/arm/boot/dts/dm814x.dtsi
@@ -509,7 +509,6 @@
509 ale_entries = <1024>; 509 ale_entries = <1024>;
510 bd_ram_size = <0x2000>; 510 bd_ram_size = <0x2000>;
511 no_bd_ram = <0>; 511 no_bd_ram = <0>;
512 rx_descs = <64>;
513 mac_control = <0x20>; 512 mac_control = <0x20>;
514 slaves = <2>; 513 slaves = <2>;
515 active_slave = <0>; 514 active_slave = <0>;
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 3a8f3976f6f9..de559f6e4fee 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1628,7 +1628,6 @@
1628 ale_entries = <1024>; 1628 ale_entries = <1024>;
1629 bd_ram_size = <0x2000>; 1629 bd_ram_size = <0x2000>;
1630 no_bd_ram = <0>; 1630 no_bd_ram = <0>;
1631 rx_descs = <64>;
1632 mac_control = <0x20>; 1631 mac_control = <0x20>;
1633 slaves = <2>; 1632 slaves = <2>;
1634 active_slave = <0>; 1633 active_slave = <0>;
@@ -1663,7 +1662,7 @@
1663 status = "disabled"; 1662 status = "disabled";
1664 1663
1665 davinci_mdio: mdio@48485000 { 1664 davinci_mdio: mdio@48485000 {
1666 compatible = "ti,davinci_mdio"; 1665 compatible = "ti,cpsw-mdio","ti,davinci_mdio";
1667 #address-cells = <1>; 1666 #address-cells = <1>;
1668 #size-cells = <0>; 1667 #size-cells = <0>;
1669 ti,hwmods = "davinci_mdio"; 1668 ti,hwmods = "davinci_mdio";
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 3b44ef3cff12..3ebee530f2b0 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -539,8 +539,9 @@
539 gmac: ethernet@ff290000 { 539 gmac: ethernet@ff290000 {
540 compatible = "rockchip,rk3288-gmac"; 540 compatible = "rockchip,rk3288-gmac";
541 reg = <0xff290000 0x10000>; 541 reg = <0xff290000 0x10000>;
542 interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>; 542 interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
543 interrupt-names = "macirq"; 543 <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
544 interrupt-names = "macirq", "eth_wake_irq";
544 rockchip,grf = <&grf>; 545 rockchip,grf = <&grf>;
545 clocks = <&cru SCLK_MAC>, 546 clocks = <&cru SCLK_MAC>,
546 <&cru SCLK_MAC_RX>, <&cru SCLK_MAC_TX>, 547 <&cru SCLK_MAC_RX>, <&cru SCLK_MAC_TX>,
diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
index 6c60b7f91104..5c1fcab4a6f7 100644
--- a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
+++ b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
@@ -85,187 +85,199 @@
85 reg = <1>; 85 reg = <1>;
86 #address-cells = <1>; 86 #address-cells = <1>;
87 #size-cells = <0>; 87 #size-cells = <0>;
88
89 switch0: switch0@0 {
90 compatible = "marvell,mv88e6085";
91 #address-cells = <1>;
92 #size-cells = <0>;
93 reg = <0>;
94 dsa,member = <0 0>;
95
96 ports {
97 #address-cells = <1>;
98 #size-cells = <0>;
99 port@0 {
100 reg = <0>;
101 label = "lan0";
102 };
103
104 port@1 {
105 reg = <1>;
106 label = "lan1";
107 };
108
109 port@2 {
110 reg = <2>;
111 label = "lan2";
112 };
113
114 switch0port5: port@5 {
115 reg = <5>;
116 label = "dsa";
117 phy-mode = "rgmii-txid";
118 link = <&switch1port6
119 &switch2port9>;
120 fixed-link {
121 speed = <1000>;
122 full-duplex;
123 };
124 };
125
126 port@6 {
127 reg = <6>;
128 label = "cpu";
129 ethernet = <&fec1>;
130 fixed-link {
131 speed = <100>;
132 full-duplex;
133 };
134 };
135 };
136 };
88 }; 137 };
89 138
90 mdio_mux_2: mdio@2 { 139 mdio_mux_2: mdio@2 {
91 reg = <2>; 140 reg = <2>;
92 #address-cells = <1>; 141 #address-cells = <1>;
93 #size-cells = <0>; 142 #size-cells = <0>;
94 };
95
96 mdio_mux_4: mdio@4 {
97 reg = <4>;
98 #address-cells = <1>;
99 #size-cells = <0>;
100 };
101
102 mdio_mux_8: mdio@8 {
103 reg = <8>;
104 #address-cells = <1>;
105 #size-cells = <0>;
106 };
107 };
108
109 dsa {
110 compatible = "marvell,dsa";
111 #address-cells = <2>;
112 #size-cells = <0>;
113 dsa,ethernet = <&fec1>;
114 dsa,mii-bus = <&mdio_mux_1>;
115
116 /* 6352 - Primary - 7 ports */
117 switch0: switch@0-0 {
118 #address-cells = <1>;
119 #size-cells = <0>;
120 reg = <0x00 0>;
121 eeprom-length = <512>;
122 143
123 port@0 { 144 switch1: switch1@0 {
145 compatible = "marvell,mv88e6085";
146 #address-cells = <1>;
147 #size-cells = <0>;
124 reg = <0>; 148 reg = <0>;
125 label = "lan0"; 149 dsa,member = <0 1>;
126 }; 150
127 151 ports {
128 port@1 { 152 #address-cells = <1>;
129 reg = <1>; 153 #size-cells = <0>;
130 label = "lan1"; 154 port@0 {
131 }; 155 reg = <0>;
132 156 label = "lan3";
133 port@2 { 157 phy-handle = <&switch1phy0>;
134 reg = <2>; 158 };
135 label = "lan2"; 159
136 }; 160 port@1 {
137 161 reg = <1>;
138 switch0port5: port@5 { 162 label = "lan4";
139 reg = <5>; 163 phy-handle = <&switch1phy1>;
140 label = "dsa"; 164 };
141 phy-mode = "rgmii-txid"; 165
142 link = <&switch1port6 166 port@2 {
143 &switch2port9>; 167 reg = <2>;
144 168 label = "lan5";
145 fixed-link { 169 phy-handle = <&switch1phy2>;
146 speed = <1000>; 170 };
147 full-duplex; 171
172 switch1port5: port@5 {
173 reg = <5>;
174 label = "dsa";
175 link = <&switch2port9>;
176 phy-mode = "rgmii-txid";
177 fixed-link {
178 speed = <1000>;
179 full-duplex;
180 };
181 };
182
183 switch1port6: port@6 {
184 reg = <6>;
185 label = "dsa";
186 phy-mode = "rgmii-txid";
187 link = <&switch0port5>;
188 fixed-link {
189 speed = <1000>;
190 full-duplex;
191 };
192 };
148 }; 193 };
149 }; 194 mdio {
150 195 #address-cells = <1>;
151 port@6 { 196 #size-cells = <0>;
152 reg = <6>; 197 switch1phy0: switch1phy0@0 {
153 label = "cpu"; 198 reg = <0>;
154 199 };
155 fixed-link { 200 switch1phy1: switch1phy0@1 {
156 speed = <100>; 201 reg = <1>;
157 full-duplex; 202 };
203 switch1phy2: switch1phy0@2 {
204 reg = <2>;
205 };
158 }; 206 };
159 }; 207 };
160
161 }; 208 };
162 209
163 /* 6352 - Secondary - 7 ports */ 210 mdio_mux_4: mdio@4 {
164 switch1: switch@0-1 {
165 #address-cells = <1>; 211 #address-cells = <1>;
166 #size-cells = <0>; 212 #size-cells = <0>;
167 reg = <0x00 1>; 213 reg = <4>;
168 eeprom-length = <512>;
169 mii-bus = <&mdio_mux_2>;
170 214
171 port@0 { 215 switch2: switch2@0 {
216 compatible = "marvell,mv88e6085";
217 #address-cells = <1>;
218 #size-cells = <0>;
172 reg = <0>; 219 reg = <0>;
173 label = "lan3"; 220 dsa,member = <0 2>;
174 }; 221
175 222 ports {
176 port@1 { 223 #address-cells = <1>;
177 reg = <1>; 224 #size-cells = <0>;
178 label = "lan4"; 225 port@0 {
179 }; 226 reg = <0>;
180 227 label = "lan6";
181 port@2 { 228 };
182 reg = <2>; 229
183 label = "lan5"; 230 port@1 {
184 }; 231 reg = <1>;
185 232 label = "lan7";
186 switch1port5: port@5 { 233 };
187 reg = <5>; 234
188 label = "dsa"; 235 port@2 {
189 link = <&switch2port9>; 236 reg = <2>;
190 phy-mode = "rgmii-txid"; 237 label = "lan8";
191 238 };
192 fixed-link { 239
193 speed = <1000>; 240 port@3 {
194 full-duplex; 241 reg = <3>;
195 }; 242 label = "optical3";
196 }; 243 fixed-link {
197 244 speed = <1000>;
198 switch1port6: port@6 { 245 full-duplex;
199 reg = <6>; 246 link-gpios = <&gpio6 2
200 label = "dsa"; 247 GPIO_ACTIVE_HIGH>;
201 phy-mode = "rgmii-txid"; 248 };
202 link = <&switch0port5>; 249 };
203 250
204 fixed-link { 251 port@4 {
205 speed = <1000>; 252 reg = <4>;
206 full-duplex; 253 label = "optical4";
254 fixed-link {
255 speed = <1000>;
256 full-duplex;
257 link-gpios = <&gpio6 3
258 GPIO_ACTIVE_HIGH>;
259 };
260 };
261
262 switch2port9: port@9 {
263 reg = <9>;
264 label = "dsa";
265 phy-mode = "rgmii-txid";
266 link = <&switch1port5
267 &switch0port5>;
268 fixed-link {
269 speed = <1000>;
270 full-duplex;
271 };
272 };
207 }; 273 };
208 }; 274 };
209 }; 275 };
210 276
211 /* 6185 - 10 ports */ 277 mdio_mux_8: mdio@8 {
212 switch2: switch@0-2 { 278 reg = <8>;
213 #address-cells = <1>; 279 #address-cells = <1>;
214 #size-cells = <0>; 280 #size-cells = <0>;
215 reg = <0x00 2>;
216 mii-bus = <&mdio_mux_4>;
217
218 port@0 {
219 reg = <0>;
220 label = "lan6";
221 };
222
223 port@1 {
224 reg = <1>;
225 label = "lan7";
226 };
227
228 port@2 {
229 reg = <2>;
230 label = "lan8";
231 };
232
233 port@3 {
234 reg = <3>;
235 label = "optical3";
236
237 fixed-link {
238 speed = <1000>;
239 full-duplex;
240 link-gpios = <&gpio6 2
241 GPIO_ACTIVE_HIGH>;
242 };
243 };
244
245 port@4 {
246 reg = <4>;
247 label = "optical4";
248
249 fixed-link {
250 speed = <1000>;
251 full-duplex;
252 link-gpios = <&gpio6 3
253 GPIO_ACTIVE_HIGH>;
254 };
255 };
256
257 switch2port9: port@9 {
258 reg = <9>;
259 label = "dsa";
260 phy-mode = "rgmii-txid";
261 link = <&switch1port5
262 &switch0port5>;
263
264 fixed-link {
265 speed = <1000>;
266 full-duplex;
267 };
268 };
269 }; 281 };
270 }; 282 };
271 283
diff --git a/arch/arm64/boot/dts/broadcom/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
index 54ca40c9f711..ea5603fd106a 100644
--- a/arch/arm64/boot/dts/broadcom/ns2-svk.dts
+++ b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
@@ -52,6 +52,14 @@
52 }; 52 };
53}; 53};
54 54
55&pci_phy0 {
56 status = "ok";
57};
58
59&pci_phy1 {
60 status = "ok";
61};
62
55&pcie0 { 63&pcie0 {
56 status = "ok"; 64 status = "ok";
57}; 65};
@@ -132,3 +140,11 @@
132 #size-cells = <1>; 140 #size-cells = <1>;
133 }; 141 };
134}; 142};
143
144&mdio_mux_iproc {
145 mdio@10 {
146 gphy0: eth-phy@10 {
147 reg = <0x10>;
148 };
149 };
150};
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index ec68ec1a80c8..46b78fa89f4c 100644
--- a/arch/arm64/boot/dts/broadcom/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -263,6 +263,45 @@
263 IRQ_TYPE_LEVEL_HIGH)>; 263 IRQ_TYPE_LEVEL_HIGH)>;
264 }; 264 };
265 265
266 mdio_mux_iproc: mdio-mux@6602023c {
267 compatible = "brcm,mdio-mux-iproc";
268 reg = <0x6602023c 0x14>;
269 #address-cells = <1>;
270 #size-cells = <0>;
271
272 mdio@0 {
273 reg = <0x0>;
274 #address-cells = <1>;
275 #size-cells = <0>;
276
277 pci_phy0: pci-phy@0 {
278 compatible = "brcm,ns2-pcie-phy";
279 reg = <0x0>;
280 #phy-cells = <0>;
281 status = "disabled";
282 };
283 };
284
285 mdio@7 {
286 reg = <0x7>;
287 #address-cells = <1>;
288 #size-cells = <0>;
289
290 pci_phy1: pci-phy@0 {
291 compatible = "brcm,ns2-pcie-phy";
292 reg = <0x0>;
293 #phy-cells = <0>;
294 status = "disabled";
295 };
296 };
297
298 mdio@10 {
299 reg = <0x10>;
300 #address-cells = <1>;
301 #size-cells = <0>;
302 };
303 };
304
266 timer0: timer@66030000 { 305 timer0: timer@66030000 {
267 compatible = "arm,sp804", "arm,primecell"; 306 compatible = "arm,sp804", "arm,primecell";
268 reg = <0x66030000 0x1000>; 307 reg = <0x66030000 0x1000>;
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index aee5637ea436..7c16e547ccb2 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * BPF JIT compiler for ARM64 2 * BPF JIT compiler for ARM64
3 * 3 *
4 * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com> 4 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
@@ -55,6 +55,7 @@
55#define A64_BL(imm26) A64_BRANCH((imm26) << 2, LINK) 55#define A64_BL(imm26) A64_BRANCH((imm26) << 2, LINK)
56 56
57/* Unconditional branch (register) */ 57/* Unconditional branch (register) */
58#define A64_BR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_NOLINK)
58#define A64_BLR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_LINK) 59#define A64_BLR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_LINK)
59#define A64_RET(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_RETURN) 60#define A64_RET(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_RETURN)
60 61
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 49ba37e4bfc0..b2fc97a2c56c 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -18,6 +18,7 @@
18 18
19#define pr_fmt(fmt) "bpf_jit: " fmt 19#define pr_fmt(fmt) "bpf_jit: " fmt
20 20
21#include <linux/bpf.h>
21#include <linux/filter.h> 22#include <linux/filter.h>
22#include <linux/printk.h> 23#include <linux/printk.h>
23#include <linux/skbuff.h> 24#include <linux/skbuff.h>
@@ -33,6 +34,7 @@ int bpf_jit_enable __read_mostly;
33 34
34#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) 35#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
35#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) 36#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
37#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
36 38
37/* Map BPF registers to A64 registers */ 39/* Map BPF registers to A64 registers */
38static const int bpf2a64[] = { 40static const int bpf2a64[] = {
@@ -54,6 +56,8 @@ static const int bpf2a64[] = {
54 /* temporary registers for internal BPF JIT */ 56 /* temporary registers for internal BPF JIT */
55 [TMP_REG_1] = A64_R(10), 57 [TMP_REG_1] = A64_R(10),
56 [TMP_REG_2] = A64_R(11), 58 [TMP_REG_2] = A64_R(11),
59 /* tail_call_cnt */
60 [TCALL_CNT] = A64_R(26),
57 /* temporary register for blinding constants */ 61 /* temporary register for blinding constants */
58 [BPF_REG_AX] = A64_R(9), 62 [BPF_REG_AX] = A64_R(9),
59}; 63};
@@ -146,13 +150,18 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
146 150
147#define STACK_SIZE STACK_ALIGN(_STACK_SIZE) 151#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
148 152
149static void build_prologue(struct jit_ctx *ctx) 153#define PROLOGUE_OFFSET 8
154
155static int build_prologue(struct jit_ctx *ctx)
150{ 156{
151 const u8 r6 = bpf2a64[BPF_REG_6]; 157 const u8 r6 = bpf2a64[BPF_REG_6];
152 const u8 r7 = bpf2a64[BPF_REG_7]; 158 const u8 r7 = bpf2a64[BPF_REG_7];
153 const u8 r8 = bpf2a64[BPF_REG_8]; 159 const u8 r8 = bpf2a64[BPF_REG_8];
154 const u8 r9 = bpf2a64[BPF_REG_9]; 160 const u8 r9 = bpf2a64[BPF_REG_9];
155 const u8 fp = bpf2a64[BPF_REG_FP]; 161 const u8 fp = bpf2a64[BPF_REG_FP];
162 const u8 tcc = bpf2a64[TCALL_CNT];
163 const int idx0 = ctx->idx;
164 int cur_offset;
156 165
157 /* 166 /*
158 * BPF prog stack layout 167 * BPF prog stack layout
@@ -162,8 +171,6 @@ static void build_prologue(struct jit_ctx *ctx)
162 * |FP/LR| 171 * |FP/LR|
163 * current A64_FP => -16:+-----+ 172 * current A64_FP => -16:+-----+
164 * | ... | callee saved registers 173 * | ... | callee saved registers
165 * +-----+
166 * | | x25/x26
167 * BPF fp register => -64:+-----+ <= (BPF_FP) 174 * BPF fp register => -64:+-----+ <= (BPF_FP)
168 * | | 175 * | |
169 * | ... | BPF prog stack 176 * | ... | BPF prog stack
@@ -183,18 +190,90 @@ static void build_prologue(struct jit_ctx *ctx)
183 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); 190 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
184 emit(A64_MOV(1, A64_FP, A64_SP), ctx); 191 emit(A64_MOV(1, A64_FP, A64_SP), ctx);
185 192
186 /* Save callee-saved register */ 193 /* Save callee-saved registers */
187 emit(A64_PUSH(r6, r7, A64_SP), ctx); 194 emit(A64_PUSH(r6, r7, A64_SP), ctx);
188 emit(A64_PUSH(r8, r9, A64_SP), ctx); 195 emit(A64_PUSH(r8, r9, A64_SP), ctx);
196 emit(A64_PUSH(fp, tcc, A64_SP), ctx);
189 197
190 /* Save fp (x25) and x26. SP requires 16 bytes alignment */ 198 /* Set up BPF prog stack base register */
191 emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx);
192
193 /* Set up BPF prog stack base register (x25) */
194 emit(A64_MOV(1, fp, A64_SP), ctx); 199 emit(A64_MOV(1, fp, A64_SP), ctx);
195 200
201 /* Initialize tail_call_cnt */
202 emit(A64_MOVZ(1, tcc, 0, 0), ctx);
203
196 /* Set up function call stack */ 204 /* Set up function call stack */
197 emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx); 205 emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
206
207 cur_offset = ctx->idx - idx0;
208 if (cur_offset != PROLOGUE_OFFSET) {
209 pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
210 cur_offset, PROLOGUE_OFFSET);
211 return -1;
212 }
213 return 0;
214}
215
216static int out_offset = -1; /* initialized on the first pass of build_body() */
217static int emit_bpf_tail_call(struct jit_ctx *ctx)
218{
219 /* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
220 const u8 r2 = bpf2a64[BPF_REG_2];
221 const u8 r3 = bpf2a64[BPF_REG_3];
222
223 const u8 tmp = bpf2a64[TMP_REG_1];
224 const u8 prg = bpf2a64[TMP_REG_2];
225 const u8 tcc = bpf2a64[TCALL_CNT];
226 const int idx0 = ctx->idx;
227#define cur_offset (ctx->idx - idx0)
228#define jmp_offset (out_offset - (cur_offset))
229 size_t off;
230
231 /* if (index >= array->map.max_entries)
232 * goto out;
233 */
234 off = offsetof(struct bpf_array, map.max_entries);
235 emit_a64_mov_i64(tmp, off, ctx);
236 emit(A64_LDR32(tmp, r2, tmp), ctx);
237 emit(A64_CMP(0, r3, tmp), ctx);
238 emit(A64_B_(A64_COND_GE, jmp_offset), ctx);
239
240 /* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
241 * goto out;
242 * tail_call_cnt++;
243 */
244 emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
245 emit(A64_CMP(1, tcc, tmp), ctx);
246 emit(A64_B_(A64_COND_GT, jmp_offset), ctx);
247 emit(A64_ADD_I(1, tcc, tcc, 1), ctx);
248
249 /* prog = array->ptrs[index];
250 * if (prog == NULL)
251 * goto out;
252 */
253 off = offsetof(struct bpf_array, ptrs);
254 emit_a64_mov_i64(tmp, off, ctx);
255 emit(A64_LDR64(tmp, r2, tmp), ctx);
256 emit(A64_LDR64(prg, tmp, r3), ctx);
257 emit(A64_CBZ(1, prg, jmp_offset), ctx);
258
259 /* goto *(prog->bpf_func + prologue_size); */
260 off = offsetof(struct bpf_prog, bpf_func);
261 emit_a64_mov_i64(tmp, off, ctx);
262 emit(A64_LDR64(tmp, prg, tmp), ctx);
263 emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
264 emit(A64_BR(tmp), ctx);
265
266 /* out: */
267 if (out_offset == -1)
268 out_offset = cur_offset;
269 if (cur_offset != out_offset) {
270 pr_err_once("tail_call out_offset = %d, expected %d!\n",
271 cur_offset, out_offset);
272 return -1;
273 }
274 return 0;
275#undef cur_offset
276#undef jmp_offset
198} 277}
199 278
200static void build_epilogue(struct jit_ctx *ctx) 279static void build_epilogue(struct jit_ctx *ctx)
@@ -499,13 +578,15 @@ emit_cond_jmp:
499 const u64 func = (u64)__bpf_call_base + imm; 578 const u64 func = (u64)__bpf_call_base + imm;
500 579
501 emit_a64_mov_i64(tmp, func, ctx); 580 emit_a64_mov_i64(tmp, func, ctx);
502 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
503 emit(A64_MOV(1, A64_FP, A64_SP), ctx);
504 emit(A64_BLR(tmp), ctx); 581 emit(A64_BLR(tmp), ctx);
505 emit(A64_MOV(1, r0, A64_R(0)), ctx); 582 emit(A64_MOV(1, r0, A64_R(0)), ctx);
506 emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
507 break; 583 break;
508 } 584 }
585 /* tail call */
586 case BPF_JMP | BPF_CALL | BPF_X:
587 if (emit_bpf_tail_call(ctx))
588 return -EFAULT;
589 break;
509 /* function return */ 590 /* function return */
510 case BPF_JMP | BPF_EXIT: 591 case BPF_JMP | BPF_EXIT:
511 /* Optimization: when last instruction is EXIT, 592 /* Optimization: when last instruction is EXIT,
@@ -650,11 +731,8 @@ emit_cond_jmp:
650 emit_a64_mov_i64(r3, size, ctx); 731 emit_a64_mov_i64(r3, size, ctx);
651 emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx); 732 emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
652 emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx); 733 emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
653 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
654 emit(A64_MOV(1, A64_FP, A64_SP), ctx);
655 emit(A64_BLR(r5), ctx); 734 emit(A64_BLR(r5), ctx);
656 emit(A64_MOV(1, r0, A64_R(0)), ctx); 735 emit(A64_MOV(1, r0, A64_R(0)), ctx);
657 emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
658 736
659 jmp_offset = epilogue_offset(ctx); 737 jmp_offset = epilogue_offset(ctx);
660 check_imm19(jmp_offset); 738 check_imm19(jmp_offset);
@@ -780,7 +858,10 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
780 goto out_off; 858 goto out_off;
781 } 859 }
782 860
783 build_prologue(&ctx); 861 if (build_prologue(&ctx)) {
862 prog = orig_prog;
863 goto out_off;
864 }
784 865
785 ctx.epilogue_offset = ctx.idx; 866 ctx.epilogue_offset = ctx.idx;
786 build_epilogue(&ctx); 867 build_epilogue(&ctx);
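
Restated as pseudo-C (assembled from the comments in the hunk above; the
computed goto is notation, not compilable code), the emitted tail-call
sequence is:

	if (index >= array->map.max_entries)
		goto out;
	if (tail_call_cnt > MAX_TAIL_CALL_CNT)
		goto out;
	tail_call_cnt++;

	prog = array->ptrs[index];
	if (prog == NULL)
		goto out;

	/* enter the target program just past its fixed-size prologue,
	 * reusing the current stack frame and the live tail_call_cnt
	 * register (x26)
	 */
	goto *(prog->bpf_func + sizeof(u32) * PROLOGUE_OFFSET);
out:
	/* fall through to the next instruction of the calling program */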
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index a2e0ed6c9a4d..32f34511c416 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -445,32 +445,32 @@ void divasa_unmap_pci_bar(void __iomem *bar)
445/********************************************************* 445/*********************************************************
446 ** I/O port access 446 ** I/O port access
447 *********************************************************/ 447 *********************************************************/
448byte __inline__ inpp(void __iomem *addr) 448inline byte inpp(void __iomem *addr)
449{ 449{
450 return (inb((unsigned long) addr)); 450 return (inb((unsigned long) addr));
451} 451}
452 452
453word __inline__ inppw(void __iomem *addr) 453inline word inppw(void __iomem *addr)
454{ 454{
455 return (inw((unsigned long) addr)); 455 return (inw((unsigned long) addr));
456} 456}
457 457
458void __inline__ inppw_buffer(void __iomem *addr, void *P, int length) 458inline void inppw_buffer(void __iomem *addr, void *P, int length)
459{ 459{
460 insw((unsigned long) addr, (word *) P, length >> 1); 460 insw((unsigned long) addr, (word *) P, length >> 1);
461} 461}
462 462
463void __inline__ outppw_buffer(void __iomem *addr, void *P, int length) 463inline void outppw_buffer(void __iomem *addr, void *P, int length)
464{ 464{
465 outsw((unsigned long) addr, (word *) P, length >> 1); 465 outsw((unsigned long) addr, (word *) P, length >> 1);
466} 466}
467 467
468void __inline__ outppw(void __iomem *addr, word w) 468inline void outppw(void __iomem *addr, word w)
469{ 469{
470 outw(w, (unsigned long) addr); 470 outw(w, (unsigned long) addr);
471} 471}
472 472
473void __inline__ outpp(void __iomem *addr, word p) 473inline void outpp(void __iomem *addr, word p)
474{ 474{
475 outb(p, (unsigned long) addr); 475 outb(p, (unsigned long) addr);
476} 476}
diff --git a/drivers/isdn/hardware/eicon/platform.h b/drivers/isdn/hardware/eicon/platform.h
index b2edb7590dda..62e2073c3690 100644
--- a/drivers/isdn/hardware/eicon/platform.h
+++ b/drivers/isdn/hardware/eicon/platform.h
@@ -203,7 +203,7 @@ void PCIread(byte bus, byte func, int offset, void *data, int length, void *pci_
203/* 203/*
204** I/O Port utilities 204** I/O Port utilities
205*/ 205*/
206int diva_os_register_io_port(void *adapter, int register, unsigned long port, 206int diva_os_register_io_port(void *adapter, int reg, unsigned long port,
207 unsigned long length, const char *name, int id); 207 unsigned long length, const char *name, int id);
208/* 208/*
209** I/O port access abstraction 209** I/O port access abstraction
@@ -271,13 +271,13 @@ void diva_os_get_time(dword *sec, dword *usec);
271** atomic operation, fake because we use threads 271** atomic operation, fake because we use threads
272*/ 272*/
273typedef int diva_os_atomic_t; 273typedef int diva_os_atomic_t;
274static diva_os_atomic_t __inline__ 274static inline diva_os_atomic_t
275diva_os_atomic_increment(diva_os_atomic_t *pv) 275diva_os_atomic_increment(diva_os_atomic_t *pv)
276{ 276{
277 *pv += 1; 277 *pv += 1;
278 return (*pv); 278 return (*pv);
279} 279}
280static diva_os_atomic_t __inline__ 280static inline diva_os_atomic_t
281diva_os_atomic_decrement(diva_os_atomic_t *pv) 281diva_os_atomic_decrement(diva_os_atomic_t *pv)
282{ 282{
283 *pv -= 1; 283 *pv -= 1;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 941ec99cd3b6..90157e20357e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4607,26 +4607,6 @@ static int bond_check_params(struct bond_params *params)
4607 return 0; 4607 return 0;
4608} 4608}
4609 4609
4610static struct lock_class_key bonding_netdev_xmit_lock_key;
4611static struct lock_class_key bonding_netdev_addr_lock_key;
4612static struct lock_class_key bonding_tx_busylock_key;
4613
4614static void bond_set_lockdep_class_one(struct net_device *dev,
4615 struct netdev_queue *txq,
4616 void *_unused)
4617{
4618 lockdep_set_class(&txq->_xmit_lock,
4619 &bonding_netdev_xmit_lock_key);
4620}
4621
4622static void bond_set_lockdep_class(struct net_device *dev)
4623{
4624 lockdep_set_class(&dev->addr_list_lock,
4625 &bonding_netdev_addr_lock_key);
4626 netdev_for_each_tx_queue(dev, bond_set_lockdep_class_one, NULL);
4627 dev->qdisc_tx_busylock = &bonding_tx_busylock_key;
4628}
4629
4630/* Called from registration process */ 4610/* Called from registration process */
4631static int bond_init(struct net_device *bond_dev) 4611static int bond_init(struct net_device *bond_dev)
4632{ 4612{
@@ -4639,7 +4619,7 @@ static int bond_init(struct net_device *bond_dev)
4639 if (!bond->wq) 4619 if (!bond->wq)
4640 return -ENOMEM; 4620 return -ENOMEM;
4641 4621
4642 bond_set_lockdep_class(bond_dev); 4622 netdev_lockdep_set_classes(bond_dev);
4643 4623
4644 list_add_tail(&bond->bond_list, &bn->dev_list); 4624 list_add_tail(&bond->bond_list, &bn->dev_list);
4645 4625
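
For reference, a minimal sketch of the consolidated pattern in a hypothetical
virtual driver (foo_init() is an illustrative name): the single core helper
covers the addr_list_lock, per-queue _xmit_lock and tx busylock classes that
the removed boilerplate above set up by hand.

	#include <linux/netdevice.h>

	static int foo_init(struct net_device *dev)
	{
		/* replaces per-driver lockdep key plumbing */
		netdev_lockdep_set_classes(dev);
		return 0;
	}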
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 0d40aef928e2..22570ea3a8d2 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -104,16 +104,6 @@ config CAN_JANZ_ICAN3
104 This driver can also be built as a module. If so, the module will be 104 This driver can also be built as a module. If so, the module will be
105 called janz-ican3.ko. 105 called janz-ican3.ko.
106 106
107config CAN_RCAR
108 tristate "Renesas R-Car CAN controller"
109 depends on ARCH_RENESAS || ARM
110 ---help---
111 Say Y here if you want to use CAN controller found on Renesas R-Car
112 SoCs.
113
114 To compile this driver as a module, choose M here: the module will
115 be called rcar_can.
116
117config CAN_SUN4I 107config CAN_SUN4I
118 tristate "Allwinner A10 CAN controller" 108 tristate "Allwinner A10 CAN controller"
119 depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST 109 depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
@@ -152,6 +142,7 @@ source "drivers/net/can/cc770/Kconfig"
152source "drivers/net/can/ifi_canfd/Kconfig" 142source "drivers/net/can/ifi_canfd/Kconfig"
153source "drivers/net/can/m_can/Kconfig" 143source "drivers/net/can/m_can/Kconfig"
154source "drivers/net/can/mscan/Kconfig" 144source "drivers/net/can/mscan/Kconfig"
145source "drivers/net/can/rcar/Kconfig"
155source "drivers/net/can/sja1000/Kconfig" 146source "drivers/net/can/sja1000/Kconfig"
156source "drivers/net/can/softing/Kconfig" 147source "drivers/net/can/softing/Kconfig"
157source "drivers/net/can/spi/Kconfig" 148source "drivers/net/can/spi/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index e3db0c807f55..26ba4b794a0b 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -10,6 +10,7 @@ can-dev-y := dev.o
10 10
11can-dev-$(CONFIG_CAN_LEDS) += led.o 11can-dev-$(CONFIG_CAN_LEDS) += led.o
12 12
13obj-y += rcar/
13obj-y += spi/ 14obj-y += spi/
14obj-y += usb/ 15obj-y += usb/
15obj-y += softing/ 16obj-y += softing/
@@ -24,7 +25,6 @@ obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/
24obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o 25obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
25obj-$(CONFIG_CAN_MSCAN) += mscan/ 26obj-$(CONFIG_CAN_MSCAN) += mscan/
26obj-$(CONFIG_CAN_M_CAN) += m_can/ 27obj-$(CONFIG_CAN_M_CAN) += m_can/
27obj-$(CONFIG_CAN_RCAR) += rcar_can.o
28obj-$(CONFIG_CAN_SJA1000) += sja1000/ 28obj-$(CONFIG_CAN_SJA1000) += sja1000/
29obj-$(CONFIG_CAN_SUN4I) += sun4i_can.o 29obj-$(CONFIG_CAN_SUN4I) += sun4i_can.o
30obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o 30obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index ad535a854e5c..e21f7cc5ae4d 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -69,6 +69,7 @@ EXPORT_SYMBOL_GPL(can_len2dlc);
69 69
70#ifdef CONFIG_CAN_CALC_BITTIMING 70#ifdef CONFIG_CAN_CALC_BITTIMING
71#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */ 71#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
72#define CAN_CALC_SYNC_SEG 1
72 73
73/* 74/*
74 * Bit-timing calculation derived from: 75 * Bit-timing calculation derived from:
@@ -83,98 +84,126 @@ EXPORT_SYMBOL_GPL(can_len2dlc);
83 * registers of the CAN controller. You can find more information 84 * registers of the CAN controller. You can find more information
84 * in the header file linux/can/netlink.h. 85 * in the header file linux/can/netlink.h.
85 */ 86 */
86static int can_update_spt(const struct can_bittiming_const *btc, 87static int can_update_sample_point(const struct can_bittiming_const *btc,
87 int sampl_pt, int tseg, int *tseg1, int *tseg2) 88 unsigned int sample_point_nominal, unsigned int tseg,
89 unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
90 unsigned int *sample_point_error_ptr)
88{ 91{
89 *tseg2 = tseg + 1 - (sampl_pt * (tseg + 1)) / 1000; 92 unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
90 if (*tseg2 < btc->tseg2_min) 93 unsigned int sample_point, best_sample_point = 0;
91 *tseg2 = btc->tseg2_min; 94 unsigned int tseg1, tseg2;
92 if (*tseg2 > btc->tseg2_max) 95 int i;
93 *tseg2 = btc->tseg2_max; 96
94 *tseg1 = tseg - *tseg2; 97 for (i = 0; i <= 1; i++) {
95 if (*tseg1 > btc->tseg1_max) { 98 tseg2 = tseg + CAN_CALC_SYNC_SEG - (sample_point_nominal * (tseg + CAN_CALC_SYNC_SEG)) / 1000 - i;
96 *tseg1 = btc->tseg1_max; 99 tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
97 *tseg2 = tseg - *tseg1; 100 tseg1 = tseg - tseg2;
101 if (tseg1 > btc->tseg1_max) {
102 tseg1 = btc->tseg1_max;
103 tseg2 = tseg - tseg1;
104 }
105
106 sample_point = 1000 * (tseg + CAN_CALC_SYNC_SEG - tseg2) / (tseg + CAN_CALC_SYNC_SEG);
107 sample_point_error = abs(sample_point_nominal - sample_point);
108
109 if ((sample_point <= sample_point_nominal) && (sample_point_error < best_sample_point_error)) {
110 best_sample_point = sample_point;
111 best_sample_point_error = sample_point_error;
112 *tseg1_ptr = tseg1;
113 *tseg2_ptr = tseg2;
114 }
98 } 115 }
99 return 1000 * (tseg + 1 - *tseg2) / (tseg + 1); 116
117 if (sample_point_error_ptr)
118 *sample_point_error_ptr = best_sample_point_error;
119
120 return best_sample_point;
100} 121}
101 122
102static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, 123static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
103 const struct can_bittiming_const *btc) 124 const struct can_bittiming_const *btc)
104{ 125{
105 struct can_priv *priv = netdev_priv(dev); 126 struct can_priv *priv = netdev_priv(dev);
106 long best_error = 1000000000, error = 0; 127 unsigned int bitrate; /* current bitrate */
107 int best_tseg = 0, best_brp = 0, brp = 0; 128 unsigned int bitrate_error; /* difference between current and nominal value */
108 int tsegall, tseg = 0, tseg1 = 0, tseg2 = 0; 129 unsigned int best_bitrate_error = UINT_MAX;
109 int spt_error = 1000, spt = 0, sampl_pt; 130 unsigned int sample_point_error; /* difference between current and nominal value */
110 long rate; 131 unsigned int best_sample_point_error = UINT_MAX;
132 unsigned int sample_point_nominal; /* nominal sample point */
133 unsigned int best_tseg = 0; /* current best value for tseg */
134 unsigned int best_brp = 0; /* current best value for brp */
135 unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
111 u64 v64; 136 u64 v64;
112 137
113 /* Use CiA recommended sample points */ 138 /* Use CiA recommended sample points */
114 if (bt->sample_point) { 139 if (bt->sample_point) {
115 sampl_pt = bt->sample_point; 140 sample_point_nominal = bt->sample_point;
116 } else { 141 } else {
117 if (bt->bitrate > 800000) 142 if (bt->bitrate > 800000)
118 sampl_pt = 750; 143 sample_point_nominal = 750;
119 else if (bt->bitrate > 500000) 144 else if (bt->bitrate > 500000)
120 sampl_pt = 800; 145 sample_point_nominal = 800;
121 else 146 else
122 sampl_pt = 875; 147 sample_point_nominal = 875;
123 } 148 }
124 149
125 /* tseg even = round down, odd = round up */ 150 /* tseg even = round down, odd = round up */
126 for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1; 151 for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
127 tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) { 152 tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
128 tsegall = 1 + tseg / 2; 153 tsegall = CAN_CALC_SYNC_SEG + tseg / 2;
154
129 /* Compute all possible tseg choices (tseg=tseg1+tseg2) */ 155 /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
130 brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2; 156 brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
131 /* chose brp step which is possible in system */ 157
158 /* choose brp step which is possible in system */
132 brp = (brp / btc->brp_inc) * btc->brp_inc; 159 brp = (brp / btc->brp_inc) * btc->brp_inc;
133 if ((brp < btc->brp_min) || (brp > btc->brp_max)) 160 if ((brp < btc->brp_min) || (brp > btc->brp_max))
134 continue; 161 continue;
135 rate = priv->clock.freq / (brp * tsegall); 162
136 error = bt->bitrate - rate; 163 bitrate = priv->clock.freq / (brp * tsegall);
164 bitrate_error = abs(bt->bitrate - bitrate);
165
137 /* tseg brp biterror */ 166 /* tseg brp biterror */
138 if (error < 0) 167 if (bitrate_error > best_bitrate_error)
139 error = -error;
140 if (error > best_error)
141 continue; 168 continue;
142 best_error = error; 169
143 if (error == 0) { 170 /* reset sample point error if we have a better bitrate */
144 spt = can_update_spt(btc, sampl_pt, tseg / 2, 171 if (bitrate_error < best_bitrate_error)
145 &tseg1, &tseg2); 172 best_sample_point_error = UINT_MAX;
146 error = sampl_pt - spt; 173
147 if (error < 0) 174 can_update_sample_point(btc, sample_point_nominal, tseg / 2, &tseg1, &tseg2, &sample_point_error);
148 error = -error; 175 if (sample_point_error > best_sample_point_error)
149 if (error > spt_error) 176 continue;
150 continue; 177
151 spt_error = error; 178 best_sample_point_error = sample_point_error;
152 } 179 best_bitrate_error = bitrate_error;
153 best_tseg = tseg / 2; 180 best_tseg = tseg / 2;
154 best_brp = brp; 181 best_brp = brp;
155 if (error == 0) 182
183 if (bitrate_error == 0 && sample_point_error == 0)
156 break; 184 break;
157 } 185 }
158 186
159 if (best_error) { 187 if (best_bitrate_error) {
160 /* Error in one-tenth of a percent */ 188 /* Error in one-tenth of a percent */
161 error = (best_error * 1000) / bt->bitrate; 189 v64 = (u64)best_bitrate_error * 1000;
162 if (error > CAN_CALC_MAX_ERROR) { 190 do_div(v64, bt->bitrate);
191 bitrate_error = (u32)v64;
192 if (bitrate_error > CAN_CALC_MAX_ERROR) {
163 netdev_err(dev, 193 netdev_err(dev,
164 "bitrate error %ld.%ld%% too high\n", 194 "bitrate error %d.%d%% too high\n",
165 error / 10, error % 10); 195 bitrate_error / 10, bitrate_error % 10);
166 return -EDOM; 196 return -EDOM;
167 } else {
168 netdev_warn(dev, "bitrate error %ld.%ld%%\n",
169 error / 10, error % 10);
170 } 197 }
198 netdev_warn(dev, "bitrate error %d.%d%%\n",
199 bitrate_error / 10, bitrate_error % 10);
171 } 200 }
172 201
173 /* real sample point */ 202 /* real sample point */
174 bt->sample_point = can_update_spt(btc, sampl_pt, best_tseg, 203 bt->sample_point = can_update_sample_point(btc, sample_point_nominal, best_tseg,
175 &tseg1, &tseg2); 204 &tseg1, &tseg2, NULL);
176 205
177 v64 = (u64)best_brp * 1000000000UL; 206 v64 = (u64)best_brp * 1000 * 1000 * 1000;
178 do_div(v64, priv->clock.freq); 207 do_div(v64, priv->clock.freq);
179 bt->tq = (u32)v64; 208 bt->tq = (u32)v64;
180 bt->prop_seg = tseg1 / 2; 209 bt->prop_seg = tseg1 / 2;
@@ -182,9 +211,9 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
182 bt->phase_seg2 = tseg2; 211 bt->phase_seg2 = tseg2;
183 212
184 /* check for sjw user settings */ 213 /* check for sjw user settings */
185 if (!bt->sjw || !btc->sjw_max) 214 if (!bt->sjw || !btc->sjw_max) {
186 bt->sjw = 1; 215 bt->sjw = 1;
187 else { 216 } else {
188 /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */ 217 /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
189 if (bt->sjw > btc->sjw_max) 218 if (bt->sjw > btc->sjw_max)
190 bt->sjw = btc->sjw_max; 219 bt->sjw = btc->sjw_max;
@@ -194,8 +223,9 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
194 } 223 }
195 224
196 bt->brp = best_brp; 225 bt->brp = best_brp;
197 /* real bit-rate */ 226
198 bt->bitrate = priv->clock.freq / (bt->brp * (tseg1 + tseg2 + 1)); 227 /* real bitrate */
228 bt->bitrate = priv->clock.freq / (bt->brp * (CAN_CALC_SYNC_SEG + tseg1 + tseg2));
199 229
200 return 0; 230 return 0;
201} 231}
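
To make the two-candidate rounding in can_update_sample_point() concrete,
here is a small standalone user-space sketch with illustrative values (the
clamping of tseg2 against the controller's tseg2_min/tseg2_max is omitted):

	#include <stdio.h>

	#define SYNC_SEG 1 /* CAN_CALC_SYNC_SEG */

	int main(void)
	{
		unsigned int nominal = 875; /* 87.5%, in one-tenth of a percent */
		unsigned int tseg = 15;     /* tseg1 + tseg2, in time quanta */
		unsigned int best_sp = 0, best_err = ~0U;
		int i;

		for (i = 0; i <= 1; i++) {
			unsigned int tseg2 = tseg + SYNC_SEG -
				(nominal * (tseg + SYNC_SEG)) / 1000 - i;
			unsigned int sp = 1000 * (tseg + SYNC_SEG - tseg2) /
				(tseg + SYNC_SEG);
			unsigned int err = nominal > sp ? nominal - sp
							: sp - nominal;

			printf("i=%d: tseg2=%u, sample point=%u.%u%%\n",
			       i, tseg2, sp / 10, sp % 10);
			if (sp <= nominal && err < best_err) {
				best_sp = sp;
				best_err = err;
			}
		}
		printf("chosen sample point: %u.%u%%\n",
		       best_sp / 10, best_sp % 10);
		return 0;
	}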
diff --git a/drivers/net/can/rcar/Kconfig b/drivers/net/can/rcar/Kconfig
new file mode 100644
index 000000000000..7b03a3a37db7
--- /dev/null
+++ b/drivers/net/can/rcar/Kconfig
@@ -0,0 +1,21 @@
1config CAN_RCAR
2 tristate "Renesas R-Car CAN controller"
3 depends on ARCH_RENESAS || ARM
4 ---help---
  5	  Say Y here if you want to use the CAN controller found on Renesas
  6	  R-Car SoCs.
7
8 To compile this driver as a module, choose M here: the module will
9 be called rcar_can.
10
11config CAN_RCAR_CANFD
12 tristate "Renesas R-Car CAN FD controller"
13 depends on ARCH_RENESAS || ARM
14 ---help---
 15	  Say Y here if you want to use the CAN FD controller found on
 16	  Renesas R-Car SoCs. The driver puts the controller in CAN FD only
 17	  mode, which can interoperate with CAN 2.0 nodes but does not support
 18	  dedicated CAN 2.0 mode.
19
20 To compile this driver as a module, choose M here: the module will
21 be called rcar_canfd.
diff --git a/drivers/net/can/rcar/Makefile b/drivers/net/can/rcar/Makefile
new file mode 100644
index 000000000000..08de36a4cfcc
--- /dev/null
+++ b/drivers/net/can/rcar/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for the Renesas R-Car CAN & CAN FD controller drivers
3#
4
5obj-$(CONFIG_CAN_RCAR) += rcar_can.o
6obj-$(CONFIG_CAN_RCAR_CANFD) += rcar_canfd.o
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 788459f6bf5c..788459f6bf5c 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
new file mode 100644
index 000000000000..43cdd5544b0c
--- /dev/null
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -0,0 +1,1858 @@
1/* Renesas R-Car CAN FD device driver
2 *
3 * Copyright (C) 2015 Renesas Electronics Corp.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
 11/* The R-Car CAN FD controller can operate in either of the two modes below:
12 * - CAN FD only mode
13 * - Classical CAN (CAN 2.0) only mode
14 *
15 * This driver puts the controller in CAN FD only mode by default. In this
16 * mode, the controller acts as a CAN FD node that can also interoperate with
17 * CAN 2.0 nodes.
18 *
 19 * To switch the controller to Classical CAN (CAN 2.0) only mode, add the
 20 * optional "renesas,no-can-fd" property to the device tree node. A h/w reset
 21 * is also required to switch modes.
 22 *
 23 * Note: The h/w manual's register naming convention is clumsy and not suitable
 24 * for use as-is in the driver. However, the original names are kept as
 25 * comments wherever a register is given a more readable name.
26 */
27
28#include <linux/module.h>
29#include <linux/moduleparam.h>
30#include <linux/kernel.h>
31#include <linux/types.h>
32#include <linux/interrupt.h>
33#include <linux/errno.h>
34#include <linux/netdevice.h>
35#include <linux/platform_device.h>
36#include <linux/can/led.h>
37#include <linux/can/dev.h>
38#include <linux/clk.h>
39#include <linux/of.h>
40#include <linux/of_device.h>
41#include <linux/bitmap.h>
42#include <linux/bitops.h>
43#include <linux/iopoll.h>
44
45#define RCANFD_DRV_NAME "rcar_canfd"
46
47/* Global register bits */
48
49/* RSCFDnCFDGRMCFG */
50#define RCANFD_GRMCFG_RCMC BIT(0)
51
52/* RSCFDnCFDGCFG / RSCFDnGCFG */
53#define RCANFD_GCFG_EEFE BIT(6)
54#define RCANFD_GCFG_CMPOC BIT(5) /* CAN FD only */
55#define RCANFD_GCFG_DCS BIT(4)
56#define RCANFD_GCFG_DCE BIT(1)
57#define RCANFD_GCFG_TPRI BIT(0)
58
59/* RSCFDnCFDGCTR / RSCFDnGCTR */
60#define RCANFD_GCTR_TSRST BIT(16)
61#define RCANFD_GCTR_CFMPOFIE BIT(11) /* CAN FD only */
62#define RCANFD_GCTR_THLEIE BIT(10)
63#define RCANFD_GCTR_MEIE BIT(9)
64#define RCANFD_GCTR_DEIE BIT(8)
65#define RCANFD_GCTR_GSLPR BIT(2)
66#define RCANFD_GCTR_GMDC_MASK (0x3)
67#define RCANFD_GCTR_GMDC_GOPM (0x0)
68#define RCANFD_GCTR_GMDC_GRESET (0x1)
69#define RCANFD_GCTR_GMDC_GTEST (0x2)
70
71/* RSCFDnCFDGSTS / RSCFDnGSTS */
72#define RCANFD_GSTS_GRAMINIT BIT(3)
73#define RCANFD_GSTS_GSLPSTS BIT(2)
74#define RCANFD_GSTS_GHLTSTS BIT(1)
75#define RCANFD_GSTS_GRSTSTS BIT(0)
76/* Non-operational status */
77#define RCANFD_GSTS_GNOPM (BIT(0) | BIT(1) | BIT(2) | BIT(3))
78
79/* RSCFDnCFDGERFL / RSCFDnGERFL */
80#define RCANFD_GERFL_EEF1 BIT(17)
81#define RCANFD_GERFL_EEF0 BIT(16)
82#define RCANFD_GERFL_CMPOF BIT(3) /* CAN FD only */
83#define RCANFD_GERFL_THLES BIT(2)
84#define RCANFD_GERFL_MES BIT(1)
85#define RCANFD_GERFL_DEF BIT(0)
86
87#define RCANFD_GERFL_ERR(gpriv, x) ((x) & (RCANFD_GERFL_EEF1 |\
88 RCANFD_GERFL_EEF0 | RCANFD_GERFL_MES |\
89 (gpriv->fdmode ?\
90 RCANFD_GERFL_CMPOF : 0)))
91
92/* AFL Rx rules registers */
93
94/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
 95#define RCANFD_GAFLCFG_SETRNC(n, x)	(((x) & 0xff) << (24 - (n) * 8))
 96#define RCANFD_GAFLCFG_GETRNC(n, x)	(((x) >> (24 - (n) * 8)) & 0xff)
97
98/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
99#define RCANFD_GAFLECTR_AFLDAE BIT(8)
100#define RCANFD_GAFLECTR_AFLPN(x) ((x) & 0x1f)
101
102/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */
103#define RCANFD_GAFLID_GAFLLB BIT(29)
104
105/* RSCFDnCFDGAFLP1_j / RSCFDnGAFLP1_j */
106#define RCANFD_GAFLP1_GAFLFDP(x) (1 << (x))
107
108/* Channel register bits */
109
110/* RSCFDnCmCFG - Classical CAN only */
111#define RCANFD_CFG_SJW(x) (((x) & 0x3) << 24)
112#define RCANFD_CFG_TSEG2(x) (((x) & 0x7) << 20)
113#define RCANFD_CFG_TSEG1(x) (((x) & 0xf) << 16)
114#define RCANFD_CFG_BRP(x) (((x) & 0x3ff) << 0)
115
116/* RSCFDnCFDCmNCFG - CAN FD only */
117#define RCANFD_NCFG_NTSEG2(x) (((x) & 0x1f) << 24)
118#define RCANFD_NCFG_NTSEG1(x) (((x) & 0x7f) << 16)
119#define RCANFD_NCFG_NSJW(x) (((x) & 0x1f) << 11)
120#define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0)
121
122/* RSCFDnCFDCmCTR / RSCFDnCmCTR */
123#define RCANFD_CCTR_CTME BIT(24)
124#define RCANFD_CCTR_ERRD BIT(23)
125#define RCANFD_CCTR_BOM_MASK (0x3 << 21)
126#define RCANFD_CCTR_BOM_ISO (0x0 << 21)
127#define RCANFD_CCTR_BOM_BENTRY (0x1 << 21)
128#define RCANFD_CCTR_BOM_BEND (0x2 << 21)
129#define RCANFD_CCTR_TDCVFIE BIT(19)
130#define RCANFD_CCTR_SOCOIE BIT(18)
131#define RCANFD_CCTR_EOCOIE BIT(17)
132#define RCANFD_CCTR_TAIE BIT(16)
133#define RCANFD_CCTR_ALIE BIT(15)
134#define RCANFD_CCTR_BLIE BIT(14)
135#define RCANFD_CCTR_OLIE BIT(13)
136#define RCANFD_CCTR_BORIE BIT(12)
137#define RCANFD_CCTR_BOEIE BIT(11)
138#define RCANFD_CCTR_EPIE BIT(10)
139#define RCANFD_CCTR_EWIE BIT(9)
140#define RCANFD_CCTR_BEIE BIT(8)
141#define RCANFD_CCTR_CSLPR BIT(2)
142#define RCANFD_CCTR_CHMDC_MASK (0x3)
143#define RCANFD_CCTR_CHDMC_COPM (0x0)
144#define RCANFD_CCTR_CHDMC_CRESET (0x1)
145#define RCANFD_CCTR_CHDMC_CHLT (0x2)
146
147/* RSCFDnCFDCmSTS / RSCFDnCmSTS */
148#define RCANFD_CSTS_COMSTS BIT(7)
149#define RCANFD_CSTS_RECSTS BIT(6)
150#define RCANFD_CSTS_TRMSTS BIT(5)
151#define RCANFD_CSTS_BOSTS BIT(4)
152#define RCANFD_CSTS_EPSTS BIT(3)
153#define RCANFD_CSTS_SLPSTS BIT(2)
154#define RCANFD_CSTS_HLTSTS BIT(1)
155#define RCANFD_CSTS_CRSTSTS BIT(0)
156
157#define RCANFD_CSTS_TECCNT(x) (((x) >> 24) & 0xff)
158#define RCANFD_CSTS_RECCNT(x) (((x) >> 16) & 0xff)
159
160/* RSCFDnCFDCmERFL / RSCFDnCmERFL */
161#define RCANFD_CERFL_ADERR BIT(14)
162#define RCANFD_CERFL_B0ERR BIT(13)
163#define RCANFD_CERFL_B1ERR BIT(12)
164#define RCANFD_CERFL_CERR BIT(11)
165#define RCANFD_CERFL_AERR BIT(10)
166#define RCANFD_CERFL_FERR BIT(9)
167#define RCANFD_CERFL_SERR BIT(8)
168#define RCANFD_CERFL_ALF BIT(7)
169#define RCANFD_CERFL_BLF BIT(6)
170#define RCANFD_CERFL_OVLF BIT(5)
171#define RCANFD_CERFL_BORF BIT(4)
172#define RCANFD_CERFL_BOEF BIT(3)
173#define RCANFD_CERFL_EPF BIT(2)
174#define RCANFD_CERFL_EWF BIT(1)
175#define RCANFD_CERFL_BEF BIT(0)
176
177#define RCANFD_CERFL_ERR(x) ((x) & (0x7fff)) /* above bits 14:0 */
178
179/* RSCFDnCFDCmDCFG */
180#define RCANFD_DCFG_DSJW(x) (((x) & 0x7) << 24)
181#define RCANFD_DCFG_DTSEG2(x) (((x) & 0x7) << 20)
182#define RCANFD_DCFG_DTSEG1(x) (((x) & 0xf) << 16)
183#define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0)
184
185/* RSCFDnCFDCmFDCFG */
186#define RCANFD_FDCFG_TDCE BIT(9)
187#define RCANFD_FDCFG_TDCOC BIT(8)
188#define RCANFD_FDCFG_TDCO(x)		(((x) & 0x7f) << 16)
189
190/* RSCFDnCFDRFCCx */
191#define RCANFD_RFCC_RFIM BIT(12)
192#define RCANFD_RFCC_RFDC(x) (((x) & 0x7) << 8)
193#define RCANFD_RFCC_RFPLS(x) (((x) & 0x7) << 4)
194#define RCANFD_RFCC_RFIE BIT(1)
195#define RCANFD_RFCC_RFE BIT(0)
196
197/* RSCFDnCFDRFSTSx */
198#define RCANFD_RFSTS_RFIF BIT(3)
199#define RCANFD_RFSTS_RFMLT BIT(2)
200#define RCANFD_RFSTS_RFFLL BIT(1)
201#define RCANFD_RFSTS_RFEMP BIT(0)
202
203/* RSCFDnCFDRFIDx */
204#define RCANFD_RFID_RFIDE BIT(31)
205#define RCANFD_RFID_RFRTR BIT(30)
206
207/* RSCFDnCFDRFPTRx */
208#define RCANFD_RFPTR_RFDLC(x) (((x) >> 28) & 0xf)
209#define RCANFD_RFPTR_RFPTR(x) (((x) >> 16) & 0xfff)
210#define RCANFD_RFPTR_RFTS(x) (((x) >> 0) & 0xffff)
211
212/* RSCFDnCFDRFFDSTSx */
213#define RCANFD_RFFDSTS_RFFDF BIT(2)
214#define RCANFD_RFFDSTS_RFBRS BIT(1)
215#define RCANFD_RFFDSTS_RFESI BIT(0)
216
217/* Common FIFO bits */
218
219/* RSCFDnCFDCFCCk */
220#define RCANFD_CFCC_CFTML(x) (((x) & 0xf) << 20)
221#define RCANFD_CFCC_CFM(x) (((x) & 0x3) << 16)
222#define RCANFD_CFCC_CFIM BIT(12)
223#define RCANFD_CFCC_CFDC(x) (((x) & 0x7) << 8)
224#define RCANFD_CFCC_CFPLS(x) (((x) & 0x7) << 4)
225#define RCANFD_CFCC_CFTXIE BIT(2)
226#define RCANFD_CFCC_CFE BIT(0)
227
228/* RSCFDnCFDCFSTSk */
229#define RCANFD_CFSTS_CFMC(x) (((x) >> 8) & 0xff)
230#define RCANFD_CFSTS_CFTXIF BIT(4)
231#define RCANFD_CFSTS_CFMLT BIT(2)
232#define RCANFD_CFSTS_CFFLL BIT(1)
233#define RCANFD_CFSTS_CFEMP BIT(0)
234
235/* RSCFDnCFDCFIDk */
236#define RCANFD_CFID_CFIDE BIT(31)
237#define RCANFD_CFID_CFRTR BIT(30)
238#define RCANFD_CFID_CFID_MASK(x) ((x) & 0x1fffffff)
239
240/* RSCFDnCFDCFPTRk */
241#define RCANFD_CFPTR_CFDLC(x) (((x) & 0xf) << 28)
242#define RCANFD_CFPTR_CFPTR(x) (((x) & 0xfff) << 16)
243#define RCANFD_CFPTR_CFTS(x) (((x) & 0xff) << 0)
244
245/* RSCFDnCFDCFFDCSTSk */
246#define RCANFD_CFFDCSTS_CFFDF BIT(2)
247#define RCANFD_CFFDCSTS_CFBRS BIT(1)
248#define RCANFD_CFFDCSTS_CFESI BIT(0)
249
250/* This controller supports either Classical CAN only mode or CAN FD only mode.
251 * These modes are supported in two separate sets of register maps & names.
252 * However, some of the register offsets are common to both modes. Those
253 * offsets are listed below as Common registers.
254 *
255 * The CAN FD only mode specific registers & Classical CAN only mode specific
256 * registers are listed separately. Their register names start with
257 * RCANFD_F_xxx & RCANFD_C_xxx respectively.
258 */
259
260/* Common registers */
261
262/* RSCFDnCFDCmNCFG / RSCFDnCmCFG */
263#define RCANFD_CCFG(m) (0x0000 + (0x10 * (m)))
264/* RSCFDnCFDCmCTR / RSCFDnCmCTR */
265#define RCANFD_CCTR(m) (0x0004 + (0x10 * (m)))
266/* RSCFDnCFDCmSTS / RSCFDnCmSTS */
267#define RCANFD_CSTS(m) (0x0008 + (0x10 * (m)))
268/* RSCFDnCFDCmERFL / RSCFDnCmERFL */
269#define RCANFD_CERFL(m) (0x000C + (0x10 * (m)))
270
271/* RSCFDnCFDGCFG / RSCFDnGCFG */
272#define RCANFD_GCFG (0x0084)
273/* RSCFDnCFDGCTR / RSCFDnGCTR */
274#define RCANFD_GCTR (0x0088)
275/* RSCFDnCFDGCTS / RSCFDnGCTS */
276#define RCANFD_GSTS (0x008c)
277/* RSCFDnCFDGERFL / RSCFDnGERFL */
278#define RCANFD_GERFL (0x0090)
279/* RSCFDnCFDGTSC / RSCFDnGTSC */
280#define RCANFD_GTSC (0x0094)
281/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
282#define RCANFD_GAFLECTR (0x0098)
283/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
284#define RCANFD_GAFLCFG0 (0x009c)
285/* RSCFDnCFDGAFLCFG1 / RSCFDnGAFLCFG1 */
286#define RCANFD_GAFLCFG1 (0x00a0)
287/* RSCFDnCFDRMNB / RSCFDnRMNB */
288#define RCANFD_RMNB (0x00a4)
289/* RSCFDnCFDRMND / RSCFDnRMND */
290#define RCANFD_RMND(y) (0x00a8 + (0x04 * (y)))
291
292/* RSCFDnCFDRFCCx / RSCFDnRFCCx */
293#define RCANFD_RFCC(x) (0x00b8 + (0x04 * (x)))
294/* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */
295#define RCANFD_RFSTS(x) (0x00d8 + (0x04 * (x)))
296/* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */
297#define RCANFD_RFPCTR(x) (0x00f8 + (0x04 * (x)))
298
299/* Common FIFO Control registers */
300
301/* RSCFDnCFDCFCCx / RSCFDnCFCCx */
302#define RCANFD_CFCC(ch, idx) (0x0118 + (0x0c * (ch)) + \
303 (0x04 * (idx)))
304/* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */
305#define RCANFD_CFSTS(ch, idx) (0x0178 + (0x0c * (ch)) + \
306 (0x04 * (idx)))
307/* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */
308#define RCANFD_CFPCTR(ch, idx) (0x01d8 + (0x0c * (ch)) + \
309 (0x04 * (idx)))
310
311/* RSCFDnCFDFESTS / RSCFDnFESTS */
312#define RCANFD_FESTS (0x0238)
313/* RSCFDnCFDFFSTS / RSCFDnFFSTS */
314#define RCANFD_FFSTS (0x023c)
315/* RSCFDnCFDFMSTS / RSCFDnFMSTS */
316#define RCANFD_FMSTS (0x0240)
317/* RSCFDnCFDRFISTS / RSCFDnRFISTS */
318#define RCANFD_RFISTS (0x0244)
319/* RSCFDnCFDCFRISTS / RSCFDnCFRISTS */
320#define RCANFD_CFRISTS (0x0248)
321/* RSCFDnCFDCFTISTS / RSCFDnCFTISTS */
322#define RCANFD_CFTISTS (0x024c)
323
324/* RSCFDnCFDTMCp / RSCFDnTMCp */
325#define RCANFD_TMC(p) (0x0250 + (0x01 * (p)))
326/* RSCFDnCFDTMSTSp / RSCFDnTMSTSp */
327#define RCANFD_TMSTS(p) (0x02d0 + (0x01 * (p)))
328
329/* RSCFDnCFDTMTRSTSp / RSCFDnTMTRSTSp */
330#define RCANFD_TMTRSTS(y) (0x0350 + (0x04 * (y)))
331/* RSCFDnCFDTMTARSTSp / RSCFDnTMTARSTSp */
332#define RCANFD_TMTARSTS(y) (0x0360 + (0x04 * (y)))
333/* RSCFDnCFDTMTCSTSp / RSCFDnTMTCSTSp */
334#define RCANFD_TMTCSTS(y) (0x0370 + (0x04 * (y)))
335/* RSCFDnCFDTMTASTSp / RSCFDnTMTASTSp */
336#define RCANFD_TMTASTS(y) (0x0380 + (0x04 * (y)))
337/* RSCFDnCFDTMIECy / RSCFDnTMIECy */
338#define RCANFD_TMIEC(y) (0x0390 + (0x04 * (y)))
339
340/* RSCFDnCFDTXQCCm / RSCFDnTXQCCm */
341#define RCANFD_TXQCC(m) (0x03a0 + (0x04 * (m)))
342/* RSCFDnCFDTXQSTSm / RSCFDnTXQSTSm */
343#define RCANFD_TXQSTS(m) (0x03c0 + (0x04 * (m)))
344/* RSCFDnCFDTXQPCTRm / RSCFDnTXQPCTRm */
345#define RCANFD_TXQPCTR(m) (0x03e0 + (0x04 * (m)))
346
347/* RSCFDnCFDTHLCCm / RSCFDnTHLCCm */
348#define RCANFD_THLCC(m) (0x0400 + (0x04 * (m)))
349/* RSCFDnCFDTHLSTSm / RSCFDnTHLSTSm */
350#define RCANFD_THLSTS(m) (0x0420 + (0x04 * (m)))
351/* RSCFDnCFDTHLPCTRm / RSCFDnTHLPCTRm */
352#define RCANFD_THLPCTR(m) (0x0440 + (0x04 * (m)))
353
354/* RSCFDnCFDGTINTSTS0 / RSCFDnGTINTSTS0 */
355#define RCANFD_GTINTSTS0 (0x0460)
356/* RSCFDnCFDGTINTSTS1 / RSCFDnGTINTSTS1 */
357#define RCANFD_GTINTSTS1 (0x0464)
358/* RSCFDnCFDGTSTCFG / RSCFDnGTSTCFG */
359#define RCANFD_GTSTCFG (0x0468)
360/* RSCFDnCFDGTSTCTR / RSCFDnGTSTCTR */
361#define RCANFD_GTSTCTR (0x046c)
362/* RSCFDnCFDGLOCKK / RSCFDnGLOCKK */
363#define RCANFD_GLOCKK (0x047c)
364/* RSCFDnCFDGRMCFG */
365#define RCANFD_GRMCFG (0x04fc)
366
367/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */
368#define RCANFD_GAFLID(offset, j) ((offset) + (0x10 * (j)))
369/* RSCFDnCFDGAFLMj / RSCFDnGAFLMj */
370#define RCANFD_GAFLM(offset, j) ((offset) + 0x04 + (0x10 * (j)))
371/* RSCFDnCFDGAFLP0j / RSCFDnGAFLP0j */
372#define RCANFD_GAFLP0(offset, j) ((offset) + 0x08 + (0x10 * (j)))
373/* RSCFDnCFDGAFLP1j / RSCFDnGAFLP1j */
374#define RCANFD_GAFLP1(offset, j) ((offset) + 0x0c + (0x10 * (j)))
375
376/* Classical CAN only mode register map */
377
378/* RSCFDnGAFLXXXj offset */
379#define RCANFD_C_GAFL_OFFSET (0x0500)
380
381/* RSCFDnRMXXXq -> RCANFD_C_RMXXX(q) */
382#define RCANFD_C_RMID(q) (0x0600 + (0x10 * (q)))
383#define RCANFD_C_RMPTR(q) (0x0604 + (0x10 * (q)))
384#define RCANFD_C_RMDF0(q) (0x0608 + (0x10 * (q)))
385#define RCANFD_C_RMDF1(q) (0x060c + (0x10 * (q)))
386
387/* RSCFDnRFXXx -> RCANFD_C_RFXX(x) */
388#define RCANFD_C_RFOFFSET (0x0e00)
389#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x)))
390#define RCANFD_C_RFPTR(x) (RCANFD_C_RFOFFSET + 0x04 + \
391 (0x10 * (x)))
392#define RCANFD_C_RFDF(x, df) (RCANFD_C_RFOFFSET + 0x08 + \
393 (0x10 * (x)) + (0x04 * (df)))
394
395/* RSCFDnCFXXk -> RCANFD_C_CFXX(ch, k) */
396#define RCANFD_C_CFOFFSET (0x0e80)
397#define RCANFD_C_CFID(ch, idx) (RCANFD_C_CFOFFSET + (0x30 * (ch)) + \
398 (0x10 * (idx)))
399#define RCANFD_C_CFPTR(ch, idx) (RCANFD_C_CFOFFSET + 0x04 + \
400 (0x30 * (ch)) + (0x10 * (idx)))
401#define RCANFD_C_CFDF(ch, idx, df) (RCANFD_C_CFOFFSET + 0x08 + \
402 (0x30 * (ch)) + (0x10 * (idx)) + \
403 (0x04 * (df)))
404
405/* RSCFDnTMXXp -> RCANFD_C_TMXX(p) */
406#define RCANFD_C_TMID(p) (0x1000 + (0x10 * (p)))
407#define RCANFD_C_TMPTR(p) (0x1004 + (0x10 * (p)))
408#define RCANFD_C_TMDF0(p) (0x1008 + (0x10 * (p)))
409#define RCANFD_C_TMDF1(p) (0x100c + (0x10 * (p)))
410
411/* RSCFDnTHLACCm */
412#define RCANFD_C_THLACC(m) (0x1800 + (0x04 * (m)))
413/* RSCFDnRPGACCr */
414#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r)))
415
416/* CAN FD mode specific register map */
417
418/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
419#define RCANFD_F_DCFG(m) (0x0500 + (0x20 * (m)))
420#define RCANFD_F_CFDCFG(m) (0x0504 + (0x20 * (m)))
421#define RCANFD_F_CFDCTR(m) (0x0508 + (0x20 * (m)))
422#define RCANFD_F_CFDSTS(m) (0x050c + (0x20 * (m)))
423#define RCANFD_F_CFDCRC(m) (0x0510 + (0x20 * (m)))
424
425/* RSCFDnCFDGAFLXXXj offset */
426#define RCANFD_F_GAFL_OFFSET (0x1000)
427
428/* RSCFDnCFDRMXXXq -> RCANFD_F_RMXXX(q) */
429#define RCANFD_F_RMID(q) (0x2000 + (0x20 * (q)))
430#define RCANFD_F_RMPTR(q) (0x2004 + (0x20 * (q)))
431#define RCANFD_F_RMFDSTS(q) (0x2008 + (0x20 * (q)))
432#define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q)))
433
434/* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */
435#define RCANFD_F_RFOFFSET (0x3000)
436#define RCANFD_F_RFID(x) (RCANFD_F_RFOFFSET + (0x80 * (x)))
437#define RCANFD_F_RFPTR(x) (RCANFD_F_RFOFFSET + 0x04 + \
438 (0x80 * (x)))
439#define RCANFD_F_RFFDSTS(x) (RCANFD_F_RFOFFSET + 0x08 + \
440 (0x80 * (x)))
441#define RCANFD_F_RFDF(x, df) (RCANFD_F_RFOFFSET + 0x0c + \
442 (0x80 * (x)) + (0x04 * (df)))
443
444/* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */
445#define RCANFD_F_CFOFFSET (0x3400)
446#define RCANFD_F_CFID(ch, idx) (RCANFD_F_CFOFFSET + (0x180 * (ch)) + \
447 (0x80 * (idx)))
448#define RCANFD_F_CFPTR(ch, idx) (RCANFD_F_CFOFFSET + 0x04 + \
449 (0x180 * (ch)) + (0x80 * (idx)))
450#define RCANFD_F_CFFDCSTS(ch, idx) (RCANFD_F_CFOFFSET + 0x08 + \
451 (0x180 * (ch)) + (0x80 * (idx)))
452#define RCANFD_F_CFDF(ch, idx, df) (RCANFD_F_CFOFFSET + 0x0c + \
453 (0x180 * (ch)) + (0x80 * (idx)) + \
454 (0x04 * (df)))
455
456/* RSCFDnCFDTMXXp -> RCANFD_F_TMXX(p) */
457#define RCANFD_F_TMID(p) (0x4000 + (0x20 * (p)))
458#define RCANFD_F_TMPTR(p) (0x4004 + (0x20 * (p)))
459#define RCANFD_F_TMFDCTR(p) (0x4008 + (0x20 * (p)))
460#define RCANFD_F_TMDF(p, b) (0x400c + (0x20 * (p)) + (0x04 * (b)))
461
462/* RSCFDnCFDTHLACCm */
463#define RCANFD_F_THLACC(m) (0x6000 + (0x04 * (m)))
464/* RSCFDnCFDRPGACCr */
465#define RCANFD_F_RPGACC(r) (0x6400 + (0x04 * (r)))
466
467/* Constants */
468#define RCANFD_FIFO_DEPTH 8 /* Tx FIFO depth */
469#define RCANFD_NAPI_WEIGHT 8 /* Rx poll quota */
470
471#define RCANFD_NUM_CHANNELS 2 /* Two channels max */
472#define RCANFD_CHANNELS_MASK (BIT(RCANFD_NUM_CHANNELS) - 1)
473
474#define RCANFD_GAFL_PAGENUM(entry) ((entry) / 16)
475#define RCANFD_CHANNEL_NUMRULES 1 /* only one rule per channel */
476
477/* Rx FIFO is a global resource of the controller. There are 8 such FIFOs
478 * available. Each channel gets a dedicated Rx FIFO, i.e. the channel
479 * number is added to the RFFIFO index.
480 */
481#define RCANFD_RFFIFO_IDX 0
482
483/* Tx/Rx or Common FIFO is a per channel resource. Each channel has 3 Common
484 * FIFOs dedicated to it. Use the first (index 0) FIFO out of the 3 for Tx.
485 */
486#define RCANFD_CFFIFO_IDX 0
487
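/* Editor's illustration (a sketch, not part of the driver): with the two
 * index bases above, channel 0 receives via global Rx FIFO 0 and sends
 * via its own Common FIFO (ch = 0, idx = 0), while channel 1 uses
 * Rx FIFO 1 and Common FIFO (ch = 1, idx = 0):
 *
 *	u32 ridx = ch + RCANFD_RFFIFO_IDX;               // 0 or 1
 *	u32 cfcc = RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX);   // per channel
 */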
488/* fCAN clock select register settings */
489enum rcar_canfd_fcanclk {
490 RCANFD_CANFDCLK = 0, /* CANFD clock */
491 RCANFD_EXTCLK, /* Externally input clock */
492};
493
494struct rcar_canfd_global;
495
496/* Channel priv data */
497struct rcar_canfd_channel {
498 struct can_priv can; /* Must be the first member */
499 struct net_device *ndev;
500 struct rcar_canfd_global *gpriv; /* Controller reference */
501 void __iomem *base; /* Register base address */
502 struct napi_struct napi;
503 u8 tx_len[RCANFD_FIFO_DEPTH]; /* For net stats */
504 u32 tx_head; /* Incremented on xmit */
505 u32 tx_tail; /* Incremented on xmit done */
506 u32 channel; /* Channel number */
507 spinlock_t tx_lock; /* To protect tx path */
508};
509
510/* Global priv data */
511struct rcar_canfd_global {
512 struct rcar_canfd_channel *ch[RCANFD_NUM_CHANNELS];
513 void __iomem *base; /* Register base address */
514 struct platform_device *pdev; /* Respective platform device */
515 struct clk *clkp; /* Peripheral clock */
516 struct clk *can_clk; /* fCAN clock */
517 enum rcar_canfd_fcanclk fcan; /* CANFD or Ext clock */
518 unsigned long channels_mask; /* Enabled channels mask */
519 bool fdmode; /* CAN FD or Classical CAN only mode */
520};
521
522/* CAN FD mode nominal rate constants */
523static const struct can_bittiming_const rcar_canfd_nom_bittiming_const = {
524 .name = RCANFD_DRV_NAME,
525 .tseg1_min = 2,
526 .tseg1_max = 128,
527 .tseg2_min = 2,
528 .tseg2_max = 32,
529 .sjw_max = 32,
530 .brp_min = 1,
531 .brp_max = 1024,
532 .brp_inc = 1,
533};
534
535/* CAN FD mode data rate constants */
536static const struct can_bittiming_const rcar_canfd_data_bittiming_const = {
537 .name = RCANFD_DRV_NAME,
538 .tseg1_min = 2,
539 .tseg1_max = 16,
540 .tseg2_min = 2,
541 .tseg2_max = 8,
542 .sjw_max = 8,
543 .brp_min = 1,
544 .brp_max = 256,
545 .brp_inc = 1,
546};
547
548/* Classical CAN mode bitrate constants */
549static const struct can_bittiming_const rcar_canfd_bittiming_const = {
550 .name = RCANFD_DRV_NAME,
551 .tseg1_min = 4,
552 .tseg1_max = 16,
553 .tseg2_min = 2,
554 .tseg2_max = 8,
555 .sjw_max = 4,
556 .brp_min = 1,
557 .brp_max = 1024,
558 .brp_inc = 1,
559};
560
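/* Worked example (illustrative figures, not from the datasheet): with a
 * 40 MHz fCAN clock, a 500 kbit/s nominal rate fits the classical-CAN
 * limits above using brp = 5 and 16 time quanta per bit:
 *
 *	bitrate = fcan / (brp * (1 + tseg1 + tseg2))
 *	        = 40000000 / (5 * (1 + 12 + 3)) = 500000
 *
 * where tseg1 = prop_seg + phase_seg1 = 12 and tseg2 = phase_seg2 = 3.
 */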
561/* Helper functions */
562static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
563{
564 u32 data = readl(reg);
565
566 data &= ~mask;
567 data |= (val & mask);
568 writel(data, reg);
569}
570
571static inline u32 rcar_canfd_read(void __iomem *base, u32 offset)
572{
573 return readl(base + (offset));
574}
575
576static inline void rcar_canfd_write(void __iomem *base, u32 offset, u32 val)
577{
578 writel(val, base + (offset));
579}
580
581static void rcar_canfd_set_bit(void __iomem *base, u32 reg, u32 val)
582{
583 rcar_canfd_update(val, val, base + (reg));
584}
585
586static void rcar_canfd_clear_bit(void __iomem *base, u32 reg, u32 val)
587{
588 rcar_canfd_update(val, 0, base + (reg));
589}
590
591static void rcar_canfd_update_bit(void __iomem *base, u32 reg,
592 u32 mask, u32 val)
593{
594 rcar_canfd_update(mask, val, base + (reg));
595}
596
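/* Usage sketch: the three wrappers above are thin read-modify-write
 * helpers. For example, switching a channel into reset mode without
 * disturbing the other control bits (as done later in this file):
 *
 *	rcar_canfd_update_bit(gpriv->base, RCANFD_CCTR(ch),
 *			      RCANFD_CCTR_CHMDC_MASK,
 *			      RCANFD_CCTR_CHDMC_CRESET);
 */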
597static void rcar_canfd_get_data(struct rcar_canfd_channel *priv,
598 struct canfd_frame *cf, u32 off)
599{
600 u32 i, lwords;
601
602 lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
603 for (i = 0; i < lwords; i++)
604 *((u32 *)cf->data + i) =
605 rcar_canfd_read(priv->base, off + (i * sizeof(u32)));
606}
607
608static void rcar_canfd_put_data(struct rcar_canfd_channel *priv,
609 struct canfd_frame *cf, u32 off)
610{
611 u32 i, lwords;
612
613 lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
614 for (i = 0; i < lwords; i++)
615 rcar_canfd_write(priv->base, off + (i * sizeof(u32)),
616 *((u32 *)cf->data + i));
617}
618
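/* Note on the two copies above (a sketch of the behaviour): data moves
 * 32 bits at a time, so a 5-byte frame still transfers
 * DIV_ROUND_UP(5, 4) = 2 words; the bytes past cf->len are don't-care
 * padding as far as the frame contents are concerned.
 */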
619static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
620{
621 u32 i;
622
623 for (i = 0; i < RCANFD_FIFO_DEPTH; i++)
624 can_free_echo_skb(ndev, i);
625}
626
627static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
628{
629 u32 sts, ch;
630 int err;
631
632 /* Check RAMINIT flag as CAN RAM initialization takes place
633 * after the MCU reset
634 */
635 err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
636 !(sts & RCANFD_GSTS_GRAMINIT), 2, 500000);
637 if (err) {
638 dev_dbg(&gpriv->pdev->dev, "global raminit failed\n");
639 return err;
640 }
641
642 /* Transition to Global Reset mode */
643 rcar_canfd_clear_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR);
644 rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR,
645 RCANFD_GCTR_GMDC_MASK, RCANFD_GCTR_GMDC_GRESET);
646
647 /* Ensure Global reset mode */
648 err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
649 (sts & RCANFD_GSTS_GRSTSTS), 2, 500000);
650 if (err) {
651 dev_dbg(&gpriv->pdev->dev, "global reset failed\n");
652 return err;
653 }
654
655 /* Reset Global error flags */
656 rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0);
657
658 /* Set the controller into appropriate mode */
659 if (gpriv->fdmode)
660 rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
661 RCANFD_GRMCFG_RCMC);
662 else
663 rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
664 RCANFD_GRMCFG_RCMC);
665
666 /* Transition all Channels to reset mode */
667 for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
668 rcar_canfd_clear_bit(gpriv->base,
669 RCANFD_CCTR(ch), RCANFD_CCTR_CSLPR);
670
671 rcar_canfd_update_bit(gpriv->base, RCANFD_CCTR(ch),
672 RCANFD_CCTR_CHMDC_MASK,
673 RCANFD_CCTR_CHDMC_CRESET);
674
675 /* Ensure Channel reset mode */
676 err = readl_poll_timeout((gpriv->base + RCANFD_CSTS(ch)), sts,
677 (sts & RCANFD_CSTS_CRSTSTS),
678 2, 500000);
679 if (err) {
680 dev_dbg(&gpriv->pdev->dev,
681 "channel %u reset failed\n", ch);
682 return err;
683 }
684 }
685 return 0;
686}
687
688static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
689{
690 u32 cfg, ch;
691
692 /* Global configuration settings */
693
694 /* ECC Error flag Enable */
695 cfg = RCANFD_GCFG_EEFE;
696
697 if (gpriv->fdmode)
698 /* Truncate payload to configured message size RFPLS */
699 cfg |= RCANFD_GCFG_CMPOC;
700
701 /* Set External Clock if selected */
702 if (gpriv->fcan != RCANFD_CANFDCLK)
703 cfg |= RCANFD_GCFG_DCS;
704
705 rcar_canfd_set_bit(gpriv->base, RCANFD_GCFG, cfg);
706
707 /* Channel configuration settings */
708 for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
709 rcar_canfd_set_bit(gpriv->base, RCANFD_CCTR(ch),
710 RCANFD_CCTR_ERRD);
711 rcar_canfd_update_bit(gpriv->base, RCANFD_CCTR(ch),
712 RCANFD_CCTR_BOM_MASK,
713 RCANFD_CCTR_BOM_BENTRY);
714 }
715}
716
717static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
718 u32 ch)
719{
720 u32 cfg;
721 int offset, start, page, num_rules = RCANFD_CHANNEL_NUMRULES;
722 u32 ridx = ch + RCANFD_RFFIFO_IDX;
723
724 if (ch == 0) {
725 start = 0; /* Channel 0 always starts from 0th rule */
726 } else {
727 /* Get number of Channel 0 rules and adjust */
728 cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG0);
729 start = RCANFD_GAFLCFG_GETRNC(0, cfg);
730 }
731
732 /* Enable write access to entry */
733 page = RCANFD_GAFL_PAGENUM(start);
734 rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR,
735 (RCANFD_GAFLECTR_AFLPN(page) |
736 RCANFD_GAFLECTR_AFLDAE));
737
738 /* Write number of rules for channel */
739 rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG0,
740 RCANFD_GAFLCFG_SETRNC(ch, num_rules));
741 if (gpriv->fdmode)
742 offset = RCANFD_F_GAFL_OFFSET;
743 else
744 offset = RCANFD_C_GAFL_OFFSET;
745
746 /* Accept all IDs */
747 rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, start), 0);
748 /* IDE or RTR is not considered for matching */
749 rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, start), 0);
750 /* Any data length accepted */
751 rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0);
752 /* Place the msg in corresponding Rx FIFO entry */
753 rcar_canfd_write(gpriv->base, RCANFD_GAFLP1(offset, start),
754 RCANFD_GAFLP1_GAFLFDP(ridx));
755
756 /* Disable write access to page */
757 rcar_canfd_clear_bit(gpriv->base,
758 RCANFD_GAFLECTR, RCANFD_GAFLECTR_AFLDAE);
759}
760
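/* Net effect of the rule programmed above (editor's summary): a single
 * catch-all entry per channel (ID, mask and DLC checks are all zero,
 * so every frame matches), routed by GAFLFDP to that channel's Rx FIFO.
 */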
761static void rcar_canfd_configure_rx(struct rcar_canfd_global *gpriv, u32 ch)
762{
763 /* Rx FIFO is used for reception */
764 u32 cfg;
765 u16 rfdc, rfpls;
766
767 /* Select Rx FIFO based on channel */
768 u32 ridx = ch + RCANFD_RFFIFO_IDX;
769
770 rfdc = 2; /* b010 - 8 messages Rx FIFO depth */
771 if (gpriv->fdmode)
772 rfpls = 7; /* b111 - Max 64 bytes payload */
773 else
774 rfpls = 0; /* b000 - Max 8 bytes payload */
775
776 cfg = (RCANFD_RFCC_RFIM | RCANFD_RFCC_RFDC(rfdc) |
777 RCANFD_RFCC_RFPLS(rfpls) | RCANFD_RFCC_RFIE);
778 rcar_canfd_write(gpriv->base, RCANFD_RFCC(ridx), cfg);
779}
780
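/* Decoded, the value written above configures (sketch): an 8-message
 * Rx FIFO (rfdc = b010) holding 64-byte payloads in FD mode or 8-byte
 * payloads otherwise (rfpls), with the per-message interrupt flag
 * (RFIM) raised and the interrupt enabled (RFIE).
 */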
781static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch)
782{
783 /* Tx/Rx(Common) FIFO configured in Tx mode is
784 * used for transmission
785 *
786 * Each channel has 3 Common FIFOs dedicated to it.
787 * Use the first (index 0) of the 3.
788 */
789 u32 cfg;
790 u16 cftml, cfm, cfdc, cfpls;
791
792 cftml = 0; /* 0th buffer */
793 cfm = 1; /* b01 - Transmit mode */
794 cfdc = 2; /* b010 - 8 messages Tx FIFO depth */
795 if (gpriv->fdmode)
796 cfpls = 7; /* b111 - Max 64 bytes payload */
797 else
798 cfpls = 0; /* b000 - Max 8 bytes payload */
799
800 cfg = (RCANFD_CFCC_CFTML(cftml) | RCANFD_CFCC_CFM(cfm) |
801 RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(cfdc) |
802 RCANFD_CFCC_CFPLS(cfpls) | RCANFD_CFCC_CFTXIE);
803 rcar_canfd_write(gpriv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX), cfg);
804
805 if (gpriv->fdmode)
806 /* Clear FD mode specific control/status register */
807 rcar_canfd_write(gpriv->base,
808 RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), 0);
809}
810
811static void rcar_canfd_enable_global_interrupts(struct rcar_canfd_global *gpriv)
812{
813 u32 ctr;
814
815 /* Clear any stray error interrupt flags */
816 rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0);
817
818 /* Global interrupts setup */
819 ctr = RCANFD_GCTR_MEIE;
820 if (gpriv->fdmode)
821 ctr |= RCANFD_GCTR_CFMPOFIE;
822
823 rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, ctr);
824}
825
826static void rcar_canfd_disable_global_interrupts(struct rcar_canfd_global
827 *gpriv)
828{
829 /* Disable all interrupts */
830 rcar_canfd_write(gpriv->base, RCANFD_GCTR, 0);
831
832 /* Clear any stray error interrupt flags */
833 rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0);
834}
835
836static void rcar_canfd_enable_channel_interrupts(struct rcar_canfd_channel
837 *priv)
838{
839 u32 ctr, ch = priv->channel;
840
841 /* Clear any stray error flags */
842 rcar_canfd_write(priv->base, RCANFD_CERFL(ch), 0);
843
844 /* Channel interrupts setup */
845 ctr = (RCANFD_CCTR_TAIE |
846 RCANFD_CCTR_ALIE | RCANFD_CCTR_BLIE |
847 RCANFD_CCTR_OLIE | RCANFD_CCTR_BORIE |
848 RCANFD_CCTR_BOEIE | RCANFD_CCTR_EPIE |
849 RCANFD_CCTR_EWIE | RCANFD_CCTR_BEIE);
850 rcar_canfd_set_bit(priv->base, RCANFD_CCTR(ch), ctr);
851}
852
853static void rcar_canfd_disable_channel_interrupts(struct rcar_canfd_channel
854 *priv)
855{
856 u32 ctr, ch = priv->channel;
857
858 ctr = (RCANFD_CCTR_TAIE |
859 RCANFD_CCTR_ALIE | RCANFD_CCTR_BLIE |
860 RCANFD_CCTR_OLIE | RCANFD_CCTR_BORIE |
861 RCANFD_CCTR_BOEIE | RCANFD_CCTR_EPIE |
862 RCANFD_CCTR_EWIE | RCANFD_CCTR_BEIE);
863 rcar_canfd_clear_bit(priv->base, RCANFD_CCTR(ch), ctr);
864
865 /* Clear any stray error flags */
866 rcar_canfd_write(priv->base, RCANFD_CERFL(ch), 0);
867}
868
869static void rcar_canfd_global_error(struct net_device *ndev)
870{
871 struct rcar_canfd_channel *priv = netdev_priv(ndev);
872 struct rcar_canfd_global *gpriv = priv->gpriv;
873 struct net_device_stats *stats = &ndev->stats;
874 u32 ch = priv->channel;
875 u32 gerfl, sts;
876 u32 ridx = ch + RCANFD_RFFIFO_IDX;
877
878 gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL);
879 if ((gerfl & RCANFD_GERFL_EEF0) && (ch == 0)) {
880 netdev_dbg(ndev, "Ch0: ECC Error flag\n");
881 stats->tx_dropped++;
882 }
883 if ((gerfl & RCANFD_GERFL_EEF1) && (ch == 1)) {
884 netdev_dbg(ndev, "Ch1: ECC Error flag\n");
885 stats->tx_dropped++;
886 }
887 if (gerfl & RCANFD_GERFL_MES) {
888 sts = rcar_canfd_read(priv->base,
889 RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
890 if (sts & RCANFD_CFSTS_CFMLT) {
891 netdev_dbg(ndev, "Tx Message Lost flag\n");
892 stats->tx_dropped++;
893 rcar_canfd_write(priv->base,
894 RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX),
895 sts & ~RCANFD_CFSTS_CFMLT);
896 }
897
898 sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
899 if (sts & RCANFD_RFSTS_RFMLT) {
900 netdev_dbg(ndev, "Rx Message Lost flag\n");
901 stats->rx_dropped++;
902 rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx),
903 sts & ~RCANFD_RFSTS_RFMLT);
904 }
905 }
906 if (gpriv->fdmode && gerfl & RCANFD_GERFL_CMPOF) {
907 /* The Message Lost flag is set for the respective channel
908 * when this condition happens; the counters and flags are
909 * already updated by that point.
910 */
911 netdev_dbg(ndev, "global payload overflow interrupt\n");
912 }
913
914 /* Clear all global error interrupts. Only the affected channels'
915 * bits get cleared
916 */
917 rcar_canfd_write(priv->base, RCANFD_GERFL, 0);
918}
919
920static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
921 u16 txerr, u16 rxerr)
922{
923 struct rcar_canfd_channel *priv = netdev_priv(ndev);
924 struct net_device_stats *stats = &ndev->stats;
925 struct can_frame *cf;
926 struct sk_buff *skb;
927 u32 ch = priv->channel;
928
929 netdev_dbg(ndev, "ch erfl %x txerr %u rxerr %u\n", cerfl, txerr, rxerr);
930
931 /* Propagate the error condition to the CAN stack */
932 skb = alloc_can_err_skb(ndev, &cf);
933 if (!skb) {
934 stats->rx_dropped++;
935 return;
936 }
937
938 /* Channel error interrupts */
939 if (cerfl & RCANFD_CERFL_BEF) {
940 netdev_dbg(ndev, "Bus error\n");
941 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
942 cf->data[2] = CAN_ERR_PROT_UNSPEC;
943 priv->can.can_stats.bus_error++;
944 }
945 if (cerfl & RCANFD_CERFL_ADERR) {
946 netdev_dbg(ndev, "ACK Delimiter Error\n");
947 stats->tx_errors++;
948 cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL;
949 }
950 if (cerfl & RCANFD_CERFL_B0ERR) {
951 netdev_dbg(ndev, "Bit Error (dominant)\n");
952 stats->tx_errors++;
953 cf->data[2] |= CAN_ERR_PROT_BIT0;
954 }
955 if (cerfl & RCANFD_CERFL_B1ERR) {
956 netdev_dbg(ndev, "Bit Error (recessive)\n");
957 stats->tx_errors++;
958 cf->data[2] |= CAN_ERR_PROT_BIT1;
959 }
960 if (cerfl & RCANFD_CERFL_CERR) {
961 netdev_dbg(ndev, "CRC Error\n");
962 stats->rx_errors++;
963 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
964 }
965 if (cerfl & RCANFD_CERFL_AERR) {
966 netdev_dbg(ndev, "ACK Error\n");
967 stats->tx_errors++;
968 cf->can_id |= CAN_ERR_ACK;
969 cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
970 }
971 if (cerfl & RCANFD_CERFL_FERR) {
972 netdev_dbg(ndev, "Form Error\n");
973 stats->rx_errors++;
974 cf->data[2] |= CAN_ERR_PROT_FORM;
975 }
976 if (cerfl & RCANFD_CERFL_SERR) {
977 netdev_dbg(ndev, "Stuff Error\n");
978 stats->rx_errors++;
979 cf->data[2] |= CAN_ERR_PROT_STUFF;
980 }
981 if (cerfl & RCANFD_CERFL_ALF) {
982 netdev_dbg(ndev, "Arbitration lost Error\n");
983 priv->can.can_stats.arbitration_lost++;
984 cf->can_id |= CAN_ERR_LOSTARB;
985 cf->data[0] |= CAN_ERR_LOSTARB_UNSPEC;
986 }
987 if (cerfl & RCANFD_CERFL_BLF) {
988 netdev_dbg(ndev, "Bus Lock Error\n");
989 stats->rx_errors++;
990 cf->can_id |= CAN_ERR_BUSERROR;
991 }
992 if (cerfl & RCANFD_CERFL_EWF) {
993 netdev_dbg(ndev, "Error warning interrupt\n");
994 priv->can.state = CAN_STATE_ERROR_WARNING;
995 priv->can.can_stats.error_warning++;
996 cf->can_id |= CAN_ERR_CRTL;
997 cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
998 CAN_ERR_CRTL_RX_WARNING;
999 cf->data[6] = txerr;
1000 cf->data[7] = rxerr;
1001 }
1002 if (cerfl & RCANFD_CERFL_EPF) {
1003 netdev_dbg(ndev, "Error passive interrupt\n");
1004 priv->can.state = CAN_STATE_ERROR_PASSIVE;
1005 priv->can.can_stats.error_passive++;
1006 cf->can_id |= CAN_ERR_CRTL;
1007 cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
1008 CAN_ERR_CRTL_RX_PASSIVE;
1009 cf->data[6] = txerr;
1010 cf->data[7] = rxerr;
1011 }
1012 if (cerfl & RCANFD_CERFL_BOEF) {
1013 netdev_dbg(ndev, "Bus-off entry interrupt\n");
1014 rcar_canfd_tx_failure_cleanup(ndev);
1015 priv->can.state = CAN_STATE_BUS_OFF;
1016 priv->can.can_stats.bus_off++;
1017 can_bus_off(ndev);
1018 cf->can_id |= CAN_ERR_BUSOFF;
1019 }
1020 if (cerfl & RCANFD_CERFL_OVLF) {
1021 netdev_dbg(ndev,
1022 "Overload Frame Transmission error interrupt\n");
1023 stats->tx_errors++;
1024 cf->can_id |= CAN_ERR_PROT;
1025 cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
1026 }
1027
1028 /* Clear channel error interrupts that are handled */
1029 rcar_canfd_write(priv->base, RCANFD_CERFL(ch),
1030 RCANFD_CERFL_ERR(~cerfl));
1031 stats->rx_packets++;
1032 stats->rx_bytes += cf->can_dlc;
1033 netif_rx(skb);
1034}
1035
1036static void rcar_canfd_tx_done(struct net_device *ndev)
1037{
1038 struct rcar_canfd_channel *priv = netdev_priv(ndev);
1039 struct net_device_stats *stats = &ndev->stats;
1040 u32 sts;
1041 unsigned long flags;
1042 u32 ch = priv->channel;
1043
1044 do {
1045 u8 unsent, sent;
1046
1047 sent = priv->tx_tail % RCANFD_FIFO_DEPTH;
1048 stats->tx_packets++;
1049 stats->tx_bytes += priv->tx_len[sent];
1050 priv->tx_len[sent] = 0;
1051 can_get_echo_skb(ndev, sent);
1052
1053 spin_lock_irqsave(&priv->tx_lock, flags);
1054 priv->tx_tail++;
1055 sts = rcar_canfd_read(priv->base,
1056 RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
1057 unsent = RCANFD_CFSTS_CFMC(sts);
1058
1059 /* Wake producer only when there is room */
1060 if (unsent != RCANFD_FIFO_DEPTH)
1061 netif_wake_queue(ndev);
1062
1063 if (priv->tx_head - priv->tx_tail <= unsent) {
1064 spin_unlock_irqrestore(&priv->tx_lock, flags);
1065 break;
1066 }
1067 spin_unlock_irqrestore(&priv->tx_lock, flags);
1068
1069 } while (1);
1070
1071 /* Clear interrupt */
1072 rcar_canfd_write(priv->base, RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX),
1073 sts & ~RCANFD_CFSTS_CFTXIF);
1074 can_led_event(ndev, CAN_LED_EVENT_TX);
1075}
1076
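/* Ring bookkeeping sketch: tx_head and tx_tail are free-running
 * counters, so (tx_head - tx_tail) is the number of frames still in
 * flight. E.g. tx_head = 11, tx_tail = 8 means echo slots 8 % 8, 9 % 8
 * and 10 % 8 (i.e. 0, 1 and 2) of tx_len[] are awaiting completion.
 */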
1077static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id)
1078{
1079 struct rcar_canfd_global *gpriv = dev_id;
1080 struct net_device *ndev;
1081 struct rcar_canfd_channel *priv;
1082 u32 sts, gerfl;
1083 u32 ch, ridx;
1084
1085 /* Global error interrupts still indicate a condition specific
1086 * to a channel. RxFIFO interrupt is a global interrupt.
1087 */
1088 for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
1089 priv = gpriv->ch[ch];
1090 ndev = priv->ndev;
1091 ridx = ch + RCANFD_RFFIFO_IDX;
1092
1093 /* Global error interrupts */
1094 gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL);
1095 if (unlikely(RCANFD_GERFL_ERR(gpriv, gerfl)))
1096 rcar_canfd_global_error(ndev);
1097
1098 /* Handle Rx interrupts */
1099 sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
1100 if (likely(sts & RCANFD_RFSTS_RFIF)) {
1101 if (napi_schedule_prep(&priv->napi)) {
1102 /* Disable Rx FIFO interrupts */
1103 rcar_canfd_clear_bit(priv->base,
1104 RCANFD_RFCC(ridx),
1105 RCANFD_RFCC_RFIE);
1106 __napi_schedule(&priv->napi);
1107 }
1108 }
1109 }
1110 return IRQ_HANDLED;
1111}
1112
1113static void rcar_canfd_state_change(struct net_device *ndev,
1114 u16 txerr, u16 rxerr)
1115{
1116 struct rcar_canfd_channel *priv = netdev_priv(ndev);
1117 struct net_device_stats *stats = &ndev->stats;
1118 enum can_state rx_state, tx_state, state = priv->can.state;
1119 struct can_frame *cf;
1120 struct sk_buff *skb;
1121
1122 /* Handle transition from error to normal states */
1123 if (txerr < 96 && rxerr < 96)
1124 state = CAN_STATE_ERROR_ACTIVE;
1125 else if (txerr < 128 && rxerr < 128)
1126 state = CAN_STATE_ERROR_WARNING;
1127
1128 if (state != priv->can.state) {
1129 netdev_dbg(ndev, "state: new %d, old %d: txerr %u, rxerr %u\n",
1130 state, priv->can.state, txerr, rxerr);
1131 skb = alloc_can_err_skb(ndev, &cf);
1132 if (!skb) {
1133 stats->rx_dropped++;
1134 return;
1135 }
1136 tx_state = txerr >= rxerr ? state : 0;
1137 rx_state = txerr <= rxerr ? state : 0;
1138
1139 can_change_state(ndev, cf, tx_state, rx_state);
1140 stats->rx_packets++;
1141 stats->rx_bytes += cf->can_dlc;
1142 netif_rx(skb);
1143 }
1144}
1145
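/* Threshold sketch (per the usual CAN error-confinement rules): error
 * counters below 96 map to ERROR_ACTIVE and counters in 96..127 to
 * ERROR_WARNING; ERROR_PASSIVE (>= 128) and BUS_OFF are raised from the
 * EPF/BOEF handling in rcar_canfd_error(), so this helper only walks
 * the state back towards active.
 */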
1146static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
1147{
1148 struct rcar_canfd_global *gpriv = dev_id;
1149 struct net_device *ndev;
1150 struct rcar_canfd_channel *priv;
1151 u32 sts, ch, cerfl;
1152 u16 txerr, rxerr;
1153
1154 /* Common FIFO is a per channel resource */
1155 for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
1156 priv = gpriv->ch[ch];
1157 ndev = priv->ndev;
1158
1159 /* Channel error interrupts */
1160 cerfl = rcar_canfd_read(priv->base, RCANFD_CERFL(ch));
1161 sts = rcar_canfd_read(priv->base, RCANFD_CSTS(ch));
1162 txerr = RCANFD_CSTS_TECCNT(sts);
1163 rxerr = RCANFD_CSTS_RECCNT(sts);
1164 if (unlikely(RCANFD_CERFL_ERR(cerfl)))
1165 rcar_canfd_error(ndev, cerfl, txerr, rxerr);
1166
1167 /* Handle state change to lower states */
1168 if (unlikely((priv->can.state != CAN_STATE_ERROR_ACTIVE) &&
1169 (priv->can.state != CAN_STATE_BUS_OFF)))
1170 rcar_canfd_state_change(ndev, txerr, rxerr);
1171
1172 /* Handle Tx interrupts */
1173 sts = rcar_canfd_read(priv->base,
1174 RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
1175 if (likely(sts & RCANFD_CFSTS_CFTXIF))
1176 rcar_canfd_tx_done(ndev);
1177 }
1178 return IRQ_HANDLED;
1179}
1180
1181static void rcar_canfd_set_bittiming(struct net_device *dev)
1182{
1183 struct rcar_canfd_channel *priv = netdev_priv(dev);
1184 const struct can_bittiming *bt = &priv->can.bittiming;
1185 const struct can_bittiming *dbt = &priv->can.data_bittiming;
1186 u16 brp, sjw, tseg1, tseg2;
1187 u32 cfg;
1188 u32 ch = priv->channel;
1189
1190 /* Nominal bit timing settings */
1191 brp = bt->brp - 1;
1192 sjw = bt->sjw - 1;
1193 tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
1194 tseg2 = bt->phase_seg2 - 1;
1195
1196 if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
1197 /* CAN FD only mode */
1198 cfg = (RCANFD_NCFG_NTSEG1(tseg1) | RCANFD_NCFG_NBRP(brp) |
1199 RCANFD_NCFG_NSJW(sjw) | RCANFD_NCFG_NTSEG2(tseg2));
1200
1201 rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
1202 netdev_dbg(priv->ndev, "nrate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
1203 brp, sjw, tseg1, tseg2);
1204
1205 /* Data bit timing settings */
1206 brp = dbt->brp - 1;
1207 sjw = dbt->sjw - 1;
1208 tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
1209 tseg2 = dbt->phase_seg2 - 1;
1210
1211 cfg = (RCANFD_DCFG_DTSEG1(tseg1) | RCANFD_DCFG_DBRP(brp) |
1212 RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(tseg2));
1213
1214 rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg);
1215 netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
1216 brp, sjw, tseg1, tseg2);
1217 } else {
1218 /* Classical CAN only mode */
1219 cfg = (RCANFD_CFG_TSEG1(tseg1) | RCANFD_CFG_BRP(brp) |
1220 RCANFD_CFG_SJW(sjw) | RCANFD_CFG_TSEG2(tseg2));
1221
1222 rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
1223 netdev_dbg(priv->ndev,
1224 "rate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
1225 brp, sjw, tseg1, tseg2);
1226 }
1227}
1228
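/* Encoding note (a sketch of the conversions above): the hardware
 * fields hold value - 1, so e.g. can.bittiming.brp = 5 is written as
 * BRP = 4 and a phase_seg2 of 3 tq becomes TSEG2 = 2; tseg1 is the sum
 * prop_seg + phase_seg1 before the decrement.
 */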
1229static int rcar_canfd_start(struct net_device *ndev)
1230{
1231 struct rcar_canfd_channel *priv = netdev_priv(ndev);
1232 int err = -EOPNOTSUPP;
1233 u32 sts, ch = priv->channel;
1234 u32 ridx = ch + RCANFD_RFFIFO_IDX;
1235
1236 rcar_canfd_set_bittiming(ndev);
1237
1238 rcar_canfd_enable_channel_interrupts(priv);
1239
1240 /* Set channel to Operational mode */
1241 rcar_canfd_update_bit(priv->base, RCANFD_CCTR(ch),
1242 RCANFD_CCTR_CHMDC_MASK, RCANFD_CCTR_CHDMC_COPM);
1243
1244 /* Verify channel mode change */
1245 err = readl_poll_timeout((priv->base + RCANFD_CSTS(ch)), sts,
1246 (sts & RCANFD_CSTS_COMSTS), 2, 500000);
1247 if (err) {
1248 netdev_err(ndev, "channel %u communication state failed\n", ch);
1249 goto fail_mode_change;
1250 }
1251
1252 /* Enable Common & Rx FIFO */
1253 rcar_canfd_set_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX),
1254 RCANFD_CFCC_CFE);
1255 rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE);
1256
1257 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1258 return 0;
1259
1260fail_mode_change:
1261 rcar_canfd_disable_channel_interrupts(priv);
1262 return err;
1263}
1264
1265static int rcar_canfd_open(struct net_device *ndev)
1266{
1267 struct rcar_canfd_channel *priv = netdev_priv(ndev);
1268 struct rcar_canfd_global *gpriv = priv->gpriv;
1269 int err;
1270
1271 /* Peripheral clock is already enabled in probe */
1272 err = clk_prepare_enable(gpriv->can_clk);
1273 if (err) {
1274 netdev_err(ndev, "failed to enable CAN clock, error %d\n", err);
1275 goto out_clock;
1276 }
1277
1278 err = open_candev(ndev);
1279 if (err) {
1280 netdev_err(ndev, "open_candev() failed, error %d\n", err);
1281 goto out_can_clock;
1282 }
1283
1284 napi_enable(&priv->napi);
1285 err = rcar_canfd_start(ndev);
1286 if (err)
1287 goto out_close;
1288 netif_start_queue(ndev);
1289 can_led_event(ndev, CAN_LED_EVENT_OPEN);
1290 return 0;
1291out_close:
1292 napi_disable(&priv->napi);
1293 close_candev(ndev);
1294out_can_clock:
1295 clk_disable_unprepare(gpriv->can_clk);
1296out_clock:
1297 return err;
1298}
1299
1300static void rcar_canfd_stop(struct net_device *ndev)
1301{
1302 struct rcar_canfd_channel *priv = netdev_priv(ndev);
1303 int err;
1304 u32 sts, ch = priv->channel;
1305 u32 ridx = ch + RCANFD_RFFIFO_IDX;
1306
1307 /* Transition to channel reset mode */
1308 rcar_canfd_update_bit(priv->base, RCANFD_CCTR(ch),
1309 RCANFD_CCTR_CHMDC_MASK, RCANFD_CCTR_CHDMC_CRESET);
1310
1311 /* Check Channel reset mode */
1312 err = readl_poll_timeout((priv->base + RCANFD_CSTS(ch)), sts,
1313 (sts & RCANFD_CSTS_CRSTSTS), 2, 500000);
1314 if (err)
1315 netdev_err(ndev, "channel %u reset failed\n", ch);
1316
1317 rcar_canfd_disable_channel_interrupts(priv);
1318
1319 /* Disable Common & Rx FIFO */
1320 rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX),
1321 RCANFD_CFCC_CFE);
1322 rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE);
1323
1324 /* Set the state as STOPPED */
1325 priv->can.state = CAN_STATE_STOPPED;
1326}
1327
1328static int rcar_canfd_close(struct net_device *ndev)
1329{
1330 struct rcar_canfd_channel *priv = netdev_priv(ndev);
1331 struct rcar_canfd_global *gpriv = priv->gpriv;
1332
1333 netif_stop_queue(ndev);
1334 rcar_canfd_stop(ndev);
1335 napi_disable(&priv->napi);
1336 clk_disable_unprepare(gpriv->can_clk);
1337 close_candev(ndev);
1338 can_led_event(ndev, CAN_LED_EVENT_STOP);
1339 return 0;
1340}
1341
1342static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
1343 struct net_device *ndev)
1344{
1345 struct rcar_canfd_channel *priv = netdev_priv(ndev);
1346 struct canfd_frame *cf = (struct canfd_frame *)skb->data;
1347 u32 sts = 0, id, dlc;
1348 unsigned long flags;
1349 u32 ch = priv->channel;
1350
1351 if (can_dropped_invalid_skb(ndev, skb))
1352 return NETDEV_TX_OK;
1353
1354 if (cf->can_id & CAN_EFF_FLAG) {
1355 id = cf->can_id & CAN_EFF_MASK;
1356 id |= RCANFD_CFID_CFIDE;
1357 } else {
1358 id = cf->can_id & CAN_SFF_MASK;
1359 }
1360
1361 if (cf->can_id & CAN_RTR_FLAG)
1362 id |= RCANFD_CFID_CFRTR;
1363
1364 dlc = RCANFD_CFPTR_CFDLC(can_len2dlc(cf->len));
1365
1366 if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
1367 rcar_canfd_write(priv->base,
1368 RCANFD_F_CFID(ch, RCANFD_CFFIFO_IDX), id);
1369 rcar_canfd_write(priv->base,
1370 RCANFD_F_CFPTR(ch, RCANFD_CFFIFO_IDX), dlc);
1371
1372 if (can_is_canfd_skb(skb)) {
1373 /* CAN FD frame format */
1374 sts |= RCANFD_CFFDCSTS_CFFDF;
1375 if (cf->flags & CANFD_BRS)
1376 sts |= RCANFD_CFFDCSTS_CFBRS;
1377
1378 if (priv->can.state == CAN_STATE_ERROR_PASSIVE)
1379 sts |= RCANFD_CFFDCSTS_CFESI;
1380 }
1381
1382 rcar_canfd_write(priv->base,
1383 RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), sts);
1384
1385 rcar_canfd_put_data(priv, cf,
1386 RCANFD_F_CFDF(ch, RCANFD_CFFIFO_IDX, 0));
1387 } else {
1388 rcar_canfd_write(priv->base,
1389 RCANFD_C_CFID(ch, RCANFD_CFFIFO_IDX), id);
1390 rcar_canfd_write(priv->base,
1391 RCANFD_C_CFPTR(ch, RCANFD_CFFIFO_IDX), dlc);
1392 rcar_canfd_put_data(priv, cf,
1393 RCANFD_C_CFDF(ch, RCANFD_CFFIFO_IDX, 0));
1394 }
1395
1396 priv->tx_len[priv->tx_head % RCANFD_FIFO_DEPTH] = cf->len;
1397 can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH);
1398
1399 spin_lock_irqsave(&priv->tx_lock, flags);
1400 priv->tx_head++;
1401
1402 /* Stop the queue if we've filled all FIFO entries */
1403 if (priv->tx_head - priv->tx_tail >= RCANFD_FIFO_DEPTH)
1404 netif_stop_queue(ndev);
1405
1406 /* Start Tx: Write 0xff to CFPC to increment the CPU-side
1407 * pointer for the Common FIFO
1408 */
1409 rcar_canfd_write(priv->base,
1410 RCANFD_CFPCTR(ch, RCANFD_CFFIFO_IDX), 0xff);
1411
1412 spin_unlock_irqrestore(&priv->tx_lock, flags);
1413 return NETDEV_TX_OK;
1414}
1415
1416static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
1417{
1418 struct net_device_stats *stats = &priv->ndev->stats;
1419 struct canfd_frame *cf;
1420 struct sk_buff *skb;
1421 u32 sts = 0, id, dlc;
1422 u32 ch = priv->channel;
1423 u32 ridx = ch + RCANFD_RFFIFO_IDX;
1424
1425 if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
1426 id = rcar_canfd_read(priv->base, RCANFD_F_RFID(ridx));
1427 dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(ridx));
1428
1429 sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(ridx));
1430 if (sts & RCANFD_RFFDSTS_RFFDF)
1431 skb = alloc_canfd_skb(priv->ndev, &cf);
1432 else
1433 skb = alloc_can_skb(priv->ndev,
1434 (struct can_frame **)&cf);
1435 } else {
1436 id = rcar_canfd_read(priv->base, RCANFD_C_RFID(ridx));
1437 dlc = rcar_canfd_read(priv->base, RCANFD_C_RFPTR(ridx));
1438 skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf);
1439 }
1440
1441 if (!skb) {
1442 stats->rx_dropped++;
1443 return;
1444 }
1445
1446 if (id & RCANFD_RFID_RFIDE)
1447 cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
1448 else
1449 cf->can_id = id & CAN_SFF_MASK;
1450
1451 if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
1452 if (sts & RCANFD_RFFDSTS_RFFDF)
1453 cf->len = can_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
1454 else
1455 cf->len = get_can_dlc(RCANFD_RFPTR_RFDLC(dlc));
1456
1457 if (sts & RCANFD_RFFDSTS_RFESI) {
1458 cf->flags |= CANFD_ESI;
1459 netdev_dbg(priv->ndev, "ESI Error\n");
1460 }
1461
1462 if (!(sts & RCANFD_RFFDSTS_RFFDF) && (id & RCANFD_RFID_RFRTR)) {
1463 cf->can_id |= CAN_RTR_FLAG;
1464 } else {
1465 if (sts & RCANFD_RFFDSTS_RFBRS)
1466 cf->flags |= CANFD_BRS;
1467
1468 rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(ridx, 0));
1469 }
1470 } else {
1471 cf->len = get_can_dlc(RCANFD_RFPTR_RFDLC(dlc));
1472 if (id & RCANFD_RFID_RFRTR)
1473 cf->can_id |= CAN_RTR_FLAG;
1474 else
1475 rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0));
1476 }
1477
1478 /* Write 0xff to RFPC to increment the CPU-side
1479 * pointer of the Rx FIFO
1480 */
1481 rcar_canfd_write(priv->base, RCANFD_RFPCTR(ridx), 0xff);
1482
1483 can_led_event(priv->ndev, CAN_LED_EVENT_RX);
1484
1485 stats->rx_bytes += cf->len;
1486 stats->rx_packets++;
1487 netif_receive_skb(skb);
1488}
1489
1490static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
1491{
1492 struct rcar_canfd_channel *priv =
1493 container_of(napi, struct rcar_canfd_channel, napi);
1494 int num_pkts;
1495 u32 sts;
1496 u32 ch = priv->channel;
1497 u32 ridx = ch + RCANFD_RFFIFO_IDX;
1498
1499 for (num_pkts = 0; num_pkts < quota; num_pkts++) {
1500 sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
1501 /* Check FIFO empty condition */
1502 if (sts & RCANFD_RFSTS_RFEMP)
1503 break;
1504
1505 rcar_canfd_rx_pkt(priv);
1506
1507 /* Clear interrupt bit */
1508 if (sts & RCANFD_RFSTS_RFIF)
1509 rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx),
1510 sts & ~RCANFD_RFSTS_RFIF);
1511 }
1512
1513 /* All packets processed */
1514 if (num_pkts < quota) {
1515 napi_complete(napi);
1516 /* Enable Rx FIFO interrupts */
1517 rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
1518 RCANFD_RFCC_RFIE);
1519 }
1520 return num_pkts;
1521}
1522
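/* Poll-budget sketch: with RCANFD_NAPI_WEIGHT = 8, a completely full
 * 8-entry Rx FIFO consumes the whole quota, so NAPI keeps polling and
 * RFIE stays masked; any shorter run hits RFEMP first, completes NAPI
 * and re-arms the Rx FIFO interrupt.
 */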
1523static int rcar_canfd_do_set_mode(struct net_device *ndev, enum can_mode mode)
1524{
1525 int err;
1526
1527 switch (mode) {
1528 case CAN_MODE_START:
1529 err = rcar_canfd_start(ndev);
1530 if (err)
1531 return err;
1532 netif_wake_queue(ndev);
1533 return 0;
1534 default:
1535 return -EOPNOTSUPP;
1536 }
1537}
1538
1539static int rcar_canfd_get_berr_counter(const struct net_device *dev,
1540 struct can_berr_counter *bec)
1541{
1542 struct rcar_canfd_channel *priv = netdev_priv(dev);
1543 u32 val, ch = priv->channel;
1544
1545 /* Peripheral clock is already enabled in probe */
1546 val = rcar_canfd_read(priv->base, RCANFD_CSTS(ch));
1547 bec->txerr = RCANFD_CSTS_TECCNT(val);
1548 bec->rxerr = RCANFD_CSTS_RECCNT(val);
1549 return 0;
1550}
1551
1552static const struct net_device_ops rcar_canfd_netdev_ops = {
1553 .ndo_open = rcar_canfd_open,
1554 .ndo_stop = rcar_canfd_close,
1555 .ndo_start_xmit = rcar_canfd_start_xmit,
1556 .ndo_change_mtu = can_change_mtu,
1557};
1558
1559static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
1560 u32 fcan_freq)
1561{
1562 struct platform_device *pdev = gpriv->pdev;
1563 struct rcar_canfd_channel *priv;
1564 struct net_device *ndev;
1565 int err = -ENODEV;
1566
1567 ndev = alloc_candev(sizeof(*priv), RCANFD_FIFO_DEPTH);
1568 if (!ndev) {
1569 dev_err(&pdev->dev, "alloc_candev() failed\n");
1570 err = -ENOMEM;
1571 goto fail;
1572 }
1573 priv = netdev_priv(ndev);
1574
1575 ndev->netdev_ops = &rcar_canfd_netdev_ops;
1576 ndev->flags |= IFF_ECHO;
1577 priv->ndev = ndev;
1578 priv->base = gpriv->base;
1579 priv->channel = ch;
1580 priv->can.clock.freq = fcan_freq;
1581 dev_info(&pdev->dev, "can_clk rate is %u\n", priv->can.clock.freq);
1582
1583 if (gpriv->fdmode) {
1584 priv->can.bittiming_const = &rcar_canfd_nom_bittiming_const;
1585 priv->can.data_bittiming_const =
1586 &rcar_canfd_data_bittiming_const;
1587
1588 /* Controller starts in CAN FD only mode */
1589 can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
1590 priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
1591 } else {
1592 /* Controller starts in Classical CAN only mode */
1593 priv->can.bittiming_const = &rcar_canfd_bittiming_const;
1594 priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
1595 }
1596
1597 priv->can.do_set_mode = rcar_canfd_do_set_mode;
1598 priv->can.do_get_berr_counter = rcar_canfd_get_berr_counter;
1599 priv->gpriv = gpriv;
1600 SET_NETDEV_DEV(ndev, &pdev->dev);
1601
1602 netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll,
1603 RCANFD_NAPI_WEIGHT);
1604 err = register_candev(ndev);
1605 if (err) {
1606 dev_err(&pdev->dev,
1607 "register_candev() failed, error %d\n", err);
1608 goto fail_candev;
1609 }
1610 spin_lock_init(&priv->tx_lock);
1611 devm_can_led_init(ndev);
1612 gpriv->ch[priv->channel] = priv;
1613 dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel);
1614 return 0;
1615
1616fail_candev:
1617 netif_napi_del(&priv->napi);
1618 free_candev(ndev);
1619fail:
1620 return err;
1621}
1622
1623static void rcar_canfd_channel_remove(struct rcar_canfd_global *gpriv, u32 ch)
1624{
1625 struct rcar_canfd_channel *priv = gpriv->ch[ch];
1626
1627 if (priv) {
1628 unregister_candev(priv->ndev);
1629 netif_napi_del(&priv->napi);
1630 free_candev(priv->ndev);
1631 }
1632}
1633
1634static int rcar_canfd_probe(struct platform_device *pdev)
1635{
1636 struct resource *mem;
1637 void __iomem *addr;
1638 u32 sts, ch, fcan_freq;
1639 struct rcar_canfd_global *gpriv;
1640 struct device_node *of_child;
1641 unsigned long channels_mask = 0;
1642 int err, ch_irq, g_irq;
1643 bool fdmode = true; /* CAN FD only mode - default */
1644
1645 if (of_property_read_bool(pdev->dev.of_node, "renesas,no-can-fd"))
1646 fdmode = false; /* Classical CAN only mode */
1647
1648 of_child = of_get_child_by_name(pdev->dev.of_node, "channel0");
1649 if (of_child && of_device_is_available(of_child))
1650 channels_mask |= BIT(0); /* Channel 0 */
1651
1652 of_child = of_get_child_by_name(pdev->dev.of_node, "channel1");
1653 if (of_child && of_device_is_available(of_child))
1654 channels_mask |= BIT(1); /* Channel 1 */
1655
1656 ch_irq = platform_get_irq(pdev, 0);
1657 if (ch_irq < 0) {
1658 dev_err(&pdev->dev, "no Channel IRQ resource\n");
1659 err = ch_irq;
1660 goto fail_dev;
1661 }
1662
1663 g_irq = platform_get_irq(pdev, 1);
1664 if (g_irq < 0) {
1665 dev_err(&pdev->dev, "no Global IRQ resource\n");
1666 err = g_irq;
1667 goto fail_dev;
1668 }
1669
1670 /* Global controller context */
1671 gpriv = devm_kzalloc(&pdev->dev, sizeof(*gpriv), GFP_KERNEL);
1672 if (!gpriv) {
1673 err = -ENOMEM;
1674 goto fail_dev;
1675 }
1676 gpriv->pdev = pdev;
1677 gpriv->channels_mask = channels_mask;
1678 gpriv->fdmode = fdmode;
1679
1680 /* Peripheral clock */
1681 gpriv->clkp = devm_clk_get(&pdev->dev, "fck");
1682 if (IS_ERR(gpriv->clkp)) {
1683 err = PTR_ERR(gpriv->clkp);
1684 dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
1685 err);
1686 goto fail_dev;
1687 }
1688
1689 /* fCAN clock: pick the External clock; if it is not available,
1690 * fall back to the CANFD clock
1691 */
1692 gpriv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
1693 if (IS_ERR(gpriv->can_clk) || (clk_get_rate(gpriv->can_clk) == 0)) {
1694 gpriv->can_clk = devm_clk_get(&pdev->dev, "canfd");
1695 if (IS_ERR(gpriv->can_clk)) {
1696 err = PTR_ERR(gpriv->can_clk);
1697 dev_err(&pdev->dev,
1698 "cannot get canfd clock, error %d\n", err);
1699 goto fail_dev;
1700 }
1701 gpriv->fcan = RCANFD_CANFDCLK;
1702
1703 } else {
1704 gpriv->fcan = RCANFD_EXTCLK;
1705 }
1706 fcan_freq = clk_get_rate(gpriv->can_clk);
1707
1708 if (gpriv->fcan == RCANFD_CANFDCLK)
1709 /* CANFD clock is further divided by 2 within the IP */
1710 fcan_freq /= 2;
1711
1712 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1713 addr = devm_ioremap_resource(&pdev->dev, mem);
1714 if (IS_ERR(addr)) {
1715 err = PTR_ERR(addr);
1716 goto fail_dev;
1717 }
1718 gpriv->base = addr;
1719
1720 /* Request IRQ that's common for both channels */
1721 err = devm_request_irq(&pdev->dev, ch_irq,
1722 rcar_canfd_channel_interrupt, 0,
1723 "canfd.chn", gpriv);
1724 if (err) {
1725 dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
1726 ch_irq, err);
1727 goto fail_dev;
1728 }
1729 err = devm_request_irq(&pdev->dev, g_irq,
1730 rcar_canfd_global_interrupt, 0,
1731 "canfd.gbl", gpriv);
1732 if (err) {
1733 dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
1734 g_irq, err);
1735 goto fail_dev;
1736 }
1737
1738 /* Enable peripheral clock for register access */
1739 err = clk_prepare_enable(gpriv->clkp);
1740 if (err) {
1741 dev_err(&pdev->dev,
1742 "failed to enable peripheral clock, error %d\n", err);
1743 goto fail_dev;
1744 }
1745
1746 err = rcar_canfd_reset_controller(gpriv);
1747 if (err) {
1748 dev_err(&pdev->dev, "reset controller failed\n");
1749 goto fail_clk;
1750 }
1751
1752 /* Controller in Global reset & Channel reset mode */
1753 rcar_canfd_configure_controller(gpriv);
1754
1755 /* Configure per channel attributes */
1756 for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
1757 /* Configure Channel's Rx fifo */
1758 rcar_canfd_configure_rx(gpriv, ch);
1759
1760 /* Configure Channel's Tx (Common) fifo */
1761 rcar_canfd_configure_tx(gpriv, ch);
1762
1763 /* Configure receive rules */
1764 rcar_canfd_configure_afl_rules(gpriv, ch);
1765 }
1766
1767 /* Configure common interrupts */
1768 rcar_canfd_enable_global_interrupts(gpriv);
1769
1770 /* Start Global operation mode */
1771 rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GMDC_MASK,
1772 RCANFD_GCTR_GMDC_GOPM);
1773
1774 /* Verify mode change */
1775 err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
1776 !(sts & RCANFD_GSTS_GNOPM), 2, 500000);
1777 if (err) {
1778 dev_err(&pdev->dev, "global operational mode failed\n");
1779 goto fail_mode;
1780 }
1781
1782 for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
1783 err = rcar_canfd_channel_probe(gpriv, ch, fcan_freq);
1784 if (err)
1785 goto fail_channel;
1786 }
1787
1788 platform_set_drvdata(pdev, gpriv);
1789 dev_info(&pdev->dev, "global operational state (clk %d, fdmode %d)\n",
1790 gpriv->fcan, gpriv->fdmode);
1791 return 0;
1792
1793fail_channel:
1794 for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
1795 rcar_canfd_channel_remove(gpriv, ch);
1796fail_mode:
1797 rcar_canfd_disable_global_interrupts(gpriv);
1798fail_clk:
1799 clk_disable_unprepare(gpriv->clkp);
1800fail_dev:
1801 return err;
1802}
1803
1804static int rcar_canfd_remove(struct platform_device *pdev)
1805{
1806 struct rcar_canfd_global *gpriv = platform_get_drvdata(pdev);
1807 u32 ch;
1808
1809 rcar_canfd_reset_controller(gpriv);
1810 rcar_canfd_disable_global_interrupts(gpriv);
1811
1812 for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
1813 rcar_canfd_disable_channel_interrupts(gpriv->ch[ch]);
1814 rcar_canfd_channel_remove(gpriv, ch);
1815 }
1816
1817 /* Enter global sleep mode */
1818 rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR);
1819 clk_disable_unprepare(gpriv->clkp);
1820 return 0;
1821}
1822
1823static int __maybe_unused rcar_canfd_suspend(struct device *dev)
1824{
1825 return 0;
1826}
1827
1828static int __maybe_unused rcar_canfd_resume(struct device *dev)
1829{
1830 return 0;
1831}
1832
1833static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
1834 rcar_canfd_resume);
1835
1836static const struct of_device_id rcar_canfd_of_table[] = {
1837 { .compatible = "renesas,rcar-gen3-canfd" },
1838 { }
1839};
1840
1841MODULE_DEVICE_TABLE(of, rcar_canfd_of_table);
1842
1843static struct platform_driver rcar_canfd_driver = {
1844 .driver = {
1845 .name = RCANFD_DRV_NAME,
1846 .of_match_table = of_match_ptr(rcar_canfd_of_table),
1847 .pm = &rcar_canfd_pm_ops,
1848 },
1849 .probe = rcar_canfd_probe,
1850 .remove = rcar_canfd_remove,
1851};
1852
1853module_platform_driver(rcar_canfd_driver);
1854
1855MODULE_AUTHOR("Ramesh Shanmugasundaram <ramesh.shanmugasundaram@bp.renesas.com>");
1856MODULE_LICENSE("GPL");
1857MODULE_DESCRIPTION("CAN FD driver for Renesas R-Car SoC");
1858MODULE_ALIAS("platform:" RCANFD_DRV_NAME);
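/* Binding sketch (illustrative only; the node name, unit address and
 * exact wiring below are assumptions, see the DT bindings document for
 * the authoritative format). The probe code above expects an "fck"
 * peripheral clock, a "canfd" or "can_clk" fCAN source, the optional
 * "renesas,no-can-fd" flag and per-channel child nodes:
 *
 *	canfd: can@e66c0000 {
 *		compatible = "renesas,rcar-gen3-canfd";
 *		interrupts = <...>, <...>;	// channel and global IRQs
 *		clock-names = "fck", "canfd", "can_clk";
 *		channel0 { status = "okay"; };
 *		channel1 { status = "disabled"; };
 *	};
 */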
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
index 76513dd780c7..79572457a2d6 100644
--- a/drivers/net/can/sja1000/tscan1.c
+++ b/drivers/net/can/sja1000/tscan1.c
@@ -203,14 +203,4 @@ static struct isa_driver tscan1_isa_driver = {
 	},
 };
 
-static int __init tscan1_init(void)
-{
-	return isa_register_driver(&tscan1_isa_driver, TSCAN1_MAXDEV);
-}
-module_init(tscan1_init);
-
-static void __exit tscan1_exit(void)
-{
-	isa_unregister_driver(&tscan1_isa_driver);
-}
-module_exit(tscan1_exit);
+module_isa_driver(tscan1_isa_driver, TSCAN1_MAXDEV);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 9a3f15cb7ef4..eb7173713bbc 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -354,7 +354,7 @@ static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct slcan *sl = netdev_priv(dev);
 
-	if (skb->len != sizeof(struct can_frame))
+	if (skb->len != CAN_MTU)
 		goto out;
 
 	spin_lock(&sl->lock);
@@ -442,7 +442,7 @@ static void slc_setup(struct net_device *dev)
 	dev->addr_len = 0;
 	dev->tx_queue_len = 10;
 
-	dev->mtu = sizeof(struct can_frame);
+	dev->mtu = CAN_MTU;
 	dev->type = ARPHRD_CAN;
 
 	/* New-style flags. */
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index cf36d26ef002..f3f05fea8e1f 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1145,8 +1145,11 @@ static int mcp251x_can_probe(struct spi_device *spi)
 
 	/* Here is OK to not lock the MCP, no one knows about it yet */
 	ret = mcp251x_hw_probe(spi);
-	if (ret)
+	if (ret) {
+		if (ret == -ENODEV)
+			dev_err(&spi->dev, "Cannot initialize MCP%x. Wrong wiring?\n", priv->model);
 		goto error_probe;
+	}
 
 	mcp251x_hw_sleep(spi);
 
@@ -1156,6 +1159,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
 
 	devm_can_led_init(net);
 
+	netdev_info(net, "MCP%x successfully initialized.\n", priv->model);
 	return 0;
 
 error_probe:
@@ -1168,6 +1172,7 @@ out_clk:
 out_free:
 	free_candev(net);
 
+	dev_err(&spi->dev, "Probe failed, err=%d\n", -ret);
 	return ret;
 }
 
1173 1178
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index acb0c8490673..6f0cbc38782e 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -44,7 +44,9 @@ enum gs_usb_breq {
44 GS_USB_BREQ_MODE, 44 GS_USB_BREQ_MODE,
45 GS_USB_BREQ_BERR, 45 GS_USB_BREQ_BERR,
46 GS_USB_BREQ_BT_CONST, 46 GS_USB_BREQ_BT_CONST,
47 GS_USB_BREQ_DEVICE_CONFIG 47 GS_USB_BREQ_DEVICE_CONFIG,
48 GS_USB_BREQ_TIMESTAMP,
49 GS_USB_BREQ_IDENTIFY,
48}; 50};
49 51
50enum gs_can_mode { 52enum gs_can_mode {
@@ -63,6 +65,11 @@ enum gs_can_state {
 	GS_CAN_STATE_SLEEPING
 };
 
+enum gs_can_identify_mode {
+	GS_CAN_IDENTIFY_OFF = 0,
+	GS_CAN_IDENTIFY_ON
+};
+
 /* data types passed between host and device */
 struct gs_host_config {
 	u32 byte_order;
@@ -82,10 +89,10 @@ struct gs_device_config {
 } __packed;
 
 #define GS_CAN_MODE_NORMAL               0
-#define GS_CAN_MODE_LISTEN_ONLY          (1<<0)
-#define GS_CAN_MODE_LOOP_BACK            (1<<1)
-#define GS_CAN_MODE_TRIPLE_SAMPLE        (1<<2)
-#define GS_CAN_MODE_ONE_SHOT             (1<<3)
+#define GS_CAN_MODE_LISTEN_ONLY          BIT(0)
+#define GS_CAN_MODE_LOOP_BACK            BIT(1)
+#define GS_CAN_MODE_TRIPLE_SAMPLE        BIT(2)
+#define GS_CAN_MODE_ONE_SHOT             BIT(3)
 
 struct gs_device_mode {
 	u32 mode;
@@ -106,10 +113,16 @@ struct gs_device_bittiming {
 	u32 brp;
 } __packed;
 
-#define GS_CAN_FEATURE_LISTEN_ONLY      (1<<0)
-#define GS_CAN_FEATURE_LOOP_BACK        (1<<1)
-#define GS_CAN_FEATURE_TRIPLE_SAMPLE    (1<<2)
-#define GS_CAN_FEATURE_ONE_SHOT         (1<<3)
+struct gs_identify_mode {
+	u32 mode;
+} __packed;
+
+#define GS_CAN_FEATURE_LISTEN_ONLY      BIT(0)
+#define GS_CAN_FEATURE_LOOP_BACK        BIT(1)
+#define GS_CAN_FEATURE_TRIPLE_SAMPLE    BIT(2)
+#define GS_CAN_FEATURE_ONE_SHOT         BIT(3)
+#define GS_CAN_FEATURE_HW_TIMESTAMP     BIT(4)
+#define GS_CAN_FEATURE_IDENTIFY         BIT(5)
 
 struct gs_device_bt_const {
 	u32 feature;
@@ -214,7 +227,8 @@ static void gs_free_tx_context(struct gs_tx_context *txc)
 
 /* Get a tx context by id.
  */
-static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev, unsigned int id)
+static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev,
+					       unsigned int id)
 {
 	unsigned long flags;
 
@@ -457,7 +471,8 @@ static void gs_usb_xmit_callback(struct urb *urb)
 	netif_wake_queue(netdev);
 }
 
-static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
+				     struct net_device *netdev)
 {
 	struct gs_can *dev = netdev_priv(netdev);
 	struct net_device_stats *stats = &dev->netdev->stats;
@@ -663,7 +678,8 @@ static int gs_can_open(struct net_device *netdev)
 	rc = usb_control_msg(interface_to_usbdev(dev->iface),
 			     usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
 			     GS_USB_BREQ_MODE,
-			     USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+			     USB_DIR_OUT | USB_TYPE_VENDOR |
+			     USB_RECIP_INTERFACE,
 			     dev->channel,
 			     0,
 			     dm,
@@ -726,7 +742,59 @@ static const struct net_device_ops gs_usb_netdev_ops = {
 	.ndo_change_mtu = can_change_mtu,
 };
 
-static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf)
+static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
+{
+	struct gs_can *dev = netdev_priv(netdev);
+	struct gs_identify_mode imode;
+	int rc;
+
+	if (do_identify)
+		imode.mode = GS_CAN_IDENTIFY_ON;
+	else
+		imode.mode = GS_CAN_IDENTIFY_OFF;
+
+	rc = usb_control_msg(interface_to_usbdev(dev->iface),
+			     usb_sndctrlpipe(interface_to_usbdev(dev->iface),
+					     0),
+			     GS_USB_BREQ_IDENTIFY,
+			     USB_DIR_OUT | USB_TYPE_VENDOR |
+			     USB_RECIP_INTERFACE,
+			     dev->channel,
+			     0,
+			     &imode,
+			     sizeof(imode),
+			     100);
+
+	return (rc > 0) ? 0 : rc;
+}
+
+/* blink LEDs for finding this interface */
+static int gs_usb_set_phys_id(struct net_device *dev,
+			      enum ethtool_phys_id_state state)
+{
+	int rc = 0;
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_ON);
+		break;
+	case ETHTOOL_ID_INACTIVE:
+		rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_OFF);
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+static const struct ethtool_ops gs_usb_ethtool_ops = {
+	.set_phys_id = gs_usb_set_phys_id,
+};
+
+static struct gs_can *gs_make_candev(unsigned int channel,
+				     struct usb_interface *intf,
+				     struct gs_device_config *dconf)
 {
 	struct gs_can *dev;
 	struct net_device *netdev;
@@ -814,10 +882,14 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface
 	if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT)
 		dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
 
-	kfree(bt_const);
-
 	SET_NETDEV_DEV(netdev, &intf->dev);
 
+	if (dconf->sw_version > 1)
+		if (bt_const->feature & GS_CAN_FEATURE_IDENTIFY)
+			netdev->ethtool_ops = &gs_usb_ethtool_ops;
+
+	kfree(bt_const);
+
 	rc = register_candev(dev->netdev);
 	if (rc) {
 		free_candev(dev->netdev);
@@ -835,19 +907,16 @@ static void gs_destroy_candev(struct gs_can *dev)
 	free_candev(dev->netdev);
 }
 
-static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int gs_usb_probe(struct usb_interface *intf,
+			const struct usb_device_id *id)
 {
 	struct gs_usb *dev;
 	int rc = -ENOMEM;
 	unsigned int icount, i;
-	struct gs_host_config *hconf;
-	struct gs_device_config *dconf;
-
-	hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
-	if (!hconf)
-		return -ENOMEM;
-
-	hconf->byte_order = 0x0000beef;
+	struct gs_host_config hconf = {
+		.byte_order = 0x0000beef,
+	};
+	struct gs_device_config dconf;
 
 	/* send host config */
 	rc = usb_control_msg(interface_to_usbdev(intf),
@@ -856,22 +925,16 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
 			     USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
 			     1,
 			     intf->altsetting[0].desc.bInterfaceNumber,
-			     hconf,
-			     sizeof(*hconf),
+			     &hconf,
+			     sizeof(hconf),
 			     1000);
 
-	kfree(hconf);
-
 	if (rc < 0) {
 		dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
 			rc);
 		return rc;
 	}
 
-	dconf = kmalloc(sizeof(*dconf), GFP_KERNEL);
-	if (!dconf)
-		return -ENOMEM;
-
 	/* read device config */
 	rc = usb_control_msg(interface_to_usbdev(intf),
 			     usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
@@ -879,22 +942,16 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
879 USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 942 USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
880 1, 943 1,
881 intf->altsetting[0].desc.bInterfaceNumber, 944 intf->altsetting[0].desc.bInterfaceNumber,
882 dconf, 945 &dconf,
883 sizeof(*dconf), 946 sizeof(dconf),
884 1000); 947 1000);
885 if (rc < 0) { 948 if (rc < 0) {
886 dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n", 949 dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
887 rc); 950 rc);
888
889 kfree(dconf);
890
891 return rc; 951 return rc;
892 } 952 }
893 953
894 icount = dconf->icount+1; 954 icount = dconf.icount + 1;
895
896 kfree(dconf);
897
898 dev_info(&intf->dev, "Configuring for %d interfaces\n", icount); 955 dev_info(&intf->dev, "Configuring for %d interfaces\n", icount);
899 956
900 if (icount > GS_MAX_INTF) { 957 if (icount > GS_MAX_INTF) {
@@ -915,7 +972,7 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
915 dev->udev = interface_to_usbdev(intf); 972 dev->udev = interface_to_usbdev(intf);
916 973
917 for (i = 0; i < icount; i++) { 974 for (i = 0; i < icount; i++) {
918 dev->canch[i] = gs_make_candev(i, intf); 975 dev->canch[i] = gs_make_candev(i, intf, &dconf);
919 if (IS_ERR_OR_NULL(dev->canch[i])) { 976 if (IS_ERR_OR_NULL(dev->canch[i])) {
920 /* save error code to return later */ 977 /* save error code to return later */
921 rc = PTR_ERR(dev->canch[i]); 978 rc = PTR_ERR(dev->canch[i]);
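One caveat with moving hconf and dconf (and the gs_identify_mode above) onto the stack: buffers handed to usb_control_msg() may be DMA-mapped by the USB core, and on-stack memory is not guaranteed to be DMA-able, especially once kernel stacks are virtually mapped. A hedged sketch of the heap-allocated alternative for the host-config transfer; GS_USB_BREQ_HOST_FORMAT is assumed to be the request used in the elided hunk above:

/* Sketch, not part of this patch: keep the control-transfer buffer
 * off the stack so it stays DMA-able.
 */
static int gs_usb_send_host_config(struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct gs_host_config *hconf;
	int rc;

	hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
	if (!hconf)
		return -ENOMEM;

	hconf->byte_order = 0x0000beef;

	rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			     GS_USB_BREQ_HOST_FORMAT,
			     USB_DIR_OUT | USB_TYPE_VENDOR |
			     USB_RECIP_INTERFACE,
			     1, intf->altsetting[0].desc.bInterfaceNumber,
			     hconf, sizeof(*hconf), 1000);

	kfree(hconf);
	return rc < 0 ? rc : 0;
}
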
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 200663c43ce9..8f4544394f44 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -9,14 +9,6 @@ config NET_DSA_MV88E6060
9 This enables support for the Marvell 88E6060 ethernet switch 9 This enables support for the Marvell 88E6060 ethernet switch
10 chip. 10 chip.
11 11
12config NET_DSA_MV88E6XXX
13 tristate "Marvell 88E6xxx Ethernet switch chip support"
14 depends on NET_DSA
15 select NET_DSA_TAG_EDSA
16 ---help---
17 This enables support for most of the Marvell 88E6xxx models of
18 Ethernet switch chips, except 88E6060.
19
20config NET_DSA_BCM_SF2 12config NET_DSA_BCM_SF2
21 tristate "Broadcom Starfighter 2 Ethernet switch support" 13 tristate "Broadcom Starfighter 2 Ethernet switch support"
22 depends on HAS_IOMEM && NET_DSA 14 depends on HAS_IOMEM && NET_DSA
@@ -28,4 +20,8 @@ config NET_DSA_BCM_SF2
28 This enables support for the Broadcom Starfighter 2 Ethernet 20 This enables support for the Broadcom Starfighter 2 Ethernet
29 switch chips. 21 switch chips.
30 22
23source "drivers/net/dsa/b53/Kconfig"
24
25source "drivers/net/dsa/mv88e6xxx/Kconfig"
26
31endmenu 27endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 76b751dd9efd..ca1e71b853a6 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -1,3 +1,5 @@
1obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o 1obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
2obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
3obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o 2obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o
3
4obj-y += b53/
5obj-y += mv88e6xxx/
diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig
new file mode 100644
index 000000000000..27f32a50df57
--- /dev/null
+++ b/drivers/net/dsa/b53/Kconfig
@@ -0,0 +1,33 @@
1menuconfig B53
2 tristate "Broadcom BCM53xx managed switch support"
3 depends on NET_DSA
4 help
5 This driver adds support for Broadcom managed switch chips. It supports
6 BCM5325E, BCM5365, BCM539x, BCM53115 and BCM53125 as well as BCM63XX
7 integrated switches.
8
9config B53_SPI_DRIVER
10 tristate "B53 SPI connected switch driver"
11 depends on B53 && SPI
12 help
13 Select to enable support for registering switches configured through SPI.
14
15config B53_MDIO_DRIVER
16 tristate "B53 MDIO connected switch driver"
17 depends on B53
18 help
19 Select to enable support for registering switches configured through MDIO.
20
21config B53_MMAP_DRIVER
22 tristate "B53 MMAP connected switch driver"
23 depends on B53 && HAS_IOMEM
24 help
25 Select to enable support for memory-mapped switches like the BCM63XX
26 integrated switches.
27
28config B53_SRAB_DRIVER
29 tristate "B53 SRAB connected switch driver"
30 depends on B53 && HAS_IOMEM
31 help
32 Select to enable support for memory-mapped Switch Register Access
 33	  Bridge Registers (SRAB), as found on the BCM53010.
diff --git a/drivers/net/dsa/b53/Makefile b/drivers/net/dsa/b53/Makefile
new file mode 100644
index 000000000000..7e6f9a8bfd75
--- /dev/null
+++ b/drivers/net/dsa/b53/Makefile
@@ -0,0 +1,6 @@
1obj-$(CONFIG_B53) += b53_common.o
2
3obj-$(CONFIG_B53_SPI_DRIVER) += b53_spi.o
4obj-$(CONFIG_B53_MDIO_DRIVER) += b53_mdio.o
5obj-$(CONFIG_B53_MMAP_DRIVER) += b53_mmap.o
6obj-$(CONFIG_B53_SRAB_DRIVER) += b53_srab.o
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
new file mode 100644
index 000000000000..444de66667b9
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -0,0 +1,1787 @@
1/*
2 * B53 switch driver main logic
3 *
4 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
5 * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com>
6 *
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22#include <linux/delay.h>
23#include <linux/export.h>
24#include <linux/gpio.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/platform_data/b53.h>
28#include <linux/phy.h>
29#include <linux/etherdevice.h>
30#include <linux/if_bridge.h>
31#include <net/dsa.h>
32#include <net/switchdev.h>
33
34#include "b53_regs.h"
35#include "b53_priv.h"
36
37struct b53_mib_desc {
38 u8 size;
39 u8 offset;
40 const char *name;
41};
42
43/* BCM5365 MIB counters */
44static const struct b53_mib_desc b53_mibs_65[] = {
45 { 8, 0x00, "TxOctets" },
46 { 4, 0x08, "TxDropPkts" },
47 { 4, 0x10, "TxBroadcastPkts" },
48 { 4, 0x14, "TxMulticastPkts" },
49 { 4, 0x18, "TxUnicastPkts" },
50 { 4, 0x1c, "TxCollisions" },
51 { 4, 0x20, "TxSingleCollision" },
52 { 4, 0x24, "TxMultipleCollision" },
53 { 4, 0x28, "TxDeferredTransmit" },
54 { 4, 0x2c, "TxLateCollision" },
55 { 4, 0x30, "TxExcessiveCollision" },
56 { 4, 0x38, "TxPausePkts" },
57 { 8, 0x44, "RxOctets" },
58 { 4, 0x4c, "RxUndersizePkts" },
59 { 4, 0x50, "RxPausePkts" },
60 { 4, 0x54, "Pkts64Octets" },
61 { 4, 0x58, "Pkts65to127Octets" },
62 { 4, 0x5c, "Pkts128to255Octets" },
63 { 4, 0x60, "Pkts256to511Octets" },
64 { 4, 0x64, "Pkts512to1023Octets" },
65 { 4, 0x68, "Pkts1024to1522Octets" },
66 { 4, 0x6c, "RxOversizePkts" },
67 { 4, 0x70, "RxJabbers" },
68 { 4, 0x74, "RxAlignmentErrors" },
69 { 4, 0x78, "RxFCSErrors" },
70 { 8, 0x7c, "RxGoodOctets" },
71 { 4, 0x84, "RxDropPkts" },
72 { 4, 0x88, "RxUnicastPkts" },
73 { 4, 0x8c, "RxMulticastPkts" },
74 { 4, 0x90, "RxBroadcastPkts" },
75 { 4, 0x94, "RxSAChanges" },
76 { 4, 0x98, "RxFragments" },
77};
78
79#define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65)
80
81/* BCM63xx MIB counters */
82static const struct b53_mib_desc b53_mibs_63xx[] = {
83 { 8, 0x00, "TxOctets" },
84 { 4, 0x08, "TxDropPkts" },
85 { 4, 0x0c, "TxQoSPkts" },
86 { 4, 0x10, "TxBroadcastPkts" },
87 { 4, 0x14, "TxMulticastPkts" },
88 { 4, 0x18, "TxUnicastPkts" },
89 { 4, 0x1c, "TxCollisions" },
90 { 4, 0x20, "TxSingleCollision" },
91 { 4, 0x24, "TxMultipleCollision" },
92 { 4, 0x28, "TxDeferredTransmit" },
93 { 4, 0x2c, "TxLateCollision" },
94 { 4, 0x30, "TxExcessiveCollision" },
95 { 4, 0x38, "TxPausePkts" },
96 { 8, 0x3c, "TxQoSOctets" },
97 { 8, 0x44, "RxOctets" },
98 { 4, 0x4c, "RxUndersizePkts" },
99 { 4, 0x50, "RxPausePkts" },
100 { 4, 0x54, "Pkts64Octets" },
101 { 4, 0x58, "Pkts65to127Octets" },
102 { 4, 0x5c, "Pkts128to255Octets" },
103 { 4, 0x60, "Pkts256to511Octets" },
104 { 4, 0x64, "Pkts512to1023Octets" },
105 { 4, 0x68, "Pkts1024to1522Octets" },
106 { 4, 0x6c, "RxOversizePkts" },
107 { 4, 0x70, "RxJabbers" },
108 { 4, 0x74, "RxAlignmentErrors" },
109 { 4, 0x78, "RxFCSErrors" },
110 { 8, 0x7c, "RxGoodOctets" },
111 { 4, 0x84, "RxDropPkts" },
112 { 4, 0x88, "RxUnicastPkts" },
113 { 4, 0x8c, "RxMulticastPkts" },
114 { 4, 0x90, "RxBroadcastPkts" },
115 { 4, 0x94, "RxSAChanges" },
116 { 4, 0x98, "RxFragments" },
117 { 4, 0xa0, "RxSymbolErrors" },
118 { 4, 0xa4, "RxQoSPkts" },
119 { 8, 0xa8, "RxQoSOctets" },
120 { 4, 0xb0, "Pkts1523to2047Octets" },
121 { 4, 0xb4, "Pkts2048to4095Octets" },
122 { 4, 0xb8, "Pkts4096to8191Octets" },
123 { 4, 0xbc, "Pkts8192to9728Octets" },
124 { 4, 0xc0, "RxDiscarded" },
125};
126
127#define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx)
128
129/* MIB counters */
130static const struct b53_mib_desc b53_mibs[] = {
131 { 8, 0x00, "TxOctets" },
132 { 4, 0x08, "TxDropPkts" },
133 { 4, 0x10, "TxBroadcastPkts" },
134 { 4, 0x14, "TxMulticastPkts" },
135 { 4, 0x18, "TxUnicastPkts" },
136 { 4, 0x1c, "TxCollisions" },
137 { 4, 0x20, "TxSingleCollision" },
138 { 4, 0x24, "TxMultipleCollision" },
139 { 4, 0x28, "TxDeferredTransmit" },
140 { 4, 0x2c, "TxLateCollision" },
141 { 4, 0x30, "TxExcessiveCollision" },
142 { 4, 0x38, "TxPausePkts" },
143 { 8, 0x50, "RxOctets" },
144 { 4, 0x58, "RxUndersizePkts" },
145 { 4, 0x5c, "RxPausePkts" },
146 { 4, 0x60, "Pkts64Octets" },
147 { 4, 0x64, "Pkts65to127Octets" },
148 { 4, 0x68, "Pkts128to255Octets" },
149 { 4, 0x6c, "Pkts256to511Octets" },
150 { 4, 0x70, "Pkts512to1023Octets" },
151 { 4, 0x74, "Pkts1024to1522Octets" },
152 { 4, 0x78, "RxOversizePkts" },
153 { 4, 0x7c, "RxJabbers" },
154 { 4, 0x80, "RxAlignmentErrors" },
155 { 4, 0x84, "RxFCSErrors" },
156 { 8, 0x88, "RxGoodOctets" },
157 { 4, 0x90, "RxDropPkts" },
158 { 4, 0x94, "RxUnicastPkts" },
159 { 4, 0x98, "RxMulticastPkts" },
160 { 4, 0x9c, "RxBroadcastPkts" },
161 { 4, 0xa0, "RxSAChanges" },
162 { 4, 0xa4, "RxFragments" },
163 { 4, 0xa8, "RxJumboPkts" },
164 { 4, 0xac, "RxSymbolErrors" },
165 { 4, 0xc0, "RxDiscarded" },
166};
167
168#define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs)
169
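These per-chip descriptor tables are the single source of truth for statistics: b53_get_strings() further down copies the names out for ethtool, and b53_get_ethtool_stats() uses each entry's size and offset to choose between a 32-bit and a 64-bit register read. A standalone sketch of how such a table is consumed:

/* Standalone model (not driver code) of walking a MIB descriptor
 * table the way the ethtool callbacks below do.
 */
#include <stdio.h>

struct mib_desc {
	unsigned char size;	/* counter width in bytes: 4 or 8 */
	unsigned char offset;	/* offset inside the per-port MIB page */
	const char *name;
};

static const struct mib_desc mibs[] = {
	{ 8, 0x00, "TxOctets" },
	{ 4, 0x08, "TxDropPkts" },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(mibs) / sizeof(mibs[0]); i++)
		printf("%-12s %d-bit counter at offset 0x%02x\n",
		       mibs[i].name, mibs[i].size * 8, mibs[i].offset);
	return 0;
}
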
170static int b53_do_vlan_op(struct b53_device *dev, u8 op)
171{
172 unsigned int i;
173
174 b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op);
175
176 for (i = 0; i < 10; i++) {
177 u8 vta;
178
179 b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta);
180 if (!(vta & VTA_START_CMD))
181 return 0;
182
183 usleep_range(100, 200);
184 }
185
186 return -EIO;
187}
188
189static void b53_set_vlan_entry(struct b53_device *dev, u16 vid,
190 struct b53_vlan *vlan)
191{
192 if (is5325(dev)) {
193 u32 entry = 0;
194
195 if (vlan->members) {
196 entry = ((vlan->untag & VA_UNTAG_MASK_25) <<
197 VA_UNTAG_S_25) | vlan->members;
198 if (dev->core_rev >= 3)
199 entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S;
200 else
201 entry |= VA_VALID_25;
202 }
203
204 b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry);
205 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
206 VTA_RW_STATE_WR | VTA_RW_OP_EN);
207 } else if (is5365(dev)) {
208 u16 entry = 0;
209
210 if (vlan->members)
211 entry = ((vlan->untag & VA_UNTAG_MASK_65) <<
212 VA_UNTAG_S_65) | vlan->members | VA_VALID_65;
213
214 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry);
215 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
216 VTA_RW_STATE_WR | VTA_RW_OP_EN);
217 } else {
218 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
219 b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2],
220 (vlan->untag << VTE_UNTAG_S) | vlan->members);
221
222 b53_do_vlan_op(dev, VTA_CMD_WRITE);
223 }
224
225 dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n",
226 vid, vlan->members, vlan->untag);
227}
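On the 5325-style devices handled above, a single word carries the member bitmap, the untagged bitmap and a valid flag. A standalone model of the packing; the shift and valid-bit values are placeholders standing in for VA_UNTAG_S_25 and VA_VALID_25:

#include <stdint.h>
#include <stdio.h>

#define UNTAG_SHIFT	6		/* placeholder for VA_UNTAG_S_25 */
#define VALID		(1u << 20)	/* placeholder for VA_VALID_25 */

int main(void)
{
	uint32_t members = (1u << 1) | (1u << 5);	/* port 1 + CPU port */
	uint32_t untag = 1u << 1;	/* egress untagged on port 1 only */
	uint32_t entry = (untag << UNTAG_SHIFT) | members | VALID;

	printf("vlan entry = 0x%08x\n", entry);
	return 0;
}
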
228
229static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
230 struct b53_vlan *vlan)
231{
232 if (is5325(dev)) {
233 u32 entry = 0;
234
235 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
236 VTA_RW_STATE_RD | VTA_RW_OP_EN);
237 b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry);
238
239 if (dev->core_rev >= 3)
240 vlan->valid = !!(entry & VA_VALID_25_R4);
241 else
242 vlan->valid = !!(entry & VA_VALID_25);
243 vlan->members = entry & VA_MEMBER_MASK;
244 vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25;
245
246 } else if (is5365(dev)) {
247 u16 entry = 0;
248
249 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
250 VTA_RW_STATE_WR | VTA_RW_OP_EN);
251 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry);
252
253 vlan->valid = !!(entry & VA_VALID_65);
254 vlan->members = entry & VA_MEMBER_MASK;
255 vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65;
256 } else {
257 u32 entry = 0;
258
259 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
260 b53_do_vlan_op(dev, VTA_CMD_READ);
261 b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry);
262 vlan->members = entry & VTE_MEMBERS;
263 vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS;
264 vlan->valid = true;
265 }
266}
267
268static void b53_set_forwarding(struct b53_device *dev, int enable)
269{
270 u8 mgmt;
271
272 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
273
274 if (enable)
275 mgmt |= SM_SW_FWD_EN;
276 else
277 mgmt &= ~SM_SW_FWD_EN;
278
279 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
280}
281
282static void b53_enable_vlan(struct b53_device *dev, bool enable)
283{
284 u8 mgmt, vc0, vc1, vc4 = 0, vc5;
285
286 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
287 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0);
288 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1);
289
290 if (is5325(dev) || is5365(dev)) {
291 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
292 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5);
293 } else if (is63xx(dev)) {
294 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4);
295 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5);
296 } else {
297 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4);
298 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
299 }
300
301 mgmt &= ~SM_SW_FWD_MODE;
302
303 if (enable) {
304 vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
305 vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
306 vc4 &= ~VC4_ING_VID_CHECK_MASK;
307 vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
308 vc5 |= VC5_DROP_VTABLE_MISS;
309
310 if (is5325(dev))
311 vc0 &= ~VC0_RESERVED_1;
312
313 if (is5325(dev) || is5365(dev))
314 vc1 |= VC1_RX_MCST_TAG_EN;
315
316 } else {
317 vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
318 vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
319 vc4 &= ~VC4_ING_VID_CHECK_MASK;
320 vc5 &= ~VC5_DROP_VTABLE_MISS;
321
322 if (is5325(dev) || is5365(dev))
323 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
324 else
325 vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S;
326
327 if (is5325(dev) || is5365(dev))
328 vc1 &= ~VC1_RX_MCST_TAG_EN;
329 }
330
331 if (!is5325(dev) && !is5365(dev))
332 vc5 &= ~VC5_VID_FFF_EN;
333
334 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0);
335 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1);
336
337 if (is5325(dev) || is5365(dev)) {
338 /* enable the high 8 bit vid check on 5325 */
339 if (is5325(dev) && enable)
340 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3,
341 VC3_HIGH_8BIT_EN);
342 else
343 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
344
345 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4);
346 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5);
347 } else if (is63xx(dev)) {
348 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0);
349 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4);
350 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5);
351 } else {
352 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
353 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4);
354 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5);
355 }
356
357 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
358}
359
360static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
361{
362 u32 port_mask = 0;
363 u16 max_size = JMS_MIN_SIZE;
364
365 if (is5325(dev) || is5365(dev))
366 return -EINVAL;
367
368 if (enable) {
369 port_mask = dev->enabled_ports;
370 max_size = JMS_MAX_SIZE;
371 if (allow_10_100)
372 port_mask |= JPM_10_100_JUMBO_EN;
373 }
374
375 b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask);
376 return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size);
377}
378
379static int b53_flush_arl(struct b53_device *dev, u8 mask)
380{
381 unsigned int i;
382
383 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
384 FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
385
386 for (i = 0; i < 10; i++) {
387 u8 fast_age_ctrl;
388
389 b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
390 &fast_age_ctrl);
391
392 if (!(fast_age_ctrl & FAST_AGE_DONE))
393 goto out;
394
395 msleep(1);
396 }
397
398 return -ETIMEDOUT;
399out:
400 /* Only age dynamic entries (default behavior) */
401 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC);
402 return 0;
403}
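b53_flush_arl() uses the same bounded-poll pattern as b53_do_vlan_op() above: kick the operation with a self-clearing busy bit, then re-read until the bit drops or the retry budget runs out. A standalone model of the pattern; the fake register and the read count are illustrative only:

#include <stdio.h>

#define BUSY 0x80

static unsigned int hw_reg = BUSY | 0x04;	/* fake status register */

static unsigned int read_status(void)
{
	static int reads;

	if (++reads == 3)	/* the "hardware" finishes on read 3 */
		hw_reg &= ~BUSY;
	return hw_reg;
}

int main(void)
{
	for (int i = 0; i < 10; i++) {
		if (!(read_status() & BUSY)) {
			puts("done");
			return 0;
		}
		/* real code sleeps here: msleep()/usleep_range() */
	}
	puts("timeout");
	return 1;
}
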
404
405static int b53_fast_age_port(struct b53_device *dev, int port)
406{
407 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
408
409 return b53_flush_arl(dev, FAST_AGE_PORT);
410}
411
412static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
413{
414 b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
415
416 return b53_flush_arl(dev, FAST_AGE_VLAN);
417}
418
419static void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
420{
421 struct b53_device *dev = ds_to_priv(ds);
422 unsigned int i;
423 u16 pvlan;
424
425 /* Enable the IMP port to be in the same VLAN as the other ports
426 * on a per-port basis such that we only have Port i and IMP in
427 * the same VLAN.
428 */
429 b53_for_each_port(dev, i) {
430 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan);
431 pvlan |= BIT(cpu_port);
432 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan);
433 }
434}
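Together with b53_enable_port() just below, this leaves each unbridged port's port-VLAN mask holding only its own bit plus the IMP bit, so front-panel ports can reach the CPU but never one another directly. A standalone illustration, assuming cpu_port = 5 and five front-panel ports:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int cpu_port = 5;

	for (unsigned int i = 0; i < 5; i++) {
		uint16_t pvlan = (uint16_t)((1u << i) | (1u << cpu_port));

		printf("port %u mask: 0x%04x\n", i, pvlan);
	}
	return 0;
}
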
435
436static int b53_enable_port(struct dsa_switch *ds, int port,
437 struct phy_device *phy)
438{
439 struct b53_device *dev = ds_to_priv(ds);
440 unsigned int cpu_port = dev->cpu_port;
441 u16 pvlan;
442
443 /* Clear the Rx and Tx disable bits and set to no spanning tree */
444 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);
445
446	/* Set this port, and only this one, to be in the default VLAN;
447	 * if it is a member of a bridge, restore the membership it had
448	 * prior to the port being brought down.
449	 */
450 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
451 pvlan &= ~0x1ff;
452 pvlan |= BIT(port);
453 pvlan |= dev->ports[port].vlan_ctl_mask;
454 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
455
456 b53_imp_vlan_setup(ds, cpu_port);
457
458 return 0;
459}
460
461static void b53_disable_port(struct dsa_switch *ds, int port,
462 struct phy_device *phy)
463{
464 struct b53_device *dev = ds_to_priv(ds);
465 u8 reg;
466
467 /* Disable Tx/Rx for the port */
468 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
469 reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
470 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
471}
472
473static void b53_enable_cpu_port(struct b53_device *dev)
474{
475 unsigned int cpu_port = dev->cpu_port;
476 u8 port_ctrl;
477
478 /* BCM5325 CPU port is at 8 */
479 if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25)
480 cpu_port = B53_CPU_PORT;
481
482 port_ctrl = PORT_CTRL_RX_BCST_EN |
483 PORT_CTRL_RX_MCST_EN |
484 PORT_CTRL_RX_UCST_EN;
485 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(cpu_port), port_ctrl);
486}
487
488static void b53_enable_mib(struct b53_device *dev)
489{
490 u8 gc;
491
492 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
493 gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN);
494 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
495}
496
497static int b53_configure_vlan(struct b53_device *dev)
498{
499 struct b53_vlan vl = { 0 };
500 int i;
501
502 /* clear all vlan entries */
503 if (is5325(dev) || is5365(dev)) {
504 for (i = 1; i < dev->num_vlans; i++)
505 b53_set_vlan_entry(dev, i, &vl);
506 } else {
507 b53_do_vlan_op(dev, VTA_CMD_CLEAR);
508 }
509
510 b53_enable_vlan(dev, false);
511
512 b53_for_each_port(dev, i)
513 b53_write16(dev, B53_VLAN_PAGE,
514 B53_VLAN_PORT_DEF_TAG(i), 1);
515
516 if (!is5325(dev) && !is5365(dev))
517 b53_set_jumbo(dev, dev->enable_jumbo, false);
518
519 return 0;
520}
521
522static void b53_switch_reset_gpio(struct b53_device *dev)
523{
524 int gpio = dev->reset_gpio;
525
526 if (gpio < 0)
527 return;
528
529	/* Reset sequence: drive RESET low (50 ms), then high (20 ms)
530	 */
531 gpio_set_value(gpio, 0);
532 mdelay(50);
533
534 gpio_set_value(gpio, 1);
535 mdelay(20);
536
537 dev->current_page = 0xff;
538}
539
540static int b53_switch_reset(struct b53_device *dev)
541{
542 u8 mgmt;
543
544 b53_switch_reset_gpio(dev);
545
546 if (is539x(dev)) {
547 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83);
548 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
549 }
550
551 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
552
553 if (!(mgmt & SM_SW_FWD_EN)) {
554 mgmt &= ~SM_SW_FWD_MODE;
555 mgmt |= SM_SW_FWD_EN;
556
557 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
558 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
559
560 if (!(mgmt & SM_SW_FWD_EN)) {
561 dev_err(dev->dev, "Failed to enable switch!\n");
562 return -EINVAL;
563 }
564 }
565
566 b53_enable_mib(dev);
567
568 return b53_flush_arl(dev, FAST_AGE_STATIC);
569}
570
571static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg)
572{
573 struct b53_device *priv = ds_to_priv(ds);
574 u16 value = 0;
575 int ret;
576
577 if (priv->ops->phy_read16)
578 ret = priv->ops->phy_read16(priv, addr, reg, &value);
579 else
580 ret = b53_read16(priv, B53_PORT_MII_PAGE(addr),
581 reg * 2, &value);
582
583 return ret ? ret : value;
584}
585
586static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
587{
588 struct b53_device *priv = ds_to_priv(ds);
589
590 if (priv->ops->phy_write16)
591 return priv->ops->phy_write16(priv, addr, reg, val);
592
593 return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val);
594}
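When the bus driver supplies no dedicated phy_read16/phy_write16 op, the internal PHYs are reached through the switch's per-port MII pages, and the MII register number is doubled because those registers are 16 bits wide while B53 pages are byte-addressed. A standalone sketch of the translation; the 0x10 page base is an assumption mirroring what B53_PORT_MII_PAGE() is expected to expand to:

#include <stdio.h>

#define PORT_MII_PAGE(port)	(0x10 + (port))	/* assumed layout */

int main(void)
{
	int port = 3, mii_reg = 2;	/* e.g. the PHY ID high register */

	printf("page 0x%02x, byte offset 0x%02x\n",
	       PORT_MII_PAGE(port), mii_reg * 2);
	return 0;
}
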
595
596static int b53_reset_switch(struct b53_device *priv)
597{
598 /* reset vlans */
599 priv->enable_jumbo = false;
600
601 memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
602 memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);
603
604 return b53_switch_reset(priv);
605}
606
607static int b53_apply_config(struct b53_device *priv)
608{
609 /* disable switching */
610 b53_set_forwarding(priv, 0);
611
612 b53_configure_vlan(priv);
613
614 /* enable switching */
615 b53_set_forwarding(priv, 1);
616
617 return 0;
618}
619
620static void b53_reset_mib(struct b53_device *priv)
621{
622 u8 gc;
623
624 b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
625
626 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB);
627 msleep(1);
628 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB);
629 msleep(1);
630}
631
632static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev)
633{
634 if (is5365(dev))
635 return b53_mibs_65;
636 else if (is63xx(dev))
637 return b53_mibs_63xx;
638 else
639 return b53_mibs;
640}
641
642static unsigned int b53_get_mib_size(struct b53_device *dev)
643{
644 if (is5365(dev))
645 return B53_MIBS_65_SIZE;
646 else if (is63xx(dev))
647 return B53_MIBS_63XX_SIZE;
648 else
649 return B53_MIBS_SIZE;
650}
651
652static void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
653{
654 struct b53_device *dev = ds_to_priv(ds);
655 const struct b53_mib_desc *mibs = b53_get_mib(dev);
656 unsigned int mib_size = b53_get_mib_size(dev);
657 unsigned int i;
658
659 for (i = 0; i < mib_size; i++)
660 memcpy(data + i * ETH_GSTRING_LEN,
661 mibs[i].name, ETH_GSTRING_LEN);
662}
663
664static void b53_get_ethtool_stats(struct dsa_switch *ds, int port,
665 uint64_t *data)
666{
667 struct b53_device *dev = ds_to_priv(ds);
668 const struct b53_mib_desc *mibs = b53_get_mib(dev);
669 unsigned int mib_size = b53_get_mib_size(dev);
670 const struct b53_mib_desc *s;
671 unsigned int i;
672 u64 val = 0;
673
674 if (is5365(dev) && port == 5)
675 port = 8;
676
677 mutex_lock(&dev->stats_mutex);
678
679 for (i = 0; i < mib_size; i++) {
680 s = &mibs[i];
681
682 if (s->size == 8) {
683 b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val);
684 } else {
685 u32 val32;
686
687 b53_read32(dev, B53_MIB_PAGE(port), s->offset,
688 &val32);
689 val = val32;
690 }
691 data[i] = (u64)val;
692 }
693
694 mutex_unlock(&dev->stats_mutex);
695}
696
697static int b53_get_sset_count(struct dsa_switch *ds)
698{
699 struct b53_device *dev = ds_to_priv(ds);
700
701 return b53_get_mib_size(dev);
702}
703
704static int b53_set_addr(struct dsa_switch *ds, u8 *addr)
705{
706 return 0;
707}
708
709static int b53_setup(struct dsa_switch *ds)
710{
711 struct b53_device *dev = ds_to_priv(ds);
712 unsigned int port;
713 int ret;
714
715 ret = b53_reset_switch(dev);
716 if (ret) {
717 dev_err(ds->dev, "failed to reset switch\n");
718 return ret;
719 }
720
721 b53_reset_mib(dev);
722
723 ret = b53_apply_config(dev);
724 if (ret)
725 dev_err(ds->dev, "failed to apply configuration\n");
726
727 for (port = 0; port < dev->num_ports; port++) {
728 if (BIT(port) & ds->enabled_port_mask)
729 b53_enable_port(ds, port, NULL);
730 else if (dsa_is_cpu_port(ds, port))
731 b53_enable_cpu_port(dev);
732 else
733 b53_disable_port(ds, port, NULL);
734 }
735
736 return ret;
737}
738
739static void b53_adjust_link(struct dsa_switch *ds, int port,
740 struct phy_device *phydev)
741{
742 struct b53_device *dev = ds_to_priv(ds);
743 u8 rgmii_ctrl = 0, reg = 0, off;
744
745 if (!phy_is_pseudo_fixed_link(phydev))
746 return;
747
748 /* Override the port settings */
749 if (port == dev->cpu_port) {
750 off = B53_PORT_OVERRIDE_CTRL;
751 reg = PORT_OVERRIDE_EN;
752 } else {
753 off = B53_GMII_PORT_OVERRIDE_CTRL(port);
754 reg = GMII_PO_EN;
755 }
756
757 /* Set the link UP */
758 if (phydev->link)
759 reg |= PORT_OVERRIDE_LINK;
760
761 if (phydev->duplex == DUPLEX_FULL)
762 reg |= PORT_OVERRIDE_FULL_DUPLEX;
763
764 switch (phydev->speed) {
765 case 2000:
766 reg |= PORT_OVERRIDE_SPEED_2000M;
767 /* fallthrough */
768 case SPEED_1000:
769 reg |= PORT_OVERRIDE_SPEED_1000M;
770 break;
771 case SPEED_100:
772 reg |= PORT_OVERRIDE_SPEED_100M;
773 break;
774 case SPEED_10:
775 reg |= PORT_OVERRIDE_SPEED_10M;
776 break;
777 default:
778 dev_err(ds->dev, "unknown speed: %d\n", phydev->speed);
779 return;
780 }
781
782 /* Enable flow control on BCM5301x's CPU port */
783 if (is5301x(dev) && port == dev->cpu_port)
784 reg |= PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW;
785
786 if (phydev->pause) {
787 if (phydev->asym_pause)
788 reg |= PORT_OVERRIDE_TX_FLOW;
789 reg |= PORT_OVERRIDE_RX_FLOW;
790 }
791
792 b53_write8(dev, B53_CTRL_PAGE, off, reg);
793
794 if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
795 if (port == 8)
796 off = B53_RGMII_CTRL_IMP;
797 else
798 off = B53_RGMII_CTRL_P(port);
799
800		/* Configure the port RGMII clock delay: DLLs disabled and
801		 * tx_clk aligned timing (restoring the reset defaults)
802		 */
803 b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
804 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
805 RGMII_CTRL_TIMING_SEL);
806
807		/* PHY_INTERFACE_MODE_RGMII_TXID means a TX internal delay is
808		 * inserted; make sure we enable the port TX clock internal
809		 * delay to account for it, otherwise the switch won't be
810		 * able to receive correctly.
811		 *
812		 * PHY_INTERFACE_MODE_RGMII means no delay is introduced on
813		 * either transmission or reception, so the BCM53125 must be
814		 * configured to account for the missing delay by inserting
815		 * one itself on both clock paths.
816		 *
817		 * The BCM53125 switch has its RX clock and TX clock controls
818		 * swapped, hence we modify the TX clock path in the "RGMII"
819		 * case.
820		 */
821 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
822 rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
823 if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
824 rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
825 rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
826 b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
827
828 dev_info(ds->dev, "Configured port %d for %s\n", port,
829 phy_modes(phydev->interface));
830 }
831
832 /* configure MII port if necessary */
833 if (is5325(dev)) {
834 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
835 &reg);
836
837 /* reverse mii needs to be enabled */
838 if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
839 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
840 reg | PORT_OVERRIDE_RV_MII_25);
841 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
842 &reg);
843
844 if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
845 dev_err(ds->dev,
846 "Failed to enable reverse MII mode\n");
847 return;
848 }
849 }
850 } else if (is5301x(dev)) {
851 if (port != dev->cpu_port) {
852 u8 po_reg = B53_GMII_PORT_OVERRIDE_CTRL(dev->cpu_port);
853 u8 gmii_po;
854
855 b53_read8(dev, B53_CTRL_PAGE, po_reg, &gmii_po);
856 gmii_po |= GMII_PO_LINK |
857 GMII_PO_RX_FLOW |
858 GMII_PO_TX_FLOW |
859 GMII_PO_EN |
860 GMII_PO_SPEED_2000M;
861 b53_write8(dev, B53_CTRL_PAGE, po_reg, gmii_po);
862 }
863 }
864}
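The RGMII branch boils down to a small delay policy: for rgmii-txid a TX delay already exists, so only the (swapped) TX control is set; for plain rgmii nothing else adds delay, so the switch inserts it on both clock paths. A standalone summary of that mapping; the bit values are placeholders for RGMII_CTRL_DLL_TXC and RGMII_CTRL_DLL_RXC:

#include <stdio.h>

#define DLL_TXC	(1 << 0)	/* switch inserts TX clock delay */
#define DLL_RXC	(1 << 1)	/* switch inserts RX clock delay */

enum rgmii_mode { RGMII, RGMII_TXID };

static int delays_for(enum rgmii_mode m)
{
	switch (m) {
	case RGMII_TXID:	/* TX delay already inserted elsewhere */
		return DLL_TXC;	/* TX/RX controls swapped on BCM53125 */
	case RGMII:		/* no delay added by MAC or PHY */
		return DLL_TXC | DLL_RXC;
	}
	return 0;
}

int main(void)
{
	printf("rgmii: 0x%x, rgmii-txid: 0x%x\n",
	       delays_for(RGMII), delays_for(RGMII_TXID));
	return 0;
}
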
865
866static int b53_vlan_filtering(struct dsa_switch *ds, int port,
867 bool vlan_filtering)
868{
869 return 0;
870}
871
872static int b53_vlan_prepare(struct dsa_switch *ds, int port,
873 const struct switchdev_obj_port_vlan *vlan,
874 struct switchdev_trans *trans)
875{
876 struct b53_device *dev = ds_to_priv(ds);
877
878 if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0)
879 return -EOPNOTSUPP;
880
881 if (vlan->vid_end > dev->num_vlans)
882 return -ERANGE;
883
884 b53_enable_vlan(dev, true);
885
886 return 0;
887}
888
889static void b53_vlan_add(struct dsa_switch *ds, int port,
890 const struct switchdev_obj_port_vlan *vlan,
891 struct switchdev_trans *trans)
892{
893 struct b53_device *dev = ds_to_priv(ds);
894 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
895 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
896 unsigned int cpu_port = dev->cpu_port;
897 struct b53_vlan *vl;
898 u16 vid;
899
900 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
901 vl = &dev->vlans[vid];
902
903 b53_get_vlan_entry(dev, vid, vl);
904
905 vl->members |= BIT(port) | BIT(cpu_port);
906 if (untagged)
907 vl->untag |= BIT(port) | BIT(cpu_port);
908 else
909 vl->untag &= ~(BIT(port) | BIT(cpu_port));
910
911 b53_set_vlan_entry(dev, vid, vl);
912 b53_fast_age_vlan(dev, vid);
913 }
914
915 if (pvid) {
916 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
917 vlan->vid_end);
918 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port),
919 vlan->vid_end);
920 b53_fast_age_vlan(dev, vid);
921 }
922}
923
924static int b53_vlan_del(struct dsa_switch *ds, int port,
925 const struct switchdev_obj_port_vlan *vlan)
926{
927 struct b53_device *dev = ds_to_priv(ds);
928 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
929 unsigned int cpu_port = dev->cpu_port;
930 struct b53_vlan *vl;
931 u16 vid;
932 u16 pvid;
933
934 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
935
936 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
937 vl = &dev->vlans[vid];
938
939 b53_get_vlan_entry(dev, vid, vl);
940
941 vl->members &= ~BIT(port);
942 if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
943 vl->members = 0;
944
945 if (pvid == vid) {
946 if (is5325(dev) || is5365(dev))
947 pvid = 1;
948 else
949 pvid = 0;
950 }
951
952 if (untagged) {
953 vl->untag &= ~(BIT(port));
954 if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port))
955 vl->untag = 0;
956 }
957
958 b53_set_vlan_entry(dev, vid, vl);
959 b53_fast_age_vlan(dev, vid);
960 }
961
962 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
963 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid);
964 b53_fast_age_vlan(dev, pvid);
965
966 return 0;
967}
968
969static int b53_vlan_dump(struct dsa_switch *ds, int port,
970 struct switchdev_obj_port_vlan *vlan,
971 int (*cb)(struct switchdev_obj *obj))
972{
973 struct b53_device *dev = ds_to_priv(ds);
974 u16 vid, vid_start = 0, pvid;
975 struct b53_vlan *vl;
976 int err = 0;
977
978 if (is5325(dev) || is5365(dev))
979 vid_start = 1;
980
981 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
982
983 /* Use our software cache for dumps, since we do not have any HW
984 * operation returning only the used/valid VLANs
985 */
986 for (vid = vid_start; vid < dev->num_vlans; vid++) {
987 vl = &dev->vlans[vid];
988
989 if (!vl->valid)
990 continue;
991
992 if (!(vl->members & BIT(port)))
993 continue;
994
995 vlan->vid_begin = vlan->vid_end = vid;
996 vlan->flags = 0;
997
998 if (vl->untag & BIT(port))
999 vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1000 if (pvid == vid)
1001 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
1002
1003 err = cb(&vlan->obj);
1004 if (err)
1005 break;
1006 }
1007
1008 return err;
1009}
1010
1011/* Address Resolution Logic routines */
1012static int b53_arl_op_wait(struct b53_device *dev)
1013{
1014 unsigned int timeout = 10;
1015 u8 reg;
1016
1017 do {
1018 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
1019 if (!(reg & ARLTBL_START_DONE))
1020 return 0;
1021
1022 usleep_range(1000, 2000);
1023 } while (timeout--);
1024
1025 dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg);
1026
1027 return -ETIMEDOUT;
1028}
1029
1030static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
1031{
1032 u8 reg;
1033
1034 if (op > ARLTBL_RW)
1035 return -EINVAL;
1036
1037 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
1038 reg |= ARLTBL_START_DONE;
1039 if (op)
1040 reg |= ARLTBL_RW;
1041 else
1042 reg &= ~ARLTBL_RW;
1043 b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
1044
1045 return b53_arl_op_wait(dev);
1046}
1047
1048static int b53_arl_read(struct b53_device *dev, u64 mac,
1049 u16 vid, struct b53_arl_entry *ent, u8 *idx,
1050 bool is_valid)
1051{
1052 unsigned int i;
1053 int ret;
1054
1055 ret = b53_arl_op_wait(dev);
1056 if (ret)
1057 return ret;
1058
1059 /* Read the bins */
1060 for (i = 0; i < dev->num_arl_entries; i++) {
1061 u64 mac_vid;
1062 u32 fwd_entry;
1063
1064 b53_read64(dev, B53_ARLIO_PAGE,
1065 B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
1066 b53_read32(dev, B53_ARLIO_PAGE,
1067 B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
1068 b53_arl_to_entry(ent, mac_vid, fwd_entry);
1069
1070 if (!(fwd_entry & ARLTBL_VALID))
1071 continue;
1072 if ((mac_vid & ARLTBL_MAC_MASK) != mac)
1073 continue;
1074 *idx = i;
1075 }
1076
1077 return -ENOENT;
1078}
1079
1080static int b53_arl_op(struct b53_device *dev, int op, int port,
1081 const unsigned char *addr, u16 vid, bool is_valid)
1082{
1083 struct b53_arl_entry ent;
1084 u32 fwd_entry;
1085 u64 mac, mac_vid = 0;
1086 u8 idx = 0;
1087 int ret;
1088
1089 /* Convert the array into a 64-bit MAC */
1090 mac = b53_mac_to_u64(addr);
1091
1092 /* Perform a read for the given MAC and VID */
1093 b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
1094 b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
1095
1096 /* Issue a read operation for this MAC */
1097 ret = b53_arl_rw_op(dev, 1);
1098 if (ret)
1099 return ret;
1100
1101 ret = b53_arl_read(dev, mac, vid, &ent, &idx, is_valid);
1102 /* If this is a read, just finish now */
1103 if (op)
1104 return ret;
1105
1106 /* We could not find a matching MAC, so reset to a new entry */
1107 if (ret) {
1108 fwd_entry = 0;
1109 idx = 1;
1110 }
1111
1112 memset(&ent, 0, sizeof(ent));
1113 ent.port = port;
1114 ent.is_valid = is_valid;
1115 ent.vid = vid;
1116 ent.is_static = true;
1117 memcpy(ent.mac, addr, ETH_ALEN);
1118 b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
1119
1120 b53_write64(dev, B53_ARLIO_PAGE,
1121 B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
1122 b53_write32(dev, B53_ARLIO_PAGE,
1123 B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
1124
1125 return b53_arl_rw_op(dev, 0);
1126}
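The 64-bit value written to B53_MAC_ADDR_IDX is assumed to be produced by b53_mac_to_u64() as a plain big-endian fold of the six address bytes, so the first octet lands in the most significant of the 48 used bits. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

static uint64_t mac_to_u64(const uint8_t *addr)
{
	uint64_t v = 0;

	for (int i = 0; i < 6; i++)
		v = (v << 8) | addr[i];
	return v;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("0x%012llx\n", (unsigned long long)mac_to_u64(mac));
	return 0;
}
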
1127
1128static int b53_fdb_prepare(struct dsa_switch *ds, int port,
1129 const struct switchdev_obj_port_fdb *fdb,
1130 struct switchdev_trans *trans)
1131{
1132 struct b53_device *priv = ds_to_priv(ds);
1133
1134 /* 5325 and 5365 require some more massaging, but could
1135 * be supported eventually
1136 */
1137 if (is5325(priv) || is5365(priv))
1138 return -EOPNOTSUPP;
1139
1140 return 0;
1141}
1142
1143static void b53_fdb_add(struct dsa_switch *ds, int port,
1144 const struct switchdev_obj_port_fdb *fdb,
1145 struct switchdev_trans *trans)
1146{
1147 struct b53_device *priv = ds_to_priv(ds);
1148
1149 if (b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
1150 pr_err("%s: failed to add MAC address\n", __func__);
1151}
1152
1153static int b53_fdb_del(struct dsa_switch *ds, int port,
1154 const struct switchdev_obj_port_fdb *fdb)
1155{
1156 struct b53_device *priv = ds_to_priv(ds);
1157
1158 return b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
1159}
1160
1161static int b53_arl_search_wait(struct b53_device *dev)
1162{
1163 unsigned int timeout = 1000;
1164 u8 reg;
1165
1166 do {
1167 b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
1168 if (!(reg & ARL_SRCH_STDN))
1169 return 0;
1170
1171 if (reg & ARL_SRCH_VLID)
1172 return 0;
1173
1174 usleep_range(1000, 2000);
1175 } while (timeout--);
1176
1177 return -ETIMEDOUT;
1178}
1179
1180static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
1181 struct b53_arl_entry *ent)
1182{
1183 u64 mac_vid;
1184 u32 fwd_entry;
1185
1186 b53_read64(dev, B53_ARLIO_PAGE,
1187 B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
1188 b53_read32(dev, B53_ARLIO_PAGE,
1189 B53_ARL_SRCH_RSTL(idx), &fwd_entry);
1190 b53_arl_to_entry(ent, mac_vid, fwd_entry);
1191}
1192
1193static int b53_fdb_copy(struct net_device *dev, int port,
1194 const struct b53_arl_entry *ent,
1195 struct switchdev_obj_port_fdb *fdb,
1196 int (*cb)(struct switchdev_obj *obj))
1197{
1198 if (!ent->is_valid)
1199 return 0;
1200
1201 if (port != ent->port)
1202 return 0;
1203
1204 ether_addr_copy(fdb->addr, ent->mac);
1205 fdb->vid = ent->vid;
1206 fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE;
1207
1208 return cb(&fdb->obj);
1209}
1210
1211static int b53_fdb_dump(struct dsa_switch *ds, int port,
1212 struct switchdev_obj_port_fdb *fdb,
1213 int (*cb)(struct switchdev_obj *obj))
1214{
1215 struct b53_device *priv = ds_to_priv(ds);
1216 struct net_device *dev = ds->ports[port].netdev;
1217 struct b53_arl_entry results[2];
1218 unsigned int count = 0;
1219 int ret;
1220 u8 reg;
1221
1222 /* Start search operation */
1223 reg = ARL_SRCH_STDN;
1224 b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
1225
1226 do {
1227 ret = b53_arl_search_wait(priv);
1228 if (ret)
1229 return ret;
1230
1231 b53_arl_search_rd(priv, 0, &results[0]);
1232 ret = b53_fdb_copy(dev, port, &results[0], fdb, cb);
1233 if (ret)
1234 return ret;
1235
1236 if (priv->num_arl_entries > 2) {
1237 b53_arl_search_rd(priv, 1, &results[1]);
1238 ret = b53_fdb_copy(dev, port, &results[1], fdb, cb);
1239 if (ret)
1240 return ret;
1241
1242 if (!results[0].is_valid && !results[1].is_valid)
1243 break;
1244 }
1245
1246 } while (count++ < 1024);
1247
1248 return 0;
1249}
1250
1251static int b53_br_join(struct dsa_switch *ds, int port,
1252 struct net_device *bridge)
1253{
1254 struct b53_device *dev = ds_to_priv(ds);
1255 u16 pvlan, reg;
1256 unsigned int i;
1257
1258 dev->ports[port].bridge_dev = bridge;
1259 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
1260
1261 b53_for_each_port(dev, i) {
1262 if (dev->ports[i].bridge_dev != bridge)
1263 continue;
1264
1265 /* Add this local port to the remote port VLAN control
1266 * membership and update the remote port bitmask
1267 */
1268 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
1269 reg |= BIT(port);
1270 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
1271 dev->ports[i].vlan_ctl_mask = reg;
1272
1273 pvlan |= BIT(i);
1274 }
1275
1276 /* Configure the local port VLAN control membership to include
1277 * remote ports and update the local port bitmask
1278 */
1279 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
1280 dev->ports[port].vlan_ctl_mask = pvlan;
1281
1282 return 0;
1283}
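A worked example makes the read-modify-write loop concrete. Suppose port 1 is already in the bridge, port 2 joins, and cpu_port is 5: port 1's mask gains bit 2, and port 2's new mask gains bit 1 on top of its isolated default. A standalone illustration with those assumed port numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t mask[6];
	unsigned int cpu = 5;

	/* isolated defaults: each port sees only itself and the CPU */
	for (unsigned int i = 0; i < 6; i++)
		mask[i] = (uint16_t)((1u << i) | (1u << cpu));

	/* port 2 joins a bridge already containing port 1 */
	mask[1] |= 1u << 2;
	mask[2] |= 1u << 1;

	printf("port 1 mask: 0x%04x\n", mask[1]);	/* 0x0026 */
	printf("port 2 mask: 0x%04x\n", mask[2]);	/* 0x0026 */
	return 0;
}
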
1284
1285static void b53_br_leave(struct dsa_switch *ds, int port)
1286{
1287 struct b53_device *dev = ds_to_priv(ds);
1288 struct net_device *bridge = dev->ports[port].bridge_dev;
1289 struct b53_vlan *vl = &dev->vlans[0];
1290 unsigned int i;
1291 u16 pvlan, reg, pvid;
1292
1293 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
1294
1295 b53_for_each_port(dev, i) {
1296 /* Don't touch the remaining ports */
1297 if (dev->ports[i].bridge_dev != bridge)
1298 continue;
1299
1300 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
1301 reg &= ~BIT(port);
1302 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
1303 dev->ports[port].vlan_ctl_mask = reg;
1304
1305 /* Prevent self removal to preserve isolation */
1306 if (port != i)
1307 pvlan &= ~BIT(i);
1308 }
1309
1310 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
1311 dev->ports[port].vlan_ctl_mask = pvlan;
1312 dev->ports[port].bridge_dev = NULL;
1313
1314 if (is5325(dev) || is5365(dev))
1315 pvid = 1;
1316 else
1317 pvid = 0;
1318
1319 b53_get_vlan_entry(dev, pvid, vl);
1320 vl->members |= BIT(port) | BIT(dev->cpu_port);
1321 vl->untag |= BIT(port) | BIT(dev->cpu_port);
1322 b53_set_vlan_entry(dev, pvid, vl);
1323}
1324
1325static void b53_br_set_stp_state(struct dsa_switch *ds, int port,
1326 u8 state)
1327{
1328 struct b53_device *dev = ds_to_priv(ds);
1329 u8 hw_state, cur_hw_state;
1330 u8 reg;
1331
1332 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
1333 cur_hw_state = reg & PORT_CTRL_STP_STATE_MASK;
1334
1335 switch (state) {
1336 case BR_STATE_DISABLED:
1337 hw_state = PORT_CTRL_DIS_STATE;
1338 break;
1339 case BR_STATE_LISTENING:
1340 hw_state = PORT_CTRL_LISTEN_STATE;
1341 break;
1342 case BR_STATE_LEARNING:
1343 hw_state = PORT_CTRL_LEARN_STATE;
1344 break;
1345 case BR_STATE_FORWARDING:
1346 hw_state = PORT_CTRL_FWD_STATE;
1347 break;
1348 case BR_STATE_BLOCKING:
1349 hw_state = PORT_CTRL_BLOCK_STATE;
1350 break;
1351 default:
1352 dev_err(ds->dev, "invalid STP state: %d\n", state);
1353 return;
1354 }
1355
1356 /* Fast-age ARL entries if we are moving a port from Learning or
1357 * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
1358 * state (hw_state)
1359 */
1360 if (cur_hw_state != hw_state) {
1361 if (cur_hw_state >= PORT_CTRL_LEARN_STATE &&
1362 hw_state <= PORT_CTRL_LISTEN_STATE) {
1363 if (b53_fast_age_port(dev, port)) {
1364 dev_err(ds->dev, "fast ageing failed\n");
1365 return;
1366 }
1367 }
1368 }
1369
1370 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
1371 reg &= ~PORT_CTRL_STP_STATE_MASK;
1372 reg |= hw_state;
1373 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
1374}
1375
1376static struct dsa_switch_driver b53_switch_ops = {
1377 .tag_protocol = DSA_TAG_PROTO_NONE,
1378 .setup = b53_setup,
1379 .set_addr = b53_set_addr,
1380 .get_strings = b53_get_strings,
1381 .get_ethtool_stats = b53_get_ethtool_stats,
1382 .get_sset_count = b53_get_sset_count,
1383 .phy_read = b53_phy_read16,
1384 .phy_write = b53_phy_write16,
1385 .adjust_link = b53_adjust_link,
1386 .port_enable = b53_enable_port,
1387 .port_disable = b53_disable_port,
1388 .port_bridge_join = b53_br_join,
1389 .port_bridge_leave = b53_br_leave,
1390 .port_stp_state_set = b53_br_set_stp_state,
1391 .port_vlan_filtering = b53_vlan_filtering,
1392 .port_vlan_prepare = b53_vlan_prepare,
1393 .port_vlan_add = b53_vlan_add,
1394 .port_vlan_del = b53_vlan_del,
1395 .port_vlan_dump = b53_vlan_dump,
1396 .port_fdb_prepare = b53_fdb_prepare,
1397 .port_fdb_dump = b53_fdb_dump,
1398 .port_fdb_add = b53_fdb_add,
1399 .port_fdb_del = b53_fdb_del,
1400};
1401
1402struct b53_chip_data {
1403 u32 chip_id;
1404 const char *dev_name;
1405 u16 vlans;
1406 u16 enabled_ports;
1407 u8 cpu_port;
1408 u8 vta_regs[3];
1409 u8 arl_entries;
1410 u8 duplex_reg;
1411 u8 jumbo_pm_reg;
1412 u8 jumbo_size_reg;
1413};
1414
1415#define B53_VTA_REGS \
1416 { B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY }
1417#define B53_VTA_REGS_9798 \
1418 { B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 }
1419#define B53_VTA_REGS_63XX \
1420 { B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX }
1421
1422static const struct b53_chip_data b53_switch_chips[] = {
1423 {
1424 .chip_id = BCM5325_DEVICE_ID,
1425 .dev_name = "BCM5325",
1426 .vlans = 16,
1427 .enabled_ports = 0x1f,
1428 .arl_entries = 2,
1429 .cpu_port = B53_CPU_PORT_25,
1430 .duplex_reg = B53_DUPLEX_STAT_FE,
1431 },
1432 {
1433 .chip_id = BCM5365_DEVICE_ID,
1434 .dev_name = "BCM5365",
1435 .vlans = 256,
1436 .enabled_ports = 0x1f,
1437 .arl_entries = 2,
1438 .cpu_port = B53_CPU_PORT_25,
1439 .duplex_reg = B53_DUPLEX_STAT_FE,
1440 },
1441 {
1442 .chip_id = BCM5395_DEVICE_ID,
1443 .dev_name = "BCM5395",
1444 .vlans = 4096,
1445 .enabled_ports = 0x1f,
1446 .arl_entries = 4,
1447 .cpu_port = B53_CPU_PORT,
1448 .vta_regs = B53_VTA_REGS,
1449 .duplex_reg = B53_DUPLEX_STAT_GE,
1450 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
1451 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
1452 },
1453 {
1454 .chip_id = BCM5397_DEVICE_ID,
1455 .dev_name = "BCM5397",
1456 .vlans = 4096,
1457 .enabled_ports = 0x1f,
1458 .arl_entries = 4,
1459 .cpu_port = B53_CPU_PORT,
1460 .vta_regs = B53_VTA_REGS_9798,
1461 .duplex_reg = B53_DUPLEX_STAT_GE,
1462 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
1463 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
1464 },
1465 {
1466 .chip_id = BCM5398_DEVICE_ID,
1467 .dev_name = "BCM5398",
1468 .vlans = 4096,
1469 .enabled_ports = 0x7f,
1470 .arl_entries = 4,
1471 .cpu_port = B53_CPU_PORT,
1472 .vta_regs = B53_VTA_REGS_9798,
1473 .duplex_reg = B53_DUPLEX_STAT_GE,
1474 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
1475 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
1476 },
1477 {
1478 .chip_id = BCM53115_DEVICE_ID,
1479 .dev_name = "BCM53115",
1480 .vlans = 4096,
1481 .enabled_ports = 0x1f,
1482 .arl_entries = 4,
1483 .vta_regs = B53_VTA_REGS,
1484 .cpu_port = B53_CPU_PORT,
1485 .duplex_reg = B53_DUPLEX_STAT_GE,
1486 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
1487 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
1488 },
1489 {
1490 .chip_id = BCM53125_DEVICE_ID,
1491 .dev_name = "BCM53125",
1492 .vlans = 4096,
1493 .enabled_ports = 0xff,
1494 .cpu_port = B53_CPU_PORT,
1495 .vta_regs = B53_VTA_REGS,
1496 .duplex_reg = B53_DUPLEX_STAT_GE,
1497 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
1498 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
1499 },
1500 {
1501 .chip_id = BCM53128_DEVICE_ID,
1502 .dev_name = "BCM53128",
1503 .vlans = 4096,
1504 .enabled_ports = 0x1ff,
1505 .arl_entries = 4,
1506 .cpu_port = B53_CPU_PORT,
1507 .vta_regs = B53_VTA_REGS,
1508 .duplex_reg = B53_DUPLEX_STAT_GE,
1509 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
1510 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
1511 },
1512 {
1513 .chip_id = BCM63XX_DEVICE_ID,
1514 .dev_name = "BCM63xx",
1515 .vlans = 4096,
1516 .enabled_ports = 0, /* pdata must provide them */
1517 .arl_entries = 4,
1518 .cpu_port = B53_CPU_PORT,
1519 .vta_regs = B53_VTA_REGS_63XX,
1520 .duplex_reg = B53_DUPLEX_STAT_63XX,
1521 .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
1522 .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
1523 },
1524 {
1525 .chip_id = BCM53010_DEVICE_ID,
1526 .dev_name = "BCM53010",
1527 .vlans = 4096,
1528 .enabled_ports = 0x1f,
1529 .arl_entries = 4,
1530 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
1531 .vta_regs = B53_VTA_REGS,
1532 .duplex_reg = B53_DUPLEX_STAT_GE,
1533 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
1534 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
1535 },
1536 {
1537 .chip_id = BCM53011_DEVICE_ID,
1538 .dev_name = "BCM53011",
1539 .vlans = 4096,
1540 .enabled_ports = 0x1bf,
1541 .arl_entries = 4,
1542 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
1543 .vta_regs = B53_VTA_REGS,
1544 .duplex_reg = B53_DUPLEX_STAT_GE,
1545 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
1546 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
1547 },
1548 {
1549 .chip_id = BCM53012_DEVICE_ID,
1550 .dev_name = "BCM53012",
1551 .vlans = 4096,
1552 .enabled_ports = 0x1bf,
1553 .arl_entries = 4,
1554 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
1555 .vta_regs = B53_VTA_REGS,
1556 .duplex_reg = B53_DUPLEX_STAT_GE,
1557 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
1558 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
1559 },
1560 {
1561 .chip_id = BCM53018_DEVICE_ID,
1562 .dev_name = "BCM53018",
1563 .vlans = 4096,
1564 .enabled_ports = 0x1f,
1565 .arl_entries = 4,
1566 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
1567 .vta_regs = B53_VTA_REGS,
1568 .duplex_reg = B53_DUPLEX_STAT_GE,
1569 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
1570 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
1571 },
1572 {
1573 .chip_id = BCM53019_DEVICE_ID,
1574 .dev_name = "BCM53019",
1575 .vlans = 4096,
1576 .enabled_ports = 0x1f,
1577 .arl_entries = 4,
1578 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
1579 .vta_regs = B53_VTA_REGS,
1580 .duplex_reg = B53_DUPLEX_STAT_GE,
1581 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
1582 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
1583 },
1584};
1585
1586static int b53_switch_init(struct b53_device *dev)
1587{
1588 struct dsa_switch *ds = dev->ds;
1589 unsigned int i;
1590 int ret;
1591
1592 for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
1593 const struct b53_chip_data *chip = &b53_switch_chips[i];
1594
1595 if (chip->chip_id == dev->chip_id) {
1596 if (!dev->enabled_ports)
1597 dev->enabled_ports = chip->enabled_ports;
1598 dev->name = chip->dev_name;
1599 dev->duplex_reg = chip->duplex_reg;
1600 dev->vta_regs[0] = chip->vta_regs[0];
1601 dev->vta_regs[1] = chip->vta_regs[1];
1602 dev->vta_regs[2] = chip->vta_regs[2];
1603 dev->jumbo_pm_reg = chip->jumbo_pm_reg;
1604 ds->drv = &b53_switch_ops;
1605 dev->cpu_port = chip->cpu_port;
1606 dev->num_vlans = chip->vlans;
1607 dev->num_arl_entries = chip->arl_entries;
1608 break;
1609 }
1610 }
1611
1612 /* check which BCM5325x version we have */
1613 if (is5325(dev)) {
1614 u8 vc4;
1615
1616 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
1617
1618 /* check reserved bits */
1619 switch (vc4 & 3) {
1620 case 1:
1621 /* BCM5325E */
1622 break;
1623 case 3:
1624 /* BCM5325F - do not use port 4 */
1625 dev->enabled_ports &= ~BIT(4);
1626 break;
1627 default:
1628/* On the BCM47XX SoCs this is the supported internal switch. */
1629#ifndef CONFIG_BCM47XX
1630 /* BCM5325M */
1631 return -EINVAL;
1632#else
1633 break;
1634#endif
1635 }
1636 } else if (dev->chip_id == BCM53115_DEVICE_ID) {
1637 u64 strap_value;
1638
1639 b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value);
1640 /* use second IMP port if GMII is enabled */
1641 if (strap_value & SV_GMII_CTRL_115)
1642 dev->cpu_port = 5;
1643 }
1644
1645 /* cpu port is always last */
1646 dev->num_ports = dev->cpu_port + 1;
1647 dev->enabled_ports |= BIT(dev->cpu_port);
1648
1649 dev->ports = devm_kzalloc(dev->dev,
1650 sizeof(struct b53_port) * dev->num_ports,
1651 GFP_KERNEL);
1652 if (!dev->ports)
1653 return -ENOMEM;
1654
1655 dev->vlans = devm_kzalloc(dev->dev,
1656 sizeof(struct b53_vlan) * dev->num_vlans,
1657 GFP_KERNEL);
1658 if (!dev->vlans)
1659 return -ENOMEM;
1660
1661 dev->reset_gpio = b53_switch_get_reset_gpio(dev);
1662 if (dev->reset_gpio >= 0) {
1663 ret = devm_gpio_request_one(dev->dev, dev->reset_gpio,
1664 GPIOF_OUT_INIT_HIGH, "robo_reset");
1665 if (ret)
1666 return ret;
1667 }
1668
1669 return 0;
1670}
1671
1672struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops,
1673 void *priv)
1674{
1675 struct dsa_switch *ds;
1676 struct b53_device *dev;
1677
1678 ds = devm_kzalloc(base, sizeof(*ds) + sizeof(*dev), GFP_KERNEL);
1679 if (!ds)
1680 return NULL;
1681
1682 dev = (struct b53_device *)(ds + 1);
1683
1684 ds->priv = dev;
1685 ds->dev = base;
1686 dev->dev = base;
1687
1688 dev->ds = ds;
1689 dev->priv = priv;
1690 dev->ops = ops;
1691 mutex_init(&dev->reg_mutex);
1692 mutex_init(&dev->stats_mutex);
1693
1694 return dev;
1695}
1696EXPORT_SYMBOL(b53_switch_alloc);
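b53_switch_alloc() sizes a single devm allocation for the dsa_switch plus its private data and derives the private pointer with `ds + 1`, so both share one lifetime and one free. A standalone model of the idiom, with userspace stand-ins for the two structures:

#include <stdio.h>
#include <stdlib.h>

struct sw { int index; };	/* stands in for struct dsa_switch */
struct sw_priv { int chip; };	/* stands in for struct b53_device */

int main(void)
{
	struct sw *ds = calloc(1, sizeof(*ds) + sizeof(struct sw_priv));
	struct sw_priv *priv;

	if (!ds)
		return 1;

	priv = (struct sw_priv *)(ds + 1);	/* first byte past *ds */
	priv->chip = 0x53115;
	printf("ds=%p priv=%p chip=0x%x\n",
	       (void *)ds, (void *)priv, priv->chip);
	free(ds);
	return 0;
}
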
1697
1698int b53_switch_detect(struct b53_device *dev)
1699{
1700 u32 id32;
1701 u16 tmp;
1702 u8 id8;
1703 int ret;
1704
1705 ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8);
1706 if (ret)
1707 return ret;
1708
1709 switch (id8) {
1710 case 0:
1711 /* BCM5325 and BCM5365 do not have this register so reads
1712 * return 0. But the read operation did succeed, so assume this
1713 * is one of them.
1714 *
1715 * Next check if we can write to the 5325's VTA register; for
1716 * 5365 it is read only.
1717 */
1718 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
1719 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);
1720
1721 if (tmp == 0xf)
1722 dev->chip_id = BCM5325_DEVICE_ID;
1723 else
1724 dev->chip_id = BCM5365_DEVICE_ID;
1725 break;
1726 case BCM5395_DEVICE_ID:
1727 case BCM5397_DEVICE_ID:
1728 case BCM5398_DEVICE_ID:
1729 dev->chip_id = id8;
1730 break;
1731 default:
1732 ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32);
1733 if (ret)
1734 return ret;
1735
1736 switch (id32) {
1737 case BCM53115_DEVICE_ID:
1738 case BCM53125_DEVICE_ID:
1739 case BCM53128_DEVICE_ID:
1740 case BCM53010_DEVICE_ID:
1741 case BCM53011_DEVICE_ID:
1742 case BCM53012_DEVICE_ID:
1743 case BCM53018_DEVICE_ID:
1744 case BCM53019_DEVICE_ID:
1745 dev->chip_id = id32;
1746 break;
1747 default:
1748 pr_err("unsupported switch detected (BCM53%02x/BCM%x)\n",
1749 id8, id32);
1750 return -ENODEV;
1751 }
1752 }
1753
1754 if (dev->chip_id == BCM5325_DEVICE_ID)
1755 return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25,
1756 &dev->core_rev);
1757 else
1758 return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID,
1759 &dev->core_rev);
1760}
1761EXPORT_SYMBOL(b53_switch_detect);
1762
1763int b53_switch_register(struct b53_device *dev)
1764{
1765 int ret;
1766
1767 if (dev->pdata) {
1768 dev->chip_id = dev->pdata->chip_id;
1769 dev->enabled_ports = dev->pdata->enabled_ports;
1770 }
1771
1772 if (!dev->chip_id && b53_switch_detect(dev))
1773 return -EINVAL;
1774
1775 ret = b53_switch_init(dev);
1776 if (ret)
1777 return ret;
1778
1779 pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev);
1780
1781 return dsa_register_switch(dev->ds, dev->ds->dev->of_node);
1782}
1783EXPORT_SYMBOL(b53_switch_register);
1784
1785MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
1786MODULE_DESCRIPTION("B53 switch library");
1787MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
new file mode 100644
index 000000000000..aa87c3fffdac
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -0,0 +1,392 @@
1/*
2 * B53 register access through MII registers
3 *
4 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <linux/kernel.h>
20#include <linux/phy.h>
21#include <linux/module.h>
22#include <linux/delay.h>
23#include <linux/brcmphy.h>
24#include <linux/rtnetlink.h>
25#include <net/dsa.h>
26
27#include "b53_priv.h"
28
29/* MII registers */
30#define REG_MII_PAGE 0x10 /* MII Page register */
31#define REG_MII_ADDR 0x11 /* MII Address register */
32#define REG_MII_DATA0 0x18 /* MII Data register 0 */
33#define REG_MII_DATA1 0x19 /* MII Data register 1 */
34#define REG_MII_DATA2 0x1a /* MII Data register 2 */
35#define REG_MII_DATA3 0x1b /* MII Data register 3 */
36
37#define REG_MII_PAGE_ENABLE BIT(0)
38#define REG_MII_ADDR_WRITE BIT(0)
39#define REG_MII_ADDR_READ BIT(1)
40
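/* Access to the switch registers is indirect through the pseudo PHY:
 * select the page once via REG_MII_PAGE, latch the register offset plus
 * a read/write opcode into REG_MII_ADDR, then poll until the switch
 * clears the opcode bits again.  Data moves through REG_MII_DATA0..3 in
 * 16-bit slices, least significant word first.
 */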
41static int b53_mdio_op(struct b53_device *dev, u8 page, u8 reg, u16 op)
42{
43 int i;
44 u16 v;
45 int ret;
46 struct mii_bus *bus = dev->priv;
47
48 if (dev->current_page != page) {
49 /* set page number */
50 v = (page << 8) | REG_MII_PAGE_ENABLE;
51 ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
52 REG_MII_PAGE, v);
53 if (ret)
54 return ret;
55 dev->current_page = page;
56 }
57
58 /* set register address */
59 v = (reg << 8) | op;
60 ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_ADDR, v);
61 if (ret)
62 return ret;
63
64 /* check if operation completed */
65 for (i = 0; i < 5; ++i) {
66 v = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
67 REG_MII_ADDR);
68 if (!(v & (REG_MII_ADDR_WRITE | REG_MII_ADDR_READ)))
69 break;
70 usleep_range(10, 100);
71 }
72
73 if (WARN_ON(i == 5))
74 return -EIO;
75
76 return 0;
77}
78
79static int b53_mdio_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
80{
81 struct mii_bus *bus = dev->priv;
82 int ret;
83
84 ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
85 if (ret)
86 return ret;
87
88 *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
89 REG_MII_DATA0) & 0xff;
90
91 return 0;
92}
93
94static int b53_mdio_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
95{
96 struct mii_bus *bus = dev->priv;
97 int ret;
98
99 ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
100 if (ret)
101 return ret;
102
103 *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_DATA0);
104
105 return 0;
106}
107
108static int b53_mdio_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
109{
110 struct mii_bus *bus = dev->priv;
111 int ret;
112
113 ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
114 if (ret)
115 return ret;
116
117 *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_DATA0);
118 *val |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
119 REG_MII_DATA1) << 16;
120
121 return 0;
122}
123
124static int b53_mdio_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
125{
126 struct mii_bus *bus = dev->priv;
127 u64 temp = 0;
128 int i;
129 int ret;
130
131 ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
132 if (ret)
133 return ret;
134
135 for (i = 2; i >= 0; i--) {
136 temp <<= 16;
137 temp |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
138 REG_MII_DATA0 + i);
139 }
140
141 *val = temp;
142
143 return 0;
144}
145
146static int b53_mdio_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
147{
148 struct mii_bus *bus = dev->priv;
149 u64 temp = 0;
150 int i;
151 int ret;
152
153 ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
154 if (ret)
155 return ret;
156
157 for (i = 3; i >= 0; i--) {
158 temp <<= 16;
159 temp |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
160 REG_MII_DATA0 + i);
161 }
162
163 *val = temp;
164
165 return 0;
166}
167
168static int b53_mdio_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
169{
170 struct mii_bus *bus = dev->priv;
171 int ret;
172
173 ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
174 REG_MII_DATA0, value);
175 if (ret)
176 return ret;
177
178 return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
179}
180
181static int b53_mdio_write16(struct b53_device *dev, u8 page, u8 reg,
182 u16 value)
183{
184 struct mii_bus *bus = dev->priv;
185 int ret;
186
187 ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
188 REG_MII_DATA0, value);
189 if (ret)
190 return ret;
191
192 return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
193}
194
195static int b53_mdio_write32(struct b53_device *dev, u8 page, u8 reg,
196 u32 value)
197{
198 struct mii_bus *bus = dev->priv;
199 unsigned int i;
200 u32 temp = value;
201
202 for (i = 0; i < 2; i++) {
203 int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
204 REG_MII_DATA0 + i,
205 temp & 0xffff);
206 if (ret)
207 return ret;
208 temp >>= 16;
209 }
210
211 return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
212}
213
214static int b53_mdio_write48(struct b53_device *dev, u8 page, u8 reg,
215 u64 value)
216{
217 struct mii_bus *bus = dev->priv;
218 unsigned int i;
219 u64 temp = value;
220
221 for (i = 0; i < 3; i++) {
222 int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
223 REG_MII_DATA0 + i,
224 temp & 0xffff);
225 if (ret)
226 return ret;
227 temp >>= 16;
228 }
229
230 return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
231}
232
233static int b53_mdio_write64(struct b53_device *dev, u8 page, u8 reg,
234 u64 value)
235{
236 struct mii_bus *bus = dev->priv;
237 unsigned int i;
238 u64 temp = value;
239
240 for (i = 0; i < 4; i++) {
241 int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
242 REG_MII_DATA0 + i,
243 temp & 0xffff);
244 if (ret)
245 return ret;
246 temp >>= 16;
247 }
248
249 return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
250}
251
252static int b53_mdio_phy_read16(struct b53_device *dev, int addr, int reg,
253 u16 *value)
254{
255 struct mii_bus *bus = dev->priv;
256
257 *value = mdiobus_read_nested(bus, addr, reg);
258
259 return 0;
260}
261
262static int b53_mdio_phy_write16(struct b53_device *dev, int addr, int reg,
263 u16 value)
264{
 265	struct mii_bus *bus = dev->priv;
266
267 return mdiobus_write_nested(bus, addr, reg, value);
268}
269
270static struct b53_io_ops b53_mdio_ops = {
271 .read8 = b53_mdio_read8,
272 .read16 = b53_mdio_read16,
273 .read32 = b53_mdio_read32,
274 .read48 = b53_mdio_read48,
275 .read64 = b53_mdio_read64,
276 .write8 = b53_mdio_write8,
277 .write16 = b53_mdio_write16,
278 .write32 = b53_mdio_write32,
279 .write48 = b53_mdio_write48,
280 .write64 = b53_mdio_write64,
281 .phy_read16 = b53_mdio_phy_read16,
282 .phy_write16 = b53_mdio_phy_write16,
283};
284
285#define B53_BRCM_OUI_1 0x0143bc00
286#define B53_BRCM_OUI_2 0x03625c00
287#define B53_BRCM_OUI_3 0x00406000
288
289static int b53_mdio_probe(struct mdio_device *mdiodev)
290{
291 struct b53_device *dev;
292 u32 phy_id;
293 int ret;
294
295 /* allow the generic PHY driver to take over the non-management MDIO
296 * addresses
297 */
298 if (mdiodev->addr != BRCM_PSEUDO_PHY_ADDR && mdiodev->addr != 0) {
299 dev_err(&mdiodev->dev, "leaving address %d to PHY\n",
300 mdiodev->addr);
301 return -ENODEV;
302 }
303
304 /* read the first port's id */
305 phy_id = mdiobus_read(mdiodev->bus, 0, 2) << 16;
306 phy_id |= mdiobus_read(mdiodev->bus, 0, 3);
307
308 /* BCM5325, BCM539x (OUI_1)
309 * BCM53125, BCM53128 (OUI_2)
310 * BCM5365 (OUI_3)
311 */
312 if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 &&
313 (phy_id & 0xfffffc00) != B53_BRCM_OUI_2 &&
314 (phy_id & 0xfffffc00) != B53_BRCM_OUI_3) {
315 dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id);
316 return -ENODEV;
317 }
318
319 /* First probe will come from SWITCH_MDIO controller on the 7445D0
320 * switch, which will conflict with the 7445 integrated switch
 321	 * pseudo-phy (we end up programming both). In that case, return
 322	 * -EPROBE_DEFER the first time we get here and wait until we come
 323	 * back with the slave MDIO bus, which has the correct indirection
 324	 * layer set up.
325 */
326 if (of_machine_is_compatible("brcm,bcm7445d0") &&
327 strcmp(mdiodev->bus->name, "sf2 slave mii"))
328 return -EPROBE_DEFER;
329
330 dev = b53_switch_alloc(&mdiodev->dev, &b53_mdio_ops, mdiodev->bus);
331 if (!dev)
332 return -ENOMEM;
333
334 /* we don't use page 0xff, so force a page set */
335 dev->current_page = 0xff;
336 dev->bus = mdiodev->bus;
337
338 dev_set_drvdata(&mdiodev->dev, dev);
339
340 ret = b53_switch_register(dev);
341 if (ret) {
342 dev_err(&mdiodev->dev, "failed to register switch: %i\n", ret);
343 return ret;
344 }
345
346 return ret;
347}
348
349static void b53_mdio_remove(struct mdio_device *mdiodev)
350{
351 struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
352 struct dsa_switch *ds = dev->ds;
353
354 dsa_unregister_switch(ds);
355}
356
357static const struct of_device_id b53_of_match[] = {
358 { .compatible = "brcm,bcm5325" },
359 { .compatible = "brcm,bcm53115" },
360 { .compatible = "brcm,bcm53125" },
361 { .compatible = "brcm,bcm53128" },
362 { .compatible = "brcm,bcm5365" },
363 { .compatible = "brcm,bcm5395" },
364 { .compatible = "brcm,bcm5397" },
365 { .compatible = "brcm,bcm5398" },
366 { /* sentinel */ },
367};
368MODULE_DEVICE_TABLE(of, b53_of_match);
369
370static struct mdio_driver b53_mdio_driver = {
371 .probe = b53_mdio_probe,
372 .remove = b53_mdio_remove,
373 .mdiodrv.driver = {
374 .name = "bcm53xx",
375 .of_match_table = b53_of_match,
376 },
377};
378
379static int __init b53_mdio_driver_register(void)
380{
381 return mdio_driver_register(&b53_mdio_driver);
382}
383module_init(b53_mdio_driver_register);
384
385static void __exit b53_mdio_driver_unregister(void)
386{
387 mdio_driver_unregister(&b53_mdio_driver);
388}
389module_exit(b53_mdio_driver_unregister);
390
391MODULE_DESCRIPTION("B53 MDIO access driver");
392MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
new file mode 100644
index 000000000000..21f1068b0804
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -0,0 +1,274 @@
1/*
2 * B53 register access through memory mapped registers
3 *
4 * Copyright (C) 2012-2013 Jonas Gorski <jogo@openwrt.org>
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <linux/kernel.h>
20#include <linux/kconfig.h>
21#include <linux/module.h>
22#include <linux/io.h>
23#include <linux/platform_device.h>
24#include <linux/platform_data/b53.h>
25
26#include "b53_priv.h"
27
28struct b53_mmap_priv {
29 void __iomem *regs;
30};
31
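/* When pdata->big_endian is set, all multi-byte accesses go through the
 * big-endian MMIO helpers instead of the native-endian ones.
 */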
32static int b53_mmap_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
33{
34 u8 __iomem *regs = dev->priv;
35
36 *val = readb(regs + (page << 8) + reg);
37
38 return 0;
39}
40
41static int b53_mmap_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
42{
43 u8 __iomem *regs = dev->priv;
44
45 if (WARN_ON(reg % 2))
46 return -EINVAL;
47
48 if (dev->pdata && dev->pdata->big_endian)
49 *val = ioread16be(regs + (page << 8) + reg);
50 else
51 *val = readw(regs + (page << 8) + reg);
52
53 return 0;
54}
55
56static int b53_mmap_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
57{
58 u8 __iomem *regs = dev->priv;
59
60 if (WARN_ON(reg % 4))
61 return -EINVAL;
62
63 if (dev->pdata && dev->pdata->big_endian)
64 *val = ioread32be(regs + (page << 8) + reg);
65 else
66 *val = readl(regs + (page << 8) + reg);
67
68 return 0;
69}
70
71static int b53_mmap_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
72{
73 u8 __iomem *regs = dev->priv;
74
75 if (WARN_ON(reg % 2))
76 return -EINVAL;
77
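	/* split the access so that each half is naturally aligned: a
	 * 2 (mod 4) offset reads 16 then 32 bits, a 4-aligned offset
	 * reads 32 then 16 bits
	 */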
78 if (reg % 4) {
79 u16 lo;
80 u32 hi;
81
82 if (dev->pdata && dev->pdata->big_endian) {
83 lo = ioread16be(regs + (page << 8) + reg);
84 hi = ioread32be(regs + (page << 8) + reg + 2);
85 } else {
86 lo = readw(regs + (page << 8) + reg);
87 hi = readl(regs + (page << 8) + reg + 2);
88 }
89
90 *val = ((u64)hi << 16) | lo;
91 } else {
92 u32 lo;
93 u16 hi;
94
95 if (dev->pdata && dev->pdata->big_endian) {
96 lo = ioread32be(regs + (page << 8) + reg);
97 hi = ioread16be(regs + (page << 8) + reg + 4);
98 } else {
99 lo = readl(regs + (page << 8) + reg);
100 hi = readw(regs + (page << 8) + reg + 4);
101 }
102
103 *val = ((u64)hi << 32) | lo;
104 }
105
106 return 0;
107}
108
109static int b53_mmap_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
110{
111 u8 __iomem *regs = dev->priv;
112 u32 hi, lo;
113
114 if (WARN_ON(reg % 4))
115 return -EINVAL;
116
117 if (dev->pdata && dev->pdata->big_endian) {
118 lo = ioread32be(regs + (page << 8) + reg);
119 hi = ioread32be(regs + (page << 8) + reg + 4);
120 } else {
121 lo = readl(regs + (page << 8) + reg);
122 hi = readl(regs + (page << 8) + reg + 4);
123 }
124
125 *val = ((u64)hi << 32) | lo;
126
127 return 0;
128}
129
130static int b53_mmap_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
131{
132 u8 __iomem *regs = dev->priv;
133
134 writeb(value, regs + (page << 8) + reg);
135
136 return 0;
137}
138
139static int b53_mmap_write16(struct b53_device *dev, u8 page, u8 reg,
140 u16 value)
141{
142 u8 __iomem *regs = dev->priv;
143
144 if (WARN_ON(reg % 2))
145 return -EINVAL;
146
147 if (dev->pdata && dev->pdata->big_endian)
148 iowrite16be(value, regs + (page << 8) + reg);
149 else
150 writew(value, regs + (page << 8) + reg);
151
152 return 0;
153}
154
155static int b53_mmap_write32(struct b53_device *dev, u8 page, u8 reg,
156 u32 value)
157{
158 u8 __iomem *regs = dev->priv;
159
160 if (WARN_ON(reg % 4))
161 return -EINVAL;
162
163 if (dev->pdata && dev->pdata->big_endian)
164 iowrite32be(value, regs + (page << 8) + reg);
165 else
166 writel(value, regs + (page << 8) + reg);
167
168 return 0;
169}
170
171static int b53_mmap_write48(struct b53_device *dev, u8 page, u8 reg,
172 u64 value)
173{
174 if (WARN_ON(reg % 2))
175 return -EINVAL;
176
177 if (reg % 4) {
178 u32 hi = (u32)(value >> 16);
179 u16 lo = (u16)value;
180
181 b53_mmap_write16(dev, page, reg, lo);
182 b53_mmap_write32(dev, page, reg + 2, hi);
183 } else {
184 u16 hi = (u16)(value >> 32);
185 u32 lo = (u32)value;
186
187 b53_mmap_write32(dev, page, reg, lo);
188 b53_mmap_write16(dev, page, reg + 4, hi);
189 }
190
191 return 0;
192}
193
194static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg,
195 u64 value)
196{
197 u32 hi, lo;
198
199 hi = upper_32_bits(value);
200 lo = lower_32_bits(value);
201
202 if (WARN_ON(reg % 4))
203 return -EINVAL;
204
205 b53_mmap_write32(dev, page, reg, lo);
206 b53_mmap_write32(dev, page, reg + 4, hi);
207
208 return 0;
209}
210
211static struct b53_io_ops b53_mmap_ops = {
212 .read8 = b53_mmap_read8,
213 .read16 = b53_mmap_read16,
214 .read32 = b53_mmap_read32,
215 .read48 = b53_mmap_read48,
216 .read64 = b53_mmap_read64,
217 .write8 = b53_mmap_write8,
218 .write16 = b53_mmap_write16,
219 .write32 = b53_mmap_write32,
220 .write48 = b53_mmap_write48,
221 .write64 = b53_mmap_write64,
222};
223
224static int b53_mmap_probe(struct platform_device *pdev)
225{
226 struct b53_platform_data *pdata = pdev->dev.platform_data;
227 struct b53_device *dev;
228
229 if (!pdata)
230 return -EINVAL;
231
232 dev = b53_switch_alloc(&pdev->dev, &b53_mmap_ops, pdata->regs);
233 if (!dev)
234 return -ENOMEM;
235
 236	/* pdata was already validated above */
 237	dev->pdata = pdata;
238
239 platform_set_drvdata(pdev, dev);
240
241 return b53_switch_register(dev);
242}
243
244static int b53_mmap_remove(struct platform_device *pdev)
245{
246 struct b53_device *dev = platform_get_drvdata(pdev);
247
248 if (dev)
249 b53_switch_remove(dev);
250
251 return 0;
252}
253
254static const struct of_device_id b53_mmap_of_table[] = {
255 { .compatible = "brcm,bcm3384-switch" },
256 { .compatible = "brcm,bcm6328-switch" },
257 { .compatible = "brcm,bcm6368-switch" },
258 { .compatible = "brcm,bcm63xx-switch" },
259 { /* sentinel */ },
260};
261
262static struct platform_driver b53_mmap_driver = {
263 .probe = b53_mmap_probe,
264 .remove = b53_mmap_remove,
265 .driver = {
266 .name = "b53-switch",
267 .of_match_table = b53_mmap_of_table,
268 },
269};
270
271module_platform_driver(b53_mmap_driver);
272MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
273MODULE_DESCRIPTION("B53 MMAP access driver");
274MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
new file mode 100644
index 000000000000..5d8c602fb877
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -0,0 +1,387 @@
1/*
2 * B53 common definitions
3 *
4 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#ifndef __B53_PRIV_H
20#define __B53_PRIV_H
21
22#include <linux/kernel.h>
23#include <linux/mutex.h>
24#include <linux/phy.h>
25#include <net/dsa.h>
26
27#include "b53_regs.h"
28
29struct b53_device;
30struct net_device;
31
32struct b53_io_ops {
33 int (*read8)(struct b53_device *dev, u8 page, u8 reg, u8 *value);
34 int (*read16)(struct b53_device *dev, u8 page, u8 reg, u16 *value);
35 int (*read32)(struct b53_device *dev, u8 page, u8 reg, u32 *value);
36 int (*read48)(struct b53_device *dev, u8 page, u8 reg, u64 *value);
37 int (*read64)(struct b53_device *dev, u8 page, u8 reg, u64 *value);
38 int (*write8)(struct b53_device *dev, u8 page, u8 reg, u8 value);
39 int (*write16)(struct b53_device *dev, u8 page, u8 reg, u16 value);
40 int (*write32)(struct b53_device *dev, u8 page, u8 reg, u32 value);
41 int (*write48)(struct b53_device *dev, u8 page, u8 reg, u64 value);
42 int (*write64)(struct b53_device *dev, u8 page, u8 reg, u64 value);
43 int (*phy_read16)(struct b53_device *dev, int addr, int reg, u16 *value);
44 int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value);
45};
46
47enum {
48 BCM5325_DEVICE_ID = 0x25,
49 BCM5365_DEVICE_ID = 0x65,
50 BCM5395_DEVICE_ID = 0x95,
51 BCM5397_DEVICE_ID = 0x97,
52 BCM5398_DEVICE_ID = 0x98,
53 BCM53115_DEVICE_ID = 0x53115,
54 BCM53125_DEVICE_ID = 0x53125,
55 BCM53128_DEVICE_ID = 0x53128,
56 BCM63XX_DEVICE_ID = 0x6300,
57 BCM53010_DEVICE_ID = 0x53010,
58 BCM53011_DEVICE_ID = 0x53011,
59 BCM53012_DEVICE_ID = 0x53012,
60 BCM53018_DEVICE_ID = 0x53018,
61 BCM53019_DEVICE_ID = 0x53019,
62};
63
64#define B53_N_PORTS 9
65#define B53_N_PORTS_25 6
66
67struct b53_port {
68 u16 vlan_ctl_mask;
69 struct net_device *bridge_dev;
70};
71
72struct b53_vlan {
73 u16 members;
74 u16 untag;
75 bool valid;
76};
77
78struct b53_device {
79 struct dsa_switch *ds;
80 struct b53_platform_data *pdata;
81 const char *name;
82
83 struct mutex reg_mutex;
84 struct mutex stats_mutex;
85 const struct b53_io_ops *ops;
86
87 /* chip specific data */
88 u32 chip_id;
89 u8 core_rev;
90 u8 vta_regs[3];
91 u8 duplex_reg;
92 u8 jumbo_pm_reg;
93 u8 jumbo_size_reg;
94 int reset_gpio;
95 u8 num_arl_entries;
96
97 /* used ports mask */
98 u16 enabled_ports;
99 unsigned int cpu_port;
100
101 /* connect specific data */
102 u8 current_page;
103 struct device *dev;
104
105 /* Master MDIO bus we got probed from */
106 struct mii_bus *bus;
107
108 void *priv;
109
110 /* run time configuration */
111 bool enable_jumbo;
112
113 unsigned int num_vlans;
114 struct b53_vlan *vlans;
115 unsigned int num_ports;
116 struct b53_port *ports;
117};
118
119#define b53_for_each_port(dev, i) \
120 for (i = 0; i < B53_N_PORTS; i++) \
121 if (dev->enabled_ports & BIT(i))
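/* Usage: declare the iterator yourself, e.g.
 *
 *	int port;
 *
 *	b53_for_each_port(dev, port)
 *		pr_debug("port %d is enabled\n", port);
 */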
122
123
124static inline int is5325(struct b53_device *dev)
125{
126 return dev->chip_id == BCM5325_DEVICE_ID;
127}
128
129static inline int is5365(struct b53_device *dev)
130{
131#ifdef CONFIG_BCM47XX
132 return dev->chip_id == BCM5365_DEVICE_ID;
133#else
134 return 0;
135#endif
136}
137
138static inline int is5397_98(struct b53_device *dev)
139{
140 return dev->chip_id == BCM5397_DEVICE_ID ||
141 dev->chip_id == BCM5398_DEVICE_ID;
142}
143
144static inline int is539x(struct b53_device *dev)
145{
146 return dev->chip_id == BCM5395_DEVICE_ID ||
147 dev->chip_id == BCM5397_DEVICE_ID ||
148 dev->chip_id == BCM5398_DEVICE_ID;
149}
150
151static inline int is531x5(struct b53_device *dev)
152{
153 return dev->chip_id == BCM53115_DEVICE_ID ||
154 dev->chip_id == BCM53125_DEVICE_ID ||
155 dev->chip_id == BCM53128_DEVICE_ID;
156}
157
158static inline int is63xx(struct b53_device *dev)
159{
160#ifdef CONFIG_BCM63XX
161 return dev->chip_id == BCM63XX_DEVICE_ID;
162#else
163 return 0;
164#endif
165}
166
167static inline int is5301x(struct b53_device *dev)
168{
169 return dev->chip_id == BCM53010_DEVICE_ID ||
170 dev->chip_id == BCM53011_DEVICE_ID ||
171 dev->chip_id == BCM53012_DEVICE_ID ||
172 dev->chip_id == BCM53018_DEVICE_ID ||
173 dev->chip_id == BCM53019_DEVICE_ID;
174}
175
176#define B53_CPU_PORT_25 5
177#define B53_CPU_PORT 8
178
179static inline int is_cpu_port(struct b53_device *dev, int port)
180{
 181	return dev->cpu_port == port;
182}
183
184struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops,
185 void *priv);
186
187int b53_switch_detect(struct b53_device *dev);
188
189int b53_switch_register(struct b53_device *dev);
190
191static inline void b53_switch_remove(struct b53_device *dev)
192{
193 dsa_unregister_switch(dev->ds);
194}
195
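/* All register accessors below serialize through reg_mutex, so the
 * b53_io_ops implementations are called with the lock held, need no
 * locking of their own and are allowed to sleep.
 */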
196static inline int b53_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
197{
198 int ret;
199
200 mutex_lock(&dev->reg_mutex);
201 ret = dev->ops->read8(dev, page, reg, val);
202 mutex_unlock(&dev->reg_mutex);
203
204 return ret;
205}
206
207static inline int b53_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
208{
209 int ret;
210
211 mutex_lock(&dev->reg_mutex);
212 ret = dev->ops->read16(dev, page, reg, val);
213 mutex_unlock(&dev->reg_mutex);
214
215 return ret;
216}
217
218static inline int b53_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
219{
220 int ret;
221
222 mutex_lock(&dev->reg_mutex);
223 ret = dev->ops->read32(dev, page, reg, val);
224 mutex_unlock(&dev->reg_mutex);
225
226 return ret;
227}
228
229static inline int b53_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
230{
231 int ret;
232
233 mutex_lock(&dev->reg_mutex);
234 ret = dev->ops->read48(dev, page, reg, val);
235 mutex_unlock(&dev->reg_mutex);
236
237 return ret;
238}
239
240static inline int b53_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
241{
242 int ret;
243
244 mutex_lock(&dev->reg_mutex);
245 ret = dev->ops->read64(dev, page, reg, val);
246 mutex_unlock(&dev->reg_mutex);
247
248 return ret;
249}
250
251static inline int b53_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
252{
253 int ret;
254
255 mutex_lock(&dev->reg_mutex);
256 ret = dev->ops->write8(dev, page, reg, value);
257 mutex_unlock(&dev->reg_mutex);
258
259 return ret;
260}
261
262static inline int b53_write16(struct b53_device *dev, u8 page, u8 reg,
263 u16 value)
264{
265 int ret;
266
267 mutex_lock(&dev->reg_mutex);
268 ret = dev->ops->write16(dev, page, reg, value);
269 mutex_unlock(&dev->reg_mutex);
270
271 return ret;
272}
273
274static inline int b53_write32(struct b53_device *dev, u8 page, u8 reg,
275 u32 value)
276{
277 int ret;
278
279 mutex_lock(&dev->reg_mutex);
280 ret = dev->ops->write32(dev, page, reg, value);
281 mutex_unlock(&dev->reg_mutex);
282
283 return ret;
284}
285
286static inline int b53_write48(struct b53_device *dev, u8 page, u8 reg,
287 u64 value)
288{
289 int ret;
290
291 mutex_lock(&dev->reg_mutex);
292 ret = dev->ops->write48(dev, page, reg, value);
293 mutex_unlock(&dev->reg_mutex);
294
295 return ret;
296}
297
298static inline int b53_write64(struct b53_device *dev, u8 page, u8 reg,
299 u64 value)
300{
301 int ret;
302
303 mutex_lock(&dev->reg_mutex);
304 ret = dev->ops->write64(dev, page, reg, value);
305 mutex_unlock(&dev->reg_mutex);
306
307 return ret;
308}
309
310struct b53_arl_entry {
311 u8 port;
312 u8 mac[ETH_ALEN];
313 u16 vid;
314 u8 is_valid:1;
315 u8 is_age:1;
316 u8 is_static:1;
317};
318
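/* MAC addresses are packed big-endian into the 64-bit ARL MAC/VID
 * value: mac[0] ends up in the most significant of the six bytes.
 */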
319static inline void b53_mac_from_u64(u64 src, u8 *dst)
320{
321 unsigned int i;
322
323 for (i = 0; i < ETH_ALEN; i++)
324 dst[ETH_ALEN - 1 - i] = (src >> (8 * i)) & 0xff;
325}
326
327static inline u64 b53_mac_to_u64(const u8 *src)
328{
329 unsigned int i;
330 u64 dst = 0;
331
332 for (i = 0; i < ETH_ALEN; i++)
333 dst |= (u64)src[ETH_ALEN - 1 - i] << (8 * i);
334
335 return dst;
336}
337
338static inline void b53_arl_to_entry(struct b53_arl_entry *ent,
339 u64 mac_vid, u32 fwd_entry)
340{
341 memset(ent, 0, sizeof(*ent));
342 ent->port = fwd_entry & ARLTBL_DATA_PORT_ID_MASK;
343 ent->is_valid = !!(fwd_entry & ARLTBL_VALID);
344 ent->is_age = !!(fwd_entry & ARLTBL_AGE);
345 ent->is_static = !!(fwd_entry & ARLTBL_STATIC);
346 b53_mac_from_u64(mac_vid, ent->mac);
347 ent->vid = mac_vid >> ARLTBL_VID_S;
348}
349
350static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
351 const struct b53_arl_entry *ent)
352{
353 *mac_vid = b53_mac_to_u64(ent->mac);
354 *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK) << ARLTBL_VID_S;
355 *fwd_entry = ent->port & ARLTBL_DATA_PORT_ID_MASK;
356 if (ent->is_valid)
357 *fwd_entry |= ARLTBL_VALID;
358 if (ent->is_static)
359 *fwd_entry |= ARLTBL_STATIC;
360 if (ent->is_age)
361 *fwd_entry |= ARLTBL_AGE;
362}
363
364#ifdef CONFIG_BCM47XX
365
366#include <linux/version.h>
367#include <linux/bcm47xx_nvram.h>
368#include <bcm47xx_board.h>
369static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
370{
371 enum bcm47xx_board board = bcm47xx_board_get();
372
373 switch (board) {
374 case BCM47XX_BOARD_LINKSYS_WRT300NV11:
375 case BCM47XX_BOARD_LINKSYS_WRT310NV1:
376 return 8;
377 default:
378 return bcm47xx_nvram_gpio_pin("robo_reset");
379 }
380}
381#else
382static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
383{
384 return -ENOENT;
385}
386#endif
387#endif
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
new file mode 100644
index 000000000000..8f12bddd5dc9
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -0,0 +1,434 @@
1/*
2 * B53 register definitions
3 *
4 * Copyright (C) 2004 Broadcom Corporation
5 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
6 *
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20#ifndef __B53_REGS_H
21#define __B53_REGS_H
22
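/* All switch registers are addressed as an 8-bit page plus an 8-bit
 * offset within that page; the accessors in b53_priv.h take (page, reg)
 * pairs accordingly.
 */
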
23/* Management Port (SMP) Page offsets */
24#define B53_CTRL_PAGE 0x00 /* Control */
25#define B53_STAT_PAGE 0x01 /* Status */
26#define B53_MGMT_PAGE 0x02 /* Management Mode */
27#define B53_MIB_AC_PAGE 0x03 /* MIB Autocast */
28#define B53_ARLCTRL_PAGE 0x04 /* ARL Control */
29#define B53_ARLIO_PAGE 0x05 /* ARL Access */
30#define B53_FRAMEBUF_PAGE 0x06 /* Management frame access */
31#define B53_MEM_ACCESS_PAGE 0x08 /* Memory access */
32
33/* PHY Registers */
34#define B53_PORT_MII_PAGE(i) (0x10 + (i)) /* Port i MII Registers */
35#define B53_IM_PORT_PAGE 0x18 /* Inverse MII Port (to EMAC) */
36#define B53_ALL_PORT_PAGE 0x19 /* All ports MII (broadcast) */
37
38/* MIB registers */
39#define B53_MIB_PAGE(i) (0x20 + (i))
40
41/* Quality of Service (QoS) Registers */
42#define B53_QOS_PAGE 0x30
43
44/* Port VLAN Page */
45#define B53_PVLAN_PAGE 0x31
46
47/* VLAN Registers */
48#define B53_VLAN_PAGE 0x34
49
50/* Jumbo Frame Registers */
51#define B53_JUMBO_PAGE 0x40
52
53/* CFP Configuration Registers Page */
54#define B53_CFP_PAGE 0xa1
55
56/*************************************************************************
57 * Control Page registers
58 *************************************************************************/
59
60/* Port Control Register (8 bit) */
61#define B53_PORT_CTRL(i) (0x00 + (i))
62#define PORT_CTRL_RX_DISABLE BIT(0)
63#define PORT_CTRL_TX_DISABLE BIT(1)
64#define PORT_CTRL_RX_BCST_EN BIT(2) /* Broadcast RX (P8 only) */
65#define PORT_CTRL_RX_MCST_EN BIT(3) /* Multicast RX (P8 only) */
66#define PORT_CTRL_RX_UCST_EN BIT(4) /* Unicast RX (P8 only) */
67#define PORT_CTRL_STP_STATE_S 5
68#define PORT_CTRL_NO_STP (0 << PORT_CTRL_STP_STATE_S)
69#define PORT_CTRL_DIS_STATE (1 << PORT_CTRL_STP_STATE_S)
70#define PORT_CTRL_BLOCK_STATE (2 << PORT_CTRL_STP_STATE_S)
71#define PORT_CTRL_LISTEN_STATE (3 << PORT_CTRL_STP_STATE_S)
72#define PORT_CTRL_LEARN_STATE (4 << PORT_CTRL_STP_STATE_S)
73#define PORT_CTRL_FWD_STATE (5 << PORT_CTRL_STP_STATE_S)
74#define PORT_CTRL_STP_STATE_MASK (0x7 << PORT_CTRL_STP_STATE_S)
75
76/* SMP Control Register (8 bit) */
77#define B53_SMP_CTRL 0x0a
78
79/* Switch Mode Control Register (8 bit) */
80#define B53_SWITCH_MODE 0x0b
81#define SM_SW_FWD_MODE BIT(0) /* 1 = Managed Mode */
82#define SM_SW_FWD_EN BIT(1) /* Forwarding Enable */
83
84/* IMP Port state override register (8 bit) */
85#define B53_PORT_OVERRIDE_CTRL 0x0e
86#define PORT_OVERRIDE_LINK BIT(0)
87#define PORT_OVERRIDE_FULL_DUPLEX BIT(1) /* 0 = Half Duplex */
88#define PORT_OVERRIDE_SPEED_S 2
89#define PORT_OVERRIDE_SPEED_10M (0 << PORT_OVERRIDE_SPEED_S)
90#define PORT_OVERRIDE_SPEED_100M (1 << PORT_OVERRIDE_SPEED_S)
91#define PORT_OVERRIDE_SPEED_1000M (2 << PORT_OVERRIDE_SPEED_S)
92#define PORT_OVERRIDE_RV_MII_25 BIT(4) /* BCM5325 only */
93#define PORT_OVERRIDE_RX_FLOW BIT(4)
94#define PORT_OVERRIDE_TX_FLOW BIT(5)
95#define PORT_OVERRIDE_SPEED_2000M BIT(6) /* BCM5301X only, requires setting 1000M */
96#define PORT_OVERRIDE_EN BIT(7) /* Use the register contents */
97
98/* Power-down mode control */
99#define B53_PD_MODE_CTRL_25 0x0f
100
101/* IP Multicast control (8 bit) */
102#define B53_IP_MULTICAST_CTRL 0x21
103#define B53_IPMC_FWD_EN BIT(1)
104#define B53_UC_FWD_EN BIT(6)
105#define B53_MC_FWD_EN BIT(7)
106
107/* (16 bit) */
108#define B53_UC_FLOOD_MASK 0x32
109#define B53_MC_FLOOD_MASK 0x34
110#define B53_IPMC_FLOOD_MASK 0x36
111
112/*
113 * Override Ports 0-7 State on devices with xMII interfaces (8 bit)
114 *
 115 * For port 8, keep using B53_PORT_OVERRIDE_CTRL.
 116 * Note that not every port is available on every device: BCM5301X has
 117 * no override register for port 6, and BCM63xx has further limitations.
118 */
119#define B53_GMII_PORT_OVERRIDE_CTRL(i) (0x58 + (i))
120#define GMII_PO_LINK BIT(0)
121#define GMII_PO_FULL_DUPLEX BIT(1) /* 0 = Half Duplex */
122#define GMII_PO_SPEED_S 2
123#define GMII_PO_SPEED_10M (0 << GMII_PO_SPEED_S)
124#define GMII_PO_SPEED_100M (1 << GMII_PO_SPEED_S)
125#define GMII_PO_SPEED_1000M (2 << GMII_PO_SPEED_S)
126#define GMII_PO_RX_FLOW BIT(4)
127#define GMII_PO_TX_FLOW BIT(5)
128#define GMII_PO_EN BIT(6) /* Use the register contents */
129#define GMII_PO_SPEED_2000M BIT(7) /* BCM5301X only, requires setting 1000M */
130
131#define B53_RGMII_CTRL_IMP 0x60
132#define RGMII_CTRL_ENABLE_GMII BIT(7)
133#define RGMII_CTRL_TIMING_SEL BIT(2)
134#define RGMII_CTRL_DLL_RXC BIT(1)
135#define RGMII_CTRL_DLL_TXC BIT(0)
136
137#define B53_RGMII_CTRL_P(i) (B53_RGMII_CTRL_IMP + (i))
138
139/* Software reset register (8 bit) */
140#define B53_SOFTRESET 0x79
141#define SW_RST BIT(7)
142#define EN_SW_RST BIT(4)
143
144/* Fast Aging Control register (8 bit) */
145#define B53_FAST_AGE_CTRL 0x88
146#define FAST_AGE_STATIC BIT(0)
147#define FAST_AGE_DYNAMIC BIT(1)
148#define FAST_AGE_PORT BIT(2)
149#define FAST_AGE_VLAN BIT(3)
150#define FAST_AGE_STP BIT(4)
151#define FAST_AGE_MC BIT(5)
152#define FAST_AGE_DONE BIT(7)
153
154/* Fast Aging Port Control register (8 bit) */
155#define B53_FAST_AGE_PORT_CTRL 0x89
156
157/* Fast Aging VID Control register (16 bit) */
158#define B53_FAST_AGE_VID_CTRL 0x8a
159
160/*************************************************************************
161 * Status Page registers
162 *************************************************************************/
163
164/* Link Status Summary Register (16bit) */
165#define B53_LINK_STAT 0x00
166
167/* Link Status Change Register (16 bit) */
168#define B53_LINK_STAT_CHANGE 0x02
169
170/* Port Speed Summary Register (16 bit for FE, 32 bit for GE) */
171#define B53_SPEED_STAT 0x04
172#define SPEED_PORT_FE(reg, port) (((reg) >> (port)) & 1)
 173#define SPEED_PORT_GE(reg, port)	(((reg) >> (2 * (port))) & 3)
174#define SPEED_STAT_10M 0
175#define SPEED_STAT_100M 1
176#define SPEED_STAT_1000M 2
177
178/* Duplex Status Summary (16 bit) */
179#define B53_DUPLEX_STAT_FE 0x06
180#define B53_DUPLEX_STAT_GE 0x08
181#define B53_DUPLEX_STAT_63XX 0x0c
182
183/* Revision ID register for BCM5325 */
184#define B53_REV_ID_25 0x50
185
186/* Strap Value (48 bit) */
187#define B53_STRAP_VALUE 0x70
188#define SV_GMII_CTRL_115 BIT(27)
189
190/*************************************************************************
191 * Management Mode Page Registers
192 *************************************************************************/
193
194/* Global Management Config Register (8 bit) */
195#define B53_GLOBAL_CONFIG 0x00
196#define GC_RESET_MIB 0x01
197#define GC_RX_BPDU_EN 0x02
198#define GC_MIB_AC_HDR_EN 0x10
199#define GC_MIB_AC_EN 0x20
200#define GC_FRM_MGMT_PORT_M 0xC0
201#define GC_FRM_MGMT_PORT_04 0x00
202#define GC_FRM_MGMT_PORT_MII 0x80
203
204/* Broadcom Header control register (8 bit) */
205#define B53_BRCM_HDR 0x03
206#define BRCM_HDR_P8_EN BIT(0) /* Enable tagging on port 8 */
207#define BRCM_HDR_P5_EN BIT(1) /* Enable tagging on port 5 */
208
209/* Device ID register (8 or 32 bit) */
210#define B53_DEVICE_ID 0x30
211
212/* Revision ID register (8 bit) */
213#define B53_REV_ID 0x40
214
215/*************************************************************************
216 * ARL Access Page Registers
217 *************************************************************************/
218
219/* VLAN Table Access Register (8 bit) */
220#define B53_VT_ACCESS 0x80
221#define B53_VT_ACCESS_9798 0x60 /* for BCM5397/BCM5398 */
 222#define B53_VT_ACCESS_63XX		0x60 /* for BCM6328/6362/6368 */
223#define VTA_CMD_WRITE 0
224#define VTA_CMD_READ 1
225#define VTA_CMD_CLEAR 2
226#define VTA_START_CMD BIT(7)
227
228/* VLAN Table Index Register (16 bit) */
229#define B53_VT_INDEX 0x81
230#define B53_VT_INDEX_9798 0x61
231#define B53_VT_INDEX_63XX 0x62
232
233/* VLAN Table Entry Register (32 bit) */
234#define B53_VT_ENTRY 0x83
235#define B53_VT_ENTRY_9798 0x63
236#define B53_VT_ENTRY_63XX 0x64
237#define VTE_MEMBERS 0x1ff
238#define VTE_UNTAG_S 9
239#define VTE_UNTAG (0x1ff << 9)
240
241/*************************************************************************
242 * ARL I/O Registers
243 *************************************************************************/
244
245/* ARL Table Read/Write Register (8 bit) */
246#define B53_ARLTBL_RW_CTRL 0x00
247#define ARLTBL_RW BIT(0)
248#define ARLTBL_START_DONE BIT(7)
249
250/* MAC Address Index Register (48 bit) */
251#define B53_MAC_ADDR_IDX 0x02
252
253/* VLAN ID Index Register (16 bit) */
254#define B53_VLAN_ID_IDX 0x08
255
256/* ARL Table MAC/VID Entry N Registers (64 bit)
257 *
258 * BCM5325 and BCM5365 share most definitions below
259 */
260#define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n))
261#define ARLTBL_MAC_MASK 0xffffffffffff
262#define ARLTBL_VID_S 48
263#define ARLTBL_VID_MASK_25 0xff
264#define ARLTBL_VID_MASK 0xfff
265#define ARLTBL_DATA_PORT_ID_S_25 48
266#define ARLTBL_DATA_PORT_ID_MASK_25 0xf
267#define ARLTBL_AGE_25 BIT(61)
268#define ARLTBL_STATIC_25 BIT(62)
269#define ARLTBL_VALID_25 BIT(63)
270
271/* ARL Table Data Entry N Registers (32 bit) */
272#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x08)
273#define ARLTBL_DATA_PORT_ID_MASK 0x1ff
 274#define   ARLTBL_TC(tc)			((3 & (tc)) << 11)
275#define ARLTBL_AGE BIT(14)
276#define ARLTBL_STATIC BIT(15)
277#define ARLTBL_VALID BIT(16)
278
279/* ARL Search Control Register (8 bit) */
280#define B53_ARL_SRCH_CTL 0x50
281#define B53_ARL_SRCH_CTL_25 0x20
282#define ARL_SRCH_VLID BIT(0)
283#define ARL_SRCH_STDN BIT(7)
284
285/* ARL Search Address Register (16 bit) */
286#define B53_ARL_SRCH_ADDR 0x51
287#define B53_ARL_SRCH_ADDR_25 0x22
288#define B53_ARL_SRCH_ADDR_65 0x24
289#define ARL_ADDR_MASK GENMASK(14, 0)
290
291/* ARL Search MAC/VID Result (64 bit) */
292#define B53_ARL_SRCH_RSTL_0_MACVID 0x60
293
294/* Single register search result on 5325 */
295#define B53_ARL_SRCH_RSTL_0_MACVID_25 0x24
296/* Single register search result on 5365 */
297#define B53_ARL_SRCH_RSTL_0_MACVID_65 0x30
298
299/* ARL Search Data Result (32 bit) */
300#define B53_ARL_SRCH_RSTL_0 0x68
301
302#define B53_ARL_SRCH_RSTL_MACVID(x) (B53_ARL_SRCH_RSTL_0_MACVID + ((x) * 0x10))
303#define B53_ARL_SRCH_RSTL(x) (B53_ARL_SRCH_RSTL_0 + ((x) * 0x10))
304
305/*************************************************************************
306 * Port VLAN Registers
307 *************************************************************************/
308
 309/* Port VLAN mask (16 bit); the IMP port is always port 8, even on BCM5325 & co */
310#define B53_PVLAN_PORT_MASK(i) ((i) * 2)
311
312/*************************************************************************
313 * 802.1Q Page Registers
314 *************************************************************************/
315
316/* Global QoS Control (8 bit) */
317#define B53_QOS_GLOBAL_CTL 0x00
318
319/* Enable 802.1Q for individual Ports (16 bit) */
320#define B53_802_1P_EN 0x04
321
322/*************************************************************************
323 * VLAN Page Registers
324 *************************************************************************/
325
326/* VLAN Control 0 (8 bit) */
327#define B53_VLAN_CTRL0 0x00
328#define VC0_8021PF_CTRL_MASK 0x3
329#define VC0_8021PF_CTRL_NONE 0x0
330#define VC0_8021PF_CTRL_CHANGE_PRI 0x1
331#define VC0_8021PF_CTRL_CHANGE_VID 0x2
332#define VC0_8021PF_CTRL_CHANGE_BOTH 0x3
333#define VC0_8021QF_CTRL_MASK 0xc
334#define VC0_8021QF_CTRL_CHANGE_PRI 0x1
335#define VC0_8021QF_CTRL_CHANGE_VID 0x2
336#define VC0_8021QF_CTRL_CHANGE_BOTH 0x3
337#define VC0_RESERVED_1 BIT(1)
338#define VC0_DROP_VID_MISS BIT(4)
339#define VC0_VID_HASH_VID BIT(5)
340#define VC0_VID_CHK_EN BIT(6) /* Use VID,DA or VID,SA */
341#define VC0_VLAN_EN BIT(7) /* 802.1Q VLAN Enabled */
342
343/* VLAN Control 1 (8 bit) */
344#define B53_VLAN_CTRL1 0x01
345#define VC1_RX_MCST_TAG_EN BIT(1)
346#define VC1_RX_MCST_FWD_EN BIT(2)
347#define VC1_RX_MCST_UNTAG_EN BIT(3)
348
349/* VLAN Control 2 (8 bit) */
350#define B53_VLAN_CTRL2 0x02
351
352/* VLAN Control 3 (8 bit when BCM5325, 16 bit else) */
353#define B53_VLAN_CTRL3 0x03
354#define B53_VLAN_CTRL3_63XX 0x04
355#define VC3_MAXSIZE_1532 BIT(6) /* 5325 only */
356#define VC3_HIGH_8BIT_EN BIT(7) /* 5325 only */
357
358/* VLAN Control 4 (8 bit) */
359#define B53_VLAN_CTRL4 0x05
360#define B53_VLAN_CTRL4_25 0x04
361#define B53_VLAN_CTRL4_63XX 0x06
362#define VC4_ING_VID_CHECK_S 6
363#define VC4_ING_VID_CHECK_MASK (0x3 << VC4_ING_VID_CHECK_S)
364#define VC4_ING_VID_VIO_FWD 0 /* forward, but do not learn */
365#define VC4_ING_VID_VIO_DROP 1 /* drop VID violations */
366#define VC4_NO_ING_VID_CHK 2 /* do not check */
367#define VC4_ING_VID_VIO_TO_IMP 3 /* redirect to MII port */
368
369/* VLAN Control 5 (8 bit) */
370#define B53_VLAN_CTRL5 0x06
371#define B53_VLAN_CTRL5_25 0x05
372#define B53_VLAN_CTRL5_63XX 0x07
373#define VC5_VID_FFF_EN BIT(2)
374#define VC5_DROP_VTABLE_MISS BIT(3)
375
376/* VLAN Control 6 (8 bit) */
377#define B53_VLAN_CTRL6 0x07
378#define B53_VLAN_CTRL6_63XX 0x08
379
380/* VLAN Table Access Register (16 bit) */
381#define B53_VLAN_TABLE_ACCESS_25 0x06 /* BCM5325E/5350 */
382#define B53_VLAN_TABLE_ACCESS_65 0x08 /* BCM5365 */
383#define VTA_VID_LOW_MASK_25 0xf
384#define VTA_VID_LOW_MASK_65 0xff
385#define VTA_VID_HIGH_S_25 4
386#define VTA_VID_HIGH_S_65 8
 387#define   VTA_VID_HIGH_MASK_25	(0xff << VTA_VID_HIGH_S_25)
388#define VTA_VID_HIGH_MASK_65 (0xf << VTA_VID_HIGH_S_65)
389#define VTA_RW_STATE BIT(12)
390#define VTA_RW_STATE_RD 0
391#define VTA_RW_STATE_WR BIT(12)
392#define VTA_RW_OP_EN BIT(13)
393
394/* VLAN Read/Write Registers for (16/32 bit) */
395#define B53_VLAN_WRITE_25 0x08
396#define B53_VLAN_WRITE_65 0x0a
397#define B53_VLAN_READ 0x0c
398#define VA_MEMBER_MASK 0x3f
399#define VA_UNTAG_S_25 6
400#define VA_UNTAG_MASK_25 0x3f
401#define VA_UNTAG_S_65 7
402#define VA_UNTAG_MASK_65 0x1f
403#define VA_VID_HIGH_S 12
404#define VA_VID_HIGH_MASK (0xffff << VA_VID_HIGH_S)
405#define VA_VALID_25 BIT(20)
406#define VA_VALID_25_R4 BIT(24)
407#define VA_VALID_65 BIT(14)
408
409/* VLAN Port Default Tag (16 bit) */
410#define B53_VLAN_PORT_DEF_TAG(i) (0x10 + 2 * (i))
411
412/*************************************************************************
413 * Jumbo Frame Page Registers
414 *************************************************************************/
415
416/* Jumbo Enable Port Mask (bit i == port i enabled) (32 bit) */
417#define B53_JUMBO_PORT_MASK 0x01
418#define B53_JUMBO_PORT_MASK_63XX 0x04
419#define JPM_10_100_JUMBO_EN BIT(24) /* GigE always enabled */
420
421/* Good Frame Max Size without 802.1Q TAG (16 bit) */
422#define B53_JUMBO_MAX_SIZE 0x05
423#define B53_JUMBO_MAX_SIZE_63XX 0x08
424#define JMS_MIN_SIZE 1518
425#define JMS_MAX_SIZE 9724
426
427/*************************************************************************
428 * CFP Configuration Page Registers
429 *************************************************************************/
430
431/* CFP Control Register with ports map (8 bit) */
432#define B53_CFP_CTRL 0x00
433
434#endif /* !__B53_REGS_H */
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
new file mode 100644
index 000000000000..2bda0b5f1578
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -0,0 +1,331 @@
1/*
2 * B53 register access through SPI
3 *
4 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <asm/unaligned.h>
20
21#include <linux/delay.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/spi/spi.h>
25#include <linux/platform_data/b53.h>
26
27#include "b53_priv.h"
28
29#define B53_SPI_DATA 0xf0
30
31#define B53_SPI_STATUS 0xfe
32#define B53_SPI_CMD_SPIF BIT(7)
33#define B53_SPI_CMD_RACK BIT(5)
34
35#define B53_SPI_CMD_READ 0x00
36#define B53_SPI_CMD_WRITE 0x01
37#define B53_SPI_CMD_NORMAL 0x60
38#define B53_SPI_CMD_FAST 0x10
39
40#define B53_SPI_PAGE_SELECT 0xff
41
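/* Every transaction starts with a command byte (mode | direction)
 * followed by a register offset.  A read polls B53_SPI_STATUS until
 * SPIF clears, selects the page through the B53_SPI_PAGE_SELECT pseudo
 * register, issues the read, waits for RACK and finally fetches the
 * result from B53_SPI_DATA.  Multi-byte values travel little-endian on
 * the wire.
 */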
42static inline int b53_spi_read_reg(struct spi_device *spi, u8 reg, u8 *val,
43 unsigned int len)
44{
45 u8 txbuf[2];
46
47 txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_READ;
48 txbuf[1] = reg;
49
50 return spi_write_then_read(spi, txbuf, 2, val, len);
51}
52
53static inline int b53_spi_clear_status(struct spi_device *spi)
54{
55 unsigned int i;
56 u8 rxbuf;
57 int ret;
58
59 for (i = 0; i < 10; i++) {
60 ret = b53_spi_read_reg(spi, B53_SPI_STATUS, &rxbuf, 1);
61 if (ret)
62 return ret;
63
64 if (!(rxbuf & B53_SPI_CMD_SPIF))
65 break;
66
67 mdelay(1);
68 }
69
70 if (i == 10)
71 return -EIO;
72
73 return 0;
74}
75
76static inline int b53_spi_set_page(struct spi_device *spi, u8 page)
77{
78 u8 txbuf[3];
79
80 txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
81 txbuf[1] = B53_SPI_PAGE_SELECT;
82 txbuf[2] = page;
83
84 return spi_write(spi, txbuf, sizeof(txbuf));
85}
86
87static inline int b53_prepare_reg_access(struct spi_device *spi, u8 page)
88{
89 int ret = b53_spi_clear_status(spi);
90
91 if (ret)
92 return ret;
93
94 return b53_spi_set_page(spi, page);
95}
96
97static int b53_spi_prepare_reg_read(struct spi_device *spi, u8 reg)
98{
99 u8 rxbuf;
100 int retry_count;
101 int ret;
102
103 ret = b53_spi_read_reg(spi, reg, &rxbuf, 1);
104 if (ret)
105 return ret;
106
107 for (retry_count = 0; retry_count < 10; retry_count++) {
108 ret = b53_spi_read_reg(spi, B53_SPI_STATUS, &rxbuf, 1);
109 if (ret)
110 return ret;
111
112 if (rxbuf & B53_SPI_CMD_RACK)
113 break;
114
115 mdelay(1);
116 }
117
118 if (retry_count == 10)
119 return -EIO;
120
121 return 0;
122}
123
124static int b53_spi_read(struct b53_device *dev, u8 page, u8 reg, u8 *data,
125 unsigned int len)
126{
127 struct spi_device *spi = dev->priv;
128 int ret;
129
130 ret = b53_prepare_reg_access(spi, page);
131 if (ret)
132 return ret;
133
134 ret = b53_spi_prepare_reg_read(spi, reg);
135 if (ret)
136 return ret;
137
138 return b53_spi_read_reg(spi, B53_SPI_DATA, data, len);
139}
140
141static int b53_spi_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
142{
143 return b53_spi_read(dev, page, reg, val, 1);
144}
145
146static int b53_spi_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
147{
148 int ret = b53_spi_read(dev, page, reg, (u8 *)val, 2);
149
150 if (!ret)
151 *val = le16_to_cpu(*val);
152
153 return ret;
154}
155
156static int b53_spi_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
157{
158 int ret = b53_spi_read(dev, page, reg, (u8 *)val, 4);
159
160 if (!ret)
161 *val = le32_to_cpu(*val);
162
163 return ret;
164}
165
166static int b53_spi_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
167{
168 int ret;
169
170 *val = 0;
171 ret = b53_spi_read(dev, page, reg, (u8 *)val, 6);
172 if (!ret)
173 *val = le64_to_cpu(*val);
174
175 return ret;
176}
177
178static int b53_spi_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
179{
180 int ret = b53_spi_read(dev, page, reg, (u8 *)val, 8);
181
182 if (!ret)
183 *val = le64_to_cpu(*val);
184
185 return ret;
186}
187
188static int b53_spi_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
189{
190 struct spi_device *spi = dev->priv;
191 int ret;
192 u8 txbuf[3];
193
194 ret = b53_prepare_reg_access(spi, page);
195 if (ret)
196 return ret;
197
198 txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
199 txbuf[1] = reg;
200 txbuf[2] = value;
201
202 return spi_write(spi, txbuf, sizeof(txbuf));
203}
204
205static int b53_spi_write16(struct b53_device *dev, u8 page, u8 reg, u16 value)
206{
207 struct spi_device *spi = dev->priv;
208 int ret;
209 u8 txbuf[4];
210
211 ret = b53_prepare_reg_access(spi, page);
212 if (ret)
213 return ret;
214
215 txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
216 txbuf[1] = reg;
217 put_unaligned_le16(value, &txbuf[2]);
218
219 return spi_write(spi, txbuf, sizeof(txbuf));
220}
221
222static int b53_spi_write32(struct b53_device *dev, u8 page, u8 reg, u32 value)
223{
224 struct spi_device *spi = dev->priv;
225 int ret;
226 u8 txbuf[6];
227
228 ret = b53_prepare_reg_access(spi, page);
229 if (ret)
230 return ret;
231
232 txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
233 txbuf[1] = reg;
234 put_unaligned_le32(value, &txbuf[2]);
235
236 return spi_write(spi, txbuf, sizeof(txbuf));
237}
238
239static int b53_spi_write48(struct b53_device *dev, u8 page, u8 reg, u64 value)
240{
241 struct spi_device *spi = dev->priv;
242 int ret;
243 u8 txbuf[10];
244
245 ret = b53_prepare_reg_access(spi, page);
246 if (ret)
247 return ret;
248
249 txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
250 txbuf[1] = reg;
251 put_unaligned_le64(value, &txbuf[2]);
252
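	/* a 48-bit write sends only 8 of the 10 bytes: the command,
	 * the offset and the six significant little-endian data bytes
	 */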
253 return spi_write(spi, txbuf, sizeof(txbuf) - 2);
254}
255
256static int b53_spi_write64(struct b53_device *dev, u8 page, u8 reg, u64 value)
257{
258 struct spi_device *spi = dev->priv;
259 int ret;
260 u8 txbuf[10];
261
262 ret = b53_prepare_reg_access(spi, page);
263 if (ret)
264 return ret;
265
266 txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
267 txbuf[1] = reg;
268 put_unaligned_le64(value, &txbuf[2]);
269
270 return spi_write(spi, txbuf, sizeof(txbuf));
271}
272
273static struct b53_io_ops b53_spi_ops = {
274 .read8 = b53_spi_read8,
275 .read16 = b53_spi_read16,
276 .read32 = b53_spi_read32,
277 .read48 = b53_spi_read48,
278 .read64 = b53_spi_read64,
279 .write8 = b53_spi_write8,
280 .write16 = b53_spi_write16,
281 .write32 = b53_spi_write32,
282 .write48 = b53_spi_write48,
283 .write64 = b53_spi_write64,
284};
285
286static int b53_spi_probe(struct spi_device *spi)
287{
288 struct b53_device *dev;
289 int ret;
290
291 dev = b53_switch_alloc(&spi->dev, &b53_spi_ops, spi);
292 if (!dev)
293 return -ENOMEM;
294
295 if (spi->dev.platform_data)
296 dev->pdata = spi->dev.platform_data;
297
298 ret = b53_switch_register(dev);
299 if (ret)
300 return ret;
301
302 spi_set_drvdata(spi, dev);
303
304 return 0;
305}
306
307static int b53_spi_remove(struct spi_device *spi)
308{
309 struct b53_device *dev = spi_get_drvdata(spi);
310
311 if (dev)
312 b53_switch_remove(dev);
313
314 return 0;
315}
316
317static struct spi_driver b53_spi_driver = {
318 .driver = {
319 .name = "b53-switch",
320 .bus = &spi_bus_type,
321 .owner = THIS_MODULE,
322 },
323 .probe = b53_spi_probe,
324 .remove = b53_spi_remove,
325};
326
327module_spi_driver(b53_spi_driver);
328
329MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
330MODULE_DESCRIPTION("B53 SPI access driver");
331MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
new file mode 100644
index 000000000000..70fd47284535
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -0,0 +1,415 @@
1/*
2 * B53 register access through Switch Register Access Bridge Registers
3 *
4 * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/delay.h>
22#include <linux/platform_device.h>
23#include <linux/platform_data/b53.h>
24
25#include "b53_priv.h"
26
27/* command and status register of the SRAB */
28#define B53_SRAB_CMDSTAT 0x2c
29#define B53_SRAB_CMDSTAT_RST BIT(2)
30#define B53_SRAB_CMDSTAT_WRITE BIT(1)
31#define B53_SRAB_CMDSTAT_GORDYN BIT(0)
32#define B53_SRAB_CMDSTAT_PAGE 24
33#define B53_SRAB_CMDSTAT_REG 16
34
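/* An access is started by writing the page and offset together with the
 * GORDYN bit into B53_SRAB_CMDSTAT; the hardware clears GORDYN again
 * once the operation has completed.  Data travels through the WD_H/WD_L
 * and RD_H/RD_L register pairs below.
 */
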
 35/* high order word of write data to switch register */
36#define B53_SRAB_WD_H 0x30
37
 38/* low order word of write data to switch register */
39#define B53_SRAB_WD_L 0x34
40
41/* high order word of read data from switch register */
42#define B53_SRAB_RD_H 0x38
43
44/* low order word of read data from switch register */
45#define B53_SRAB_RD_L 0x3c
46
 47/* SRAB control register: arbitration and init status bits */
48#define B53_SRAB_CTRLS 0x40
49#define B53_SRAB_CTRLS_RCAREQ BIT(3)
50#define B53_SRAB_CTRLS_RCAGNT BIT(4)
51#define B53_SRAB_CTRLS_SW_INIT_DONE BIT(6)
52
 53/* this register captures interrupt pulses from the switch */
54#define B53_SRAB_INTR 0x44
55#define B53_SRAB_INTR_P(x) BIT(x)
56#define B53_SRAB_SWITCH_PHY BIT(8)
57#define B53_SRAB_1588_SYNC BIT(9)
58#define B53_SRAB_IMP1_SLEEP_TIMER BIT(10)
59#define B53_SRAB_P7_SLEEP_TIMER BIT(11)
60#define B53_SRAB_IMP0_SLEEP_TIMER BIT(12)
61
62struct b53_srab_priv {
63 void __iomem *regs;
64};
65
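/* Indirect register access must first be arbitrated with the switch:
 * assert RCAREQ, wait for the grant via RCAGNT, and deassert RCAREQ
 * again once the access is done.
 */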
66static int b53_srab_request_grant(struct b53_device *dev)
67{
68 struct b53_srab_priv *priv = dev->priv;
69 u8 __iomem *regs = priv->regs;
70 u32 ctrls;
71 int i;
72
73 ctrls = readl(regs + B53_SRAB_CTRLS);
74 ctrls |= B53_SRAB_CTRLS_RCAREQ;
75 writel(ctrls, regs + B53_SRAB_CTRLS);
76
77 for (i = 0; i < 20; i++) {
78 ctrls = readl(regs + B53_SRAB_CTRLS);
79 if (ctrls & B53_SRAB_CTRLS_RCAGNT)
80 break;
81 usleep_range(10, 100);
82 }
 83	if (WARN_ON(i == 20))
84 return -EIO;
85
86 return 0;
87}
88
89static void b53_srab_release_grant(struct b53_device *dev)
90{
91 struct b53_srab_priv *priv = dev->priv;
92 u8 __iomem *regs = priv->regs;
93 u32 ctrls;
94
95 ctrls = readl(regs + B53_SRAB_CTRLS);
96 ctrls &= ~B53_SRAB_CTRLS_RCAREQ;
97 writel(ctrls, regs + B53_SRAB_CTRLS);
98}
99
100static int b53_srab_op(struct b53_device *dev, u8 page, u8 reg, u32 op)
101{
102 struct b53_srab_priv *priv = dev->priv;
103 u8 __iomem *regs = priv->regs;
104 int i;
105 u32 cmdstat;
106
107 /* set register address */
108 cmdstat = (page << B53_SRAB_CMDSTAT_PAGE) |
109 (reg << B53_SRAB_CMDSTAT_REG) |
110 B53_SRAB_CMDSTAT_GORDYN |
111 op;
112 writel(cmdstat, regs + B53_SRAB_CMDSTAT);
113
114 /* check if operation completed */
115 for (i = 0; i < 5; ++i) {
116 cmdstat = readl(regs + B53_SRAB_CMDSTAT);
117 if (!(cmdstat & B53_SRAB_CMDSTAT_GORDYN))
118 break;
119 usleep_range(10, 100);
120 }
121
122 if (WARN_ON(i == 5))
123 return -EIO;
124
125 return 0;
126}
127
128static int b53_srab_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
129{
130 struct b53_srab_priv *priv = dev->priv;
131 u8 __iomem *regs = priv->regs;
132 int ret = 0;
133
134 ret = b53_srab_request_grant(dev);
135 if (ret)
136 goto err;
137
138 ret = b53_srab_op(dev, page, reg, 0);
139 if (ret)
140 goto err;
141
142 *val = readl(regs + B53_SRAB_RD_L) & 0xff;
143
144err:
145 b53_srab_release_grant(dev);
146
147 return ret;
148}
149
150static int b53_srab_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
151{
152 struct b53_srab_priv *priv = dev->priv;
153 u8 __iomem *regs = priv->regs;
154 int ret = 0;
155
156 ret = b53_srab_request_grant(dev);
157 if (ret)
158 goto err;
159
160 ret = b53_srab_op(dev, page, reg, 0);
161 if (ret)
162 goto err;
163
164 *val = readl(regs + B53_SRAB_RD_L) & 0xffff;
165
166err:
167 b53_srab_release_grant(dev);
168
169 return ret;
170}
171
172static int b53_srab_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
173{
174 struct b53_srab_priv *priv = dev->priv;
175 u8 __iomem *regs = priv->regs;
176 int ret = 0;
177
178 ret = b53_srab_request_grant(dev);
179 if (ret)
180 goto err;
181
182 ret = b53_srab_op(dev, page, reg, 0);
183 if (ret)
184 goto err;
185
186 *val = readl(regs + B53_SRAB_RD_L);
187
188err:
189 b53_srab_release_grant(dev);
190
191 return ret;
192}
193
194static int b53_srab_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
195{
196 struct b53_srab_priv *priv = dev->priv;
197 u8 __iomem *regs = priv->regs;
198 int ret = 0;
199
200 ret = b53_srab_request_grant(dev);
201 if (ret)
202 goto err;
203
204 ret = b53_srab_op(dev, page, reg, 0);
205 if (ret)
206 goto err;
207
208 *val = readl(regs + B53_SRAB_RD_L);
209 *val += ((u64)readl(regs + B53_SRAB_RD_H) & 0xffff) << 32;
210
211err:
212 b53_srab_release_grant(dev);
213
214 return ret;
215}
216
217static int b53_srab_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
218{
219 struct b53_srab_priv *priv = dev->priv;
220 u8 __iomem *regs = priv->regs;
221 int ret = 0;
222
223 ret = b53_srab_request_grant(dev);
224 if (ret)
225 goto err;
226
227 ret = b53_srab_op(dev, page, reg, 0);
228 if (ret)
229 goto err;
230
231 *val = readl(regs + B53_SRAB_RD_L);
232 *val += (u64)readl(regs + B53_SRAB_RD_H) << 32;
233
234err:
235 b53_srab_release_grant(dev);
236
237 return ret;
238}
239
240static int b53_srab_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
241{
242 struct b53_srab_priv *priv = dev->priv;
243 u8 __iomem *regs = priv->regs;
244 int ret = 0;
245
246 ret = b53_srab_request_grant(dev);
247 if (ret)
248 goto err;
249
250 writel(value, regs + B53_SRAB_WD_L);
251
252 ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
253
254err:
255 b53_srab_release_grant(dev);
256
257 return ret;
258}
259
260static int b53_srab_write16(struct b53_device *dev, u8 page, u8 reg,
261 u16 value)
262{
263 struct b53_srab_priv *priv = dev->priv;
264 u8 __iomem *regs = priv->regs;
265 int ret = 0;
266
267 ret = b53_srab_request_grant(dev);
268 if (ret)
269 goto err;
270
271 writel(value, regs + B53_SRAB_WD_L);
272
273 ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
274
275err:
276 b53_srab_release_grant(dev);
277
278 return ret;
279}
280
281static int b53_srab_write32(struct b53_device *dev, u8 page, u8 reg,
282 u32 value)
283{
284 struct b53_srab_priv *priv = dev->priv;
285 u8 __iomem *regs = priv->regs;
286 int ret = 0;
287
288 ret = b53_srab_request_grant(dev);
289 if (ret)
290 goto err;
291
292 writel(value, regs + B53_SRAB_WD_L);
293
294 ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
295
296err:
297 b53_srab_release_grant(dev);
298
299 return ret;
300}
301
302static int b53_srab_write48(struct b53_device *dev, u8 page, u8 reg,
303 u64 value)
304{
305 struct b53_srab_priv *priv = dev->priv;
306 u8 __iomem *regs = priv->regs;
307 int ret = 0;
308
309 ret = b53_srab_request_grant(dev);
310 if (ret)
311 goto err;
312
313 writel((u32)value, regs + B53_SRAB_WD_L);
314 writel((u16)(value >> 32), regs + B53_SRAB_WD_H);
315
316 ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
317
318err:
319 b53_srab_release_grant(dev);
320
321 return ret;
322}
323
324static int b53_srab_write64(struct b53_device *dev, u8 page, u8 reg,
325 u64 value)
326{
327 struct b53_srab_priv *priv = dev->priv;
328 u8 __iomem *regs = priv->regs;
329 int ret = 0;
330
331 ret = b53_srab_request_grant(dev);
332 if (ret)
333 goto err;
334
335 writel((u32)value, regs + B53_SRAB_WD_L);
336 writel((u32)(value >> 32), regs + B53_SRAB_WD_H);
337
338 ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
339
340err:
341 b53_srab_release_grant(dev);
342
343 return ret;
344}
345
346static struct b53_io_ops b53_srab_ops = {
347 .read8 = b53_srab_read8,
348 .read16 = b53_srab_read16,
349 .read32 = b53_srab_read32,
350 .read48 = b53_srab_read48,
351 .read64 = b53_srab_read64,
352 .write8 = b53_srab_write8,
353 .write16 = b53_srab_write16,
354 .write32 = b53_srab_write32,
355 .write48 = b53_srab_write48,
356 .write64 = b53_srab_write64,
357};
358
359static int b53_srab_probe(struct platform_device *pdev)
360{
361 struct b53_srab_priv *priv;
362 struct b53_device *dev;
363 struct resource *r;
364
365 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
366 if (!priv)
367 return -ENOMEM;
368
369 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
370 priv->regs = devm_ioremap_resource(&pdev->dev, r);
371 if (IS_ERR(priv->regs))
372		return PTR_ERR(priv->regs);
373
374 dev = b53_switch_alloc(&pdev->dev, &b53_srab_ops, priv);
375 if (!dev)
376 return -ENOMEM;
377
378 platform_set_drvdata(pdev, dev);
379
380 return b53_switch_register(dev);
381}
382
383static int b53_srab_remove(struct platform_device *pdev)
384{
385 struct b53_device *dev = platform_get_drvdata(pdev);
386
387 if (dev)
388 b53_switch_remove(dev);
389
390 return 0;
391}
392
393static const struct of_device_id b53_srab_of_match[] = {
394 { .compatible = "brcm,bcm53010-srab" },
395 { .compatible = "brcm,bcm53011-srab" },
396 { .compatible = "brcm,bcm53012-srab" },
397 { .compatible = "brcm,bcm53018-srab" },
398 { .compatible = "brcm,bcm53019-srab" },
399 { .compatible = "brcm,bcm5301x-srab" },
400 { /* sentinel */ },
401};
402
403static struct platform_driver b53_srab_driver = {
404 .probe = b53_srab_probe,
405 .remove = b53_srab_remove,
406 .driver = {
407 .name = "b53-srab-switch",
408 .of_match_table = b53_srab_of_match,
409 },
410};
411
412module_platform_driver(b53_srab_driver);
413MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
414MODULE_DESCRIPTION("B53 Switch Register Access Bridge (SRAB) driver");
415MODULE_LICENSE("Dual BSD/GPL");
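
Taken together, the SRAB helpers above implement a small grant/command protocol: acquire the bus by setting RCAREQ and polling for RCAGNT, latch the page and register into CMDSTAT with GORDYN set, poll until GORDYN clears, move data through the RD_L/RD_H or WD_L/WD_H windows, then drop RCAREQ. The standalone sketch below traces one 32-bit read through that sequence against a mocked register file; the offsets and bit positions are illustrative placeholders rather than the authoritative b53_regs.h values, and the mocks exist only so the flow can be compiled and stepped through outside the kernel.

/* Sketch of one SRAB read, mirroring the driver flow above.
 * All offsets/bits below are placeholders, not b53_regs.h values.
 */
#include <stdint.h>
#include <stdio.h>

#define SRAB_CMDSTAT         0x2c          /* placeholder offset */
#define SRAB_CMDSTAT_GORDYN  (1u << 0)     /* placeholder bit */
#define SRAB_CMDSTAT_PAGE    24            /* placeholder shift */
#define SRAB_CMDSTAT_REG     16            /* placeholder shift */
#define SRAB_RD_L            0x30          /* placeholder offset */
#define SRAB_CTRLS           0x40          /* placeholder offset */
#define SRAB_CTRLS_RCAREQ    (1u << 3)     /* placeholder bit */
#define SRAB_CTRLS_RCAGNT    (1u << 4)     /* placeholder bit */

static uint32_t mmio[0x100 / 4];           /* mocked register file */

static uint32_t rd(unsigned off)         { return mmio[off / 4]; }
static void wr(uint32_t v, unsigned off) { mmio[off / 4] = v; }

static int srab_read32(uint8_t page, uint8_t reg, uint32_t *val)
{
	int i;

	/* 1. Request the bus and wait for the grant. */
	wr(rd(SRAB_CTRLS) | SRAB_CTRLS_RCAREQ, SRAB_CTRLS);
	mmio[SRAB_CTRLS / 4] |= SRAB_CTRLS_RCAGNT;      /* mock: grant now */
	for (i = 0; i < 20 && !(rd(SRAB_CTRLS) & SRAB_CTRLS_RCAGNT); i++)
		;
	if (i == 20)
		return -1;

	/* 2. Latch page/reg and kick the command (GORDYN = go). */
	wr((uint32_t)page << SRAB_CMDSTAT_PAGE |
	   (uint32_t)reg << SRAB_CMDSTAT_REG |
	   SRAB_CMDSTAT_GORDYN, SRAB_CMDSTAT);
	mmio[SRAB_CMDSTAT / 4] &= ~SRAB_CMDSTAT_GORDYN; /* mock: complete */

	/* 3. Poll until GORDYN clears, then pull the data. */
	for (i = 0; i < 5 && (rd(SRAB_CMDSTAT) & SRAB_CMDSTAT_GORDYN); i++)
		;
	if (i == 5)
		return -1;
	*val = rd(SRAB_RD_L);

	/* 4. Release the grant. */
	wr(rd(SRAB_CTRLS) & ~SRAB_CTRLS_RCAREQ, SRAB_CTRLS);
	return 0;
}

int main(void)
{
	uint32_t v;

	mmio[SRAB_RD_L / 4] = 0xdeadbeef;  /* seed the mocked data window */
	if (srab_read32(0x02, 0x30, &v) == 0)
		printf("page 0x02 reg 0x30 -> 0x%08x\n", v);
	return 0;
}
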
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 10ddd5a5dfb6..cd1d630ae3a9 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -22,6 +22,7 @@
22#include <linux/of_irq.h> 22#include <linux/of_irq.h>
23#include <linux/of_address.h> 23#include <linux/of_address.h>
24#include <linux/of_net.h> 24#include <linux/of_net.h>
25#include <linux/of_mdio.h>
25#include <net/dsa.h> 26#include <net/dsa.h>
26#include <linux/ethtool.h> 27#include <linux/ethtool.h>
27#include <linux/if_bridge.h> 28#include <linux/if_bridge.h>
@@ -460,19 +461,13 @@ static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
460 return 0; 461 return 0;
461} 462}
462 463
463/* Fast-ageing of ARL entries for a given port, equivalent to an ARL 464static int bcm_sf2_fast_age_op(struct bcm_sf2_priv *priv)
464 * flush for that port.
465 */
466static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
467{ 465{
468 struct bcm_sf2_priv *priv = ds_to_priv(ds);
469 unsigned int timeout = 1000; 466 unsigned int timeout = 1000;
470 u32 reg; 467 u32 reg;
471 468
472 core_writel(priv, port, CORE_FAST_AGE_PORT);
473
474 reg = core_readl(priv, CORE_FAST_AGE_CTRL); 469 reg = core_readl(priv, CORE_FAST_AGE_CTRL);
475 reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE; 470 reg |= EN_AGE_PORT | EN_AGE_VLAN | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
476 core_writel(priv, reg, CORE_FAST_AGE_CTRL); 471 core_writel(priv, reg, CORE_FAST_AGE_CTRL);
477 472
478 do { 473 do {
@@ -491,13 +486,98 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
491 return 0; 486 return 0;
492} 487}
493 488
489/* Fast-ageing of ARL entries for a given port, equivalent to an ARL
490 * flush for that port.
491 */
492static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
493{
494 struct bcm_sf2_priv *priv = ds_to_priv(ds);
495
496 core_writel(priv, port, CORE_FAST_AGE_PORT);
497
498 return bcm_sf2_fast_age_op(priv);
499}
500
501static int bcm_sf2_sw_fast_age_vlan(struct bcm_sf2_priv *priv, u16 vid)
502{
503 core_writel(priv, vid, CORE_FAST_AGE_VID);
504
505 return bcm_sf2_fast_age_op(priv);
506}
507
508static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv)
509{
510 unsigned int timeout = 10;
511 u32 reg;
512
513 do {
514 reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL);
515 if (!(reg & ARLA_VTBL_STDN))
516 return 0;
517
518 usleep_range(1000, 2000);
519 } while (timeout--);
520
521 return -ETIMEDOUT;
522}
523
524static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op)
525{
526 core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL);
527
528 return bcm_sf2_vlan_op_wait(priv);
529}
530
531static void bcm_sf2_set_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
532 struct bcm_sf2_vlan *vlan)
533{
534 int ret;
535
536 core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
537 core_writel(priv, vlan->untag << UNTAG_MAP_SHIFT | vlan->members,
538 CORE_ARLA_VTBL_ENTRY);
539
540 ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_WRITE);
541 if (ret)
542 pr_err("failed to write VLAN entry\n");
543}
544
545static int bcm_sf2_get_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
546 struct bcm_sf2_vlan *vlan)
547{
548 u32 entry;
549 int ret;
550
551 core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
552
553 ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_READ);
554 if (ret)
555 return ret;
556
557 entry = core_readl(priv, CORE_ARLA_VTBL_ENTRY);
558 vlan->members = entry & FWD_MAP_MASK;
559 vlan->untag = (entry >> UNTAG_MAP_SHIFT) & UNTAG_MAP_MASK;
560
561 return 0;
562}
563
494static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port, 564static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
495 struct net_device *bridge) 565 struct net_device *bridge)
496{ 566{
497 struct bcm_sf2_priv *priv = ds_to_priv(ds); 567 struct bcm_sf2_priv *priv = ds_to_priv(ds);
568 s8 cpu_port = ds->dst->cpu_port;
498 unsigned int i; 569 unsigned int i;
499 u32 reg, p_ctl; 570 u32 reg, p_ctl;
500 571
 572	/* Make this port leave the "join all VLANs" mode since we will
 573	 * have proper VLAN entries from now on
574 */
575 reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
576 reg &= ~BIT(port);
577 if ((reg & BIT(cpu_port)) == BIT(cpu_port))
578 reg &= ~BIT(cpu_port);
579 core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);
580
501 priv->port_sts[port].bridge_dev = bridge; 581 priv->port_sts[port].bridge_dev = bridge;
502 p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); 582 p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
503 583
@@ -529,6 +609,7 @@ static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
529{ 609{
530 struct bcm_sf2_priv *priv = ds_to_priv(ds); 610 struct bcm_sf2_priv *priv = ds_to_priv(ds);
531 struct net_device *bridge = priv->port_sts[port].bridge_dev; 611 struct net_device *bridge = priv->port_sts[port].bridge_dev;
612 s8 cpu_port = ds->dst->cpu_port;
532 unsigned int i; 613 unsigned int i;
533 u32 reg, p_ctl; 614 u32 reg, p_ctl;
534 615
@@ -552,6 +633,13 @@ static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
552 core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port)); 633 core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
553 priv->port_sts[port].vlan_ctl_mask = p_ctl; 634 priv->port_sts[port].vlan_ctl_mask = p_ctl;
554 priv->port_sts[port].bridge_dev = NULL; 635 priv->port_sts[port].bridge_dev = NULL;
636
637 /* Make this port join all VLANs without VLAN entries */
638 reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
639 reg |= BIT(port);
640 if (!(reg & BIT(cpu_port)))
641 reg |= BIT(cpu_port);
642 core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);
555} 643}
556 644
557static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, 645static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
@@ -804,7 +892,7 @@ static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
804 int (*cb)(struct switchdev_obj *obj)) 892 int (*cb)(struct switchdev_obj *obj))
805{ 893{
806 struct bcm_sf2_priv *priv = ds_to_priv(ds); 894 struct bcm_sf2_priv *priv = ds_to_priv(ds);
807 struct net_device *dev = ds->ports[port]; 895 struct net_device *dev = ds->ports[port].netdev;
808 struct bcm_sf2_arl_entry results[2]; 896 struct bcm_sf2_arl_entry results[2];
809 unsigned int count = 0; 897 unsigned int count = 0;
810 int ret; 898 int ret;
@@ -836,6 +924,66 @@ static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
836 return 0; 924 return 0;
837} 925}
838 926
927static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
928 int regnum, u16 val)
929{
930 int ret = 0;
931 u32 reg;
932
933 reg = reg_readl(priv, REG_SWITCH_CNTRL);
934 reg |= MDIO_MASTER_SEL;
935 reg_writel(priv, reg, REG_SWITCH_CNTRL);
936
937 /* Page << 8 | offset */
938 reg = 0x70;
939 reg <<= 2;
940 core_writel(priv, addr, reg);
941
942 /* Page << 8 | offset */
943 reg = 0x80 << 8 | regnum << 1;
944 reg <<= 2;
945
946 if (op)
947 ret = core_readl(priv, reg);
948 else
949 core_writel(priv, val, reg);
950
951 reg = reg_readl(priv, REG_SWITCH_CNTRL);
952 reg &= ~MDIO_MASTER_SEL;
953 reg_writel(priv, reg, REG_SWITCH_CNTRL);
954
955 return ret & 0xffff;
956}
957
958static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
959{
960 struct bcm_sf2_priv *priv = bus->priv;
961
962 /* Intercept reads from Broadcom pseudo-PHY address, else, send
963 * them to our master MDIO bus controller
964 */
965 if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
966 return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
967 else
968 return mdiobus_read(priv->master_mii_bus, addr, regnum);
969}
970
971static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
972 u16 val)
973{
974 struct bcm_sf2_priv *priv = bus->priv;
975
976 /* Intercept writes to the Broadcom pseudo-PHY address, else,
977 * send them to our master MDIO bus controller
978 */
979 if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
980 bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
981 else
982 mdiobus_write(priv->master_mii_bus, addr, regnum, val);
983
984 return 0;
985}
986
839static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id) 987static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
840{ 988{
841 struct bcm_sf2_priv *priv = dev_id; 989 struct bcm_sf2_priv *priv = dev_id;
@@ -932,133 +1080,70 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
932 } 1080 }
933} 1081}
934 1082
935static int bcm_sf2_sw_setup(struct dsa_switch *ds) 1083static int bcm_sf2_mdio_register(struct dsa_switch *ds)
936{ 1084{
937 const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
938 struct bcm_sf2_priv *priv = ds_to_priv(ds); 1085 struct bcm_sf2_priv *priv = ds_to_priv(ds);
939 struct device_node *dn; 1086 struct device_node *dn;
940 void __iomem **base; 1087 static int index;
941 unsigned int port; 1088 int err;
942 unsigned int i; 1089
943 u32 reg, rev; 1090 /* Find our integrated MDIO bus node */
944 int ret; 1091 dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
945 1092 priv->master_mii_bus = of_mdio_find_bus(dn);
946 spin_lock_init(&priv->indir_lock); 1093 if (!priv->master_mii_bus)
947 mutex_init(&priv->stats_mutex); 1094 return -EPROBE_DEFER;
948 1095
949 /* All the interesting properties are at the parent device_node 1096 get_device(&priv->master_mii_bus->dev);
950 * level 1097 priv->master_mii_dn = dn;
951 */ 1098
952 dn = ds->cd->of_node->parent; 1099 priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
953 bcm_sf2_identify_ports(priv, ds->cd->of_node); 1100 if (!priv->slave_mii_bus)
954 1101 return -ENOMEM;
955 priv->irq0 = irq_of_parse_and_map(dn, 0); 1102
956 priv->irq1 = irq_of_parse_and_map(dn, 1); 1103 priv->slave_mii_bus->priv = priv;
957 1104 priv->slave_mii_bus->name = "sf2 slave mii";
958 base = &priv->core; 1105 priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
959 for (i = 0; i < BCM_SF2_REGS_NUM; i++) { 1106 priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
960 *base = of_iomap(dn, i); 1107 snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
961 if (*base == NULL) { 1108 index++);
962 pr_err("unable to find register: %s\n", reg_names[i]); 1109 priv->slave_mii_bus->dev.of_node = dn;
963 ret = -ENOMEM; 1110
964 goto out_unmap; 1111 /* Include the pseudo-PHY address to divert reads towards our
965 } 1112 * workaround. This is only required for 7445D0, since 7445E0
966 base++; 1113 * disconnects the internal switch pseudo-PHY such that we can use the
967 } 1114 * regular SWITCH_MDIO master controller instead.
968
969 ret = bcm_sf2_sw_rst(priv);
970 if (ret) {
971 pr_err("unable to software reset switch: %d\n", ret);
972 goto out_unmap;
973 }
974
975 /* Disable all interrupts and request them */
976 bcm_sf2_intr_disable(priv);
977
978 ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
979 "switch_0", priv);
980 if (ret < 0) {
981 pr_err("failed to request switch_0 IRQ\n");
982 goto out_unmap;
983 }
984
985 ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
986 "switch_1", priv);
987 if (ret < 0) {
988 pr_err("failed to request switch_1 IRQ\n");
989 goto out_free_irq0;
990 }
991
992 /* Reset the MIB counters */
993 reg = core_readl(priv, CORE_GMNCFGCFG);
994 reg |= RST_MIB_CNT;
995 core_writel(priv, reg, CORE_GMNCFGCFG);
996 reg &= ~RST_MIB_CNT;
997 core_writel(priv, reg, CORE_GMNCFGCFG);
998
999 /* Get the maximum number of ports for this switch */
1000 priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
1001 if (priv->hw_params.num_ports > DSA_MAX_PORTS)
1002 priv->hw_params.num_ports = DSA_MAX_PORTS;
1003
1004 /* Assume a single GPHY setup if we can't read that property */
1005 if (of_property_read_u32(dn, "brcm,num-gphy",
1006 &priv->hw_params.num_gphy))
1007 priv->hw_params.num_gphy = 1;
1008
1009 /* Enable all valid ports and disable those unused */
1010 for (port = 0; port < priv->hw_params.num_ports; port++) {
1011 /* IMP port receives special treatment */
1012 if ((1 << port) & ds->enabled_port_mask)
1013 bcm_sf2_port_setup(ds, port, NULL);
1014 else if (dsa_is_cpu_port(ds, port))
1015 bcm_sf2_imp_setup(ds, port);
1016 else
1017 bcm_sf2_port_disable(ds, port, NULL);
1018 }
1019
1020 /* Include the pseudo-PHY address and the broadcast PHY address to
1021 * divert reads towards our workaround. This is only required for
1022 * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
1023 * that we can use the regular SWITCH_MDIO master controller instead.
1024 * 1115 *
1025 * By default, DSA initializes ds->phys_mii_mask to 1116 * Here we flag the pseudo PHY as needing special treatment and would
1026 * ds->enabled_port_mask to have a 1:1 mapping between Port address 1117 * otherwise make all other PHY read/writes go to the master MDIO bus
1027 * and PHY address in order to utilize the slave_mii_bus instance to 1118 * controller that comes with this switch backed by the "mdio-unimac"
1028 * read from Port PHYs. This is not what we want here, so we 1119 * driver.
1029 * initialize phys_mii_mask 0 to always utilize the "master" MDIO
1030 * bus backed by the "mdio-unimac" driver.
1031 */ 1120 */
1032 if (of_machine_is_compatible("brcm,bcm7445d0")) 1121 if (of_machine_is_compatible("brcm,bcm7445d0"))
1033 ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); 1122 priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
1034 else 1123 else
1035 ds->phys_mii_mask = 0; 1124 priv->indir_phy_mask = 0;
1036 1125
1037 rev = reg_readl(priv, REG_SWITCH_REVISION); 1126 ds->phys_mii_mask = priv->indir_phy_mask;
1038 priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & 1127 ds->slave_mii_bus = priv->slave_mii_bus;
1039 SWITCH_TOP_REV_MASK; 1128 priv->slave_mii_bus->parent = ds->dev->parent;
1040 priv->hw_params.core_rev = (rev & SF2_REV_MASK); 1129 priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
1041 1130
1042 rev = reg_readl(priv, REG_PHY_REVISION); 1131 if (dn)
1043 priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK; 1132 err = of_mdiobus_register(priv->slave_mii_bus, dn);
1133 else
1134 err = mdiobus_register(priv->slave_mii_bus);
1044 1135
1045 pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n", 1136 if (err)
1046 priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff, 1137 of_node_put(dn);
1047 priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
1048 priv->core, priv->irq0, priv->irq1);
1049 1138
1050 return 0; 1139 return err;
1140}
1051 1141
1052out_free_irq0: 1142static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
1053 free_irq(priv->irq0, priv); 1143{
1054out_unmap: 1144 mdiobus_unregister(priv->slave_mii_bus);
1055 base = &priv->core; 1145 if (priv->master_mii_dn)
1056 for (i = 0; i < BCM_SF2_REGS_NUM; i++) { 1146 of_node_put(priv->master_mii_dn);
1057 if (*base)
1058 iounmap(*base);
1059 base++;
1060 }
1061 return ret;
1062} 1147}
1063 1148
1064static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr) 1149static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr)
@@ -1078,68 +1163,6 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
1078 return priv->hw_params.gphy_rev; 1163 return priv->hw_params.gphy_rev;
1079} 1164}
1080 1165
1081static int bcm_sf2_sw_indir_rw(struct dsa_switch *ds, int op, int addr,
1082 int regnum, u16 val)
1083{
1084 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1085 int ret = 0;
1086 u32 reg;
1087
1088 reg = reg_readl(priv, REG_SWITCH_CNTRL);
1089 reg |= MDIO_MASTER_SEL;
1090 reg_writel(priv, reg, REG_SWITCH_CNTRL);
1091
1092 /* Page << 8 | offset */
1093 reg = 0x70;
1094 reg <<= 2;
1095 core_writel(priv, addr, reg);
1096
1097 /* Page << 8 | offset */
1098 reg = 0x80 << 8 | regnum << 1;
1099 reg <<= 2;
1100
1101 if (op)
1102 ret = core_readl(priv, reg);
1103 else
1104 core_writel(priv, val, reg);
1105
1106 reg = reg_readl(priv, REG_SWITCH_CNTRL);
1107 reg &= ~MDIO_MASTER_SEL;
1108 reg_writel(priv, reg, REG_SWITCH_CNTRL);
1109
1110 return ret & 0xffff;
1111}
1112
1113static int bcm_sf2_sw_phy_read(struct dsa_switch *ds, int addr, int regnum)
1114{
1115 /* Intercept reads from the MDIO broadcast address or Broadcom
1116 * pseudo-PHY address
1117 */
1118 switch (addr) {
1119 case 0:
1120 case BRCM_PSEUDO_PHY_ADDR:
1121 return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0);
1122 default:
1123 return 0xffff;
1124 }
1125}
1126
1127static int bcm_sf2_sw_phy_write(struct dsa_switch *ds, int addr, int regnum,
1128 u16 val)
1129{
1130 /* Intercept writes to the MDIO broadcast address or Broadcom
1131 * pseudo-PHY address
1132 */
1133 switch (addr) {
1134 case 0:
1135 case BRCM_PSEUDO_PHY_ADDR:
1136 bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val);
1137 break;
1138 }
1139
1140 return 0;
1141}
1142
1143static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, 1166static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
1144 struct phy_device *phydev) 1167 struct phy_device *phydev)
1145{ 1168{
@@ -1248,7 +1271,7 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
1248 * state machine and make it go in PHY_FORCING state instead. 1271 * state machine and make it go in PHY_FORCING state instead.
1249 */ 1272 */
1250 if (!status->link) 1273 if (!status->link)
1251 netif_carrier_off(ds->ports[port]); 1274 netif_carrier_off(ds->ports[port].netdev);
1252 status->duplex = 1; 1275 status->duplex = 1;
1253 } else { 1276 } else {
1254 status->link = 1; 1277 status->link = 1;
@@ -1370,14 +1393,309 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
1370 return p->ethtool_ops->set_wol(p, wol); 1393 return p->ethtool_ops->set_wol(p, wol);
1371} 1394}
1372 1395
1396static void bcm_sf2_enable_vlan(struct bcm_sf2_priv *priv, bool enable)
1397{
1398 u32 mgmt, vc0, vc1, vc4, vc5;
1399
1400 mgmt = core_readl(priv, CORE_SWMODE);
1401 vc0 = core_readl(priv, CORE_VLAN_CTRL0);
1402 vc1 = core_readl(priv, CORE_VLAN_CTRL1);
1403 vc4 = core_readl(priv, CORE_VLAN_CTRL4);
1404 vc5 = core_readl(priv, CORE_VLAN_CTRL5);
1405
1406 mgmt &= ~SW_FWDG_MODE;
1407
1408 if (enable) {
1409 vc0 |= VLAN_EN | VLAN_LEARN_MODE_IVL;
1410 vc1 |= EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP;
1411 vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
1412 vc4 |= INGR_VID_CHK_DROP;
1413 vc5 |= DROP_VTABLE_MISS | EN_VID_FFF_FWD;
1414 } else {
1415 vc0 &= ~(VLAN_EN | VLAN_LEARN_MODE_IVL);
1416 vc1 &= ~(EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP);
1417 vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
1418 vc5 &= ~(DROP_VTABLE_MISS | EN_VID_FFF_FWD);
1419 vc4 |= INGR_VID_CHK_VID_VIOL_IMP;
1420 }
1421
1422 core_writel(priv, vc0, CORE_VLAN_CTRL0);
1423 core_writel(priv, vc1, CORE_VLAN_CTRL1);
1424 core_writel(priv, 0, CORE_VLAN_CTRL3);
1425 core_writel(priv, vc4, CORE_VLAN_CTRL4);
1426 core_writel(priv, vc5, CORE_VLAN_CTRL5);
1427 core_writel(priv, mgmt, CORE_SWMODE);
1428}
1429
1430static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds)
1431{
1432 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1433 unsigned int port;
1434
1435 /* Clear all VLANs */
1436 bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_CLEAR);
1437
1438 for (port = 0; port < priv->hw_params.num_ports; port++) {
1439 if (!((1 << port) & ds->enabled_port_mask))
1440 continue;
1441
1442 core_writel(priv, 1, CORE_DEFAULT_1Q_TAG_P(port));
1443 }
1444}
1445
1446static int bcm_sf2_sw_vlan_filtering(struct dsa_switch *ds, int port,
1447 bool vlan_filtering)
1448{
1449 return 0;
1450}
1451
1452static int bcm_sf2_sw_vlan_prepare(struct dsa_switch *ds, int port,
1453 const struct switchdev_obj_port_vlan *vlan,
1454 struct switchdev_trans *trans)
1455{
1456 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1457
1458 bcm_sf2_enable_vlan(priv, true);
1459
1460 return 0;
1461}
1462
1463static void bcm_sf2_sw_vlan_add(struct dsa_switch *ds, int port,
1464 const struct switchdev_obj_port_vlan *vlan,
1465 struct switchdev_trans *trans)
1466{
1467 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1468 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1469 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1470 s8 cpu_port = ds->dst->cpu_port;
1471 struct bcm_sf2_vlan *vl;
1472 u16 vid;
1473
1474 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1475 vl = &priv->vlans[vid];
1476
1477 bcm_sf2_get_vlan_entry(priv, vid, vl);
1478
1479 vl->members |= BIT(port) | BIT(cpu_port);
1480 if (untagged)
1481 vl->untag |= BIT(port) | BIT(cpu_port);
1482 else
1483 vl->untag &= ~(BIT(port) | BIT(cpu_port));
1484
1485 bcm_sf2_set_vlan_entry(priv, vid, vl);
1486 bcm_sf2_sw_fast_age_vlan(priv, vid);
1487 }
1488
1489 if (pvid) {
1490 core_writel(priv, vlan->vid_end, CORE_DEFAULT_1Q_TAG_P(port));
1491 core_writel(priv, vlan->vid_end,
1492 CORE_DEFAULT_1Q_TAG_P(cpu_port));
1493		bcm_sf2_sw_fast_age_vlan(priv, vlan->vid_end);
1494 }
1495}
1496
1497static int bcm_sf2_sw_vlan_del(struct dsa_switch *ds, int port,
1498 const struct switchdev_obj_port_vlan *vlan)
1499{
1500 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1501 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1502 s8 cpu_port = ds->dst->cpu_port;
1503 struct bcm_sf2_vlan *vl;
1504 u16 vid, pvid;
1505 int ret;
1506
1507 pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));
1508
1509 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1510 vl = &priv->vlans[vid];
1511
1512 ret = bcm_sf2_get_vlan_entry(priv, vid, vl);
1513 if (ret)
1514 return ret;
1515
1516 vl->members &= ~BIT(port);
1517 if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
1518 vl->members = 0;
1519 if (pvid == vid)
1520 pvid = 0;
1521 if (untagged) {
1522 vl->untag &= ~BIT(port);
1523			if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port))
1524 vl->untag = 0;
1525 }
1526
1527 bcm_sf2_set_vlan_entry(priv, vid, vl);
1528 bcm_sf2_sw_fast_age_vlan(priv, vid);
1529 }
1530
1531 core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(port));
1532 core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(cpu_port));
1533	bcm_sf2_sw_fast_age_vlan(priv, pvid);
1534
1535 return 0;
1536}
1537
1538static int bcm_sf2_sw_vlan_dump(struct dsa_switch *ds, int port,
1539 struct switchdev_obj_port_vlan *vlan,
1540 int (*cb)(struct switchdev_obj *obj))
1541{
1542 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1543 struct bcm_sf2_port_status *p = &priv->port_sts[port];
1544 struct bcm_sf2_vlan *vl;
1545 u16 vid, pvid;
1546 int err = 0;
1547
1548 pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));
1549
1550 for (vid = 0; vid < VLAN_N_VID; vid++) {
1551 vl = &priv->vlans[vid];
1552
1553 if (!(vl->members & BIT(port)))
1554 continue;
1555
1556 vlan->vid_begin = vlan->vid_end = vid;
1557 vlan->flags = 0;
1558
1559 if (vl->untag & BIT(port))
1560 vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1561		if (pvid == vid)
1562 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
1563
1564 err = cb(&vlan->obj);
1565 if (err)
1566 break;
1567 }
1568
1569 return err;
1570}
1571
1572static int bcm_sf2_sw_setup(struct dsa_switch *ds)
1573{
1574 const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
1575 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1576 struct device_node *dn;
1577 void __iomem **base;
1578 unsigned int port;
1579 unsigned int i;
1580 u32 reg, rev;
1581 int ret;
1582
1583 spin_lock_init(&priv->indir_lock);
1584 mutex_init(&priv->stats_mutex);
1585
1586 /* All the interesting properties are at the parent device_node
1587 * level
1588 */
1589 dn = ds->cd->of_node->parent;
1590 bcm_sf2_identify_ports(priv, ds->cd->of_node);
1591
1592 priv->irq0 = irq_of_parse_and_map(dn, 0);
1593 priv->irq1 = irq_of_parse_and_map(dn, 1);
1594
1595 base = &priv->core;
1596 for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
1597 *base = of_iomap(dn, i);
1598 if (*base == NULL) {
1599 pr_err("unable to find register: %s\n", reg_names[i]);
1600 ret = -ENOMEM;
1601 goto out_unmap;
1602 }
1603 base++;
1604 }
1605
1606 ret = bcm_sf2_sw_rst(priv);
1607 if (ret) {
1608 pr_err("unable to software reset switch: %d\n", ret);
1609 goto out_unmap;
1610 }
1611
1612 ret = bcm_sf2_mdio_register(ds);
1613 if (ret) {
1614 pr_err("failed to register MDIO bus\n");
1615 goto out_unmap;
1616 }
1617
1618 /* Disable all interrupts and request them */
1619 bcm_sf2_intr_disable(priv);
1620
1621 ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
1622 "switch_0", priv);
1623 if (ret < 0) {
1624 pr_err("failed to request switch_0 IRQ\n");
1625 goto out_unmap;
1626 }
1627
1628 ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
1629 "switch_1", priv);
1630 if (ret < 0) {
1631 pr_err("failed to request switch_1 IRQ\n");
1632 goto out_free_irq0;
1633 }
1634
1635 /* Reset the MIB counters */
1636 reg = core_readl(priv, CORE_GMNCFGCFG);
1637 reg |= RST_MIB_CNT;
1638 core_writel(priv, reg, CORE_GMNCFGCFG);
1639 reg &= ~RST_MIB_CNT;
1640 core_writel(priv, reg, CORE_GMNCFGCFG);
1641
1642 /* Get the maximum number of ports for this switch */
1643 priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
1644 if (priv->hw_params.num_ports > DSA_MAX_PORTS)
1645 priv->hw_params.num_ports = DSA_MAX_PORTS;
1646
1647 /* Assume a single GPHY setup if we can't read that property */
1648 if (of_property_read_u32(dn, "brcm,num-gphy",
1649 &priv->hw_params.num_gphy))
1650 priv->hw_params.num_gphy = 1;
1651
1652 /* Enable all valid ports and disable those unused */
1653 for (port = 0; port < priv->hw_params.num_ports; port++) {
1654 /* IMP port receives special treatment */
1655 if ((1 << port) & ds->enabled_port_mask)
1656 bcm_sf2_port_setup(ds, port, NULL);
1657 else if (dsa_is_cpu_port(ds, port))
1658 bcm_sf2_imp_setup(ds, port);
1659 else
1660 bcm_sf2_port_disable(ds, port, NULL);
1661 }
1662
1663 bcm_sf2_sw_configure_vlan(ds);
1664
1665 rev = reg_readl(priv, REG_SWITCH_REVISION);
1666 priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
1667 SWITCH_TOP_REV_MASK;
1668 priv->hw_params.core_rev = (rev & SF2_REV_MASK);
1669
1670 rev = reg_readl(priv, REG_PHY_REVISION);
1671 priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;
1672
1673 pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
1674 priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
1675 priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
1676 priv->core, priv->irq0, priv->irq1);
1677
1678 return 0;
1679
1680out_free_irq0:
1681 free_irq(priv->irq0, priv);
1682out_unmap:
1683 base = &priv->core;
1684 for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
1685 if (*base)
1686 iounmap(*base);
1687 base++;
1688 }
1689 bcm_sf2_mdio_unregister(priv);
1690 return ret;
1691}
1692
1373static struct dsa_switch_driver bcm_sf2_switch_driver = { 1693static struct dsa_switch_driver bcm_sf2_switch_driver = {
1374 .tag_protocol = DSA_TAG_PROTO_BRCM, 1694 .tag_protocol = DSA_TAG_PROTO_BRCM,
1375 .probe = bcm_sf2_sw_drv_probe, 1695 .probe = bcm_sf2_sw_drv_probe,
1376 .setup = bcm_sf2_sw_setup, 1696 .setup = bcm_sf2_sw_setup,
1377 .set_addr = bcm_sf2_sw_set_addr, 1697 .set_addr = bcm_sf2_sw_set_addr,
1378 .get_phy_flags = bcm_sf2_sw_get_phy_flags, 1698 .get_phy_flags = bcm_sf2_sw_get_phy_flags,
1379 .phy_read = bcm_sf2_sw_phy_read,
1380 .phy_write = bcm_sf2_sw_phy_write,
1381 .get_strings = bcm_sf2_sw_get_strings, 1699 .get_strings = bcm_sf2_sw_get_strings,
1382 .get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats, 1700 .get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats,
1383 .get_sset_count = bcm_sf2_sw_get_sset_count, 1701 .get_sset_count = bcm_sf2_sw_get_sset_count,
@@ -1398,6 +1716,11 @@ static struct dsa_switch_driver bcm_sf2_switch_driver = {
1398 .port_fdb_add = bcm_sf2_sw_fdb_add, 1716 .port_fdb_add = bcm_sf2_sw_fdb_add,
1399 .port_fdb_del = bcm_sf2_sw_fdb_del, 1717 .port_fdb_del = bcm_sf2_sw_fdb_del,
1400 .port_fdb_dump = bcm_sf2_sw_fdb_dump, 1718 .port_fdb_dump = bcm_sf2_sw_fdb_dump,
1719 .port_vlan_filtering = bcm_sf2_sw_vlan_filtering,
1720 .port_vlan_prepare = bcm_sf2_sw_vlan_prepare,
1721 .port_vlan_add = bcm_sf2_sw_vlan_add,
1722 .port_vlan_del = bcm_sf2_sw_vlan_del,
1723 .port_vlan_dump = bcm_sf2_sw_vlan_dump,
1401}; 1724};
1402 1725
1403static int __init bcm_sf2_init(void) 1726static int __init bcm_sf2_init(void)
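
The indirect pseudo-PHY path added above hides a compact address computation: behind MDIO_MASTER_SEL, the pseudo-PHY is reached as page 0x80 with offset regnum << 1 (and the PHY address latch as 0x70), and the combined "page << 8 | offset" value is shifted left by two to turn a register index into a byte offset in the core window. A minimal sketch of just that arithmetic; the helper names are invented here for illustration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helpers; only the arithmetic mirrors the driver above. */
static uint32_t sf2_pseudo_phy_addr_reg(void)
{
	return 0x70u << 2;                      /* PHY address latch */
}

static uint32_t sf2_pseudo_phy_data_reg(unsigned regnum)
{
	/* Page << 8 | offset, then shifted into the core window. */
	return ((0x80u << 8) | (regnum << 1)) << 2;
}

int main(void)
{
	printf("address latch  @ core + 0x%x\n", sf2_pseudo_phy_addr_reg());
	printf("PHY register 0 @ core + 0x%x\n", sf2_pseudo_phy_data_reg(0));
	printf("PHY register 1 @ core + 0x%x\n", sf2_pseudo_phy_data_reg(1));
	return 0;
}
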
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 200b1f5fdb56..463bed8cbe4c 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -21,6 +21,7 @@
21#include <linux/ethtool.h> 21#include <linux/ethtool.h>
22#include <linux/types.h> 22#include <linux/types.h>
23#include <linux/bitops.h> 23#include <linux/bitops.h>
24#include <linux/if_vlan.h>
24 25
25#include <net/dsa.h> 26#include <net/dsa.h>
26 27
@@ -50,6 +51,7 @@ struct bcm_sf2_port_status {
50 struct ethtool_eee eee; 51 struct ethtool_eee eee;
51 52
52 u32 vlan_ctl_mask; 53 u32 vlan_ctl_mask;
54 u16 pvid;
53 55
54 struct net_device *bridge_dev; 56 struct net_device *bridge_dev;
55}; 57};
@@ -63,6 +65,11 @@ struct bcm_sf2_arl_entry {
63 u8 is_static:1; 65 u8 is_static:1;
64}; 66};
65 67
68struct bcm_sf2_vlan {
69 u16 members;
70 u16 untag;
71};
72
66static inline void bcm_sf2_mac_from_u64(u64 src, u8 *dst) 73static inline void bcm_sf2_mac_from_u64(u64 src, u8 *dst)
67{ 74{
68 unsigned int i; 75 unsigned int i;
@@ -142,6 +149,15 @@ struct bcm_sf2_priv {
142 149
143 /* Bitmask of ports having an integrated PHY */ 150 /* Bitmask of ports having an integrated PHY */
144 unsigned int int_phy_mask; 151 unsigned int int_phy_mask;
152
153 /* Master and slave MDIO bus controller */
154 unsigned int indir_phy_mask;
155 struct device_node *master_mii_dn;
156 struct mii_bus *slave_mii_bus;
157 struct mii_bus *master_mii_bus;
158
159 /* Cache of programmed VLANs */
160 struct bcm_sf2_vlan vlans[VLAN_N_VID];
145}; 161};
146 162
147struct bcm_sf2_hw_stats { 163struct bcm_sf2_hw_stats {
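
One consequence of the new vlans[] cache is easy to quantify: each bcm_sf2_vlan packs the 9-bit member and untag port maps into two u16 fields, so at VLAN_N_VID (4096, per <linux/if_vlan.h>) entries the whole table adds 4096 * 4 bytes = 16 KiB to the private state. A self-contained check of that arithmetic, with the struct redeclared locally for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_N_VID 4096        /* mirrors <linux/if_vlan.h> */

struct bcm_sf2_vlan {          /* mirrors the struct added above */
	uint16_t members;
	uint16_t untag;
};

int main(void)
{
	static_assert(sizeof(struct bcm_sf2_vlan) == 4, "two u16 fields");
	printf("VLAN cache: %zu bytes\n",
	       sizeof(struct bcm_sf2_vlan) * VLAN_N_VID);  /* 16384 */
	return 0;
}
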
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index 97780d43b5c0..9f2a9cb42074 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -274,6 +274,23 @@
274#define CORE_ARLA_SRCH_RSLT_MACVID(x) (CORE_ARLA_SRCH_RSLT_0_MACVID + ((x) * 0x40)) 274#define CORE_ARLA_SRCH_RSLT_MACVID(x) (CORE_ARLA_SRCH_RSLT_0_MACVID + ((x) * 0x40))
275#define CORE_ARLA_SRCH_RSLT(x) (CORE_ARLA_SRCH_RSLT_0 + ((x) * 0x40)) 275#define CORE_ARLA_SRCH_RSLT(x) (CORE_ARLA_SRCH_RSLT_0 + ((x) * 0x40))
276 276
277#define CORE_ARLA_VTBL_RWCTRL 0x1600
278#define ARLA_VTBL_CMD_WRITE 0
279#define ARLA_VTBL_CMD_READ 1
280#define ARLA_VTBL_CMD_CLEAR 2
281#define ARLA_VTBL_STDN (1 << 7)
282
283#define CORE_ARLA_VTBL_ADDR 0x1604
284#define VTBL_ADDR_INDEX_MASK 0xfff
285
286#define CORE_ARLA_VTBL_ENTRY 0x160c
287#define FWD_MAP_MASK 0x1ff
288#define UNTAG_MAP_MASK 0x1ff
289#define UNTAG_MAP_SHIFT 9
290#define MSTP_INDEX_MASK 0x7
291#define MSTP_INDEX_SHIFT 18
292#define FWD_MODE (1 << 21)
293
277#define CORE_MEM_PSM_VDD_CTRL 0x2380 294#define CORE_MEM_PSM_VDD_CTRL 0x2380
278#define P_TXQ_PSM_VDD_SHIFT 2 295#define P_TXQ_PSM_VDD_SHIFT 2
279#define P_TXQ_PSM_VDD_MASK 0x3 296#define P_TXQ_PSM_VDD_MASK 0x3
@@ -287,6 +304,59 @@
287#define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8)) 304#define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8))
288#define PORT_VLAN_CTRL_MASK 0x1ff 305#define PORT_VLAN_CTRL_MASK 0x1ff
289 306
307#define CORE_VLAN_CTRL0 0xd000
308#define CHANGE_1P_VID_INNER (1 << 0)
309#define CHANGE_1P_VID_OUTER (1 << 1)
310#define CHANGE_1Q_VID (1 << 3)
311#define VLAN_LEARN_MODE_SVL (0 << 5)
312#define VLAN_LEARN_MODE_IVL (3 << 5)
313#define VLAN_EN (1 << 7)
314
315#define CORE_VLAN_CTRL1 0xd004
316#define EN_RSV_MCAST_FWDMAP (1 << 2)
317#define EN_RSV_MCAST_UNTAG (1 << 3)
318#define EN_IPMC_BYPASS_FWDMAP (1 << 5)
319#define EN_IPMC_BYPASS_UNTAG (1 << 6)
320
321#define CORE_VLAN_CTRL2 0xd008
322#define EN_MIIM_BYPASS_V_FWDMAP (1 << 2)
323#define EN_GMRP_GVRP_V_FWDMAP (1 << 5)
324#define EN_GMRP_GVRP_UNTAG_MAP (1 << 6)
325
326#define CORE_VLAN_CTRL3 0xd00c
327#define EN_DROP_NON1Q_MASK 0x1ff
328
329#define CORE_VLAN_CTRL4 0xd014
330#define RESV_MCAST_FLOOD (1 << 1)
331#define EN_DOUBLE_TAG_MASK 0x3
332#define EN_DOUBLE_TAG_SHIFT 2
333#define EN_MGE_REV_GMRP (1 << 4)
334#define EN_MGE_REV_GVRP (1 << 5)
335#define INGR_VID_CHK_SHIFT 6
336#define INGR_VID_CHK_MASK 0x3
337#define INGR_VID_CHK_FWD (0 << INGR_VID_CHK_SHIFT)
338#define INGR_VID_CHK_DROP (1 << INGR_VID_CHK_SHIFT)
339#define INGR_VID_CHK_NO_CHK (2 << INGR_VID_CHK_SHIFT)
340#define INGR_VID_CHK_VID_VIOL_IMP (3 << INGR_VID_CHK_SHIFT)
341
342#define CORE_VLAN_CTRL5 0xd018
343#define EN_CPU_RX_BYP_INNER_CRCCHCK (1 << 0)
344#define EN_VID_FFF_FWD (1 << 2)
345#define DROP_VTABLE_MISS (1 << 3)
346#define EGRESS_DIR_FRM_BYP_TRUNK_EN (1 << 4)
347#define PRESV_NON1Q (1 << 6)
348
349#define CORE_VLAN_CTRL6 0xd01c
350#define STRICT_SFD_DETECT (1 << 0)
351#define DIS_ARL_BUST_LMIT (1 << 4)
352
353#define CORE_DEFAULT_1Q_TAG_P(x) (0xd040 + ((x) * 8))
354#define CFI_SHIFT 12
355#define PRI_SHIFT 13
356#define PRI_MASK 0x7
357
358#define CORE_JOIN_ALL_VLAN_EN 0xd140
359
290#define CORE_EEE_EN_CTRL 0x24800 360#define CORE_EEE_EN_CTRL 0x24800
291#define CORE_EEE_LPI_INDICATE 0x24810 361#define CORE_EEE_LPI_INDICATE 0x24810
292 362
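
The CORE_ARLA_VTBL_ENTRY layout defined above packs the forward (member) map into bits 8:0, the untag map into bits 17:9, the MSTP index into bits 20:18 and FWD_MODE into bit 21. A small encode/decode pair over that layout, reusing the constants verbatim (the helper names themselves are invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define FWD_MAP_MASK     0x1ff
#define UNTAG_MAP_MASK   0x1ff
#define UNTAG_MAP_SHIFT  9
#define MSTP_INDEX_MASK  0x7
#define MSTP_INDEX_SHIFT 18

static uint32_t vtbl_encode(uint16_t members, uint16_t untag, uint8_t mstp)
{
	return (members & FWD_MAP_MASK) |
	       ((uint32_t)(untag & UNTAG_MAP_MASK) << UNTAG_MAP_SHIFT) |
	       ((uint32_t)(mstp & MSTP_INDEX_MASK) << MSTP_INDEX_SHIFT);
}

static void vtbl_decode(uint32_t entry, uint16_t *members, uint16_t *untag)
{
	*members = entry & FWD_MAP_MASK;
	*untag = (entry >> UNTAG_MAP_SHIFT) & UNTAG_MAP_MASK;
}

int main(void)
{
	/* Ports 0 and 8 (here assumed to be the CPU port) as members,
	 * port 0 egress-untagged.
	 */
	uint32_t entry = vtbl_encode((1 << 0) | (1 << 8), 1 << 0, 0);
	uint16_t m, u;

	vtbl_decode(entry, &m, &u);
	printf("entry=0x%06x members=0x%03x untag=0x%03x\n", entry, m, u);
	return 0;
}

This is the same packing bcm_sf2_set_vlan_entry() performs before kicking ARLA_VTBL_CMD_WRITE, and the same unpacking bcm_sf2_get_vlan_entry() applies after ARLA_VTBL_CMD_READ.
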
diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig
new file mode 100644
index 000000000000..490bc06f993e
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/Kconfig
@@ -0,0 +1,7 @@
1config NET_DSA_MV88E6XXX
2 tristate "Marvell 88E6xxx Ethernet switch fabric support"
3 depends on NET_DSA
4 select NET_DSA_TAG_EDSA
5 help
6 This driver adds support for most of the Marvell 88E6xxx models of
7 Ethernet switch chips, except 88E6060.
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
new file mode 100644
index 000000000000..6e29a75ee2f7
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_NET_DSA_MV88E6XXX) += chip.o
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx/chip.c
index ba9dfc9421ef..5cb06f7673af 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support 2 * Marvell 88e6xxx Ethernet switch single-chip support
3 *
3 * Copyright (c) 2008 Marvell Semiconductor 4 * Copyright (c) 2008 Marvell Semiconductor
4 * 5 *
5 * Copyright (c) 2015 CMC Electronics, Inc. 6 * Copyright (c) 2015 CMC Electronics, Inc.
@@ -21,6 +22,8 @@
21#include <linux/list.h> 22#include <linux/list.h>
22#include <linux/mdio.h> 23#include <linux/mdio.h>
23#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/of_device.h>
26#include <linux/of_mdio.h>
24#include <linux/netdevice.h> 27#include <linux/netdevice.h>
25#include <linux/gpio/consumer.h> 28#include <linux/gpio/consumer.h>
26#include <linux/phy.h> 29#include <linux/phy.h>
@@ -28,29 +31,82 @@
28#include <net/switchdev.h> 31#include <net/switchdev.h>
29#include "mv88e6xxx.h" 32#include "mv88e6xxx.h"
30 33
31static void assert_smi_lock(struct mv88e6xxx_priv_state *ps) 34static void assert_reg_lock(struct mv88e6xxx_chip *chip)
32{ 35{
33 if (unlikely(!mutex_is_locked(&ps->smi_mutex))) { 36 if (unlikely(!mutex_is_locked(&chip->reg_lock))) {
34 dev_err(ps->dev, "SMI lock not held!\n"); 37 dev_err(chip->dev, "Switch registers lock not held!\n");
35 dump_stack(); 38 dump_stack();
36 } 39 }
37} 40}
38 41
39/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will 42/* The switch ADDR[4:1] configuration pins define the chip SMI device address
40 * use all 32 SMI bus addresses on its SMI bus, and all switch registers 43 * (ADDR[0] is always zero, thus only even SMI addresses can be strapped).
41 * will be directly accessible on some {device address,register address} 44 *
42 * pair. If the ADDR[4:0] pins are not strapped to zero, the switch 45 * When ADDR is all zero, the chip uses Single-chip Addressing Mode, assuming it
43 * will only respond to SMI transactions to that specific address, and 46 * is the only device connected to the SMI master. In this mode it responds to
44 * an indirect addressing mechanism needs to be used to access its 47 * all 32 possible SMI addresses, and thus maps directly the internal devices.
45 * registers. 48 *
49 * When ADDR is non-zero, the chip uses Multi-chip Addressing Mode, allowing
50 * multiple devices to share the SMI interface. In this mode it responds to only
51 * 2 registers, used to indirectly access the internal SMI devices.
46 */ 52 */
47static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr) 53
54static int mv88e6xxx_smi_read(struct mv88e6xxx_chip *chip,
55 int addr, int reg, u16 *val)
56{
57 if (!chip->smi_ops)
58 return -EOPNOTSUPP;
59
60 return chip->smi_ops->read(chip, addr, reg, val);
61}
62
63static int mv88e6xxx_smi_write(struct mv88e6xxx_chip *chip,
64 int addr, int reg, u16 val)
65{
66 if (!chip->smi_ops)
67 return -EOPNOTSUPP;
68
69 return chip->smi_ops->write(chip, addr, reg, val);
70}
71
72static int mv88e6xxx_smi_single_chip_read(struct mv88e6xxx_chip *chip,
73 int addr, int reg, u16 *val)
74{
75 int ret;
76
77 ret = mdiobus_read_nested(chip->bus, addr, reg);
78 if (ret < 0)
79 return ret;
80
81 *val = ret & 0xffff;
82
83 return 0;
84}
85
86static int mv88e6xxx_smi_single_chip_write(struct mv88e6xxx_chip *chip,
87 int addr, int reg, u16 val)
88{
89 int ret;
90
91 ret = mdiobus_write_nested(chip->bus, addr, reg, val);
92 if (ret < 0)
93 return ret;
94
95 return 0;
96}
97
98static const struct mv88e6xxx_ops mv88e6xxx_smi_single_chip_ops = {
99 .read = mv88e6xxx_smi_single_chip_read,
100 .write = mv88e6xxx_smi_single_chip_write,
101};
102
103static int mv88e6xxx_smi_multi_chip_wait(struct mv88e6xxx_chip *chip)
48{ 104{
49 int ret; 105 int ret;
50 int i; 106 int i;
51 107
52 for (i = 0; i < 16; i++) { 108 for (i = 0; i < 16; i++) {
53 ret = mdiobus_read_nested(bus, sw_addr, SMI_CMD); 109 ret = mdiobus_read_nested(chip->bus, chip->sw_addr, SMI_CMD);
54 if (ret < 0) 110 if (ret < 0)
55 return ret; 111 return ret;
56 112
@@ -61,143 +117,168 @@ static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
61 return -ETIMEDOUT; 117 return -ETIMEDOUT;
62} 118}
63 119
64static int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, 120static int mv88e6xxx_smi_multi_chip_read(struct mv88e6xxx_chip *chip,
65 int reg) 121 int addr, int reg, u16 *val)
66{ 122{
67 int ret; 123 int ret;
68 124
69 if (sw_addr == 0)
70 return mdiobus_read_nested(bus, addr, reg);
71
72 /* Wait for the bus to become free. */ 125 /* Wait for the bus to become free. */
73 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); 126 ret = mv88e6xxx_smi_multi_chip_wait(chip);
74 if (ret < 0) 127 if (ret < 0)
75 return ret; 128 return ret;
76 129
77 /* Transmit the read command. */ 130 /* Transmit the read command. */
78 ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD, 131 ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_CMD,
79 SMI_CMD_OP_22_READ | (addr << 5) | reg); 132 SMI_CMD_OP_22_READ | (addr << 5) | reg);
80 if (ret < 0) 133 if (ret < 0)
81 return ret; 134 return ret;
82 135
83 /* Wait for the read command to complete. */ 136 /* Wait for the read command to complete. */
84 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); 137 ret = mv88e6xxx_smi_multi_chip_wait(chip);
85 if (ret < 0) 138 if (ret < 0)
86 return ret; 139 return ret;
87 140
88 /* Read the data. */ 141 /* Read the data. */
89 ret = mdiobus_read_nested(bus, sw_addr, SMI_DATA); 142 ret = mdiobus_read_nested(chip->bus, chip->sw_addr, SMI_DATA);
90 if (ret < 0)
91 return ret;
92
93 return ret & 0xffff;
94}
95
96static int _mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps,
97 int addr, int reg)
98{
99 int ret;
100
101 assert_smi_lock(ps);
102
103 ret = __mv88e6xxx_reg_read(ps->bus, ps->sw_addr, addr, reg);
104 if (ret < 0) 143 if (ret < 0)
105 return ret; 144 return ret;
106 145
107 dev_dbg(ps->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n", 146 *val = ret & 0xffff;
108 addr, reg, ret);
109
110 return ret;
111}
112
113int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps, int addr, int reg)
114{
115 int ret;
116 147
117 mutex_lock(&ps->smi_mutex); 148 return 0;
118 ret = _mv88e6xxx_reg_read(ps, addr, reg);
119 mutex_unlock(&ps->smi_mutex);
120
121 return ret;
122} 149}
123 150
124static int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr, 151static int mv88e6xxx_smi_multi_chip_write(struct mv88e6xxx_chip *chip,
125 int reg, u16 val) 152 int addr, int reg, u16 val)
126{ 153{
127 int ret; 154 int ret;
128 155
129 if (sw_addr == 0)
130 return mdiobus_write_nested(bus, addr, reg, val);
131
132 /* Wait for the bus to become free. */ 156 /* Wait for the bus to become free. */
133 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); 157 ret = mv88e6xxx_smi_multi_chip_wait(chip);
134 if (ret < 0) 158 if (ret < 0)
135 return ret; 159 return ret;
136 160
137 /* Transmit the data to write. */ 161 /* Transmit the data to write. */
138 ret = mdiobus_write_nested(bus, sw_addr, SMI_DATA, val); 162 ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_DATA, val);
139 if (ret < 0) 163 if (ret < 0)
140 return ret; 164 return ret;
141 165
142 /* Transmit the write command. */ 166 /* Transmit the write command. */
143 ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD, 167 ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_CMD,
144 SMI_CMD_OP_22_WRITE | (addr << 5) | reg); 168 SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
145 if (ret < 0) 169 if (ret < 0)
146 return ret; 170 return ret;
147 171
148 /* Wait for the write command to complete. */ 172 /* Wait for the write command to complete. */
149 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); 173 ret = mv88e6xxx_smi_multi_chip_wait(chip);
150 if (ret < 0) 174 if (ret < 0)
151 return ret; 175 return ret;
152 176
153 return 0; 177 return 0;
154} 178}
155 179
156static int _mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr, 180static const struct mv88e6xxx_ops mv88e6xxx_smi_multi_chip_ops = {
157 int reg, u16 val) 181 .read = mv88e6xxx_smi_multi_chip_read,
182 .write = mv88e6xxx_smi_multi_chip_write,
183};
184
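
With both ops tables defined, selecting the accessor follows directly from the strapped address described in the comment above: a zero sw_addr means the chip is alone on the bus and maps its devices directly, anything non-zero means the two-register SMI_CMD/SMI_DATA indirection. A standalone sketch of that selection, with stand-in types since this is illustration rather than the driver's actual probe path:

#include <stdio.h>

/* Minimal stand-ins for the driver types, for illustration only. */
struct mv88e6xxx_ops_sketch { const char *mode; };

static const struct mv88e6xxx_ops_sketch single_chip_ops = { "direct" };
static const struct mv88e6xxx_ops_sketch multi_chip_ops  = { "indirect" };

/* ADDR strapped to zero -> single-chip (direct) addressing;
 * any other address -> multi-chip (indirect) addressing. */
static const struct mv88e6xxx_ops_sketch *pick_smi_ops(int sw_addr)
{
	return sw_addr ? &multi_chip_ops : &single_chip_ops;
}

int main(void)
{
	printf("sw_addr 0 -> %s\n", pick_smi_ops(0)->mode);
	printf("sw_addr 4 -> %s\n", pick_smi_ops(4)->mode);
	return 0;
}
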
185static int mv88e6xxx_read(struct mv88e6xxx_chip *chip,
186 int addr, int reg, u16 *val)
158{ 187{
159 assert_smi_lock(ps); 188 int err;
189
190 assert_reg_lock(chip);
191
192 err = mv88e6xxx_smi_read(chip, addr, reg, val);
193 if (err)
194 return err;
195
196 dev_dbg(chip->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
197 addr, reg, *val);
198
199 return 0;
200}
201
202static int mv88e6xxx_write(struct mv88e6xxx_chip *chip,
203 int addr, int reg, u16 val)
204{
205 int err;
160 206
161 dev_dbg(ps->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n", 207 assert_reg_lock(chip);
208
209 err = mv88e6xxx_smi_write(chip, addr, reg, val);
210 if (err)
211 return err;
212
213 dev_dbg(chip->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
162 addr, reg, val); 214 addr, reg, val);
163 215
164 return __mv88e6xxx_reg_write(ps->bus, ps->sw_addr, addr, reg, val); 216 return 0;
217}
218
219static int _mv88e6xxx_reg_read(struct mv88e6xxx_chip *chip, int addr, int reg)
220{
221 u16 val;
222 int err;
223
224 err = mv88e6xxx_read(chip, addr, reg, &val);
225 if (err)
226 return err;
227
228 return val;
165} 229}
166 230
167int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr, 231static int mv88e6xxx_reg_read(struct mv88e6xxx_chip *chip, int addr, int reg)
168 int reg, u16 val)
169{ 232{
170 int ret; 233 int ret;
171 234
172 mutex_lock(&ps->smi_mutex); 235 mutex_lock(&chip->reg_lock);
173 ret = _mv88e6xxx_reg_write(ps, addr, reg, val); 236 ret = _mv88e6xxx_reg_read(chip, addr, reg);
174 mutex_unlock(&ps->smi_mutex); 237 mutex_unlock(&chip->reg_lock);
238
239 return ret;
240}
241
242static int _mv88e6xxx_reg_write(struct mv88e6xxx_chip *chip, int addr,
243 int reg, u16 val)
244{
245 return mv88e6xxx_write(chip, addr, reg, val);
246}
247
248static int mv88e6xxx_reg_write(struct mv88e6xxx_chip *chip, int addr,
249 int reg, u16 val)
250{
251 int ret;
252
253 mutex_lock(&chip->reg_lock);
254 ret = _mv88e6xxx_reg_write(chip, addr, reg, val);
255 mutex_unlock(&chip->reg_lock);
175 256
176 return ret; 257 return ret;
177} 258}
178 259
179static int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr) 260static int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
180{ 261{
181 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 262 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
182 int err; 263 int err;
183 264
184 err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_01, 265 err = mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_MAC_01,
185 (addr[0] << 8) | addr[1]); 266 (addr[0] << 8) | addr[1]);
186 if (err) 267 if (err)
187 return err; 268 return err;
188 269
189 err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_23, 270 err = mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_MAC_23,
190 (addr[2] << 8) | addr[3]); 271 (addr[2] << 8) | addr[3]);
191 if (err) 272 if (err)
192 return err; 273 return err;
193 274
194 return mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_45, 275 return mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_MAC_45,
195 (addr[4] << 8) | addr[5]); 276 (addr[4] << 8) | addr[5]);
196} 277}
197 278
198static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr) 279static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
199{ 280{
200 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 281 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
201 int ret; 282 int ret;
202 int i; 283 int i;
203 284
@@ -205,7 +286,7 @@ static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
205 int j; 286 int j;
206 287
207 /* Write the MAC address byte. */ 288 /* Write the MAC address byte. */
208 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MAC, 289 ret = mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
209 GLOBAL2_SWITCH_MAC_BUSY | 290 GLOBAL2_SWITCH_MAC_BUSY |
210 (i << 8) | addr[i]); 291 (i << 8) | addr[i]);
211 if (ret) 292 if (ret)
@@ -213,7 +294,7 @@ static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
213 294
214 /* Wait for the write to complete. */ 295 /* Wait for the write to complete. */
215 for (j = 0; j < 16; j++) { 296 for (j = 0; j < 16; j++) {
216 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, 297 ret = mv88e6xxx_reg_read(chip, REG_GLOBAL2,
217 GLOBAL2_SWITCH_MAC); 298 GLOBAL2_SWITCH_MAC);
218 if (ret < 0) 299 if (ret < 0)
219 return ret; 300 return ret;
@@ -228,49 +309,49 @@ static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
228 return 0; 309 return 0;
229} 310}
230 311
231int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr) 312static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
232{ 313{
233 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 314 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
234 315
235 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SWITCH_MAC)) 316 if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SWITCH_MAC))
236 return mv88e6xxx_set_addr_indirect(ds, addr); 317 return mv88e6xxx_set_addr_indirect(ds, addr);
237 else 318 else
238 return mv88e6xxx_set_addr_direct(ds, addr); 319 return mv88e6xxx_set_addr_direct(ds, addr);
239} 320}
240 321
241static int _mv88e6xxx_phy_read(struct mv88e6xxx_priv_state *ps, int addr, 322static int mv88e6xxx_mdio_read_direct(struct mv88e6xxx_chip *chip,
242 int regnum) 323 int addr, int regnum)
243{ 324{
244 if (addr >= 0) 325 if (addr >= 0)
245 return _mv88e6xxx_reg_read(ps, addr, regnum); 326 return _mv88e6xxx_reg_read(chip, addr, regnum);
246 return 0xffff; 327 return 0xffff;
247} 328}
248 329
249static int _mv88e6xxx_phy_write(struct mv88e6xxx_priv_state *ps, int addr, 330static int mv88e6xxx_mdio_write_direct(struct mv88e6xxx_chip *chip,
250 int regnum, u16 val) 331 int addr, int regnum, u16 val)
251{ 332{
252 if (addr >= 0) 333 if (addr >= 0)
253 return _mv88e6xxx_reg_write(ps, addr, regnum, val); 334 return _mv88e6xxx_reg_write(chip, addr, regnum, val);
254 return 0; 335 return 0;
255} 336}
256 337
257static int mv88e6xxx_ppu_disable(struct mv88e6xxx_priv_state *ps) 338static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip)
258{ 339{
259 int ret; 340 int ret;
260 unsigned long timeout; 341 unsigned long timeout;
261 342
262 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL); 343 ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL);
263 if (ret < 0) 344 if (ret < 0)
264 return ret; 345 return ret;
265 346
266 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, 347 ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL,
267 ret & ~GLOBAL_CONTROL_PPU_ENABLE); 348 ret & ~GLOBAL_CONTROL_PPU_ENABLE);
268 if (ret) 349 if (ret)
269 return ret; 350 return ret;
270 351
271 timeout = jiffies + 1 * HZ; 352 timeout = jiffies + 1 * HZ;
272 while (time_before(jiffies, timeout)) { 353 while (time_before(jiffies, timeout)) {
273 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS); 354 ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS);
274 if (ret < 0) 355 if (ret < 0)
275 return ret; 356 return ret;
276 357
@@ -283,23 +364,23 @@ static int mv88e6xxx_ppu_disable(struct mv88e6xxx_priv_state *ps)
283 return -ETIMEDOUT; 364 return -ETIMEDOUT;
284} 365}
285 366
286static int mv88e6xxx_ppu_enable(struct mv88e6xxx_priv_state *ps) 367static int mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip)
287{ 368{
288 int ret, err; 369 int ret, err;
289 unsigned long timeout; 370 unsigned long timeout;
290 371
291 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL); 372 ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL);
292 if (ret < 0) 373 if (ret < 0)
293 return ret; 374 return ret;
294 375
295 err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, 376 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL,
296 ret | GLOBAL_CONTROL_PPU_ENABLE); 377 ret | GLOBAL_CONTROL_PPU_ENABLE);
297 if (err) 378 if (err)
298 return err; 379 return err;
299 380
300 timeout = jiffies + 1 * HZ; 381 timeout = jiffies + 1 * HZ;
301 while (time_before(jiffies, timeout)) { 382 while (time_before(jiffies, timeout)) {
302 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS); 383 ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS);
303 if (ret < 0) 384 if (ret < 0)
304 return ret; 385 return ret;
305 386
@@ -314,143 +395,148 @@ static int mv88e6xxx_ppu_enable(struct mv88e6xxx_priv_state *ps)
314 395
315static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly) 396static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
316{ 397{
317 struct mv88e6xxx_priv_state *ps; 398 struct mv88e6xxx_chip *chip;
399
400 chip = container_of(ugly, struct mv88e6xxx_chip, ppu_work);
318 401
319 ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work); 402 mutex_lock(&chip->reg_lock);
320 if (mutex_trylock(&ps->ppu_mutex)) { 403
321 if (mv88e6xxx_ppu_enable(ps) == 0) 404 if (mutex_trylock(&chip->ppu_mutex)) {
322 ps->ppu_disabled = 0; 405 if (mv88e6xxx_ppu_enable(chip) == 0)
323 mutex_unlock(&ps->ppu_mutex); 406 chip->ppu_disabled = 0;
407 mutex_unlock(&chip->ppu_mutex);
324 } 408 }
409
410 mutex_unlock(&chip->reg_lock);
325} 411}
326 412
327static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps) 413static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
328{ 414{
329 struct mv88e6xxx_priv_state *ps = (void *)_ps; 415 struct mv88e6xxx_chip *chip = (void *)_ps;
330 416
331 schedule_work(&ps->ppu_work); 417 schedule_work(&chip->ppu_work);
332} 418}
333 419
334static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_priv_state *ps) 420static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_chip *chip)
335{ 421{
336 int ret; 422 int ret;
337 423
338 mutex_lock(&ps->ppu_mutex); 424 mutex_lock(&chip->ppu_mutex);
339 425
340 /* If the PHY polling unit is enabled, disable it so that 426 /* If the PHY polling unit is enabled, disable it so that
341 * we can access the PHY registers. If it was already 427 * we can access the PHY registers. If it was already
342 * disabled, cancel the timer that is going to re-enable 428 * disabled, cancel the timer that is going to re-enable
343 * it. 429 * it.
344 */ 430 */
345 if (!ps->ppu_disabled) { 431 if (!chip->ppu_disabled) {
346 ret = mv88e6xxx_ppu_disable(ps); 432 ret = mv88e6xxx_ppu_disable(chip);
347 if (ret < 0) { 433 if (ret < 0) {
348 mutex_unlock(&ps->ppu_mutex); 434 mutex_unlock(&chip->ppu_mutex);
349 return ret; 435 return ret;
350 } 436 }
351 ps->ppu_disabled = 1; 437 chip->ppu_disabled = 1;
352 } else { 438 } else {
353 del_timer(&ps->ppu_timer); 439 del_timer(&chip->ppu_timer);
354 ret = 0; 440 ret = 0;
355 } 441 }
356 442
357 return ret; 443 return ret;
358} 444}
359 445
360static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_priv_state *ps) 446static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_chip *chip)
361{ 447{
362 /* Schedule a timer to re-enable the PHY polling unit. */ 448 /* Schedule a timer to re-enable the PHY polling unit. */
363 mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10)); 449 mod_timer(&chip->ppu_timer, jiffies + msecs_to_jiffies(10));
364 mutex_unlock(&ps->ppu_mutex); 450 mutex_unlock(&chip->ppu_mutex);
365} 451}
366 452
367void mv88e6xxx_ppu_state_init(struct mv88e6xxx_priv_state *ps) 453static void mv88e6xxx_ppu_state_init(struct mv88e6xxx_chip *chip)
368{ 454{
369 mutex_init(&ps->ppu_mutex); 455 mutex_init(&chip->ppu_mutex);
370 INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work); 456 INIT_WORK(&chip->ppu_work, mv88e6xxx_ppu_reenable_work);
371 init_timer(&ps->ppu_timer); 457 init_timer(&chip->ppu_timer);
372 ps->ppu_timer.data = (unsigned long)ps; 458 chip->ppu_timer.data = (unsigned long)chip;
373 ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer; 459 chip->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
374} 460}
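
The get/put pair above implements a small trick around the PHY polling unit: the first accessor disables the PPU so PHY registers can be reached directly, later accessors merely cancel the pending re-enable timer, and each put re-arms a 10 ms timer whose work item turns the PPU back on. Below is a minimal single-threaded userspace model of just that logic — a hypothetical sketch, not driver code; the real version serializes with ppu_mutex and defers through a timer plus a workqueue:

#include <stdbool.h>
#include <stdio.h>

static bool ppu_disabled;
static bool reenable_pending;

static void ppu_access_get(void)
{
	if (!ppu_disabled) {
		printf("disable PPU for direct PHY access\n");
		ppu_disabled = true;
	} else {
		/* an earlier put scheduled a re-enable; cancel it */
		reenable_pending = false;
	}
}

static void ppu_access_put(void)
{
	/* defer the re-enable so back-to-back accesses stay cheap */
	reenable_pending = true;
}

int main(void)
{
	ppu_access_get();	/* disables the PPU */
	ppu_access_put();	/* arms the (modeled) 10 ms re-enable */
	ppu_access_get();	/* cancels it instead of toggling the PPU */
	ppu_access_put();
	printf("re-enable pending: %d\n", reenable_pending);
	return 0;
}
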
375 461
376static int mv88e6xxx_phy_read_ppu(struct mv88e6xxx_priv_state *ps, int addr, 462static int mv88e6xxx_mdio_read_ppu(struct mv88e6xxx_chip *chip, int addr,
377 int regnum) 463 int regnum)
378{ 464{
379 int ret; 465 int ret;
380 466
381 ret = mv88e6xxx_ppu_access_get(ps); 467 ret = mv88e6xxx_ppu_access_get(chip);
382 if (ret >= 0) { 468 if (ret >= 0) {
383 ret = _mv88e6xxx_reg_read(ps, addr, regnum); 469 ret = _mv88e6xxx_reg_read(chip, addr, regnum);
384 mv88e6xxx_ppu_access_put(ps); 470 mv88e6xxx_ppu_access_put(chip);
385 } 471 }
386 472
387 return ret; 473 return ret;
388} 474}
389 475
390static int mv88e6xxx_phy_write_ppu(struct mv88e6xxx_priv_state *ps, int addr, 476static int mv88e6xxx_mdio_write_ppu(struct mv88e6xxx_chip *chip, int addr,
391 int regnum, u16 val) 477 int regnum, u16 val)
392{ 478{
393 int ret; 479 int ret;
394 480
395 ret = mv88e6xxx_ppu_access_get(ps); 481 ret = mv88e6xxx_ppu_access_get(chip);
396 if (ret >= 0) { 482 if (ret >= 0) {
397 ret = _mv88e6xxx_reg_write(ps, addr, regnum, val); 483 ret = _mv88e6xxx_reg_write(chip, addr, regnum, val);
398 mv88e6xxx_ppu_access_put(ps); 484 mv88e6xxx_ppu_access_put(chip);
399 } 485 }
400 486
401 return ret; 487 return ret;
402} 488}
403 489
404static bool mv88e6xxx_6065_family(struct mv88e6xxx_priv_state *ps) 490static bool mv88e6xxx_6065_family(struct mv88e6xxx_chip *chip)
405{ 491{
406 return ps->info->family == MV88E6XXX_FAMILY_6065; 492 return chip->info->family == MV88E6XXX_FAMILY_6065;
407} 493}
408 494
409static bool mv88e6xxx_6095_family(struct mv88e6xxx_priv_state *ps) 495static bool mv88e6xxx_6095_family(struct mv88e6xxx_chip *chip)
410{ 496{
411 return ps->info->family == MV88E6XXX_FAMILY_6095; 497 return chip->info->family == MV88E6XXX_FAMILY_6095;
412} 498}
413 499
414static bool mv88e6xxx_6097_family(struct mv88e6xxx_priv_state *ps) 500static bool mv88e6xxx_6097_family(struct mv88e6xxx_chip *chip)
415{ 501{
416 return ps->info->family == MV88E6XXX_FAMILY_6097; 502 return chip->info->family == MV88E6XXX_FAMILY_6097;
417} 503}
418 504
419static bool mv88e6xxx_6165_family(struct mv88e6xxx_priv_state *ps) 505static bool mv88e6xxx_6165_family(struct mv88e6xxx_chip *chip)
420{ 506{
421 return ps->info->family == MV88E6XXX_FAMILY_6165; 507 return chip->info->family == MV88E6XXX_FAMILY_6165;
422} 508}
423 509
424static bool mv88e6xxx_6185_family(struct mv88e6xxx_priv_state *ps) 510static bool mv88e6xxx_6185_family(struct mv88e6xxx_chip *chip)
425{ 511{
426 return ps->info->family == MV88E6XXX_FAMILY_6185; 512 return chip->info->family == MV88E6XXX_FAMILY_6185;
427} 513}
428 514
429static bool mv88e6xxx_6320_family(struct mv88e6xxx_priv_state *ps) 515static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip)
430{ 516{
431 return ps->info->family == MV88E6XXX_FAMILY_6320; 517 return chip->info->family == MV88E6XXX_FAMILY_6320;
432} 518}
433 519
434static bool mv88e6xxx_6351_family(struct mv88e6xxx_priv_state *ps) 520static bool mv88e6xxx_6351_family(struct mv88e6xxx_chip *chip)
435{ 521{
436 return ps->info->family == MV88E6XXX_FAMILY_6351; 522 return chip->info->family == MV88E6XXX_FAMILY_6351;
437} 523}
438 524
439static bool mv88e6xxx_6352_family(struct mv88e6xxx_priv_state *ps) 525static bool mv88e6xxx_6352_family(struct mv88e6xxx_chip *chip)
440{ 526{
441 return ps->info->family == MV88E6XXX_FAMILY_6352; 527 return chip->info->family == MV88E6XXX_FAMILY_6352;
442} 528}
443 529
444static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_priv_state *ps) 530static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
445{ 531{
446 return ps->info->num_databases; 532 return chip->info->num_databases;
447} 533}
448 534
449static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_priv_state *ps) 535static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_chip *chip)
450{ 536{
451 /* Does the device have dedicated FID registers for ATU and VTU ops? */ 537 /* Does the device have dedicated FID registers for ATU and VTU ops? */
452 if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) || 538 if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) ||
453 mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps)) 539 mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip))
454 return true; 540 return true;
455 541
456 return false; 542 return false;
@@ -463,16 +549,16 @@ static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_priv_state *ps)
463static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, 549static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
464 struct phy_device *phydev) 550 struct phy_device *phydev)
465{ 551{
466 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 552 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
467 u32 reg; 553 u32 reg;
468 int ret; 554 int ret;
469 555
470 if (!phy_is_pseudo_fixed_link(phydev)) 556 if (!phy_is_pseudo_fixed_link(phydev))
471 return; 557 return;
472 558
473 mutex_lock(&ps->smi_mutex); 559 mutex_lock(&chip->reg_lock);
474 560
475 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL); 561 ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_PCS_CTRL);
476 if (ret < 0) 562 if (ret < 0)
477 goto out; 563 goto out;
478 564
@@ -484,9 +570,9 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
484 570
485 reg |= PORT_PCS_CTRL_FORCE_LINK; 571 reg |= PORT_PCS_CTRL_FORCE_LINK;
486 if (phydev->link) 572 if (phydev->link)
487 reg |= PORT_PCS_CTRL_LINK_UP; 573 reg |= PORT_PCS_CTRL_LINK_UP;
488 574
489 if (mv88e6xxx_6065_family(ps) && phydev->speed > SPEED_100) 575 if (mv88e6xxx_6065_family(chip) && phydev->speed > SPEED_100)
490 goto out; 576 goto out;
491 577
492 switch (phydev->speed) { 578 switch (phydev->speed) {
@@ -508,8 +594,8 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
508 if (phydev->duplex == DUPLEX_FULL) 594 if (phydev->duplex == DUPLEX_FULL)
509 reg |= PORT_PCS_CTRL_DUPLEX_FULL; 595 reg |= PORT_PCS_CTRL_DUPLEX_FULL;
510 596
511 if ((mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps)) && 597 if ((mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip)) &&
512 (port >= ps->info->num_ports - 2)) { 598 (port >= chip->info->num_ports - 2)) {
513 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) 599 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
514 reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK; 600 reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
515 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) 601 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
@@ -518,19 +604,19 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
518 reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK | 604 reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
519 PORT_PCS_CTRL_RGMII_DELAY_TXCLK); 605 PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
520 } 606 }
521 _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_PCS_CTRL, reg); 607 _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_PCS_CTRL, reg);
522 608
523out: 609out:
524 mutex_unlock(&ps->smi_mutex); 610 mutex_unlock(&chip->reg_lock);
525} 611}
526 612
527static int _mv88e6xxx_stats_wait(struct mv88e6xxx_priv_state *ps) 613static int _mv88e6xxx_stats_wait(struct mv88e6xxx_chip *chip)
528{ 614{
529 int ret; 615 int ret;
530 int i; 616 int i;
531 617
532 for (i = 0; i < 10; i++) { 618 for (i = 0; i < 10; i++) {
533 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_OP); 619 ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_OP);
534 if ((ret & GLOBAL_STATS_OP_BUSY) == 0) 620 if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
535 return 0; 621 return 0;
536 } 622 }
@@ -538,30 +624,29 @@ static int _mv88e6xxx_stats_wait(struct mv88e6xxx_priv_state *ps)
538 return -ETIMEDOUT; 624 return -ETIMEDOUT;
539} 625}
540 626
541static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_priv_state *ps, 627static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
542 int port)
543{ 628{
544 int ret; 629 int ret;
545 630
546 if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps)) 631 if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip))
547 port = (port + 1) << 5; 632 port = (port + 1) << 5;
548 633
549 /* Snapshot the hardware statistics counters for this port. */ 634 /* Snapshot the hardware statistics counters for this port. */
550 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP, 635 ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP,
551 GLOBAL_STATS_OP_CAPTURE_PORT | 636 GLOBAL_STATS_OP_CAPTURE_PORT |
552 GLOBAL_STATS_OP_HIST_RX_TX | port); 637 GLOBAL_STATS_OP_HIST_RX_TX | port);
553 if (ret < 0) 638 if (ret < 0)
554 return ret; 639 return ret;
555 640
556 /* Wait for the snapshotting to complete. */ 641 /* Wait for the snapshotting to complete. */
557 ret = _mv88e6xxx_stats_wait(ps); 642 ret = _mv88e6xxx_stats_wait(chip);
558 if (ret < 0) 643 if (ret < 0)
559 return ret; 644 return ret;
560 645
561 return 0; 646 return 0;
562} 647}
563 648
564static void _mv88e6xxx_stats_read(struct mv88e6xxx_priv_state *ps, 649static void _mv88e6xxx_stats_read(struct mv88e6xxx_chip *chip,
565 int stat, u32 *val) 650 int stat, u32 *val)
566{ 651{
567 u32 _val; 652 u32 _val;
@@ -569,23 +654,23 @@ static void _mv88e6xxx_stats_read(struct mv88e6xxx_priv_state *ps,
569 654
570 *val = 0; 655 *val = 0;
571 656
572 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP, 657 ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP,
573 GLOBAL_STATS_OP_READ_CAPTURED | 658 GLOBAL_STATS_OP_READ_CAPTURED |
574 GLOBAL_STATS_OP_HIST_RX_TX | stat); 659 GLOBAL_STATS_OP_HIST_RX_TX | stat);
575 if (ret < 0) 660 if (ret < 0)
576 return; 661 return;
577 662
578 ret = _mv88e6xxx_stats_wait(ps); 663 ret = _mv88e6xxx_stats_wait(chip);
579 if (ret < 0) 664 if (ret < 0)
580 return; 665 return;
581 666
582 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_32); 667 ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
583 if (ret < 0) 668 if (ret < 0)
584 return; 669 return;
585 670
586 _val = ret << 16; 671 _val = ret << 16;
587 672
588 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_01); 673 ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
589 if (ret < 0) 674 if (ret < 0)
590 return; 675 return;
591 676
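
The snapshot/read pair above is a capture-then-fetch protocol: one op write latches all counters for a port (with the 6320/6352 parts encoding the port as (port + 1) << 5 in the op word), and each captured 32-bit counter is then fetched as two 16-bit halves from GLOBAL_STATS_COUNTER_32 and GLOBAL_STATS_COUNTER_01. A runnable sketch of the arithmetic, with made-up counter values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int port = 2;
	uint16_t op_port = (port + 1) << 5;	/* 6320/6352 port encoding */
	uint16_t hi = 0x0001, lo = 0x86a0;	/* COUNTER_32 / COUNTER_01 */
	uint32_t val = ((uint32_t)hi << 16) | lo;

	printf("stats op port field: 0x%04x\n", op_port);	/* 0x0060 */
	printf("counter: %u\n", val);				/* 100000 */
	return 0;
}
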
@@ -654,26 +739,26 @@ static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
654 { "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, }, 739 { "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, },
655}; 740};
656 741
657static bool mv88e6xxx_has_stat(struct mv88e6xxx_priv_state *ps, 742static bool mv88e6xxx_has_stat(struct mv88e6xxx_chip *chip,
658 struct mv88e6xxx_hw_stat *stat) 743 struct mv88e6xxx_hw_stat *stat)
659{ 744{
660 switch (stat->type) { 745 switch (stat->type) {
661 case BANK0: 746 case BANK0:
662 return true; 747 return true;
663 case BANK1: 748 case BANK1:
664 return mv88e6xxx_6320_family(ps); 749 return mv88e6xxx_6320_family(chip);
665 case PORT: 750 case PORT:
666 return mv88e6xxx_6095_family(ps) || 751 return mv88e6xxx_6095_family(chip) ||
667 mv88e6xxx_6185_family(ps) || 752 mv88e6xxx_6185_family(chip) ||
668 mv88e6xxx_6097_family(ps) || 753 mv88e6xxx_6097_family(chip) ||
669 mv88e6xxx_6165_family(ps) || 754 mv88e6xxx_6165_family(chip) ||
670 mv88e6xxx_6351_family(ps) || 755 mv88e6xxx_6351_family(chip) ||
671 mv88e6xxx_6352_family(ps); 756 mv88e6xxx_6352_family(chip);
672 } 757 }
673 return false; 758 return false;
674} 759}
675 760
676static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps, 761static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
677 struct mv88e6xxx_hw_stat *s, 762 struct mv88e6xxx_hw_stat *s,
678 int port) 763 int port)
679{ 764{
@@ -684,13 +769,13 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps,
684 769
685 switch (s->type) { 770 switch (s->type) {
686 case PORT: 771 case PORT:
687 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), s->reg); 772 ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), s->reg);
688 if (ret < 0) 773 if (ret < 0)
689 return UINT64_MAX; 774 return UINT64_MAX;
690 775
691 low = ret; 776 low = ret;
692 if (s->sizeof_stat == 4) { 777 if (s->sizeof_stat == 4) {
693 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), 778 ret = _mv88e6xxx_reg_read(chip, REG_PORT(port),
694 s->reg + 1); 779 s->reg + 1);
695 if (ret < 0) 780 if (ret < 0)
696 return UINT64_MAX; 781 return UINT64_MAX;
@@ -699,9 +784,9 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps,
699 break; 784 break;
700 case BANK0: 785 case BANK0:
701 case BANK1: 786 case BANK1:
702 _mv88e6xxx_stats_read(ps, s->reg, &low); 787 _mv88e6xxx_stats_read(chip, s->reg, &low);
703 if (s->sizeof_stat == 8) 788 if (s->sizeof_stat == 8)
704 _mv88e6xxx_stats_read(ps, s->reg + 1, &high); 789 _mv88e6xxx_stats_read(chip, s->reg + 1, &high);
705 } 790 }
706 value = (((u64)high) << 16) | low; 791 value = (((u64)high) << 16) | low;
707 return value; 792 return value;
@@ -710,13 +795,13 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps,
710static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, 795static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
711 uint8_t *data) 796 uint8_t *data)
712{ 797{
713 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 798 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
714 struct mv88e6xxx_hw_stat *stat; 799 struct mv88e6xxx_hw_stat *stat;
715 int i, j; 800 int i, j;
716 801
717 for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { 802 for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
718 stat = &mv88e6xxx_hw_stats[i]; 803 stat = &mv88e6xxx_hw_stats[i];
719 if (mv88e6xxx_has_stat(ps, stat)) { 804 if (mv88e6xxx_has_stat(chip, stat)) {
720 memcpy(data + j * ETH_GSTRING_LEN, stat->string, 805 memcpy(data + j * ETH_GSTRING_LEN, stat->string,
721 ETH_GSTRING_LEN); 806 ETH_GSTRING_LEN);
722 j++; 807 j++;
@@ -726,13 +811,13 @@ static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
726 811
727static int mv88e6xxx_get_sset_count(struct dsa_switch *ds) 812static int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
728{ 813{
729 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 814 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
730 struct mv88e6xxx_hw_stat *stat; 815 struct mv88e6xxx_hw_stat *stat;
731 int i, j; 816 int i, j;
732 817
733 for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { 818 for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
734 stat = &mv88e6xxx_hw_stats[i]; 819 stat = &mv88e6xxx_hw_stats[i];
735 if (mv88e6xxx_has_stat(ps, stat)) 820 if (mv88e6xxx_has_stat(chip, stat))
736 j++; 821 j++;
737 } 822 }
738 return j; 823 return j;
@@ -741,27 +826,27 @@ static int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
741static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port, 826static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
742 uint64_t *data) 827 uint64_t *data)
743{ 828{
744 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 829 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
745 struct mv88e6xxx_hw_stat *stat; 830 struct mv88e6xxx_hw_stat *stat;
746 int ret; 831 int ret;
747 int i, j; 832 int i, j;
748 833
749 mutex_lock(&ps->smi_mutex); 834 mutex_lock(&chip->reg_lock);
750 835
751 ret = _mv88e6xxx_stats_snapshot(ps, port); 836 ret = _mv88e6xxx_stats_snapshot(chip, port);
752 if (ret < 0) { 837 if (ret < 0) {
753 mutex_unlock(&ps->smi_mutex); 838 mutex_unlock(&chip->reg_lock);
754 return; 839 return;
755 } 840 }
756 for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { 841 for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
757 stat = &mv88e6xxx_hw_stats[i]; 842 stat = &mv88e6xxx_hw_stats[i];
758 if (mv88e6xxx_has_stat(ps, stat)) { 843 if (mv88e6xxx_has_stat(chip, stat)) {
759 data[j] = _mv88e6xxx_get_ethtool_stat(ps, stat, port); 844 data[j] = _mv88e6xxx_get_ethtool_stat(chip, stat, port);
760 j++; 845 j++;
761 } 846 }
762 } 847 }
763 848
764 mutex_unlock(&ps->smi_mutex); 849 mutex_unlock(&chip->reg_lock);
765} 850}
766 851
767static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port) 852static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
@@ -772,7 +857,7 @@ static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
772static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, 857static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
773 struct ethtool_regs *regs, void *_p) 858 struct ethtool_regs *regs, void *_p)
774{ 859{
775 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 860 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
776 u16 *p = _p; 861 u16 *p = _p;
777 int i; 862 int i;
778 863
@@ -780,20 +865,20 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
780 865
781 memset(p, 0xff, 32 * sizeof(u16)); 866 memset(p, 0xff, 32 * sizeof(u16));
782 867
783 mutex_lock(&ps->smi_mutex); 868 mutex_lock(&chip->reg_lock);
784 869
785 for (i = 0; i < 32; i++) { 870 for (i = 0; i < 32; i++) {
786 int ret; 871 int ret;
787 872
788 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), i); 873 ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), i);
789 if (ret >= 0) 874 if (ret >= 0)
790 p[i] = ret; 875 p[i] = ret;
791 } 876 }
792 877
793 mutex_unlock(&ps->smi_mutex); 878 mutex_unlock(&chip->reg_lock);
794} 879}
795 880
796static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset, 881static int _mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int reg, int offset,
797 u16 mask) 882 u16 mask)
798{ 883{
799 unsigned long timeout = jiffies + HZ / 10; 884 unsigned long timeout = jiffies + HZ / 10;
@@ -801,7 +886,7 @@ static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset,
801 while (time_before(jiffies, timeout)) { 886 while (time_before(jiffies, timeout)) {
802 int ret; 887 int ret;
803 888
804 ret = _mv88e6xxx_reg_read(ps, reg, offset); 889 ret = _mv88e6xxx_reg_read(chip, reg, offset);
805 if (ret < 0) 890 if (ret < 0)
806 return ret; 891 return ret;
807 if (!(ret & mask)) 892 if (!(ret & mask))
@@ -812,48 +897,48 @@ static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset,
812 return -ETIMEDOUT; 897 return -ETIMEDOUT;
813} 898}
814 899
815static int mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, 900static int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int reg,
816 int offset, u16 mask) 901 int offset, u16 mask)
817{ 902{
818 int ret; 903 int ret;
819 904
820 mutex_lock(&ps->smi_mutex); 905 mutex_lock(&chip->reg_lock);
821 ret = _mv88e6xxx_wait(ps, reg, offset, mask); 906 ret = _mv88e6xxx_wait(chip, reg, offset, mask);
822 mutex_unlock(&ps->smi_mutex); 907 mutex_unlock(&chip->reg_lock);
823 908
824 return ret; 909 return ret;
825} 910}
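
_mv88e6xxx_wait() above is the driver's generic busy-poll: read a register until the given mask clears, bounded by a jiffies deadline of HZ / 10 (100 ms). This standalone model substitutes a poll count for the jiffies deadline and a fake register for the SMI read; it is illustrative only:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int polls;

static uint16_t fake_reg_read(void)
{
	/* pretend the busy bit clears on the third poll */
	return ++polls < 3 ? 0x8000 : 0x0000;
}

static int wait_mask_clear(uint16_t mask, int max_polls)
{
	int i;

	for (i = 0; i < max_polls; i++)
		if (!(fake_reg_read() & mask))
			return 0;
	return -ETIMEDOUT;
}

int main(void)
{
	int ret = wait_mask_clear(0x8000, 10);

	printf("wait: %d after %d polls\n", ret, polls);
	return 0;
}
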
826 911
827static int _mv88e6xxx_phy_wait(struct mv88e6xxx_priv_state *ps) 912static int mv88e6xxx_mdio_wait(struct mv88e6xxx_chip *chip)
828{ 913{
829 return _mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_SMI_OP, 914 return _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_SMI_OP,
830 GLOBAL2_SMI_OP_BUSY); 915 GLOBAL2_SMI_OP_BUSY);
831} 916}
832 917
833static int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds) 918static int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
834{ 919{
835 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 920 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
836 921
837 return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP, 922 return mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
838 GLOBAL2_EEPROM_OP_LOAD); 923 GLOBAL2_EEPROM_OP_LOAD);
839} 924}
840 925
841static int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds) 926static int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
842{ 927{
843 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 928 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
844 929
845 return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP, 930 return mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
846 GLOBAL2_EEPROM_OP_BUSY); 931 GLOBAL2_EEPROM_OP_BUSY);
847} 932}
848 933
849static int mv88e6xxx_read_eeprom_word(struct dsa_switch *ds, int addr) 934static int mv88e6xxx_read_eeprom_word(struct dsa_switch *ds, int addr)
850{ 935{
851 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 936 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
852 int ret; 937 int ret;
853 938
854 mutex_lock(&ps->eeprom_mutex); 939 mutex_lock(&chip->eeprom_mutex);
855 940
856 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP, 941 ret = mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
857 GLOBAL2_EEPROM_OP_READ | 942 GLOBAL2_EEPROM_OP_READ |
858 (addr & GLOBAL2_EEPROM_OP_ADDR_MASK)); 943 (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
859 if (ret < 0) 944 if (ret < 0)
@@ -863,18 +948,18 @@ static int mv88e6xxx_read_eeprom_word(struct dsa_switch *ds, int addr)
863 if (ret < 0) 948 if (ret < 0)
864 goto error; 949 goto error;
865 950
866 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA); 951 ret = mv88e6xxx_reg_read(chip, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
867error: 952error:
868 mutex_unlock(&ps->eeprom_mutex); 953 mutex_unlock(&chip->eeprom_mutex);
869 return ret; 954 return ret;
870} 955}
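
The EEPROM word read above is a three-step protocol under eeprom_mutex: write a read op with the address into GLOBAL2_EEPROM_OP, busy-wait on that register, then fetch the word from GLOBAL2_EEPROM_DATA. A sketch of the op-word construction; the READ bit and address mask values here are illustrative placeholders, not taken from the real register layout:

#include <stdint.h>
#include <stdio.h>

#define EEPROM_OP_READ	 0xc000	/* illustrative */
#define EEPROM_ADDR_MASK 0x00ff	/* illustrative */

int main(void)
{
	int addr = 0x12;
	uint16_t op = EEPROM_OP_READ | (addr & EEPROM_ADDR_MASK);

	printf("1. write op 0x%04x to GLOBAL2_EEPROM_OP\n", op);
	printf("2. poll GLOBAL2_EEPROM_OP until the busy bit clears\n");
	printf("3. read the word from GLOBAL2_EEPROM_DATA\n");
	return 0;
}
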
871 956
872static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds) 957static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
873{ 958{
874 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 959 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
875 960
876 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM)) 961 if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEPROM))
877 return ps->eeprom_len; 962 return chip->eeprom_len;
878 963
879 return 0; 964 return 0;
880} 965}
@@ -882,12 +967,12 @@ static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
882static int mv88e6xxx_get_eeprom(struct dsa_switch *ds, 967static int mv88e6xxx_get_eeprom(struct dsa_switch *ds,
883 struct ethtool_eeprom *eeprom, u8 *data) 968 struct ethtool_eeprom *eeprom, u8 *data)
884{ 969{
885 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 970 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
886 int offset; 971 int offset;
887 int len; 972 int len;
888 int ret; 973 int ret;
889 974
890 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM)) 975 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEPROM))
891 return -EOPNOTSUPP; 976 return -EOPNOTSUPP;
892 977
893 offset = eeprom->offset; 978 offset = eeprom->offset;
@@ -948,10 +1033,10 @@ static int mv88e6xxx_get_eeprom(struct dsa_switch *ds,
948 1033
949static int mv88e6xxx_eeprom_is_readonly(struct dsa_switch *ds) 1034static int mv88e6xxx_eeprom_is_readonly(struct dsa_switch *ds)
950{ 1035{
951 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1036 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
952 int ret; 1037 int ret;
953 1038
954 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP); 1039 ret = mv88e6xxx_reg_read(chip, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
955 if (ret < 0) 1040 if (ret < 0)
956 return ret; 1041 return ret;
957 1042
@@ -964,16 +1049,16 @@ static int mv88e6xxx_eeprom_is_readonly(struct dsa_switch *ds)
964static int mv88e6xxx_write_eeprom_word(struct dsa_switch *ds, int addr, 1049static int mv88e6xxx_write_eeprom_word(struct dsa_switch *ds, int addr,
965 u16 data) 1050 u16 data)
966{ 1051{
967 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1052 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
968 int ret; 1053 int ret;
969 1054
970 mutex_lock(&ps->eeprom_mutex); 1055 mutex_lock(&chip->eeprom_mutex);
971 1056
972 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data); 1057 ret = mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
973 if (ret < 0) 1058 if (ret < 0)
974 goto error; 1059 goto error;
975 1060
976 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP, 1061 ret = mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
977 GLOBAL2_EEPROM_OP_WRITE | 1062 GLOBAL2_EEPROM_OP_WRITE |
978 (addr & GLOBAL2_EEPROM_OP_ADDR_MASK)); 1063 (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
979 if (ret < 0) 1064 if (ret < 0)
@@ -981,19 +1066,19 @@ static int mv88e6xxx_write_eeprom_word(struct dsa_switch *ds, int addr,
981 1066
982 ret = mv88e6xxx_eeprom_busy_wait(ds); 1067 ret = mv88e6xxx_eeprom_busy_wait(ds);
983error: 1068error:
984 mutex_unlock(&ps->eeprom_mutex); 1069 mutex_unlock(&chip->eeprom_mutex);
985 return ret; 1070 return ret;
986} 1071}
987 1072
988static int mv88e6xxx_set_eeprom(struct dsa_switch *ds, 1073static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
989 struct ethtool_eeprom *eeprom, u8 *data) 1074 struct ethtool_eeprom *eeprom, u8 *data)
990{ 1075{
991 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1076 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
992 int offset; 1077 int offset;
993 int ret; 1078 int ret;
994 int len; 1079 int len;
995 1080
996 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM)) 1081 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEPROM))
997 return -EOPNOTSUPP; 1082 return -EOPNOTSUPP;
998 1083
999 if (eeprom->magic != 0xc3ec4951) 1084 if (eeprom->magic != 0xc3ec4951)
@@ -1065,67 +1150,67 @@ static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
1065 return 0; 1150 return 0;
1066} 1151}
1067 1152
1068static int _mv88e6xxx_atu_wait(struct mv88e6xxx_priv_state *ps) 1153static int _mv88e6xxx_atu_wait(struct mv88e6xxx_chip *chip)
1069{ 1154{
1070 return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_ATU_OP, 1155 return _mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_ATU_OP,
1071 GLOBAL_ATU_OP_BUSY); 1156 GLOBAL_ATU_OP_BUSY);
1072} 1157}
1073 1158
1074static int _mv88e6xxx_phy_read_indirect(struct mv88e6xxx_priv_state *ps, 1159static int mv88e6xxx_mdio_read_indirect(struct mv88e6xxx_chip *chip,
1075 int addr, int regnum) 1160 int addr, int regnum)
1076{ 1161{
1077 int ret; 1162 int ret;
1078 1163
1079 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP, 1164 ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_OP,
1080 GLOBAL2_SMI_OP_22_READ | (addr << 5) | 1165 GLOBAL2_SMI_OP_22_READ | (addr << 5) |
1081 regnum); 1166 regnum);
1082 if (ret < 0) 1167 if (ret < 0)
1083 return ret; 1168 return ret;
1084 1169
1085 ret = _mv88e6xxx_phy_wait(ps); 1170 ret = mv88e6xxx_mdio_wait(chip);
1086 if (ret < 0) 1171 if (ret < 0)
1087 return ret; 1172 return ret;
1088 1173
1089 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA); 1174 ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL2, GLOBAL2_SMI_DATA);
1090 1175
1091 return ret; 1176 return ret;
1092} 1177}
1093 1178
1094static int _mv88e6xxx_phy_write_indirect(struct mv88e6xxx_priv_state *ps, 1179static int mv88e6xxx_mdio_write_indirect(struct mv88e6xxx_chip *chip,
1095 int addr, int regnum, u16 val) 1180 int addr, int regnum, u16 val)
1096{ 1181{
1097 int ret; 1182 int ret;
1098 1183
1099 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA, val); 1184 ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
1100 if (ret < 0) 1185 if (ret < 0)
1101 return ret; 1186 return ret;
1102 1187
1103 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP, 1188 ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_OP,
1104 GLOBAL2_SMI_OP_22_WRITE | (addr << 5) | 1189 GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
1105 regnum); 1190 regnum);
1106 1191
1107 return _mv88e6xxx_phy_wait(ps); 1192 return mv88e6xxx_mdio_wait(chip);
1108} 1193}
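
The indirect MDIO helpers above funnel clause-22 PHY accesses through GLOBAL2_SMI_OP and GLOBAL2_SMI_DATA, packing the PHY address into bits 9:5 of the op word and the register number into bits 4:0 — exactly the (addr << 5) | regnum seen in both functions. A runnable check of that field packing:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int addr = 3, regnum = 16;	/* PHY 3, register 16 */
	uint16_t op = (addr << 5) | regnum;

	printf("SMI op fields: 0x%04x\n", op);	/* 0x0070 */
	printf("addr back out: %d, regnum: %d\n",
	       (op >> 5) & 0x1f, op & 0x1f);
	return 0;
}
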
1109 1194
1110static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, 1195static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
1111 struct ethtool_eee *e) 1196 struct ethtool_eee *e)
1112{ 1197{
1113 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1198 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
1114 int reg; 1199 int reg;
1115 1200
1116 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE)) 1201 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE))
1117 return -EOPNOTSUPP; 1202 return -EOPNOTSUPP;
1118 1203
1119 mutex_lock(&ps->smi_mutex); 1204 mutex_lock(&chip->reg_lock);
1120 1205
1121 reg = _mv88e6xxx_phy_read_indirect(ps, port, 16); 1206 reg = mv88e6xxx_mdio_read_indirect(chip, port, 16);
1122 if (reg < 0) 1207 if (reg < 0)
1123 goto out; 1208 goto out;
1124 1209
1125 e->eee_enabled = !!(reg & 0x0200); 1210 e->eee_enabled = !!(reg & 0x0200);
1126 e->tx_lpi_enabled = !!(reg & 0x0100); 1211 e->tx_lpi_enabled = !!(reg & 0x0100);
1127 1212
1128 reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS); 1213 reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS);
1129 if (reg < 0) 1214 if (reg < 0)
1130 goto out; 1215 goto out;
1131 1216
@@ -1133,23 +1218,23 @@ static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
1133 reg = 0; 1218 reg = 0;
1134 1219
1135out: 1220out:
1136 mutex_unlock(&ps->smi_mutex); 1221 mutex_unlock(&chip->reg_lock);
1137 return reg; 1222 return reg;
1138} 1223}
1139 1224
1140static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, 1225static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
1141 struct phy_device *phydev, struct ethtool_eee *e) 1226 struct phy_device *phydev, struct ethtool_eee *e)
1142{ 1227{
1143 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1228 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
1144 int reg; 1229 int reg;
1145 int ret; 1230 int ret;
1146 1231
1147 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE)) 1232 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE))
1148 return -EOPNOTSUPP; 1233 return -EOPNOTSUPP;
1149 1234
1150 mutex_lock(&ps->smi_mutex); 1235 mutex_lock(&chip->reg_lock);
1151 1236
1152 ret = _mv88e6xxx_phy_read_indirect(ps, port, 16); 1237 ret = mv88e6xxx_mdio_read_indirect(chip, port, 16);
1153 if (ret < 0) 1238 if (ret < 0)
1154 goto out; 1239 goto out;
1155 1240
@@ -1159,28 +1244,29 @@ static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
1159 if (e->tx_lpi_enabled) 1244 if (e->tx_lpi_enabled)
1160 reg |= 0x0100; 1245 reg |= 0x0100;
1161 1246
1162 ret = _mv88e6xxx_phy_write_indirect(ps, port, 16, reg); 1247 ret = mv88e6xxx_mdio_write_indirect(chip, port, 16, reg);
1163out: 1248out:
1164 mutex_unlock(&ps->smi_mutex); 1249 mutex_unlock(&chip->reg_lock);
1165 1250
1166 return ret; 1251 return ret;
1167} 1252}
1168 1253
1169static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_priv_state *ps, u16 fid, u16 cmd) 1254static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_chip *chip, u16 fid, u16 cmd)
1170{ 1255{
1171 int ret; 1256 int ret;
1172 1257
1173 if (mv88e6xxx_has_fid_reg(ps)) { 1258 if (mv88e6xxx_has_fid_reg(chip)) {
1174 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_FID, fid); 1259 ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_FID,
1260 fid);
1175 if (ret < 0) 1261 if (ret < 0)
1176 return ret; 1262 return ret;
1177 } else if (mv88e6xxx_num_databases(ps) == 256) { 1263 } else if (mv88e6xxx_num_databases(chip) == 256) {
1178 /* ATU DBNum[7:4] are located in ATU Control 15:12 */ 1264 /* ATU DBNum[7:4] are located in ATU Control 15:12 */
1179 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL); 1265 ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL);
1180 if (ret < 0) 1266 if (ret < 0)
1181 return ret; 1267 return ret;
1182 1268
1183 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL, 1269 ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL,
1184 (ret & 0xfff) | 1270 (ret & 0xfff) |
1185 ((fid << 8) & 0xf000)); 1271 ((fid << 8) & 0xf000));
1186 if (ret < 0) 1272 if (ret < 0)
@@ -1190,14 +1276,14 @@ static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_priv_state *ps, u16 fid, u16 cmd)
1190 cmd |= fid & 0xf; 1276 cmd |= fid & 0xf;
1191 } 1277 }
1192 1278
1193 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_OP, cmd); 1279 ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
1194 if (ret < 0) 1280 if (ret < 0)
1195 return ret; 1281 return ret;
1196 1282
1197 return _mv88e6xxx_atu_wait(ps); 1283 return _mv88e6xxx_atu_wait(chip);
1198} 1284}
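
On chips with 256 databases and no dedicated FID register, _mv88e6xxx_atu_cmd() above splits the FID as the comment describes: DBNum[7:4] land in ATU Control bits 15:12 via (fid << 8) & 0xf000, and DBNum[3:0] ride in the low nibble of the ATU op word. A runnable check of that split, with an illustrative prior register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t fid = 0xa5;
	uint16_t atu_ctrl = 0x0123;	/* illustrative prior contents */
	uint16_t ctrl = (atu_ctrl & 0xfff) | ((fid << 8) & 0xf000);
	uint16_t cmd_bits = fid & 0xf;

	printf("ATU control: 0x%04x (DBNum[7:4] = %x)\n",
	       ctrl, (ctrl >> 12) & 0xf);		/* 0xa123, a */
	printf("ATU op low nibble: 0x%x (DBNum[3:0])\n", cmd_bits); /* 5 */
	return 0;
}
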
1199 1285
1200static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_priv_state *ps, 1286static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_chip *chip,
1201 struct mv88e6xxx_atu_entry *entry) 1287 struct mv88e6xxx_atu_entry *entry)
1202{ 1288{
1203 u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK; 1289 u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
@@ -1217,21 +1303,21 @@ static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_priv_state *ps,
1217 data |= (entry->portv_trunkid << shift) & mask; 1303 data |= (entry->portv_trunkid << shift) & mask;
1218 } 1304 }
1219 1305
1220 return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_DATA, data); 1306 return _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_DATA, data);
1221} 1307}
1222 1308
1223static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_priv_state *ps, 1309static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_chip *chip,
1224 struct mv88e6xxx_atu_entry *entry, 1310 struct mv88e6xxx_atu_entry *entry,
1225 bool static_too) 1311 bool static_too)
1226{ 1312{
1227 int op; 1313 int op;
1228 int err; 1314 int err;
1229 1315
1230 err = _mv88e6xxx_atu_wait(ps); 1316 err = _mv88e6xxx_atu_wait(chip);
1231 if (err) 1317 if (err)
1232 return err; 1318 return err;
1233 1319
1234 err = _mv88e6xxx_atu_data_write(ps, entry); 1320 err = _mv88e6xxx_atu_data_write(chip, entry);
1235 if (err) 1321 if (err)
1236 return err; 1322 return err;
1237 1323
@@ -1243,10 +1329,10 @@ static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_priv_state *ps,
1243 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC; 1329 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
1244 } 1330 }
1245 1331
1246 return _mv88e6xxx_atu_cmd(ps, entry->fid, op); 1332 return _mv88e6xxx_atu_cmd(chip, entry->fid, op);
1247} 1333}
1248 1334
1249static int _mv88e6xxx_atu_flush(struct mv88e6xxx_priv_state *ps, 1335static int _mv88e6xxx_atu_flush(struct mv88e6xxx_chip *chip,
1250 u16 fid, bool static_too) 1336 u16 fid, bool static_too)
1251{ 1337{
1252 struct mv88e6xxx_atu_entry entry = { 1338 struct mv88e6xxx_atu_entry entry = {
@@ -1254,10 +1340,10 @@ static int _mv88e6xxx_atu_flush(struct mv88e6xxx_priv_state *ps,
1254 .state = 0, /* EntryState bits must be 0 */ 1340 .state = 0, /* EntryState bits must be 0 */
1255 }; 1341 };
1256 1342
1257 return _mv88e6xxx_atu_flush_move(ps, &entry, static_too); 1343 return _mv88e6xxx_atu_flush_move(chip, &entry, static_too);
1258} 1344}
1259 1345
1260static int _mv88e6xxx_atu_move(struct mv88e6xxx_priv_state *ps, u16 fid, 1346static int _mv88e6xxx_atu_move(struct mv88e6xxx_chip *chip, u16 fid,
1261 int from_port, int to_port, bool static_too) 1347 int from_port, int to_port, bool static_too)
1262{ 1348{
1263 struct mv88e6xxx_atu_entry entry = { 1349 struct mv88e6xxx_atu_entry entry = {
@@ -1272,14 +1358,14 @@ static int _mv88e6xxx_atu_move(struct mv88e6xxx_priv_state *ps, u16 fid,
1272 entry.portv_trunkid = (to_port & 0x0f) << 4; 1358 entry.portv_trunkid = (to_port & 0x0f) << 4;
1273 entry.portv_trunkid |= from_port & 0x0f; 1359 entry.portv_trunkid |= from_port & 0x0f;
1274 1360
1275 return _mv88e6xxx_atu_flush_move(ps, &entry, static_too); 1361 return _mv88e6xxx_atu_flush_move(chip, &entry, static_too);
1276} 1362}
1277 1363
1278static int _mv88e6xxx_atu_remove(struct mv88e6xxx_priv_state *ps, u16 fid, 1364static int _mv88e6xxx_atu_remove(struct mv88e6xxx_chip *chip, u16 fid,
1279 int port, bool static_too) 1365 int port, bool static_too)
1280{ 1366{
1281 /* Destination port 0xF means remove the entries */ 1367 /* Destination port 0xF means remove the entries */
1282 return _mv88e6xxx_atu_move(ps, fid, port, 0x0f, static_too); 1368 return _mv88e6xxx_atu_move(chip, fid, port, 0x0f, static_too);
1283} 1369}
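
_mv88e6xxx_atu_move() above packs both ports into the 8-bit portv_trunkid field — destination in the high nibble, source in the low one — and, per the comment, destination 0xf means "remove", which is all _mv88e6xxx_atu_remove() adds. A quick runnable check of the encoding:

#include <stdint.h>
#include <stdio.h>

static uint8_t atu_move_field(int from_port, int to_port)
{
	return ((to_port & 0x0f) << 4) | (from_port & 0x0f);
}

int main(void)
{
	printf("move 1 -> 3: 0x%02x\n", atu_move_field(1, 3));	  /* 0x31 */
	printf("remove on 5: 0x%02x\n", atu_move_field(5, 0x0f)); /* 0xf5 */
	return 0;
}
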
1284 1370
1285static const char * const mv88e6xxx_port_state_names[] = { 1371static const char * const mv88e6xxx_port_state_names[] = {
@@ -1289,14 +1375,14 @@ static const char * const mv88e6xxx_port_state_names[] = {
1289 [PORT_CONTROL_STATE_FORWARDING] = "Forwarding", 1375 [PORT_CONTROL_STATE_FORWARDING] = "Forwarding",
1290}; 1376};
1291 1377
1292static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port, 1378static int _mv88e6xxx_port_state(struct mv88e6xxx_chip *chip, int port,
1293 u8 state) 1379 u8 state)
1294{ 1380{
1295 struct dsa_switch *ds = ps->ds; 1381 struct dsa_switch *ds = chip->ds;
1296 int reg, ret = 0; 1382 int reg, ret = 0;
1297 u8 oldstate; 1383 u8 oldstate;
1298 1384
1299 reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL); 1385 reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL);
1300 if (reg < 0) 1386 if (reg < 0)
1301 return reg; 1387 return reg;
1302 1388
@@ -1308,21 +1394,21 @@ static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port,
1308 * Blocking or Listening state. 1394 * Blocking or Listening state.
1309 */ 1395 */
1310 if ((oldstate == PORT_CONTROL_STATE_LEARNING || 1396 if ((oldstate == PORT_CONTROL_STATE_LEARNING ||
1311 oldstate == PORT_CONTROL_STATE_FORWARDING) 1397 oldstate == PORT_CONTROL_STATE_FORWARDING) &&
1312 && (state == PORT_CONTROL_STATE_DISABLED || 1398 (state == PORT_CONTROL_STATE_DISABLED ||
1313 state == PORT_CONTROL_STATE_BLOCKING)) { 1399 state == PORT_CONTROL_STATE_BLOCKING)) {
1314 ret = _mv88e6xxx_atu_remove(ps, 0, port, false); 1400 ret = _mv88e6xxx_atu_remove(chip, 0, port, false);
1315 if (ret) 1401 if (ret)
1316 return ret; 1402 return ret;
1317 } 1403 }
1318 1404
1319 reg = (reg & ~PORT_CONTROL_STATE_MASK) | state; 1405 reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
1320 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL, 1406 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL,
1321 reg); 1407 reg);
1322 if (ret) 1408 if (ret)
1323 return ret; 1409 return ret;
1324 1410
1325 netdev_dbg(ds->ports[port], "PortState %s (was %s)\n", 1411 netdev_dbg(ds->ports[port].netdev, "PortState %s (was %s)\n",
1326 mv88e6xxx_port_state_names[state], 1412 mv88e6xxx_port_state_names[state],
1327 mv88e6xxx_port_state_names[oldstate]); 1413 mv88e6xxx_port_state_names[oldstate]);
1328 } 1414 }
@@ -1330,12 +1416,11 @@ static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port,
1330 return ret; 1416 return ret;
1331} 1417}
1332 1418
1333static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps, 1419static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
1334 int port)
1335{ 1420{
1336 struct net_device *bridge = ps->ports[port].bridge_dev; 1421 struct net_device *bridge = chip->ports[port].bridge_dev;
1337 const u16 mask = (1 << ps->info->num_ports) - 1; 1422 const u16 mask = (1 << chip->info->num_ports) - 1;
1338 struct dsa_switch *ds = ps->ds; 1423 struct dsa_switch *ds = chip->ds;
1339 u16 output_ports = 0; 1424 u16 output_ports = 0;
1340 int reg; 1425 int reg;
1341 int i; 1426 int i;
@@ -1344,9 +1429,9 @@ static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps,
1344 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { 1429 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
1345 output_ports = mask; 1430 output_ports = mask;
1346 } else { 1431 } else {
1347 for (i = 0; i < ps->info->num_ports; ++i) { 1432 for (i = 0; i < chip->info->num_ports; ++i) {
1348 /* allow sending frames to every group member */ 1433 /* allow sending frames to every group member */
1349 if (bridge && ps->ports[i].bridge_dev == bridge) 1434 if (bridge && chip->ports[i].bridge_dev == bridge)
1350 output_ports |= BIT(i); 1435 output_ports |= BIT(i);
1351 1436
1352 /* allow sending frames to CPU port and DSA link(s) */ 1437 /* allow sending frames to CPU port and DSA link(s) */
@@ -1358,24 +1443,24 @@ static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps,
1358 /* prevent frames from going back out of the port they came in on */ 1443 /* prevent frames from going back out of the port they came in on */
1359 output_ports &= ~BIT(port); 1444 output_ports &= ~BIT(port);
1360 1445
1361 reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN); 1446 reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_BASE_VLAN);
1362 if (reg < 0) 1447 if (reg < 0)
1363 return reg; 1448 return reg;
1364 1449
1365 reg &= ~mask; 1450 reg &= ~mask;
1366 reg |= output_ports & mask; 1451 reg |= output_ports & mask;
1367 1452
1368 return _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN, reg); 1453 return _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_BASE_VLAN, reg);
1369} 1454}
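
_mv88e6xxx_port_based_vlan_map() above builds a port's egress mask from bridge membership — every member of the same bridge plus the CPU and DSA links — and then knocks the port's own bit out so frames never reflect. A standalone model of that computation for a hypothetical single-bridge, 7-port setup (the CPU/DSA short-circuit for the port itself is omitted):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int num_ports = 7, port = 1;
	int bridged[7]    = { 1, 1, 1, 0, 0, 0, 0 };	/* ports 0-2 bridged */
	int cpu_or_dsa[7] = { 0, 0, 0, 0, 0, 0, 1 };	/* port 6 is CPU */
	uint16_t mask = (1 << num_ports) - 1;
	uint16_t output_ports = 0;
	int i;

	for (i = 0; i < num_ports; i++)
		if ((bridged[i] && bridged[port]) || cpu_or_dsa[i])
			output_ports |= 1 << i;

	output_ports &= ~(1 << port);	/* no reflection */
	printf("port %d egress mask: 0x%02x\n", port, output_ports & mask);
	return 0;	/* prints 0x45: ports 0, 2 and 6 */
}
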
1370 1455
1371static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, 1456static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
1372 u8 state) 1457 u8 state)
1373{ 1458{
1374 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1459 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
1375 int stp_state; 1460 int stp_state;
1376 int err; 1461 int err;
1377 1462
1378 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_PORTSTATE)) 1463 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_PORTSTATE))
1379 return; 1464 return;
1380 1465
1381 switch (state) { 1466 switch (state) {
@@ -1395,23 +1480,24 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
1395 break; 1480 break;
1396 } 1481 }
1397 1482
1398 mutex_lock(&ps->smi_mutex); 1483 mutex_lock(&chip->reg_lock);
1399 err = _mv88e6xxx_port_state(ps, port, stp_state); 1484 err = _mv88e6xxx_port_state(chip, port, stp_state);
1400 mutex_unlock(&ps->smi_mutex); 1485 mutex_unlock(&chip->reg_lock);
1401 1486
1402 if (err) 1487 if (err)
1403 netdev_err(ds->ports[port], "failed to update state to %s\n", 1488 netdev_err(ds->ports[port].netdev,
1489 "failed to update state to %s\n",
1404 mv88e6xxx_port_state_names[stp_state]); 1490 mv88e6xxx_port_state_names[stp_state]);
1405} 1491}
1406 1492
1407static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port, 1493static int _mv88e6xxx_port_pvid(struct mv88e6xxx_chip *chip, int port,
1408 u16 *new, u16 *old) 1494 u16 *new, u16 *old)
1409{ 1495{
1410 struct dsa_switch *ds = ps->ds; 1496 struct dsa_switch *ds = chip->ds;
1411 u16 pvid; 1497 u16 pvid;
1412 int ret; 1498 int ret;
1413 1499
1414 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_DEFAULT_VLAN); 1500 ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_DEFAULT_VLAN);
1415 if (ret < 0) 1501 if (ret < 0)
1416 return ret; 1502 return ret;
1417 1503
@@ -1421,13 +1507,13 @@ static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port,
1421 ret &= ~PORT_DEFAULT_VLAN_MASK; 1507 ret &= ~PORT_DEFAULT_VLAN_MASK;
1422 ret |= *new & PORT_DEFAULT_VLAN_MASK; 1508 ret |= *new & PORT_DEFAULT_VLAN_MASK;
1423 1509
1424 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), 1510 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
1425 PORT_DEFAULT_VLAN, ret); 1511 PORT_DEFAULT_VLAN, ret);
1426 if (ret < 0) 1512 if (ret < 0)
1427 return ret; 1513 return ret;
1428 1514
1429 netdev_dbg(ds->ports[port], "DefaultVID %d (was %d)\n", *new, 1515 netdev_dbg(ds->ports[port].netdev,
1430 pvid); 1516 "DefaultVID %d (was %d)\n", *new, pvid);
1431 } 1517 }
1432 1518
1433 if (old) 1519 if (old)
@@ -1436,47 +1522,47 @@ static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port,
1436 return 0; 1522 return 0;
1437} 1523}
1438 1524
1439static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_priv_state *ps, 1525static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_chip *chip,
1440 int port, u16 *pvid) 1526 int port, u16 *pvid)
1441{ 1527{
1442 return _mv88e6xxx_port_pvid(ps, port, NULL, pvid); 1528 return _mv88e6xxx_port_pvid(chip, port, NULL, pvid);
1443} 1529}
1444 1530
1445static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_priv_state *ps, 1531static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_chip *chip,
1446 int port, u16 pvid) 1532 int port, u16 pvid)
1447{ 1533{
1448 return _mv88e6xxx_port_pvid(ps, port, &pvid, NULL); 1534 return _mv88e6xxx_port_pvid(chip, port, &pvid, NULL);
1449} 1535}
1450 1536
1451static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_priv_state *ps) 1537static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_chip *chip)
1452{ 1538{
1453 return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_VTU_OP, 1539 return _mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_VTU_OP,
1454 GLOBAL_VTU_OP_BUSY); 1540 GLOBAL_VTU_OP_BUSY);
1455} 1541}
1456 1542
1457static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_priv_state *ps, u16 op) 1543static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_chip *chip, u16 op)
1458{ 1544{
1459 int ret; 1545 int ret;
1460 1546
1461 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_OP, op); 1547 ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_OP, op);
1462 if (ret < 0) 1548 if (ret < 0)
1463 return ret; 1549 return ret;
1464 1550
1465 return _mv88e6xxx_vtu_wait(ps); 1551 return _mv88e6xxx_vtu_wait(chip);
1466} 1552}
1467 1553
1468static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_priv_state *ps) 1554static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_chip *chip)
1469{ 1555{
1470 int ret; 1556 int ret;
1471 1557
1472 ret = _mv88e6xxx_vtu_wait(ps); 1558 ret = _mv88e6xxx_vtu_wait(chip);
1473 if (ret < 0) 1559 if (ret < 0)
1474 return ret; 1560 return ret;
1475 1561
1476 return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_FLUSH_ALL); 1562 return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_FLUSH_ALL);
1477} 1563}
1478 1564
1479static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps, 1565static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
1480 struct mv88e6xxx_vtu_stu_entry *entry, 1566 struct mv88e6xxx_vtu_stu_entry *entry,
1481 unsigned int nibble_offset) 1567 unsigned int nibble_offset)
1482{ 1568{
@@ -1485,7 +1571,7 @@ static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps,
1485 int ret; 1571 int ret;
1486 1572
1487 for (i = 0; i < 3; ++i) { 1573 for (i = 0; i < 3; ++i) {
1488 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 1574 ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
1489 GLOBAL_VTU_DATA_0_3 + i); 1575 GLOBAL_VTU_DATA_0_3 + i);
1490 if (ret < 0) 1576 if (ret < 0)
1491 return ret; 1577 return ret;
@@ -1493,7 +1579,7 @@ static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps,
1493 regs[i] = ret; 1579 regs[i] = ret;
1494 } 1580 }
1495 1581
1496 for (i = 0; i < ps->info->num_ports; ++i) { 1582 for (i = 0; i < chip->info->num_ports; ++i) {
1497 unsigned int shift = (i % 4) * 4 + nibble_offset; 1583 unsigned int shift = (i % 4) * 4 + nibble_offset;
1498 u16 reg = regs[i / 4]; 1584 u16 reg = regs[i / 4];
1499 1585
@@ -1503,19 +1589,19 @@ static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps,
1503 return 0; 1589 return 0;
1504} 1590}
1505 1591
1506static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_priv_state *ps, 1592static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_chip *chip,
1507 struct mv88e6xxx_vtu_stu_entry *entry) 1593 struct mv88e6xxx_vtu_stu_entry *entry)
1508{ 1594{
1509 return _mv88e6xxx_vtu_stu_data_read(ps, entry, 0); 1595 return _mv88e6xxx_vtu_stu_data_read(chip, entry, 0);
1510} 1596}
1511 1597
1512static int mv88e6xxx_stu_data_read(struct mv88e6xxx_priv_state *ps, 1598static int mv88e6xxx_stu_data_read(struct mv88e6xxx_chip *chip,
1513 struct mv88e6xxx_vtu_stu_entry *entry) 1599 struct mv88e6xxx_vtu_stu_entry *entry)
1514{ 1600{
1515 return _mv88e6xxx_vtu_stu_data_read(ps, entry, 2); 1601 return _mv88e6xxx_vtu_stu_data_read(chip, entry, 2);
1516} 1602}
1517 1603
1518static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps, 1604static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_chip *chip,
1519 struct mv88e6xxx_vtu_stu_entry *entry, 1605 struct mv88e6xxx_vtu_stu_entry *entry,
1520 unsigned int nibble_offset) 1606 unsigned int nibble_offset)
1521{ 1607{
@@ -1523,7 +1609,7 @@ static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps,
1523 int i; 1609 int i;
1524 int ret; 1610 int ret;
1525 1611
1526 for (i = 0; i < ps->info->num_ports; ++i) { 1612 for (i = 0; i < chip->info->num_ports; ++i) {
1527 unsigned int shift = (i % 4) * 4 + nibble_offset; 1613 unsigned int shift = (i % 4) * 4 + nibble_offset;
1528 u8 data = entry->data[i]; 1614 u8 data = entry->data[i];
1529 1615
@@ -1531,7 +1617,7 @@ static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps,
1531 } 1617 }
1532 1618
1533 for (i = 0; i < 3; ++i) { 1619 for (i = 0; i < 3; ++i) {
1534 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 1620 ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL,
1535 GLOBAL_VTU_DATA_0_3 + i, regs[i]); 1621 GLOBAL_VTU_DATA_0_3 + i, regs[i]);
1536 if (ret < 0) 1622 if (ret < 0)
1537 return ret; 1623 return ret;
@@ -1540,39 +1626,39 @@ static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps,
1540 return 0; 1626 return 0;
1541} 1627}
1542 1628
1543static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_priv_state *ps, 1629static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_chip *chip,
1544 struct mv88e6xxx_vtu_stu_entry *entry) 1630 struct mv88e6xxx_vtu_stu_entry *entry)
1545{ 1631{
1546 return _mv88e6xxx_vtu_stu_data_write(ps, entry, 0); 1632 return _mv88e6xxx_vtu_stu_data_write(chip, entry, 0);
1547} 1633}
1548 1634
1549static int mv88e6xxx_stu_data_write(struct mv88e6xxx_priv_state *ps, 1635static int mv88e6xxx_stu_data_write(struct mv88e6xxx_chip *chip,
1550 struct mv88e6xxx_vtu_stu_entry *entry) 1636 struct mv88e6xxx_vtu_stu_entry *entry)
1551{ 1637{
1552 return _mv88e6xxx_vtu_stu_data_write(ps, entry, 2); 1638 return _mv88e6xxx_vtu_stu_data_write(chip, entry, 2);
1553} 1639}
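
The VTU/STU data helpers above pack one 4-bit slot per port, four ports per 16-bit GLOBAL_VTU_DATA register: the low two bits of each slot carry the VTU member tag (nibble_offset 0) and the upper two the STU port state (nibble_offset 2), hence the shared shift of (i % 4) * 4 + nibble_offset. A runnable check of that arithmetic:

#include <stdio.h>

int main(void)
{
	int port, nibble_offset;

	for (nibble_offset = 0; nibble_offset <= 2; nibble_offset += 2)
		for (port = 0; port < 8; port++)
			printf("%s port %d -> data reg %d, shift %d\n",
			       nibble_offset ? "STU" : "VTU",
			       port, port / 4,
			       (port % 4) * 4 + nibble_offset);
	return 0;
}
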
1554 1640
1555static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_priv_state *ps, u16 vid) 1641static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_chip *chip, u16 vid)
1556{ 1642{
1557 return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, 1643 return _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID,
1558 vid & GLOBAL_VTU_VID_MASK); 1644 vid & GLOBAL_VTU_VID_MASK);
1559} 1645}
1560 1646
1561static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps, 1647static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_chip *chip,
1562 struct mv88e6xxx_vtu_stu_entry *entry) 1648 struct mv88e6xxx_vtu_stu_entry *entry)
1563{ 1649{
1564 struct mv88e6xxx_vtu_stu_entry next = { 0 }; 1650 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1565 int ret; 1651 int ret;
1566 1652
1567 ret = _mv88e6xxx_vtu_wait(ps); 1653 ret = _mv88e6xxx_vtu_wait(chip);
1568 if (ret < 0) 1654 if (ret < 0)
1569 return ret; 1655 return ret;
1570 1656
1571 ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_VTU_GET_NEXT); 1657 ret = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_VTU_GET_NEXT);
1572 if (ret < 0) 1658 if (ret < 0)
1573 return ret; 1659 return ret;
1574 1660
1575 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID); 1661 ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_VID);
1576 if (ret < 0) 1662 if (ret < 0)
1577 return ret; 1663 return ret;
1578 1664
@@ -1580,22 +1666,22 @@ static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps,
1580 next.valid = !!(ret & GLOBAL_VTU_VID_VALID); 1666 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1581 1667
1582 if (next.valid) { 1668 if (next.valid) {
1583 ret = mv88e6xxx_vtu_data_read(ps, &next); 1669 ret = mv88e6xxx_vtu_data_read(chip, &next);
1584 if (ret < 0) 1670 if (ret < 0)
1585 return ret; 1671 return ret;
1586 1672
1587 if (mv88e6xxx_has_fid_reg(ps)) { 1673 if (mv88e6xxx_has_fid_reg(chip)) {
1588 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 1674 ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
1589 GLOBAL_VTU_FID); 1675 GLOBAL_VTU_FID);
1590 if (ret < 0) 1676 if (ret < 0)
1591 return ret; 1677 return ret;
1592 1678
1593 next.fid = ret & GLOBAL_VTU_FID_MASK; 1679 next.fid = ret & GLOBAL_VTU_FID_MASK;
1594 } else if (mv88e6xxx_num_databases(ps) == 256) { 1680 } else if (mv88e6xxx_num_databases(chip) == 256) {
1595 /* VTU DBNum[7:4] are located in VTU Operation 11:8, and 1681 /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
1596 * VTU DBNum[3:0] are located in VTU Operation 3:0 1682 * VTU DBNum[3:0] are located in VTU Operation 3:0
1597 */ 1683 */
1598 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 1684 ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
1599 GLOBAL_VTU_OP); 1685 GLOBAL_VTU_OP);
1600 if (ret < 0) 1686 if (ret < 0)
1601 return ret; 1687 return ret;
@@ -1604,8 +1690,8 @@ static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps,
1604 next.fid |= ret & 0xf; 1690 next.fid |= ret & 0xf;
1605 } 1691 }
1606 1692
1607 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_STU)) { 1693 if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) {
1608 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 1694 ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
1609 GLOBAL_VTU_SID); 1695 GLOBAL_VTU_SID);
1610 if (ret < 0) 1696 if (ret < 0)
1611 return ret; 1697 return ret;
@@ -1622,26 +1708,26 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
1622 struct switchdev_obj_port_vlan *vlan, 1708 struct switchdev_obj_port_vlan *vlan,
1623 int (*cb)(struct switchdev_obj *obj)) 1709 int (*cb)(struct switchdev_obj *obj))
1624{ 1710{
1625 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1711 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
1626 struct mv88e6xxx_vtu_stu_entry next; 1712 struct mv88e6xxx_vtu_stu_entry next;
1627 u16 pvid; 1713 u16 pvid;
1628 int err; 1714 int err;
1629 1715
1630 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU)) 1716 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
1631 return -EOPNOTSUPP; 1717 return -EOPNOTSUPP;
1632 1718
1633 mutex_lock(&ps->smi_mutex); 1719 mutex_lock(&chip->reg_lock);
1634 1720
1635 err = _mv88e6xxx_port_pvid_get(ps, port, &pvid); 1721 err = _mv88e6xxx_port_pvid_get(chip, port, &pvid);
1636 if (err) 1722 if (err)
1637 goto unlock; 1723 goto unlock;
1638 1724
1639 err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK); 1725 err = _mv88e6xxx_vtu_vid_write(chip, GLOBAL_VTU_VID_MASK);
1640 if (err) 1726 if (err)
1641 goto unlock; 1727 goto unlock;
1642 1728
1643 do { 1729 do {
1644 err = _mv88e6xxx_vtu_getnext(ps, &next); 1730 err = _mv88e6xxx_vtu_getnext(chip, &next);
1645 if (err) 1731 if (err)
1646 break; 1732 break;
1647 1733
@@ -1652,7 +1738,8 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
1652 continue; 1738 continue;
1653 1739
1654 /* reinit and dump this VLAN obj */ 1740 /* reinit and dump this VLAN obj */
1655 vlan->vid_begin = vlan->vid_end = next.vid; 1741 vlan->vid_begin = next.vid;
1742 vlan->vid_end = next.vid;
1656 vlan->flags = 0; 1743 vlan->flags = 0;
1657 1744
1658 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED) 1745 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
@@ -1667,19 +1754,19 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
1667 } while (next.vid < GLOBAL_VTU_VID_MASK); 1754 } while (next.vid < GLOBAL_VTU_VID_MASK);
1668 1755
1669unlock: 1756unlock:
1670 mutex_unlock(&ps->smi_mutex); 1757 mutex_unlock(&chip->reg_lock);
1671 1758
1672 return err; 1759 return err;
1673} 1760}
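
mv88e6xxx_port_vlan_dump() above walks the VTU with the GetNext idiom: seed the VID register with the all-ones GLOBAL_VTU_VID_MASK so the first GetNext wraps to the lowest programmed entry, then loop until the returned VID reaches the mask again. A tiny standalone model of that walk over a sorted table of hypothetical VLANs:

#include <stdio.h>

#define VID_MASK 0xfff	/* models GLOBAL_VTU_VID_MASK */

static const int vtu[] = { 1, 100, 200 };	/* programmed VLANs, sorted */

static int vtu_getnext(int vid)
{
	unsigned int i;

	if (vid == VID_MASK)	/* all-ones seed wraps to the first entry */
		vid = -1;
	for (i = 0; i < sizeof(vtu) / sizeof(vtu[0]); i++)
		if (vtu[i] > vid)
			return vtu[i];
	return VID_MASK;	/* hardware reports the mask when the walk ends */
}

int main(void)
{
	int vid = VID_MASK;

	do {
		vid = vtu_getnext(vid);
		if (vid < VID_MASK)
			printf("vid %d\n", vid);
	} while (vid < VID_MASK);
	return 0;
}
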
1674 1761
1675static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps, 1762static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
1676 struct mv88e6xxx_vtu_stu_entry *entry) 1763 struct mv88e6xxx_vtu_stu_entry *entry)
1677{ 1764{
1678 u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE; 1765 u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE;
1679 u16 reg = 0; 1766 u16 reg = 0;
1680 int ret; 1767 int ret;
1681 1768
1682 ret = _mv88e6xxx_vtu_wait(ps); 1769 ret = _mv88e6xxx_vtu_wait(chip);
1683 if (ret < 0) 1770 if (ret < 0)
1684 return ret; 1771 return ret;
1685 1772
@@ -1687,23 +1774,25 @@ static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps,
1687 goto loadpurge; 1774 goto loadpurge;
1688 1775
1689 /* Write port member tags */ 1776 /* Write port member tags */
1690 ret = mv88e6xxx_vtu_data_write(ps, entry); 1777 ret = mv88e6xxx_vtu_data_write(chip, entry);
1691 if (ret < 0) 1778 if (ret < 0)
1692 return ret; 1779 return ret;
1693 1780
1694 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_STU)) { 1781 if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) {
1695 reg = entry->sid & GLOBAL_VTU_SID_MASK; 1782 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1696 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg); 1783 ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID,
1784 reg);
1697 if (ret < 0) 1785 if (ret < 0)
1698 return ret; 1786 return ret;
1699 } 1787 }
1700 1788
1701 if (mv88e6xxx_has_fid_reg(ps)) { 1789 if (mv88e6xxx_has_fid_reg(chip)) {
1702 reg = entry->fid & GLOBAL_VTU_FID_MASK; 1790 reg = entry->fid & GLOBAL_VTU_FID_MASK;
1703 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_FID, reg); 1791 ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_FID,
1792 reg);
1704 if (ret < 0) 1793 if (ret < 0)
1705 return ret; 1794 return ret;
1706 } else if (mv88e6xxx_num_databases(ps) == 256) { 1795 } else if (mv88e6xxx_num_databases(chip) == 256) {
1707 /* VTU DBNum[7:4] are located in VTU Operation 11:8, and 1796 /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
1708 * VTU DBNum[3:0] are located in VTU Operation 3:0 1797 * VTU DBNum[3:0] are located in VTU Operation 3:0
1709 */ 1798 */
@@ -1714,46 +1803,46 @@ static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps,
1714 reg = GLOBAL_VTU_VID_VALID; 1803 reg = GLOBAL_VTU_VID_VALID;
1715 loadpurge: 1804 loadpurge:
1716 reg |= entry->vid & GLOBAL_VTU_VID_MASK; 1805 reg |= entry->vid & GLOBAL_VTU_VID_MASK;
1717 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg); 1806 ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1718 if (ret < 0) 1807 if (ret < 0)
1719 return ret; 1808 return ret;
1720 1809
1721 return _mv88e6xxx_vtu_cmd(ps, op); 1810 return _mv88e6xxx_vtu_cmd(chip, op);
1722} 1811}
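
On chips with 256 address databases, _mv88e6xxx_vtu_loadpurge() folds the FID into the operation word itself rather than a dedicated FID register, per the DBNum comment in the hunk above. A self-contained sketch of that split and its inverse — the 0xb000 opcode is an illustrative value; only the bit positions (DBNum[7:4] into op[11:8], DBNum[3:0] into op[3:0]) come from the diff:

#include <assert.h>
#include <stdint.h>

#define VTU_OP_LOAD_PURGE 0xb000	/* illustrative: opcode in bits 15:12 */

/* Scatter an 8-bit DBNum into VTU Operation bits 11:8 and 3:0. */
static uint16_t vtu_op_with_dbnum(uint16_t op, uint8_t fid)
{
	op |= (uint16_t)(fid & 0xf0) << 4;	/* DBNum[7:4] -> op[11:8] */
	op |= fid & 0x0f;			/* DBNum[3:0] -> op[3:0]  */
	return op;
}

/* Gather the DBNum back out of an operation word. */
static uint8_t vtu_op_dbnum(uint16_t op)
{
	return ((op >> 4) & 0xf0) | (op & 0x0f);
}

int main(void)
{
	uint16_t op = vtu_op_with_dbnum(VTU_OP_LOAD_PURGE, 0xa5);

	assert(op == 0xba05);
	assert(vtu_op_dbnum(op) == 0xa5);
	return 0;
}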
1723 1812
1724static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_priv_state *ps, u8 sid, 1813static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_chip *chip, u8 sid,
1725 struct mv88e6xxx_vtu_stu_entry *entry) 1814 struct mv88e6xxx_vtu_stu_entry *entry)
1726{ 1815{
1727 struct mv88e6xxx_vtu_stu_entry next = { 0 }; 1816 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1728 int ret; 1817 int ret;
1729 1818
1730 ret = _mv88e6xxx_vtu_wait(ps); 1819 ret = _mv88e6xxx_vtu_wait(chip);
1731 if (ret < 0) 1820 if (ret < 0)
1732 return ret; 1821 return ret;
1733 1822
1734 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, 1823 ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID,
1735 sid & GLOBAL_VTU_SID_MASK); 1824 sid & GLOBAL_VTU_SID_MASK);
1736 if (ret < 0) 1825 if (ret < 0)
1737 return ret; 1826 return ret;
1738 1827
1739 ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_GET_NEXT); 1828 ret = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_GET_NEXT);
1740 if (ret < 0) 1829 if (ret < 0)
1741 return ret; 1830 return ret;
1742 1831
1743 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_SID); 1832 ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_SID);
1744 if (ret < 0) 1833 if (ret < 0)
1745 return ret; 1834 return ret;
1746 1835
1747 next.sid = ret & GLOBAL_VTU_SID_MASK; 1836 next.sid = ret & GLOBAL_VTU_SID_MASK;
1748 1837
1749 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID); 1838 ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_VID);
1750 if (ret < 0) 1839 if (ret < 0)
1751 return ret; 1840 return ret;
1752 1841
1753 next.valid = !!(ret & GLOBAL_VTU_VID_VALID); 1842 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1754 1843
1755 if (next.valid) { 1844 if (next.valid) {
1756 ret = mv88e6xxx_stu_data_read(ps, &next); 1845 ret = mv88e6xxx_stu_data_read(chip, &next);
1757 if (ret < 0) 1846 if (ret < 0)
1758 return ret; 1847 return ret;
1759 } 1848 }
@@ -1762,13 +1851,13 @@ static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_priv_state *ps, u8 sid,
1762 return 0; 1851 return 0;
1763} 1852}
1764 1853
1765static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_priv_state *ps, 1854static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_chip *chip,
1766 struct mv88e6xxx_vtu_stu_entry *entry) 1855 struct mv88e6xxx_vtu_stu_entry *entry)
1767{ 1856{
1768 u16 reg = 0; 1857 u16 reg = 0;
1769 int ret; 1858 int ret;
1770 1859
1771 ret = _mv88e6xxx_vtu_wait(ps); 1860 ret = _mv88e6xxx_vtu_wait(chip);
1772 if (ret < 0) 1861 if (ret < 0)
1773 return ret; 1862 return ret;
1774 1863
@@ -1776,41 +1865,41 @@ static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_priv_state *ps,
1776 goto loadpurge; 1865 goto loadpurge;
1777 1866
1778 /* Write port states */ 1867 /* Write port states */
1779 ret = mv88e6xxx_stu_data_write(ps, entry); 1868 ret = mv88e6xxx_stu_data_write(chip, entry);
1780 if (ret < 0) 1869 if (ret < 0)
1781 return ret; 1870 return ret;
1782 1871
1783 reg = GLOBAL_VTU_VID_VALID; 1872 reg = GLOBAL_VTU_VID_VALID;
1784 loadpurge: 1873 loadpurge:
1785 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg); 1874 ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1786 if (ret < 0) 1875 if (ret < 0)
1787 return ret; 1876 return ret;
1788 1877
1789 reg = entry->sid & GLOBAL_VTU_SID_MASK; 1878 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1790 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg); 1879 ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1791 if (ret < 0) 1880 if (ret < 0)
1792 return ret; 1881 return ret;
1793 1882
1794 return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_LOAD_PURGE); 1883 return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE);
1795} 1884}
1796 1885
1797static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port, 1886static int _mv88e6xxx_port_fid(struct mv88e6xxx_chip *chip, int port,
1798 u16 *new, u16 *old) 1887 u16 *new, u16 *old)
1799{ 1888{
1800 struct dsa_switch *ds = ps->ds; 1889 struct dsa_switch *ds = chip->ds;
1801 u16 upper_mask; 1890 u16 upper_mask;
1802 u16 fid; 1891 u16 fid;
1803 int ret; 1892 int ret;
1804 1893
1805 if (mv88e6xxx_num_databases(ps) == 4096) 1894 if (mv88e6xxx_num_databases(chip) == 4096)
1806 upper_mask = 0xff; 1895 upper_mask = 0xff;
1807 else if (mv88e6xxx_num_databases(ps) == 256) 1896 else if (mv88e6xxx_num_databases(chip) == 256)
1808 upper_mask = 0xf; 1897 upper_mask = 0xf;
1809 else 1898 else
1810 return -EOPNOTSUPP; 1899 return -EOPNOTSUPP;
1811 1900
1812 /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */ 1901 /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
1813 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN); 1902 ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_BASE_VLAN);
1814 if (ret < 0) 1903 if (ret < 0)
1815 return ret; 1904 return ret;
1816 1905
@@ -1820,14 +1909,14 @@ static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port,
1820 ret &= ~PORT_BASE_VLAN_FID_3_0_MASK; 1909 ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
1821 ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK; 1910 ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
1822 1911
1823 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN, 1912 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_BASE_VLAN,
1824 ret); 1913 ret);
1825 if (ret < 0) 1914 if (ret < 0)
1826 return ret; 1915 return ret;
1827 } 1916 }
1828 1917
1829 /* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */ 1918 /* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
1830 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_1); 1919 ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL_1);
1831 if (ret < 0) 1920 if (ret < 0)
1832 return ret; 1921 return ret;
1833 1922
@@ -1837,12 +1926,13 @@ static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port,
1837 ret &= ~upper_mask; 1926 ret &= ~upper_mask;
1838 ret |= (*new >> 4) & upper_mask; 1927 ret |= (*new >> 4) & upper_mask;
1839 1928
1840 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1, 1929 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_1,
1841 ret); 1930 ret);
1842 if (ret < 0) 1931 if (ret < 0)
1843 return ret; 1932 return ret;
1844 1933
1845 netdev_dbg(ds->ports[port], "FID %d (was %d)\n", *new, fid); 1934 netdev_dbg(ds->ports[port].netdev,
1935 "FID %d (was %d)\n", *new, fid);
1846 } 1936 }
1847 1937
1848 if (old) 1938 if (old)
@@ -1851,19 +1941,19 @@ static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port,
1851 return 0; 1941 return 0;
1852} 1942}
1853 1943
1854static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_priv_state *ps, 1944static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_chip *chip,
1855 int port, u16 *fid) 1945 int port, u16 *fid)
1856{ 1946{
1857 return _mv88e6xxx_port_fid(ps, port, NULL, fid); 1947 return _mv88e6xxx_port_fid(chip, port, NULL, fid);
1858} 1948}
1859 1949
1860static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_priv_state *ps, 1950static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_chip *chip,
1861 int port, u16 fid) 1951 int port, u16 fid)
1862{ 1952{
1863 return _mv88e6xxx_port_fid(ps, port, &fid, NULL); 1953 return _mv88e6xxx_port_fid(chip, port, &fid, NULL);
1864} 1954}
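
The helper above shows that a port's default FID is not one field but two: bits 3:0 live at offset 12 of Port Base VLAN (reg 0x06) and bits 11:4 at offset 0 of Port Control 1 (reg 0x05), with the upper mask 0xff on 4096-database chips and 0xf on 256-database ones. A round-trip sketch of that split — fid_split()/fid_join() are hypothetical names, assuming nothing beyond those offsets:

#include <assert.h>
#include <stdint.h>

/* Scatter a 12-bit FID across the two register fields. */
static void fid_split(uint16_t fid, uint16_t upper_mask,
		      uint16_t *base_vlan_bits, uint16_t *ctrl1_bits)
{
	*base_vlan_bits = (fid << 12) & 0xf000;	/* FID[3:0] at offset 12 */
	*ctrl1_bits = (fid >> 4) & upper_mask;	/* FID[11:4] at offset 0 */
}

/* Reassemble the FID from the two register reads. */
static uint16_t fid_join(uint16_t base_vlan, uint16_t ctrl1,
			 uint16_t upper_mask)
{
	return ((base_vlan & 0xf000) >> 12) | ((ctrl1 & upper_mask) << 4);
}

int main(void)
{
	uint16_t lo, hi;

	fid_split(0xabc, 0xff, &lo, &hi);
	assert(lo == 0xc000 && hi == 0xab);
	assert(fid_join(lo, hi, 0xff) == 0xabc);
	return 0;
}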
1865 1955
1866static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid) 1956static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid)
1867{ 1957{
1868 DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID); 1958 DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
1869 struct mv88e6xxx_vtu_stu_entry vlan; 1959 struct mv88e6xxx_vtu_stu_entry vlan;
@@ -1872,8 +1962,8 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid)
1872 bitmap_zero(fid_bitmap, MV88E6XXX_N_FID); 1962 bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
1873 1963
1874 /* Set every FID bit used by the (un)bridged ports */ 1964 /* Set every FID bit used by the (un)bridged ports */
1875 for (i = 0; i < ps->info->num_ports; ++i) { 1965 for (i = 0; i < chip->info->num_ports; ++i) {
1876 err = _mv88e6xxx_port_fid_get(ps, i, fid); 1966 err = _mv88e6xxx_port_fid_get(chip, i, fid);
1877 if (err) 1967 if (err)
1878 return err; 1968 return err;
1879 1969
@@ -1881,12 +1971,12 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid)
1881 } 1971 }
1882 1972
1883 /* Set every FID bit used by the VLAN entries */ 1973 /* Set every FID bit used by the VLAN entries */
1884 err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK); 1974 err = _mv88e6xxx_vtu_vid_write(chip, GLOBAL_VTU_VID_MASK);
1885 if (err) 1975 if (err)
1886 return err; 1976 return err;
1887 1977
1888 do { 1978 do {
1889 err = _mv88e6xxx_vtu_getnext(ps, &vlan); 1979 err = _mv88e6xxx_vtu_getnext(chip, &vlan);
1890 if (err) 1980 if (err)
1891 return err; 1981 return err;
1892 1982
@@ -1900,35 +1990,35 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid)
1900 * databases are not needed. Return the next positive available. 1990 * databases are not needed. Return the next positive available.
1901 */ 1991 */
1902 *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1); 1992 *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
1903 if (unlikely(*fid >= mv88e6xxx_num_databases(ps))) 1993 if (unlikely(*fid >= mv88e6xxx_num_databases(chip)))
1904 return -ENOSPC; 1994 return -ENOSPC;
1905 1995
1906 /* Clear the database */ 1996 /* Clear the database */
1907 return _mv88e6xxx_atu_flush(ps, *fid, true); 1997 return _mv88e6xxx_atu_flush(chip, *fid, true);
1908} 1998}
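
_mv88e6xxx_fid_new() above is a straightforward allocator: mark every FID referenced by a port or a VTU entry in a bitmap, then hand out the lowest free one, starting the search at 1 so FID 0 stays out of the pool (find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1) in the hunk), and finally flush the ATU for the new database. A user-space sketch with a plain bool array in place of the kernel bitmap:

#include <assert.h>
#include <stdbool.h>

#define N_FID 4096	/* MV88E6XXX_N_FID */

/* The kernel rebuilds this set from hardware on every call (port FIDs
 * plus VTU FIDs); a static array keeps the sketch self-contained.
 */
static bool fid_used[N_FID];

static int fid_new(unsigned int num_databases, unsigned int *fid)
{
	unsigned int i;

	/* Start at 1: FID 0 is never handed out. */
	for (i = 1; i < num_databases; i++) {
		if (!fid_used[i]) {
			*fid = i;
			return 0;	/* caller then flushes this database */
		}
	}
	return -1;	/* stands in for -ENOSPC */
}

int main(void)
{
	unsigned int fid;

	fid_used[1] = fid_used[2] = true;	/* e.g. two ports' default FIDs */
	assert(fid_new(256, &fid) == 0 && fid == 3);
	return 0;
}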
1909 1999
1910static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid, 2000static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
1911 struct mv88e6xxx_vtu_stu_entry *entry) 2001 struct mv88e6xxx_vtu_stu_entry *entry)
1912{ 2002{
1913 struct dsa_switch *ds = ps->ds; 2003 struct dsa_switch *ds = chip->ds;
1914 struct mv88e6xxx_vtu_stu_entry vlan = { 2004 struct mv88e6xxx_vtu_stu_entry vlan = {
1915 .valid = true, 2005 .valid = true,
1916 .vid = vid, 2006 .vid = vid,
1917 }; 2007 };
1918 int i, err; 2008 int i, err;
1919 2009
1920 err = _mv88e6xxx_fid_new(ps, &vlan.fid); 2010 err = _mv88e6xxx_fid_new(chip, &vlan.fid);
1921 if (err) 2011 if (err)
1922 return err; 2012 return err;
1923 2013
1924 /* exclude all ports except the CPU and DSA ports */ 2014 /* exclude all ports except the CPU and DSA ports */
1925 for (i = 0; i < ps->info->num_ports; ++i) 2015 for (i = 0; i < chip->info->num_ports; ++i)
1926 vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i) 2016 vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
1927 ? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED 2017 ? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
1928 : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; 2018 : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1929 2019
1930 if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) || 2020 if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) ||
1931 mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps)) { 2021 mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip)) {
1932 struct mv88e6xxx_vtu_stu_entry vstp; 2022 struct mv88e6xxx_vtu_stu_entry vstp;
1933 2023
1934 /* Adding a VTU entry requires a valid STU entry. As VSTP is not 2024 /* Adding a VTU entry requires a valid STU entry. As VSTP is not
@@ -1936,7 +2026,7 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid,
1936 * entries. Thus, validate the SID 0. 2026 * entries. Thus, validate the SID 0.
1937 */ 2027 */
1938 vlan.sid = 0; 2028 vlan.sid = 0;
1939 err = _mv88e6xxx_stu_getnext(ps, GLOBAL_VTU_SID_MASK, &vstp); 2029 err = _mv88e6xxx_stu_getnext(chip, GLOBAL_VTU_SID_MASK, &vstp);
1940 if (err) 2030 if (err)
1941 return err; 2031 return err;
1942 2032
@@ -1945,7 +2035,7 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid,
1945 vstp.valid = true; 2035 vstp.valid = true;
1946 vstp.sid = vlan.sid; 2036 vstp.sid = vlan.sid;
1947 2037
1948 err = _mv88e6xxx_stu_loadpurge(ps, &vstp); 2038 err = _mv88e6xxx_stu_loadpurge(chip, &vstp);
1949 if (err) 2039 if (err)
1950 return err; 2040 return err;
1951 } 2041 }
@@ -1955,7 +2045,7 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid,
1955 return 0; 2045 return 0;
1956} 2046}
1957 2047
1958static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid, 2048static int _mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
1959 struct mv88e6xxx_vtu_stu_entry *entry, bool creat) 2049 struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
1960{ 2050{
1961 int err; 2051 int err;
@@ -1963,11 +2053,11 @@ static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid,
1963 if (!vid) 2053 if (!vid)
1964 return -EINVAL; 2054 return -EINVAL;
1965 2055
1966 err = _mv88e6xxx_vtu_vid_write(ps, vid - 1); 2056 err = _mv88e6xxx_vtu_vid_write(chip, vid - 1);
1967 if (err) 2057 if (err)
1968 return err; 2058 return err;
1969 2059
1970 err = _mv88e6xxx_vtu_getnext(ps, entry); 2060 err = _mv88e6xxx_vtu_getnext(chip, entry);
1971 if (err) 2061 if (err)
1972 return err; 2062 return err;
1973 2063
@@ -1978,7 +2068,7 @@ static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid,
1978 * -EOPNOTSUPP to inform bridge about an eventual software VLAN. 2068 * -EOPNOTSUPP to inform bridge about an eventual software VLAN.
1979 */ 2069 */
1980 2070
1981 err = _mv88e6xxx_vtu_new(ps, vid, entry); 2071 err = _mv88e6xxx_vtu_new(chip, vid, entry);
1982 } 2072 }
1983 2073
1984 return err; 2074 return err;
@@ -1987,21 +2077,21 @@ static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid,
1987static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, 2077static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
1988 u16 vid_begin, u16 vid_end) 2078 u16 vid_begin, u16 vid_end)
1989{ 2079{
1990 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2080 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
1991 struct mv88e6xxx_vtu_stu_entry vlan; 2081 struct mv88e6xxx_vtu_stu_entry vlan;
1992 int i, err; 2082 int i, err;
1993 2083
1994 if (!vid_begin) 2084 if (!vid_begin)
1995 return -EOPNOTSUPP; 2085 return -EOPNOTSUPP;
1996 2086
1997 mutex_lock(&ps->smi_mutex); 2087 mutex_lock(&chip->reg_lock);
1998 2088
1999 err = _mv88e6xxx_vtu_vid_write(ps, vid_begin - 1); 2089 err = _mv88e6xxx_vtu_vid_write(chip, vid_begin - 1);
2000 if (err) 2090 if (err)
2001 goto unlock; 2091 goto unlock;
2002 2092
2003 do { 2093 do {
2004 err = _mv88e6xxx_vtu_getnext(ps, &vlan); 2094 err = _mv88e6xxx_vtu_getnext(chip, &vlan);
2005 if (err) 2095 if (err)
2006 goto unlock; 2096 goto unlock;
2007 2097
@@ -2011,7 +2101,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
2011 if (vlan.vid > vid_end) 2101 if (vlan.vid > vid_end)
2012 break; 2102 break;
2013 2103
2014 for (i = 0; i < ps->info->num_ports; ++i) { 2104 for (i = 0; i < chip->info->num_ports; ++i) {
2015 if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i)) 2105 if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
2016 continue; 2106 continue;
2017 2107
@@ -2019,21 +2109,21 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
2019 GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) 2109 GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
2020 continue; 2110 continue;
2021 2111
2022 if (ps->ports[i].bridge_dev == 2112 if (chip->ports[i].bridge_dev ==
2023 ps->ports[port].bridge_dev) 2113 chip->ports[port].bridge_dev)
2024 break; /* same bridge, check next VLAN */ 2114 break; /* same bridge, check next VLAN */
2025 2115
2026 netdev_warn(ds->ports[port], 2116 netdev_warn(ds->ports[port].netdev,
2027 "hardware VLAN %d already used by %s\n", 2117 "hardware VLAN %d already used by %s\n",
2028 vlan.vid, 2118 vlan.vid,
2029 netdev_name(ps->ports[i].bridge_dev)); 2119 netdev_name(chip->ports[i].bridge_dev));
2030 err = -EOPNOTSUPP; 2120 err = -EOPNOTSUPP;
2031 goto unlock; 2121 goto unlock;
2032 } 2122 }
2033 } while (vlan.vid < vid_end); 2123 } while (vlan.vid < vid_end);
2034 2124
2035 unlock: 2125 unlock:
2036 mutex_unlock(&ps->smi_mutex); 2126 mutex_unlock(&chip->reg_lock);
2037 2127
2038 return err; 2128 return err;
2039} 2129}
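
mv88e6xxx_port_check_hw_vlan() enforces a policy rather than programming hardware: a VLAN already present in the VTU may only be reused by ports behind the same bridge, otherwise the driver warns and returns -EOPNOTSUPP so the bridge layer falls back to a software VLAN. A reduced sketch of just the cross-bridge rejection — the real loop also skips CPU and DSA ports and breaks early on the first same-bridge member:

#include <assert.h>
#include <stdbool.h>

#define NUM_PORTS 7
#define NON_MEMBER 3	/* stands in for GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER */

/* Reject the VLAN if any member port sits behind a different bridge. */
static bool hw_vlan_ok(const int bridge_of[], const int member_tag[], int port)
{
	int i;

	for (i = 0; i < NUM_PORTS; i++) {
		if (i == port || member_tag[i] == NON_MEMBER)
			continue;
		if (bridge_of[i] != bridge_of[port])
			return false;	/* VLAN already used by another bridge */
	}
	return true;
}

int main(void)
{
	/* bridge_of[] models chip->ports[i].bridge_dev; 0 = unbridged */
	int bridge_of[NUM_PORTS] = { 1, 1, 2, 0, 0, 0, 0 };
	int tag[NUM_PORTS] = { 0, 0, NON_MEMBER, NON_MEMBER, NON_MEMBER,
			       NON_MEMBER, NON_MEMBER };

	assert(hw_vlan_ok(bridge_of, tag, 0));	/* shared within bridge 1 */
	tag[2] = 0;				/* port 2 (bridge 2) joins */
	assert(!hw_vlan_ok(bridge_of, tag, 0));	/* now cross-bridge */
	return 0;
}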
@@ -2048,17 +2138,17 @@ static const char * const mv88e6xxx_port_8021q_mode_names[] = {
2048static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, 2138static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
2049 bool vlan_filtering) 2139 bool vlan_filtering)
2050{ 2140{
2051 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2141 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
2052 u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE : 2142 u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
2053 PORT_CONTROL_2_8021Q_DISABLED; 2143 PORT_CONTROL_2_8021Q_DISABLED;
2054 int ret; 2144 int ret;
2055 2145
2056 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU)) 2146 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
2057 return -EOPNOTSUPP; 2147 return -EOPNOTSUPP;
2058 2148
2059 mutex_lock(&ps->smi_mutex); 2149 mutex_lock(&chip->reg_lock);
2060 2150
2061 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_2); 2151 ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL_2);
2062 if (ret < 0) 2152 if (ret < 0)
2063 goto unlock; 2153 goto unlock;
2064 2154
@@ -2068,31 +2158,32 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
2068 ret &= ~PORT_CONTROL_2_8021Q_MASK; 2158 ret &= ~PORT_CONTROL_2_8021Q_MASK;
2069 ret |= new & PORT_CONTROL_2_8021Q_MASK; 2159 ret |= new & PORT_CONTROL_2_8021Q_MASK;
2070 2160
2071 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_2, 2161 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_2,
2072 ret); 2162 ret);
2073 if (ret < 0) 2163 if (ret < 0)
2074 goto unlock; 2164 goto unlock;
2075 2165
2076 netdev_dbg(ds->ports[port], "802.1Q Mode %s (was %s)\n", 2166 netdev_dbg(ds->ports[port].netdev, "802.1Q Mode %s (was %s)\n",
2077 mv88e6xxx_port_8021q_mode_names[new], 2167 mv88e6xxx_port_8021q_mode_names[new],
2078 mv88e6xxx_port_8021q_mode_names[old]); 2168 mv88e6xxx_port_8021q_mode_names[old]);
2079 } 2169 }
2080 2170
2081 ret = 0; 2171 ret = 0;
2082 unlock: 2172 unlock:
2083 mutex_unlock(&ps->smi_mutex); 2173 mutex_unlock(&chip->reg_lock);
2084 2174
2085 return ret; 2175 return ret;
2086} 2176}
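
Toggling VLAN filtering above is a classic read-modify-write under reg_lock: only the 802.1Q mode field of Port Control 2 changes, everything else is preserved, and the debug message reports old and new modes. The masking in isolation — assuming the mode field occupies bits 11:10, as the PORT_CONTROL_2_8021Q_* masks imply:

#include <assert.h>
#include <stdint.h>

#define P2_8021Q_MASK		(3 << 10)
#define P2_8021Q_DISABLED	(0 << 10)
#define P2_8021Q_SECURE		(3 << 10)

static uint16_t set_8021q_mode(uint16_t reg, uint16_t mode)
{
	reg &= ~P2_8021Q_MASK;		/* clear only the mode field */
	reg |= mode & P2_8021Q_MASK;	/* splice the new mode in    */
	return reg;
}

int main(void)
{
	uint16_t reg = 0x00ff | P2_8021Q_DISABLED;

	reg = set_8021q_mode(reg, P2_8021Q_SECURE);
	assert((reg & P2_8021Q_MASK) == P2_8021Q_SECURE);
	assert((reg & ~P2_8021Q_MASK) == 0x00ff);	/* other bits untouched */
	return 0;
}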
2087 2177
2088static int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, 2178static int
2089 const struct switchdev_obj_port_vlan *vlan, 2179mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
2090 struct switchdev_trans *trans) 2180 const struct switchdev_obj_port_vlan *vlan,
2181 struct switchdev_trans *trans)
2091{ 2182{
2092 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2183 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
2093 int err; 2184 int err;
2094 2185
2095 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU)) 2186 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
2096 return -EOPNOTSUPP; 2187 return -EOPNOTSUPP;
2097 2188
2098 /* If the requested port doesn't belong to the same bridge as the VLAN 2189 /* If the requested port doesn't belong to the same bridge as the VLAN
@@ -2109,13 +2200,13 @@ static int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
2109 return 0; 2200 return 0;
2110} 2201}
2111 2202
2112static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_priv_state *ps, int port, 2203static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port,
2113 u16 vid, bool untagged) 2204 u16 vid, bool untagged)
2114{ 2205{
2115 struct mv88e6xxx_vtu_stu_entry vlan; 2206 struct mv88e6xxx_vtu_stu_entry vlan;
2116 int err; 2207 int err;
2117 2208
2118 err = _mv88e6xxx_vtu_get(ps, vid, &vlan, true); 2209 err = _mv88e6xxx_vtu_get(chip, vid, &vlan, true);
2119 if (err) 2210 if (err)
2120 return err; 2211 return err;
2121 2212
@@ -2123,43 +2214,44 @@ static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_priv_state *ps, int port,
2123 GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED : 2214 GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
2124 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED; 2215 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
2125 2216
2126 return _mv88e6xxx_vtu_loadpurge(ps, &vlan); 2217 return _mv88e6xxx_vtu_loadpurge(chip, &vlan);
2127} 2218}
2128 2219
2129static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, 2220static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
2130 const struct switchdev_obj_port_vlan *vlan, 2221 const struct switchdev_obj_port_vlan *vlan,
2131 struct switchdev_trans *trans) 2222 struct switchdev_trans *trans)
2132{ 2223{
2133 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2224 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
2134 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 2225 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
2135 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; 2226 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
2136 u16 vid; 2227 u16 vid;
2137 2228
2138 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU)) 2229 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
2139 return; 2230 return;
2140 2231
2141 mutex_lock(&ps->smi_mutex); 2232 mutex_lock(&chip->reg_lock);
2142 2233
2143 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) 2234 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
2144 if (_mv88e6xxx_port_vlan_add(ps, port, vid, untagged)) 2235 if (_mv88e6xxx_port_vlan_add(chip, port, vid, untagged))
2145 netdev_err(ds->ports[port], "failed to add VLAN %d%c\n", 2236 netdev_err(ds->ports[port].netdev,
2237 "failed to add VLAN %d%c\n",
2146 vid, untagged ? 'u' : 't'); 2238 vid, untagged ? 'u' : 't');
2147 2239
2148 if (pvid && _mv88e6xxx_port_pvid_set(ps, port, vlan->vid_end)) 2240 if (pvid && _mv88e6xxx_port_pvid_set(chip, port, vlan->vid_end))
2149 netdev_err(ds->ports[port], "failed to set PVID %d\n", 2241 netdev_err(ds->ports[port].netdev, "failed to set PVID %d\n",
2150 vlan->vid_end); 2242 vlan->vid_end);
2151 2243
2152 mutex_unlock(&ps->smi_mutex); 2244 mutex_unlock(&chip->reg_lock);
2153} 2245}
2154 2246
2155static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps, 2247static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
2156 int port, u16 vid) 2248 int port, u16 vid)
2157{ 2249{
2158 struct dsa_switch *ds = ps->ds; 2250 struct dsa_switch *ds = chip->ds;
2159 struct mv88e6xxx_vtu_stu_entry vlan; 2251 struct mv88e6xxx_vtu_stu_entry vlan;
2160 int i, err; 2252 int i, err;
2161 2253
2162 err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false); 2254 err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false);
2163 if (err) 2255 if (err)
2164 return err; 2256 return err;
2165 2257
@@ -2171,7 +2263,7 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps,
2171 2263
2172 /* keep the VLAN unless all ports are excluded */ 2264 /* keep the VLAN unless all ports are excluded */
2173 vlan.valid = false; 2265 vlan.valid = false;
2174 for (i = 0; i < ps->info->num_ports; ++i) { 2266 for (i = 0; i < chip->info->num_ports; ++i) {
2175 if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)) 2267 if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
2176 continue; 2268 continue;
2177 2269
@@ -2181,55 +2273,55 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps,
2181 } 2273 }
2182 } 2274 }
2183 2275
2184 err = _mv88e6xxx_vtu_loadpurge(ps, &vlan); 2276 err = _mv88e6xxx_vtu_loadpurge(chip, &vlan);
2185 if (err) 2277 if (err)
2186 return err; 2278 return err;
2187 2279
2188 return _mv88e6xxx_atu_remove(ps, vlan.fid, port, false); 2280 return _mv88e6xxx_atu_remove(chip, vlan.fid, port, false);
2189} 2281}
2190 2282
2191static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, 2283static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
2192 const struct switchdev_obj_port_vlan *vlan) 2284 const struct switchdev_obj_port_vlan *vlan)
2193{ 2285{
2194 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2286 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
2195 u16 pvid, vid; 2287 u16 pvid, vid;
2196 int err = 0; 2288 int err = 0;
2197 2289
2198 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU)) 2290 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
2199 return -EOPNOTSUPP; 2291 return -EOPNOTSUPP;
2200 2292
2201 mutex_lock(&ps->smi_mutex); 2293 mutex_lock(&chip->reg_lock);
2202 2294
2203 err = _mv88e6xxx_port_pvid_get(ps, port, &pvid); 2295 err = _mv88e6xxx_port_pvid_get(chip, port, &pvid);
2204 if (err) 2296 if (err)
2205 goto unlock; 2297 goto unlock;
2206 2298
2207 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { 2299 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
2208 err = _mv88e6xxx_port_vlan_del(ps, port, vid); 2300 err = _mv88e6xxx_port_vlan_del(chip, port, vid);
2209 if (err) 2301 if (err)
2210 goto unlock; 2302 goto unlock;
2211 2303
2212 if (vid == pvid) { 2304 if (vid == pvid) {
2213 err = _mv88e6xxx_port_pvid_set(ps, port, 0); 2305 err = _mv88e6xxx_port_pvid_set(chip, port, 0);
2214 if (err) 2306 if (err)
2215 goto unlock; 2307 goto unlock;
2216 } 2308 }
2217 } 2309 }
2218 2310
2219 unlock: 2311 unlock:
2220 mutex_unlock(&ps->smi_mutex); 2312 mutex_unlock(&chip->reg_lock);
2221 2313
2222 return err; 2314 return err;
2223} 2315}
2224 2316
2225static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_priv_state *ps, 2317static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_chip *chip,
2226 const unsigned char *addr) 2318 const unsigned char *addr)
2227{ 2319{
2228 int i, ret; 2320 int i, ret;
2229 2321
2230 for (i = 0; i < 3; i++) { 2322 for (i = 0; i < 3; i++) {
2231 ret = _mv88e6xxx_reg_write( 2323 ret = _mv88e6xxx_reg_write(
2232 ps, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i, 2324 chip, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
2233 (addr[i * 2] << 8) | addr[i * 2 + 1]); 2325 (addr[i * 2] << 8) | addr[i * 2 + 1]);
2234 if (ret < 0) 2326 if (ret < 0)
2235 return ret; 2327 return ret;
@@ -2238,13 +2330,13 @@ static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_priv_state *ps,
2238 return 0; 2330 return 0;
2239} 2331}
2240 2332
2241static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_priv_state *ps, 2333static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_chip *chip,
2242 unsigned char *addr) 2334 unsigned char *addr)
2243{ 2335{
2244 int i, ret; 2336 int i, ret;
2245 2337
2246 for (i = 0; i < 3; i++) { 2338 for (i = 0; i < 3; i++) {
2247 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 2339 ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
2248 GLOBAL_ATU_MAC_01 + i); 2340 GLOBAL_ATU_MAC_01 + i);
2249 if (ret < 0) 2341 if (ret < 0)
2250 return ret; 2342 return ret;
@@ -2255,27 +2347,27 @@ static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_priv_state *ps,
2255 return 0; 2347 return 0;
2256} 2348}
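
The ATU addresses a 48-bit MAC through three 16-bit registers (GLOBAL_ATU_MAC_01/23/45), each holding two bytes big-endian — exactly the (addr[i * 2] << 8) | addr[i * 2 + 1] packing in the hunks above. A round-trip sketch of that layout:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Pack a MAC into the three 16-bit ATU MAC registers. */
static void atu_mac_pack(const uint8_t mac[6], uint16_t regs[3])
{
	int i;

	for (i = 0; i < 3; i++)
		regs[i] = (mac[i * 2] << 8) | mac[i * 2 + 1];
}

/* Unpack the registers back into a MAC, as the read path does. */
static void atu_mac_unpack(const uint16_t regs[3], uint8_t mac[6])
{
	int i;

	for (i = 0; i < 3; i++) {
		mac[i * 2] = regs[i] >> 8;
		mac[i * 2 + 1] = regs[i] & 0xff;
	}
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint16_t regs[3];
	uint8_t out[6];

	atu_mac_pack(mac, regs);
	assert(regs[0] == 0x0011 && regs[1] == 0x2233 && regs[2] == 0x4455);
	atu_mac_unpack(regs, out);
	assert(memcmp(mac, out, 6) == 0);
	return 0;
}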
2257 2349
2258static int _mv88e6xxx_atu_load(struct mv88e6xxx_priv_state *ps, 2350static int _mv88e6xxx_atu_load(struct mv88e6xxx_chip *chip,
2259 struct mv88e6xxx_atu_entry *entry) 2351 struct mv88e6xxx_atu_entry *entry)
2260{ 2352{
2261 int ret; 2353 int ret;
2262 2354
2263 ret = _mv88e6xxx_atu_wait(ps); 2355 ret = _mv88e6xxx_atu_wait(chip);
2264 if (ret < 0) 2356 if (ret < 0)
2265 return ret; 2357 return ret;
2266 2358
2267 ret = _mv88e6xxx_atu_mac_write(ps, entry->mac); 2359 ret = _mv88e6xxx_atu_mac_write(chip, entry->mac);
2268 if (ret < 0) 2360 if (ret < 0)
2269 return ret; 2361 return ret;
2270 2362
2271 ret = _mv88e6xxx_atu_data_write(ps, entry); 2363 ret = _mv88e6xxx_atu_data_write(chip, entry);
2272 if (ret < 0) 2364 if (ret < 0)
2273 return ret; 2365 return ret;
2274 2366
2275 return _mv88e6xxx_atu_cmd(ps, entry->fid, GLOBAL_ATU_OP_LOAD_DB); 2367 return _mv88e6xxx_atu_cmd(chip, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
2276} 2368}
2277 2369
2278static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port, 2370static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_chip *chip, int port,
2279 const unsigned char *addr, u16 vid, 2371 const unsigned char *addr, u16 vid,
2280 u8 state) 2372 u8 state)
2281{ 2373{
@@ -2285,9 +2377,9 @@ static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port,
2285 2377
2286 /* Null VLAN ID corresponds to the port private database */ 2378 /* Null VLAN ID corresponds to the port private database */
2287 if (vid == 0) 2379 if (vid == 0)
2288 err = _mv88e6xxx_port_fid_get(ps, port, &vlan.fid); 2380 err = _mv88e6xxx_port_fid_get(chip, port, &vlan.fid);
2289 else 2381 else
2290 err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false); 2382 err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false);
2291 if (err) 2383 if (err)
2292 return err; 2384 return err;
2293 2385
@@ -2299,16 +2391,16 @@ static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port,
2299 entry.portv_trunkid = BIT(port); 2391 entry.portv_trunkid = BIT(port);
2300 } 2392 }
2301 2393
2302 return _mv88e6xxx_atu_load(ps, &entry); 2394 return _mv88e6xxx_atu_load(chip, &entry);
2303} 2395}
2304 2396
2305static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, 2397static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
2306 const struct switchdev_obj_port_fdb *fdb, 2398 const struct switchdev_obj_port_fdb *fdb,
2307 struct switchdev_trans *trans) 2399 struct switchdev_trans *trans)
2308{ 2400{
2309 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2401 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
2310 2402
2311 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU)) 2403 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_ATU))
2312 return -EOPNOTSUPP; 2404 return -EOPNOTSUPP;
2313 2405
2314 /* We don't need any dynamic resource from the kernel (yet), 2406 /* We don't need any dynamic resource from the kernel (yet),
@@ -2324,35 +2416,36 @@ static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
2324 int state = is_multicast_ether_addr(fdb->addr) ? 2416 int state = is_multicast_ether_addr(fdb->addr) ?
2325 GLOBAL_ATU_DATA_STATE_MC_STATIC : 2417 GLOBAL_ATU_DATA_STATE_MC_STATIC :
2326 GLOBAL_ATU_DATA_STATE_UC_STATIC; 2418 GLOBAL_ATU_DATA_STATE_UC_STATIC;
2327 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2419 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
2328 2420
2329 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU)) 2421 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_ATU))
2330 return; 2422 return;
2331 2423
2332 mutex_lock(&ps->smi_mutex); 2424 mutex_lock(&chip->reg_lock);
2333 if (_mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid, state)) 2425 if (_mv88e6xxx_port_fdb_load(chip, port, fdb->addr, fdb->vid, state))
2334 netdev_err(ds->ports[port], "failed to load MAC address\n"); 2426 netdev_err(ds->ports[port].netdev,
2335 mutex_unlock(&ps->smi_mutex); 2427 "failed to load MAC address\n");
2428 mutex_unlock(&chip->reg_lock);
2336} 2429}
2337 2430
2338static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, 2431static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
2339 const struct switchdev_obj_port_fdb *fdb) 2432 const struct switchdev_obj_port_fdb *fdb)
2340{ 2433{
2341 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2434 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
2342 int ret; 2435 int ret;
2343 2436
2344 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU)) 2437 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_ATU))
2345 return -EOPNOTSUPP; 2438 return -EOPNOTSUPP;
2346 2439
2347 mutex_lock(&ps->smi_mutex); 2440 mutex_lock(&chip->reg_lock);
2348 ret = _mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid, 2441 ret = _mv88e6xxx_port_fdb_load(chip, port, fdb->addr, fdb->vid,
2349 GLOBAL_ATU_DATA_STATE_UNUSED); 2442 GLOBAL_ATU_DATA_STATE_UNUSED);
2350 mutex_unlock(&ps->smi_mutex); 2443 mutex_unlock(&chip->reg_lock);
2351 2444
2352 return ret; 2445 return ret;
2353} 2446}
2354 2447
2355static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state *ps, u16 fid, 2448static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
2356 struct mv88e6xxx_atu_entry *entry) 2449 struct mv88e6xxx_atu_entry *entry)
2357{ 2450{
2358 struct mv88e6xxx_atu_entry next = { 0 }; 2451 struct mv88e6xxx_atu_entry next = { 0 };
@@ -2360,19 +2453,19 @@ static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state *ps, u16 fid,
2360 2453
2361 next.fid = fid; 2454 next.fid = fid;
2362 2455
2363 ret = _mv88e6xxx_atu_wait(ps); 2456 ret = _mv88e6xxx_atu_wait(chip);
2364 if (ret < 0) 2457 if (ret < 0)
2365 return ret; 2458 return ret;
2366 2459
2367 ret = _mv88e6xxx_atu_cmd(ps, fid, GLOBAL_ATU_OP_GET_NEXT_DB); 2460 ret = _mv88e6xxx_atu_cmd(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
2368 if (ret < 0) 2461 if (ret < 0)
2369 return ret; 2462 return ret;
2370 2463
2371 ret = _mv88e6xxx_atu_mac_read(ps, next.mac); 2464 ret = _mv88e6xxx_atu_mac_read(chip, next.mac);
2372 if (ret < 0) 2465 if (ret < 0)
2373 return ret; 2466 return ret;
2374 2467
2375 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_DATA); 2468 ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_ATU_DATA);
2376 if (ret < 0) 2469 if (ret < 0)
2377 return ret; 2470 return ret;
2378 2471
@@ -2397,7 +2490,7 @@ static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state *ps, u16 fid,
2397 return 0; 2490 return 0;
2398} 2491}
2399 2492
2400static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_priv_state *ps, 2493static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_chip *chip,
2401 u16 fid, u16 vid, int port, 2494 u16 fid, u16 vid, int port,
2402 struct switchdev_obj_port_fdb *fdb, 2495 struct switchdev_obj_port_fdb *fdb,
2403 int (*cb)(struct switchdev_obj *obj)) 2496 int (*cb)(struct switchdev_obj *obj))
@@ -2407,12 +2500,12 @@ static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_priv_state *ps,
2407 }; 2500 };
2408 int err; 2501 int err;
2409 2502
2410 err = _mv88e6xxx_atu_mac_write(ps, addr.mac); 2503 err = _mv88e6xxx_atu_mac_write(chip, addr.mac);
2411 if (err) 2504 if (err)
2412 return err; 2505 return err;
2413 2506
2414 do { 2507 do {
2415 err = _mv88e6xxx_atu_getnext(ps, fid, &addr); 2508 err = _mv88e6xxx_atu_getnext(chip, fid, &addr);
2416 if (err) 2509 if (err)
2417 break; 2510 break;
2418 2511
@@ -2442,48 +2535,48 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
2442 struct switchdev_obj_port_fdb *fdb, 2535 struct switchdev_obj_port_fdb *fdb,
2443 int (*cb)(struct switchdev_obj *obj)) 2536 int (*cb)(struct switchdev_obj *obj))
2444{ 2537{
2445 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2538 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
2446 struct mv88e6xxx_vtu_stu_entry vlan = { 2539 struct mv88e6xxx_vtu_stu_entry vlan = {
2447 .vid = GLOBAL_VTU_VID_MASK, /* all ones */ 2540 .vid = GLOBAL_VTU_VID_MASK, /* all ones */
2448 }; 2541 };
2449 u16 fid; 2542 u16 fid;
2450 int err; 2543 int err;
2451 2544
2452 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU)) 2545 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_ATU))
2453 return -EOPNOTSUPP; 2546 return -EOPNOTSUPP;
2454 2547
2455 mutex_lock(&ps->smi_mutex); 2548 mutex_lock(&chip->reg_lock);
2456 2549
2457 /* Dump port's default Filtering Information Database (VLAN ID 0) */ 2550 /* Dump port's default Filtering Information Database (VLAN ID 0) */
2458 err = _mv88e6xxx_port_fid_get(ps, port, &fid); 2551 err = _mv88e6xxx_port_fid_get(chip, port, &fid);
2459 if (err) 2552 if (err)
2460 goto unlock; 2553 goto unlock;
2461 2554
2462 err = _mv88e6xxx_port_fdb_dump_one(ps, fid, 0, port, fdb, cb); 2555 err = _mv88e6xxx_port_fdb_dump_one(chip, fid, 0, port, fdb, cb);
2463 if (err) 2556 if (err)
2464 goto unlock; 2557 goto unlock;
2465 2558
2466 /* Dump VLANs' Filtering Information Databases */ 2559 /* Dump VLANs' Filtering Information Databases */
2467 err = _mv88e6xxx_vtu_vid_write(ps, vlan.vid); 2560 err = _mv88e6xxx_vtu_vid_write(chip, vlan.vid);
2468 if (err) 2561 if (err)
2469 goto unlock; 2562 goto unlock;
2470 2563
2471 do { 2564 do {
2472 err = _mv88e6xxx_vtu_getnext(ps, &vlan); 2565 err = _mv88e6xxx_vtu_getnext(chip, &vlan);
2473 if (err) 2566 if (err)
2474 break; 2567 break;
2475 2568
2476 if (!vlan.valid) 2569 if (!vlan.valid)
2477 break; 2570 break;
2478 2571
2479 err = _mv88e6xxx_port_fdb_dump_one(ps, vlan.fid, vlan.vid, port, 2572 err = _mv88e6xxx_port_fdb_dump_one(chip, vlan.fid, vlan.vid,
2480 fdb, cb); 2573 port, fdb, cb);
2481 if (err) 2574 if (err)
2482 break; 2575 break;
2483 } while (vlan.vid < GLOBAL_VTU_VID_MASK); 2576 } while (vlan.vid < GLOBAL_VTU_VID_MASK);
2484 2577
2485 unlock: 2578 unlock:
2486 mutex_unlock(&ps->smi_mutex); 2579 mutex_unlock(&chip->reg_lock);
2487 2580
2488 return err; 2581 return err;
2489} 2582}
@@ -2491,100 +2584,101 @@ unlock:
2491static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, 2584static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
2492 struct net_device *bridge) 2585 struct net_device *bridge)
2493{ 2586{
2494 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2587 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
2495 int i, err = 0; 2588 int i, err = 0;
2496 2589
2497 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE)) 2590 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VLANTABLE))
2498 return -EOPNOTSUPP; 2591 return -EOPNOTSUPP;
2499 2592
2500 mutex_lock(&ps->smi_mutex); 2593 mutex_lock(&chip->reg_lock);
2501 2594
2502 /* Assign the bridge and remap each port's VLANTable */ 2595 /* Assign the bridge and remap each port's VLANTable */
2503 ps->ports[port].bridge_dev = bridge; 2596 chip->ports[port].bridge_dev = bridge;
2504 2597
2505 for (i = 0; i < ps->info->num_ports; ++i) { 2598 for (i = 0; i < chip->info->num_ports; ++i) {
2506 if (ps->ports[i].bridge_dev == bridge) { 2599 if (chip->ports[i].bridge_dev == bridge) {
2507 err = _mv88e6xxx_port_based_vlan_map(ps, i); 2600 err = _mv88e6xxx_port_based_vlan_map(chip, i);
2508 if (err) 2601 if (err)
2509 break; 2602 break;
2510 } 2603 }
2511 } 2604 }
2512 2605
2513 mutex_unlock(&ps->smi_mutex); 2606 mutex_unlock(&chip->reg_lock);
2514 2607
2515 return err; 2608 return err;
2516} 2609}
2517 2610
2518static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) 2611static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
2519{ 2612{
2520 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2613 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
2521 struct net_device *bridge = ps->ports[port].bridge_dev; 2614 struct net_device *bridge = chip->ports[port].bridge_dev;
2522 int i; 2615 int i;
2523 2616
2524 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE)) 2617 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VLANTABLE))
2525 return; 2618 return;
2526 2619
2527 mutex_lock(&ps->smi_mutex); 2620 mutex_lock(&chip->reg_lock);
2528 2621
2529 /* Unassign the bridge and remap each port's VLANTable */ 2622 /* Unassign the bridge and remap each port's VLANTable */
2530 ps->ports[port].bridge_dev = NULL; 2623 chip->ports[port].bridge_dev = NULL;
2531 2624
2532 for (i = 0; i < ps->info->num_ports; ++i) 2625 for (i = 0; i < chip->info->num_ports; ++i)
2533 if (i == port || ps->ports[i].bridge_dev == bridge) 2626 if (i == port || chip->ports[i].bridge_dev == bridge)
2534 if (_mv88e6xxx_port_based_vlan_map(ps, i)) 2627 if (_mv88e6xxx_port_based_vlan_map(chip, i))
2535 netdev_warn(ds->ports[i], "failed to remap\n"); 2628 netdev_warn(ds->ports[i].netdev,
2629 "failed to remap\n");
2536 2630
2537 mutex_unlock(&ps->smi_mutex); 2631 mutex_unlock(&chip->reg_lock);
2538} 2632}
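
Bridge join and leave are symmetric: both update chip->ports[port].bridge_dev and then recompute the port-based VLANTable of every port that is (or was) behind the same bridge. A sketch of what such a remap plausibly computes — port_based_vlan_map() here is a stand-in for _mv88e6xxx_port_based_vlan_map(), whose body is outside this diff, and the real map also includes CPU and DSA ports:

#include <assert.h>
#include <stdint.h>

#define NUM_PORTS 7

/* A port's VLANTable mask: every other port behind the same bridge. */
static uint16_t port_based_vlan_map(const int bridge_of[], int port)
{
	uint16_t mask = 0;
	int i;

	for (i = 0; i < NUM_PORTS; i++)
		if (i != port && bridge_of[i] &&
		    bridge_of[i] == bridge_of[port])
			mask |= 1 << i;
	return mask;
}

int main(void)
{
	/* bridge_of[] models chip->ports[i].bridge_dev; 0 = unbridged */
	int bridge_of[NUM_PORTS] = { 1, 1, 1, 0, 0, 0, 0 };

	assert(port_based_vlan_map(bridge_of, 0) == 0x6); /* ports 1 and 2 */
	bridge_of[2] = 0;	/* port 2 leaves the bridge */
	assert(port_based_vlan_map(bridge_of, 0) == 0x2); /* port 1 only */
	return 0;
}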
2539 2633
2540static int _mv88e6xxx_phy_page_write(struct mv88e6xxx_priv_state *ps, 2634static int _mv88e6xxx_mdio_page_write(struct mv88e6xxx_chip *chip,
2541 int port, int page, int reg, int val) 2635 int port, int page, int reg, int val)
2542{ 2636{
2543 int ret; 2637 int ret;
2544 2638
2545 ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page); 2639 ret = mv88e6xxx_mdio_write_indirect(chip, port, 0x16, page);
2546 if (ret < 0) 2640 if (ret < 0)
2547 goto restore_page_0; 2641 goto restore_page_0;
2548 2642
2549 ret = _mv88e6xxx_phy_write_indirect(ps, port, reg, val); 2643 ret = mv88e6xxx_mdio_write_indirect(chip, port, reg, val);
2550 restore_page_0: 2644 restore_page_0:
2551 _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0); 2645 mv88e6xxx_mdio_write_indirect(chip, port, 0x16, 0x0);
2552 2646
2553 return ret; 2647 return ret;
2554} 2648}
2555 2649
2556static int _mv88e6xxx_phy_page_read(struct mv88e6xxx_priv_state *ps, 2650static int _mv88e6xxx_mdio_page_read(struct mv88e6xxx_chip *chip,
2557 int port, int page, int reg) 2651 int port, int page, int reg)
2558{ 2652{
2559 int ret; 2653 int ret;
2560 2654
2561 ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page); 2655 ret = mv88e6xxx_mdio_write_indirect(chip, port, 0x16, page);
2562 if (ret < 0) 2656 if (ret < 0)
2563 goto restore_page_0; 2657 goto restore_page_0;
2564 2658
2565 ret = _mv88e6xxx_phy_read_indirect(ps, port, reg); 2659 ret = mv88e6xxx_mdio_read_indirect(chip, port, reg);
2566 restore_page_0: 2660 restore_page_0:
2567 _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0); 2661 mv88e6xxx_mdio_write_indirect(chip, port, 0x16, 0x0);
2568 2662
2569 return ret; 2663 return ret;
2570} 2664}
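
The paged MDIO helpers above follow a strict pattern: select the page through register 0x16, perform the access, and unconditionally restore page 0 — even on error — so unpaged accesses elsewhere keep hitting the registers they expect. A mock round-trip, with mdio_read()/mdio_write() standing in for the indirect accessors:

#include <assert.h>

#define PHY_PAGE_REG 0x16	/* page-select register */

static int cur_page;
static int regs[4][32];	/* mock register file: [page][reg] */

static int mdio_write(int reg, int val)
{
	if (reg == PHY_PAGE_REG)
		cur_page = val;
	else
		regs[cur_page][reg] = val;
	return 0;
}

static int mdio_read(int reg)
{
	return regs[cur_page][reg];
}

/* Same shape as _mv88e6xxx_mdio_page_read() above: select the page,
 * read, and restore page 0 even on failure.
 */
static int mdio_page_read(int page, int reg)
{
	int ret = mdio_write(PHY_PAGE_REG, page);

	if (ret < 0)
		goto restore_page_0;

	ret = mdio_read(reg);
restore_page_0:
	mdio_write(PHY_PAGE_REG, 0x0);
	return ret;
}

int main(void)
{
	regs[1][0] = 0x1234;	/* hypothetical value on page 1, reg 0 */

	assert(mdio_page_read(1, 0) == 0x1234);
	assert(cur_page == 0);	/* page restored after the access */
	return 0;
}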
2571 2665
2572static int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps) 2666static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip)
2573{ 2667{
2574 bool ppu_active = mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE); 2668 bool ppu_active = mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE);
2575 u16 is_reset = (ppu_active ? 0x8800 : 0xc800); 2669 u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
2576 struct gpio_desc *gpiod = ps->reset; 2670 struct gpio_desc *gpiod = chip->reset;
2577 unsigned long timeout; 2671 unsigned long timeout;
2578 int ret; 2672 int ret;
2579 int i; 2673 int i;
2580 2674
2581 /* Set all ports to the disabled state. */ 2675 /* Set all ports to the disabled state. */
2582 for (i = 0; i < ps->info->num_ports; i++) { 2676 for (i = 0; i < chip->info->num_ports; i++) {
2583 ret = _mv88e6xxx_reg_read(ps, REG_PORT(i), PORT_CONTROL); 2677 ret = _mv88e6xxx_reg_read(chip, REG_PORT(i), PORT_CONTROL);
2584 if (ret < 0) 2678 if (ret < 0)
2585 return ret; 2679 return ret;
2586 2680
2587 ret = _mv88e6xxx_reg_write(ps, REG_PORT(i), PORT_CONTROL, 2681 ret = _mv88e6xxx_reg_write(chip, REG_PORT(i), PORT_CONTROL,
2588 ret & 0xfffc); 2682 ret & 0xfffc);
2589 if (ret) 2683 if (ret)
2590 return ret; 2684 return ret;
@@ -2606,16 +2700,16 @@ static int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps)
2606 * through global registers 0x18 and 0x19. 2700 * through global registers 0x18 and 0x19.
2607 */ 2701 */
2608 if (ppu_active) 2702 if (ppu_active)
2609 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc000); 2703 ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc000);
2610 else 2704 else
2611 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc400); 2705 ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc400);
2612 if (ret) 2706 if (ret)
2613 return ret; 2707 return ret;
2614 2708
2615 /* Wait up to one second for reset to complete. */ 2709 /* Wait up to one second for reset to complete. */
2616 timeout = jiffies + 1 * HZ; 2710 timeout = jiffies + 1 * HZ;
2617 while (time_before(jiffies, timeout)) { 2711 while (time_before(jiffies, timeout)) {
2618 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 0x00); 2712 ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, 0x00);
2619 if (ret < 0) 2713 if (ret < 0)
2620 return ret; 2714 return ret;
2621 2715
@@ -2631,49 +2725,49 @@ static int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps)
2631 return ret; 2725 return ret;
2632} 2726}
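
Reset completion is detected by polling the global status register until the is_reset pattern appears (0x8800 with an active PPU, 0xc800 without, per the hunk above), giving up after one second. A user-space analogue of that bounded poll — read_status() fakes the hardware and time() stands in for jiffies:

#include <stdio.h>
#include <time.h>

/* Mock status register: reports "init done" on the third poll. */
static unsigned int read_status(void)
{
	static int calls;

	return ++calls < 3 ? 0x0000 : 0xc800;
}

static int wait_for_reset(unsigned int is_reset)
{
	time_t timeout = time(NULL) + 1;	/* ~1s, like jiffies + 1 * HZ */

	while (time(NULL) <= timeout) {
		unsigned int status = read_status();

		if ((status & is_reset) == is_reset)
			return 0;
		/* the driver sleeps between polls */
	}
	return -1;	/* stands in for -ETIMEDOUT */
}

int main(void)
{
	printf("reset %s\n", wait_for_reset(0xc800) ? "timed out" : "done");
	return 0;
}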
2633 2727
2634static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state *ps) 2728static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_chip *chip)
2635{ 2729{
2636 int ret; 2730 int ret;
2637 2731
2638 ret = _mv88e6xxx_phy_page_read(ps, REG_FIBER_SERDES, PAGE_FIBER_SERDES, 2732 ret = _mv88e6xxx_mdio_page_read(chip, REG_FIBER_SERDES,
2639 MII_BMCR); 2733 PAGE_FIBER_SERDES, MII_BMCR);
2640 if (ret < 0) 2734 if (ret < 0)
2641 return ret; 2735 return ret;
2642 2736
2643 if (ret & BMCR_PDOWN) { 2737 if (ret & BMCR_PDOWN) {
2644 ret &= ~BMCR_PDOWN; 2738 ret &= ~BMCR_PDOWN;
2645 ret = _mv88e6xxx_phy_page_write(ps, REG_FIBER_SERDES, 2739 ret = _mv88e6xxx_mdio_page_write(chip, REG_FIBER_SERDES,
2646 PAGE_FIBER_SERDES, MII_BMCR, 2740 PAGE_FIBER_SERDES, MII_BMCR,
2647 ret); 2741 ret);
2648 } 2742 }
2649 2743
2650 return ret; 2744 return ret;
2651} 2745}
2652 2746
2653static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port) 2747static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
2654{ 2748{
2655 struct dsa_switch *ds = ps->ds; 2749 struct dsa_switch *ds = chip->ds;
2656 int ret; 2750 int ret;
2657 u16 reg; 2751 u16 reg;
2658 2752
2659 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || 2753 if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
2660 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || 2754 mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
2661 mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) || 2755 mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip) ||
2662 mv88e6xxx_6065_family(ps) || mv88e6xxx_6320_family(ps)) { 2756 mv88e6xxx_6065_family(chip) || mv88e6xxx_6320_family(chip)) {
2663 /* MAC Forcing register: don't force link, speed, 2757 /* MAC Forcing register: don't force link, speed,
2664 * duplex or flow control state to any particular 2758 * duplex or flow control state to any particular
2665 * values on physical ports, but force the CPU port 2759 * values on physical ports, but force the CPU port
2666 * and all DSA ports to their maximum bandwidth and 2760 * and all DSA ports to their maximum bandwidth and
2667 * full duplex. 2761 * full duplex.
2668 */ 2762 */
2669 reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL); 2763 reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_PCS_CTRL);
2670 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { 2764 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
2671 reg &= ~PORT_PCS_CTRL_UNFORCED; 2765 reg &= ~PORT_PCS_CTRL_UNFORCED;
2672 reg |= PORT_PCS_CTRL_FORCE_LINK | 2766 reg |= PORT_PCS_CTRL_FORCE_LINK |
2673 PORT_PCS_CTRL_LINK_UP | 2767 PORT_PCS_CTRL_LINK_UP |
2674 PORT_PCS_CTRL_DUPLEX_FULL | 2768 PORT_PCS_CTRL_DUPLEX_FULL |
2675 PORT_PCS_CTRL_FORCE_DUPLEX; 2769 PORT_PCS_CTRL_FORCE_DUPLEX;
2676 if (mv88e6xxx_6065_family(ps)) 2770 if (mv88e6xxx_6065_family(chip))
2677 reg |= PORT_PCS_CTRL_100; 2771 reg |= PORT_PCS_CTRL_100;
2678 else 2772 else
2679 reg |= PORT_PCS_CTRL_1000; 2773 reg |= PORT_PCS_CTRL_1000;
@@ -2681,7 +2775,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
2681 reg |= PORT_PCS_CTRL_UNFORCED; 2775 reg |= PORT_PCS_CTRL_UNFORCED;
2682 } 2776 }
2683 2777
2684 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), 2778 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
2685 PORT_PCS_CTRL, reg); 2779 PORT_PCS_CTRL, reg);
2686 if (ret) 2780 if (ret)
2687 return ret; 2781 return ret;
@@ -2702,41 +2796,46 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
2702 * forwarding of unknown unicasts and multicasts. 2796 * forwarding of unknown unicasts and multicasts.
2703 */ 2797 */
2704 reg = 0; 2798 reg = 0;
2705 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || 2799 if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
2706 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || 2800 mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
2707 mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) || 2801 mv88e6xxx_6095_family(chip) || mv88e6xxx_6065_family(chip) ||
2708 mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps)) 2802 mv88e6xxx_6185_family(chip) || mv88e6xxx_6320_family(chip))
2709 reg = PORT_CONTROL_IGMP_MLD_SNOOP | 2803 reg = PORT_CONTROL_IGMP_MLD_SNOOP |
2710 PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP | 2804 PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
2711 PORT_CONTROL_STATE_FORWARDING; 2805 PORT_CONTROL_STATE_FORWARDING;
2712 if (dsa_is_cpu_port(ds, port)) { 2806 if (dsa_is_cpu_port(ds, port)) {
2713 if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps)) 2807 if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip))
2714 reg |= PORT_CONTROL_DSA_TAG; 2808 reg |= PORT_CONTROL_DSA_TAG;
2715 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || 2809 if (mv88e6xxx_6352_family(chip) ||
2716 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || 2810 mv88e6xxx_6351_family(chip) ||
2717 mv88e6xxx_6320_family(ps)) { 2811 mv88e6xxx_6165_family(chip) ||
2718 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA) 2812 mv88e6xxx_6097_family(chip) ||
2719 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA; 2813 mv88e6xxx_6320_family(chip)) {
2720 else 2814 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA |
2721 reg |= PORT_CONTROL_FRAME_MODE_DSA; 2815 PORT_CONTROL_FORWARD_UNKNOWN |
2722 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2723 PORT_CONTROL_FORWARD_UNKNOWN_MC; 2816 PORT_CONTROL_FORWARD_UNKNOWN_MC;
2724 } 2817 }
2725 2818
2726 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || 2819 if (mv88e6xxx_6352_family(chip) ||
2727 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || 2820 mv88e6xxx_6351_family(chip) ||
2728 mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) || 2821 mv88e6xxx_6165_family(chip) ||
2729 mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps)) { 2822 mv88e6xxx_6097_family(chip) ||
2730 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA) 2823 mv88e6xxx_6095_family(chip) ||
2731 reg |= PORT_CONTROL_EGRESS_ADD_TAG; 2824 mv88e6xxx_6065_family(chip) ||
2825 mv88e6xxx_6185_family(chip) ||
2826 mv88e6xxx_6320_family(chip)) {
2827 reg |= PORT_CONTROL_EGRESS_ADD_TAG;
2732 } 2828 }
2733 } 2829 }
2734 if (dsa_is_dsa_port(ds, port)) { 2830 if (dsa_is_dsa_port(ds, port)) {
2735 if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps)) 2831 if (mv88e6xxx_6095_family(chip) ||
2832 mv88e6xxx_6185_family(chip))
2736 reg |= PORT_CONTROL_DSA_TAG; 2833 reg |= PORT_CONTROL_DSA_TAG;
2737 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || 2834 if (mv88e6xxx_6352_family(chip) ||
2738 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || 2835 mv88e6xxx_6351_family(chip) ||
2739 mv88e6xxx_6320_family(ps)) { 2836 mv88e6xxx_6165_family(chip) ||
2837 mv88e6xxx_6097_family(chip) ||
2838 mv88e6xxx_6320_family(chip)) {
2740 reg |= PORT_CONTROL_FRAME_MODE_DSA; 2839 reg |= PORT_CONTROL_FRAME_MODE_DSA;
2741 } 2840 }
2742 2841
@@ -2745,7 +2844,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
2745 PORT_CONTROL_FORWARD_UNKNOWN_MC; 2844 PORT_CONTROL_FORWARD_UNKNOWN_MC;
2746 } 2845 }
2747 if (reg) { 2846 if (reg) {
2748 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), 2847 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
2749 PORT_CONTROL, reg); 2848 PORT_CONTROL, reg);
2750 if (ret) 2849 if (ret)
2751 return ret; 2850 return ret;
@@ -2754,15 +2853,15 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
2754 /* If this port is connected to a SerDes, make sure the SerDes is not 2853 /* If this port is connected to a SerDes, make sure the SerDes is not
2755 * powered down. 2854 * powered down.
2756 */ 2855 */
2757 if (mv88e6xxx_6352_family(ps)) { 2856 if (mv88e6xxx_6352_family(chip)) {
2758 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS); 2857 ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS);
2759 if (ret < 0) 2858 if (ret < 0)
2760 return ret; 2859 return ret;
2761 ret &= PORT_STATUS_CMODE_MASK; 2860 ret &= PORT_STATUS_CMODE_MASK;
2762 if ((ret == PORT_STATUS_CMODE_100BASE_X) || 2861 if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
2763 (ret == PORT_STATUS_CMODE_1000BASE_X) || 2862 (ret == PORT_STATUS_CMODE_1000BASE_X) ||
2764 (ret == PORT_STATUS_CMODE_SGMII)) { 2863 (ret == PORT_STATUS_CMODE_SGMII)) {
2765 ret = mv88e6xxx_power_on_serdes(ps); 2864 ret = mv88e6xxx_power_on_serdes(chip);
2766 if (ret < 0) 2865 if (ret < 0)
2767 return ret; 2866 return ret;
2768 } 2867 }
@@ -2775,17 +2874,17 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
2775 * copy of all transmitted/received frames on this port to the CPU. 2874 * copy of all transmitted/received frames on this port to the CPU.
2776 */ 2875 */
2777 reg = 0; 2876 reg = 0;
2778 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || 2877 if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
2779 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || 2878 mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
2780 mv88e6xxx_6095_family(ps) || mv88e6xxx_6320_family(ps) || 2879 mv88e6xxx_6095_family(chip) || mv88e6xxx_6320_family(chip) ||
2781 mv88e6xxx_6185_family(ps)) 2880 mv88e6xxx_6185_family(chip))
2782 reg = PORT_CONTROL_2_MAP_DA; 2881 reg = PORT_CONTROL_2_MAP_DA;
2783 2882
2784 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || 2883 if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
2785 mv88e6xxx_6165_family(ps) || mv88e6xxx_6320_family(ps)) 2884 mv88e6xxx_6165_family(chip) || mv88e6xxx_6320_family(chip))
2786 reg |= PORT_CONTROL_2_JUMBO_10240; 2885 reg |= PORT_CONTROL_2_JUMBO_10240;
2787 2886
2788 if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps)) { 2887 if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip)) {
2789 /* Set the upstream port this port should use */ 2888 /* Set the upstream port this port should use */
2790 reg |= dsa_upstream_port(ds); 2889 reg |= dsa_upstream_port(ds);
2791 /* enable forwarding of unknown multicast addresses to 2890 /* enable forwarding of unknown multicast addresses to
@@ -2798,7 +2897,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
2798 reg |= PORT_CONTROL_2_8021Q_DISABLED; 2897 reg |= PORT_CONTROL_2_8021Q_DISABLED;
2799 2898
2800 if (reg) { 2899 if (reg) {
2801 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), 2900 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
2802 PORT_CONTROL_2, reg); 2901 PORT_CONTROL_2, reg);
2803 if (ret) 2902 if (ret)
2804 return ret; 2903 return ret;
@@ -2814,24 +2913,25 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
2814 if (dsa_is_cpu_port(ds, port)) 2913 if (dsa_is_cpu_port(ds, port))
2815 reg = 0; 2914 reg = 0;
2816 2915
2817 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_ASSOC_VECTOR, reg); 2916 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_ASSOC_VECTOR,
2917 reg);
2818 if (ret) 2918 if (ret)
2819 return ret; 2919 return ret;
2820 2920
2821 /* Egress rate control 2: disable egress rate control. */ 2921 /* Egress rate control 2: disable egress rate control. */
2822 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_RATE_CONTROL_2, 2922 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_RATE_CONTROL_2,
2823 0x0000); 2923 0x0000);
2824 if (ret) 2924 if (ret)
2825 return ret; 2925 return ret;
2826 2926
2827 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || 2927 if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
2828 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || 2928 mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
2829 mv88e6xxx_6320_family(ps)) { 2929 mv88e6xxx_6320_family(chip)) {
2830 /* Do not limit the period of time that this port can 2930 /* Do not limit the period of time that this port can
2831 * be paused for by the remote end or the period of 2931 * be paused for by the remote end or the period of
2832 * time that this port can pause the remote end. 2932 * time that this port can pause the remote end.
2833 */ 2933 */
2834 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), 2934 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
2835 PORT_PAUSE_CTRL, 0x0000); 2935 PORT_PAUSE_CTRL, 0x0000);
2836 if (ret) 2936 if (ret)
2837 return ret; 2937 return ret;
@@ -2840,12 +2940,12 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
2840 * address database entries that this port is allowed 2940 * address database entries that this port is allowed
2841 * to use. 2941 * to use.
2842 */ 2942 */
2843 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), 2943 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
2844 PORT_ATU_CONTROL, 0x0000); 2944 PORT_ATU_CONTROL, 0x0000);
2845 /* Priority Override: disable DA, SA and VTU priority 2945 /* Priority Override: disable DA, SA and VTU priority
2846 * override. 2946 * override.
2847 */ 2947 */
2848 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), 2948 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
2849 PORT_PRI_OVERRIDE, 0x0000); 2949 PORT_PRI_OVERRIDE, 0x0000);
2850 if (ret) 2950 if (ret)
2851 return ret; 2951 return ret;
@@ -2853,14 +2953,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
2853 /* Port Ethertype: use the Ethertype DSA Ethertype 2953 /* Port Ethertype: use the Ethertype DSA Ethertype
2854 * value. 2954 * value.
2855 */ 2955 */
2856 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), 2956 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
2857 PORT_ETH_TYPE, ETH_P_EDSA); 2957 PORT_ETH_TYPE, ETH_P_EDSA);
2858 if (ret) 2958 if (ret)
2859 return ret; 2959 return ret;
2860 /* Tag Remap: use an identity 802.1p prio -> switch 2960 /* Tag Remap: use an identity 802.1p prio -> switch
2861 * prio mapping. 2961 * prio mapping.
2862 */ 2962 */
2863 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), 2963 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
2864 PORT_TAG_REGMAP_0123, 0x3210); 2964 PORT_TAG_REGMAP_0123, 0x3210);
2865 if (ret) 2965 if (ret)
2866 return ret; 2966 return ret;
@@ -2868,18 +2968,18 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
2868 /* Tag Remap 2: use an identity 802.1p prio -> switch 2968 /* Tag Remap 2: use an identity 802.1p prio -> switch
2869 * prio mapping. 2969 * prio mapping.
2870 */ 2970 */
2871 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), 2971 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
2872 PORT_TAG_REGMAP_4567, 0x7654); 2972 PORT_TAG_REGMAP_4567, 0x7654);
2873 if (ret) 2973 if (ret)
2874 return ret; 2974 return ret;
2875 } 2975 }
2876 2976
2877 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || 2977 if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
2878 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || 2978 mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
2879 mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) || 2979 mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip) ||
2880 mv88e6xxx_6320_family(ps)) { 2980 mv88e6xxx_6320_family(chip)) {
2881 /* Rate Control: disable ingress rate limiting. */ 2981 /* Rate Control: disable ingress rate limiting. */
2882 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), 2982 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
2883 PORT_RATE_CONTROL, 0x0001); 2983 PORT_RATE_CONTROL, 0x0001);
2884 if (ret) 2984 if (ret)
2885 return ret; 2985 return ret;
@@ -2888,7 +2988,8 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
2888 /* Port Control 1: disable trunking, disable sending 2988 /* Port Control 1: disable trunking, disable sending
2889 * learning messages to this port. 2989 * learning messages to this port.
2890 */ 2990 */
2891 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1, 0x0000); 2991 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_1,
2992 0x0000);
2892 if (ret) 2993 if (ret)
2893 return ret; 2994 return ret;
2894 2995
@@ -2896,18 +2997,18 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
2896 * database, and allow bidirectional communication between the 2997 * database, and allow bidirectional communication between the
2897 * CPU and DSA port(s), and the other ports. 2998 * CPU and DSA port(s), and the other ports.
2898 */ 2999 */
2899 ret = _mv88e6xxx_port_fid_set(ps, port, 0); 3000 ret = _mv88e6xxx_port_fid_set(chip, port, 0);
2900 if (ret) 3001 if (ret)
2901 return ret; 3002 return ret;
2902 3003
2903 ret = _mv88e6xxx_port_based_vlan_map(ps, port); 3004 ret = _mv88e6xxx_port_based_vlan_map(chip, port);
2904 if (ret) 3005 if (ret)
2905 return ret; 3006 return ret;
2906 3007
2907 /* Default VLAN ID and priority: don't set a default VLAN 3008 /* Default VLAN ID and priority: don't set a default VLAN
2908 * ID, and set the default packet priority to zero. 3009 * ID, and set the default packet priority to zero.
2909 */ 3010 */
2910 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_DEFAULT_VLAN, 3011 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_DEFAULT_VLAN,
2911 0x0000); 3012 0x0000);
2912 if (ret) 3013 if (ret)
2913 return ret; 3014 return ret;
@@ -2915,9 +3016,9 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
2915 return 0; 3016 return 0;
2916} 3017}
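
The identity tag remap written in the port setup above packs one nibble per 802.1p priority: 0x3210 covers priorities 0..3 and 0x7654 covers 4..7, so each priority maps to itself. A minimal decode sketch (the helper name is hypothetical):

static u8 example_remap_prio(u16 regmap_0123, u16 regmap_4567, u8 prio)
{
	/* Pick the register half, then extract the nibble for 'prio';
	 * with 0x3210/0x7654 programmed, the result equals 'prio'.
	 */
	u16 reg = (prio < 4) ? regmap_0123 : regmap_4567;

	return (reg >> ((prio & 3) * 4)) & 0xf;
}
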
2917 3018
2918static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) 3019static int mv88e6xxx_setup_global(struct mv88e6xxx_chip *chip)
2919{ 3020{
2920 struct dsa_switch *ds = ps->ds; 3021 struct dsa_switch *ds = chip->ds;
2921 u32 upstream_port = dsa_upstream_port(ds); 3022 u32 upstream_port = dsa_upstream_port(ds);
2922 u16 reg; 3023 u16 reg;
2923 int err; 3024 int err;
@@ -2927,11 +3028,11 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
2927 * and mask all interrupt sources. 3028 * and mask all interrupt sources.
2928 */ 3029 */
2929 reg = 0; 3030 reg = 0;
2930 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU) || 3031 if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU) ||
2931 mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE)) 3032 mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE))
2932 reg |= GLOBAL_CONTROL_PPU_ENABLE; 3033 reg |= GLOBAL_CONTROL_PPU_ENABLE;
2933 3034
2934 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, reg); 3035 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL, reg);
2935 if (err) 3036 if (err)
2936 return err; 3037 return err;
2937 3038
@@ -2941,12 +3042,13 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
2941 reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT | 3042 reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
2942 upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT | 3043 upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
2943 upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT; 3044 upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
2944 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); 3045 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_MONITOR_CONTROL,
3046 reg);
2945 if (err) 3047 if (err)
2946 return err; 3048 return err;
2947 3049
2948 /* Disable remote management, and set the switch's DSA device number. */ 3050 /* Disable remote management, and set the switch's DSA device number. */
2949 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL_2, 3051 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL_2,
2950 GLOBAL_CONTROL_2_MULTIPLE_CASCADE | 3052 GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
2951 (ds->index & 0x1f)); 3053 (ds->index & 0x1f));
2952 if (err) 3054 if (err)
@@ -2956,46 +3058,47 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
2956 * enable address learn messages to be sent to all message 3058 * enable address learn messages to be sent to all message
2957 * ports. 3059 * ports.
2958 */ 3060 */
2959 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL, 3061 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL,
2960 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL); 3062 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
2961 if (err) 3063 if (err)
2962 return err; 3064 return err;
2963 3065
2964 /* Configure the IP ToS mapping registers. */ 3066 /* Configure the IP ToS mapping registers. */
2965 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000); 3067 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
2966 if (err) 3068 if (err)
2967 return err; 3069 return err;
2968 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000); 3070 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
2969 if (err) 3071 if (err)
2970 return err; 3072 return err;
2971 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555); 3073 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
2972 if (err) 3074 if (err)
2973 return err; 3075 return err;
2974 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555); 3076 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
2975 if (err) 3077 if (err)
2976 return err; 3078 return err;
2977 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa); 3079 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
2978 if (err) 3080 if (err)
2979 return err; 3081 return err;
2980 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa); 3082 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
2981 if (err) 3083 if (err)
2982 return err; 3084 return err;
2983 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff); 3085 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
2984 if (err) 3086 if (err)
2985 return err; 3087 return err;
2986 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff); 3088 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
2987 if (err) 3089 if (err)
2988 return err; 3090 return err;
2989 3091
2990 /* Configure the IEEE 802.1p priority mapping register. */ 3092 /* Configure the IEEE 802.1p priority mapping register. */
2991 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41); 3093 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
2992 if (err) 3094 if (err)
2993 return err; 3095 return err;
2994 3096
2995 /* Send all frames with destination addresses matching 3097 /* Send all frames with destination addresses matching
2996 * 01:80:c2:00:00:0x to the CPU port. 3098 * 01:80:c2:00:00:0x to the CPU port.
2997 */ 3099 */
2998 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff); 3100 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X,
3101 0xffff);
2999 if (err) 3102 if (err)
3000 return err; 3103 return err;
3001 3104
@@ -3004,7 +3107,7 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
3004 * highest, and send all special multicast frames to the CPU 3107 * highest, and send all special multicast frames to the CPU
3005 * port at the highest priority. 3108 * port at the highest priority.
3006 */ 3109 */
3007 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT, 3110 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
3008 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 | 3111 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
3009 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI); 3112 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
3010 if (err) 3113 if (err)
@@ -3014,12 +3117,11 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
3014 for (i = 0; i < 32; i++) { 3117 for (i = 0; i < 32; i++) {
3015 int nexthop = 0x1f; 3118 int nexthop = 0x1f;
3016 3119
3017 if (ps->ds->cd->rtable && 3120 if (i != ds->index && i < DSA_MAX_SWITCHES)
3018 i != ps->ds->index && i < ps->ds->dst->pd->nr_chips) 3121 nexthop = ds->rtable[i] & 0x1f;
3019 nexthop = ps->ds->cd->rtable[i] & 0x1f;
3020 3122
3021 err = _mv88e6xxx_reg_write( 3123 err = _mv88e6xxx_reg_write(
3022 ps, REG_GLOBAL2, 3124 chip, REG_GLOBAL2,
3023 GLOBAL2_DEVICE_MAPPING, 3125 GLOBAL2_DEVICE_MAPPING,
3024 GLOBAL2_DEVICE_MAPPING_UPDATE | 3126 GLOBAL2_DEVICE_MAPPING_UPDATE |
3025 (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) | nexthop); 3127 (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) | nexthop);
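
Each pass of the device mapping loop above latches one entry: the Update bit, the target switch index, and a five-bit nexthop port (left at 0x1f when the target is not in the routing table). A sketch of the word being assembled, reusing the masks from the write (helper name hypothetical):

static u16 example_device_mapping_word(int target, int nexthop)
{
	/* GLOBAL2_DEVICE_MAPPING_UPDATE latches the entry; the low
	 * five bits carry the local port leading to 'target'.
	 */
	return GLOBAL2_DEVICE_MAPPING_UPDATE |
	       (target << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
	       (nexthop & 0x1f);
}
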
@@ -3029,10 +3131,11 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
3029 3131
3030 /* Clear all trunk masks. */ 3132 /* Clear all trunk masks. */
3031 for (i = 0; i < 8; i++) { 3133 for (i = 0; i < 8; i++) {
3032 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_TRUNK_MASK, 3134 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2,
3135 GLOBAL2_TRUNK_MASK,
3033 0x8000 | 3136 0x8000 |
3034 (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) | 3137 (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
3035 ((1 << ps->info->num_ports) - 1)); 3138 ((1 << chip->info->num_ports) - 1));
3036 if (err) 3139 if (err)
3037 return err; 3140 return err;
3038 } 3141 }
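
In the trunk mask reset above, ((1 << num_ports) - 1) builds an all-ports bitmap; on a 7-port chip that is 0x7f. A worked sketch of the register word (helper name hypothetical):

static u16 example_trunk_mask_word(int num, unsigned int num_ports)
{
	/* 0x8000 is the Update bit; e.g. num_ports = 7 gives the
	 * port bitmap 0x7f, so every port may join the trunk.
	 */
	return 0x8000 | (num << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
	       ((1 << num_ports) - 1);
}
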
@@ -3040,7 +3143,7 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
3040 /* Clear all trunk mappings. */ 3143 /* Clear all trunk mappings. */
3041 for (i = 0; i < 16; i++) { 3144 for (i = 0; i < 16; i++) {
3042 err = _mv88e6xxx_reg_write( 3145 err = _mv88e6xxx_reg_write(
3043 ps, REG_GLOBAL2, 3146 chip, REG_GLOBAL2,
3044 GLOBAL2_TRUNK_MAPPING, 3147 GLOBAL2_TRUNK_MAPPING,
3045 GLOBAL2_TRUNK_MAPPING_UPDATE | 3148 GLOBAL2_TRUNK_MAPPING_UPDATE |
3046 (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT)); 3149 (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
@@ -3048,13 +3151,13 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
3048 return err; 3151 return err;
3049 } 3152 }
3050 3153
3051 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || 3154 if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
3052 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || 3155 mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
3053 mv88e6xxx_6320_family(ps)) { 3156 mv88e6xxx_6320_family(chip)) {
3054 /* Send all frames with destination addresses matching 3157 /* Send all frames with destination addresses matching
3055 * 01:80:c2:00:00:2x to the CPU port. 3158 * 01:80:c2:00:00:2x to the CPU port.
3056 */ 3159 */
3057 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, 3160 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2,
3058 GLOBAL2_MGMT_EN_2X, 0xffff); 3161 GLOBAL2_MGMT_EN_2X, 0xffff);
3059 if (err) 3162 if (err)
3060 return err; 3163 return err;
@@ -3062,14 +3165,14 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
3062 /* Initialise cross-chip port VLAN table to reset 3165 /* Initialise cross-chip port VLAN table to reset
3063 * defaults. 3166 * defaults.
3064 */ 3167 */
3065 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, 3168 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2,
3066 GLOBAL2_PVT_ADDR, 0x9000); 3169 GLOBAL2_PVT_ADDR, 0x9000);
3067 if (err) 3170 if (err)
3068 return err; 3171 return err;
3069 3172
3070 /* Clear the priority override table. */ 3173 /* Clear the priority override table. */
3071 for (i = 0; i < 16; i++) { 3174 for (i = 0; i < 16; i++) {
3072 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, 3175 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2,
3073 GLOBAL2_PRIO_OVERRIDE, 3176 GLOBAL2_PRIO_OVERRIDE,
3074 0x8000 | (i << 8)); 3177 0x8000 | (i << 8));
3075 if (err) 3178 if (err)
@@ -3077,16 +3180,16 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
3077 } 3180 }
3078 } 3181 }
3079 3182
3080 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || 3183 if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
3081 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || 3184 mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
3082 mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) || 3185 mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip) ||
3083 mv88e6xxx_6320_family(ps)) { 3186 mv88e6xxx_6320_family(chip)) {
3084 /* Disable ingress rate limiting by resetting all 3187 /* Disable ingress rate limiting by resetting all
3085 * ingress rate limit registers to their initial 3188 * ingress rate limit registers to their initial
3086 * state. 3189 * state.
3087 */ 3190 */
3088 for (i = 0; i < ps->info->num_ports; i++) { 3191 for (i = 0; i < chip->info->num_ports; i++) {
3089 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, 3192 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2,
3090 GLOBAL2_INGRESS_OP, 3193 GLOBAL2_INGRESS_OP,
3091 0x9000 | (i << 8)); 3194 0x9000 | (i << 8));
3092 if (err) 3195 if (err)
@@ -3095,23 +3198,23 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
3095 } 3198 }
3096 3199
3097 /* Clear the statistics counters for all ports */ 3200 /* Clear the statistics counters for all ports */
3098 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP, 3201 err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP,
3099 GLOBAL_STATS_OP_FLUSH_ALL); 3202 GLOBAL_STATS_OP_FLUSH_ALL);
3100 if (err) 3203 if (err)
3101 return err; 3204 return err;
3102 3205
3103 /* Wait for the flush to complete. */ 3206 /* Wait for the flush to complete. */
3104 err = _mv88e6xxx_stats_wait(ps); 3207 err = _mv88e6xxx_stats_wait(chip);
3105 if (err) 3208 if (err)
3106 return err; 3209 return err;
3107 3210
3108 /* Clear all ATU entries */ 3211 /* Clear all ATU entries */
3109 err = _mv88e6xxx_atu_flush(ps, 0, true); 3212 err = _mv88e6xxx_atu_flush(chip, 0, true);
3110 if (err) 3213 if (err)
3111 return err; 3214 return err;
3112 3215
3113 /* Clear all the VTU and STU entries */ 3216 /* Clear all the VTU and STU entries */
3114 err = _mv88e6xxx_vtu_stu_flush(ps); 3217 err = _mv88e6xxx_vtu_stu_flush(chip);
3115 if (err < 0) 3218 if (err < 0)
3116 return err; 3219 return err;
3117 3220
@@ -3120,174 +3223,232 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
3120 3223
3121static int mv88e6xxx_setup(struct dsa_switch *ds) 3224static int mv88e6xxx_setup(struct dsa_switch *ds)
3122{ 3225{
3123 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 3226 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
3124 int err; 3227 int err;
3125 int i; 3228 int i;
3126 3229
3127 ps->ds = ds; 3230 chip->ds = ds;
3231 ds->slave_mii_bus = chip->mdio_bus;
3128 3232
3129 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM)) 3233 if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEPROM))
3130 mutex_init(&ps->eeprom_mutex); 3234 mutex_init(&chip->eeprom_mutex);
3131 3235
3132 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU)) 3236 mutex_lock(&chip->reg_lock);
3133 mv88e6xxx_ppu_state_init(ps);
3134 3237
3135 mutex_lock(&ps->smi_mutex); 3238 err = mv88e6xxx_switch_reset(chip);
3136
3137 err = mv88e6xxx_switch_reset(ps);
3138 if (err) 3239 if (err)
3139 goto unlock; 3240 goto unlock;
3140 3241
3141 err = mv88e6xxx_setup_global(ps); 3242 err = mv88e6xxx_setup_global(chip);
3142 if (err) 3243 if (err)
3143 goto unlock; 3244 goto unlock;
3144 3245
3145 for (i = 0; i < ps->info->num_ports; i++) { 3246 for (i = 0; i < chip->info->num_ports; i++) {
3146 err = mv88e6xxx_setup_port(ps, i); 3247 err = mv88e6xxx_setup_port(chip, i);
3147 if (err) 3248 if (err)
3148 goto unlock; 3249 goto unlock;
3149 } 3250 }
3150 3251
3151unlock: 3252unlock:
3152 mutex_unlock(&ps->smi_mutex); 3253 mutex_unlock(&chip->reg_lock);
3153 3254
3154 return err; 3255 return err;
3155} 3256}
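
mv88e6xxx_setup() also shows the locking convention this rework settles on: the single reg_lock replaces smi_mutex, is taken once at the DSA entry point, and the underscore-prefixed helpers assume it is already held. A minimal sketch of the pattern with a hypothetical entry point:

static int example_entry_point(struct mv88e6xxx_chip *chip, int port)
{
	int err;

	/* Take the one register lock at the boundary... */
	mutex_lock(&chip->reg_lock);
	/* ...call only _mv88e6xxx_*() helpers while holding it... */
	err = _mv88e6xxx_port_based_vlan_map(chip, port);
	/* ...and release it on every exit path. */
	mutex_unlock(&chip->reg_lock);

	return err;
}
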
3156 3257
3157int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg) 3258static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page,
3259 int reg)
3158{ 3260{
3159 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 3261 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
3160 int ret; 3262 int ret;
3161 3263
3162 mutex_lock(&ps->smi_mutex); 3264 mutex_lock(&chip->reg_lock);
3163 ret = _mv88e6xxx_phy_page_read(ps, port, page, reg); 3265 ret = _mv88e6xxx_mdio_page_read(chip, port, page, reg);
3164 mutex_unlock(&ps->smi_mutex); 3266 mutex_unlock(&chip->reg_lock);
3165 3267
3166 return ret; 3268 return ret;
3167} 3269}
3168 3270
3169int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page, 3271static int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page,
3170 int reg, int val) 3272 int reg, int val)
3171{ 3273{
3172 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 3274 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
3173 int ret; 3275 int ret;
3174 3276
3175 mutex_lock(&ps->smi_mutex); 3277 mutex_lock(&chip->reg_lock);
3176 ret = _mv88e6xxx_phy_page_write(ps, port, page, reg, val); 3278 ret = _mv88e6xxx_mdio_page_write(chip, port, page, reg, val);
3177 mutex_unlock(&ps->smi_mutex); 3279 mutex_unlock(&chip->reg_lock);
3178 3280
3179 return ret; 3281 return ret;
3180} 3282}
3181 3283
3182static int mv88e6xxx_port_to_phy_addr(struct mv88e6xxx_priv_state *ps, 3284static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port)
3183 int port)
3184{ 3285{
3185 if (port >= 0 && port < ps->info->num_ports) 3286 if (port >= 0 && port < chip->info->num_ports)
3186 return port; 3287 return port;
3187 return -EINVAL; 3288 return -EINVAL;
3188} 3289}
3189 3290
3190static int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum) 3291static int mv88e6xxx_mdio_read(struct mii_bus *bus, int port, int regnum)
3191{ 3292{
3192 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 3293 struct mv88e6xxx_chip *chip = bus->priv;
3193 int addr = mv88e6xxx_port_to_phy_addr(ps, port); 3294 int addr = mv88e6xxx_port_to_mdio_addr(chip, port);
3194 int ret; 3295 int ret;
3195 3296
3196 if (addr < 0) 3297 if (addr < 0)
3197 return 0xffff; 3298 return 0xffff;
3198 3299
3199 mutex_lock(&ps->smi_mutex); 3300 mutex_lock(&chip->reg_lock);
3200 3301
3201 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU)) 3302 if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
3202 ret = mv88e6xxx_phy_read_ppu(ps, addr, regnum); 3303 ret = mv88e6xxx_mdio_read_ppu(chip, addr, regnum);
3203 else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY)) 3304 else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SMI_PHY))
3204 ret = _mv88e6xxx_phy_read_indirect(ps, addr, regnum); 3305 ret = mv88e6xxx_mdio_read_indirect(chip, addr, regnum);
3205 else 3306 else
3206 ret = _mv88e6xxx_phy_read(ps, addr, regnum); 3307 ret = mv88e6xxx_mdio_read_direct(chip, addr, regnum);
3207 3308
3208 mutex_unlock(&ps->smi_mutex); 3309 mutex_unlock(&chip->reg_lock);
3209 return ret; 3310 return ret;
3210} 3311}
3211 3312
3212static int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, 3313static int mv88e6xxx_mdio_write(struct mii_bus *bus, int port, int regnum,
3213 u16 val) 3314 u16 val)
3214{ 3315{
3215 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 3316 struct mv88e6xxx_chip *chip = bus->priv;
3216 int addr = mv88e6xxx_port_to_phy_addr(ps, port); 3317 int addr = mv88e6xxx_port_to_mdio_addr(chip, port);
3217 int ret; 3318 int ret;
3218 3319
3219 if (addr < 0) 3320 if (addr < 0)
3220 return 0xffff; 3321 return 0xffff;
3221 3322
3222 mutex_lock(&ps->smi_mutex); 3323 mutex_lock(&chip->reg_lock);
3223 3324
3224 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU)) 3325 if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
3225 ret = mv88e6xxx_phy_write_ppu(ps, addr, regnum, val); 3326 ret = mv88e6xxx_mdio_write_ppu(chip, addr, regnum, val);
3226 else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY)) 3327 else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SMI_PHY))
3227 ret = _mv88e6xxx_phy_write_indirect(ps, addr, regnum, val); 3328 ret = mv88e6xxx_mdio_write_indirect(chip, addr, regnum, val);
3228 else 3329 else
3229 ret = _mv88e6xxx_phy_write(ps, addr, regnum, val); 3330 ret = mv88e6xxx_mdio_write_direct(chip, addr, regnum, val);
3230 3331
3231 mutex_unlock(&ps->smi_mutex); 3332 mutex_unlock(&chip->reg_lock);
3232 return ret; 3333 return ret;
3233} 3334}
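
With bus->priv carrying the chip and the two hooks above wired into the mii_bus, PHYs behind the switch become reachable through stock phylib calls. A usage sketch (helper name hypothetical; mdiobus_read(), MII_BMSR and BMSR_LSTATUS are standard kernel symbols):

#include <linux/mii.h>
#include <linux/phy.h>

static int example_read_link_status(struct mii_bus *bus, int port)
{
	/* Dispatches into mv88e6xxx_mdio_read() via bus->read. */
	int bmsr = mdiobus_read(bus, port, MII_BMSR);

	if (bmsr < 0)
		return bmsr;

	return !!(bmsr & BMSR_LSTATUS);
}
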
3234 3335
3336static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
3337 struct device_node *np)
3338{
3339 static int index;
3340 struct mii_bus *bus;
3341 int err;
3342
3343 if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
3344 mv88e6xxx_ppu_state_init(chip);
3345
3346 if (np)
3347 chip->mdio_np = of_get_child_by_name(np, "mdio");
3348
3349 bus = devm_mdiobus_alloc(chip->dev);
3350 if (!bus)
3351 return -ENOMEM;
3352
3353 bus->priv = (void *)chip;
3354 if (np) {
3355 bus->name = np->full_name;
3356 snprintf(bus->id, MII_BUS_ID_SIZE, "%s", np->full_name);
3357 } else {
3358 bus->name = "mv88e6xxx SMI";
3359 snprintf(bus->id, MII_BUS_ID_SIZE, "mv88e6xxx-%d", index++);
3360 }
3361
3362 bus->read = mv88e6xxx_mdio_read;
3363 bus->write = mv88e6xxx_mdio_write;
3364 bus->parent = chip->dev;
3365
3366 if (chip->mdio_np)
3367 err = of_mdiobus_register(bus, chip->mdio_np);
3368 else
3369 err = mdiobus_register(bus);
3370 if (err) {
3371 dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
3372 goto out;
3373 }
3374 chip->mdio_bus = bus;
3375
3376 return 0;
3377
3378out:
3379 if (chip->mdio_np)
3380 of_node_put(chip->mdio_np);
3381
3382 return err;
3383}
3384
3385static void mv88e6xxx_mdio_unregister(struct mv88e6xxx_chip *chip)
3386{
3388 struct mii_bus *bus = chip->mdio_bus;
3389
3390 mdiobus_unregister(bus);
3391
3392 if (chip->mdio_np)
3393 of_node_put(chip->mdio_np);
3394}
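
Because the bus comes from devm_mdiobus_alloc(), teardown only needs to undo mdiobus_register() and drop the of_node reference taken with of_get_child_by_name(); the allocation itself dies with the device. A sketch of the pairing from a hypothetical caller:

static int example_mdio_lifecycle(struct mv88e6xxx_chip *chip,
				  struct device_node *np)
{
	int err;

	err = mv88e6xxx_mdio_register(chip, np);
	if (err)
		return err;

	/* chip->mdio_bus is now live for phylib users. */

	mv88e6xxx_mdio_unregister(chip);
	return 0;
}
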
3395
3235#ifdef CONFIG_NET_DSA_HWMON 3396#ifdef CONFIG_NET_DSA_HWMON
3236 3397
3237static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp) 3398static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
3238{ 3399{
3239 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 3400 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
3240 int ret; 3401 int ret;
3241 int val; 3402 int val;
3242 3403
3243 *temp = 0; 3404 *temp = 0;
3244 3405
3245 mutex_lock(&ps->smi_mutex); 3406 mutex_lock(&chip->reg_lock);
3246 3407
3247 ret = _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x6); 3408 ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x16, 0x6);
3248 if (ret < 0) 3409 if (ret < 0)
3249 goto error; 3410 goto error;
3250 3411
3251 /* Enable temperature sensor */ 3412 /* Enable temperature sensor */
3252 ret = _mv88e6xxx_phy_read(ps, 0x0, 0x1a); 3413 ret = mv88e6xxx_mdio_read_direct(chip, 0x0, 0x1a);
3253 if (ret < 0) 3414 if (ret < 0)
3254 goto error; 3415 goto error;
3255 3416
3256 ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret | (1 << 5)); 3417 ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x1a, ret | (1 << 5));
3257 if (ret < 0) 3418 if (ret < 0)
3258 goto error; 3419 goto error;
3259 3420
3260 /* Wait for temperature to stabilize */ 3421 /* Wait for temperature to stabilize */
3261 usleep_range(10000, 12000); 3422 usleep_range(10000, 12000);
3262 3423
3263 val = _mv88e6xxx_phy_read(ps, 0x0, 0x1a); 3424 val = mv88e6xxx_mdio_read_direct(chip, 0x0, 0x1a);
3264 if (val < 0) { 3425 if (val < 0) {
3265 ret = val; 3426 ret = val;
3266 goto error; 3427 goto error;
3267 } 3428 }
3268 3429
3269 /* Disable temperature sensor */ 3430 /* Disable temperature sensor */
3270 ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret & ~(1 << 5)); 3431 ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x1a, ret & ~(1 << 5));
3271 if (ret < 0) 3432 if (ret < 0)
3272 goto error; 3433 goto error;
3273 3434
3274 *temp = ((val & 0x1f) - 5) * 5; 3435 *temp = ((val & 0x1f) - 5) * 5;
3275 3436
3276error: 3437error:
3277 _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x0); 3438 mv88e6xxx_mdio_write_direct(chip, 0x0, 0x16, 0x0);
3278 mutex_unlock(&ps->smi_mutex); 3439 mutex_unlock(&chip->reg_lock);
3279 return ret; 3440 return ret;
3280} 3441}
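
The conversion at the end of mv88e61xx_get_temp() reads as: the sensor counts in 5 degree steps with a -25 degree offset. A worked sketch of the decode (helper name hypothetical):

static int example_decode_temp(int raw)
{
	/* e.g. raw 0x15 (21) -> (21 - 5) * 5 = 80 degrees C */
	return ((raw & 0x1f) - 5) * 5;
}
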
3281 3442
3282static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp) 3443static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
3283{ 3444{
3284 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 3445 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
3285 int phy = mv88e6xxx_6320_family(ps) ? 3 : 0; 3446 int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
3286 int ret; 3447 int ret;
3287 3448
3288 *temp = 0; 3449 *temp = 0;
3289 3450
3290 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27); 3451 ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 27);
3291 if (ret < 0) 3452 if (ret < 0)
3292 return ret; 3453 return ret;
3293 3454
@@ -3298,12 +3459,12 @@ static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
3298 3459
3299static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) 3460static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
3300{ 3461{
3301 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 3462 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
3302 3463
3303 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP)) 3464 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP))
3304 return -EOPNOTSUPP; 3465 return -EOPNOTSUPP;
3305 3466
3306 if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps)) 3467 if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip))
3307 return mv88e63xx_get_temp(ds, temp); 3468 return mv88e63xx_get_temp(ds, temp);
3308 3469
3309 return mv88e61xx_get_temp(ds, temp); 3470 return mv88e61xx_get_temp(ds, temp);
@@ -3311,16 +3472,16 @@ static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
3311 3472
3312static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) 3473static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
3313{ 3474{
3314 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 3475 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
3315 int phy = mv88e6xxx_6320_family(ps) ? 3 : 0; 3476 int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
3316 int ret; 3477 int ret;
3317 3478
3318 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT)) 3479 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
3319 return -EOPNOTSUPP; 3480 return -EOPNOTSUPP;
3320 3481
3321 *temp = 0; 3482 *temp = 0;
3322 3483
3323 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26); 3484 ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26);
3324 if (ret < 0) 3485 if (ret < 0)
3325 return ret; 3486 return ret;
3326 3487
@@ -3331,33 +3492,33 @@ static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
3331 3492
3332static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp) 3493static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
3333{ 3494{
3334 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 3495 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
3335 int phy = mv88e6xxx_6320_family(ps) ? 3 : 0; 3496 int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
3336 int ret; 3497 int ret;
3337 3498
3338 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT)) 3499 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
3339 return -EOPNOTSUPP; 3500 return -EOPNOTSUPP;
3340 3501
3341 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26); 3502 ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26);
3342 if (ret < 0) 3503 if (ret < 0)
3343 return ret; 3504 return ret;
3344 temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); 3505 temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
3345 return mv88e6xxx_phy_page_write(ds, phy, 6, 26, 3506 return mv88e6xxx_mdio_page_write(ds, phy, 6, 26,
3346 (ret & 0xe0ff) | (temp << 8)); 3507 (ret & 0xe0ff) | (temp << 8));
3347} 3508}
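
Setting the limit inverts that scale and parks the value in bits 12:8 of page-6 register 26; the 0xe0ff mask clears exactly that field before the OR. A worked sketch (helper name hypothetical):

static u16 example_encode_temp_limit(u16 old_reg, int limit_celsius)
{
	/* e.g. 80 degrees C -> DIV_ROUND_CLOSEST(80, 5) + 5 = 21 (0x15) */
	int raw = clamp_val(DIV_ROUND_CLOSEST(limit_celsius, 5) + 5,
			    0, 0x1f);

	return (old_reg & 0xe0ff) | (raw << 8);
}
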
3348 3509
3349static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) 3510static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
3350{ 3511{
3351 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 3512 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
3352 int phy = mv88e6xxx_6320_family(ps) ? 3 : 0; 3513 int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
3353 int ret; 3514 int ret;
3354 3515
3355 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT)) 3516 if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
3356 return -EOPNOTSUPP; 3517 return -EOPNOTSUPP;
3357 3518
3358 *alarm = false; 3519 *alarm = false;
3359 3520
3360 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26); 3521 ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26);
3361 if (ret < 0) 3522 if (ret < 0)
3362 return ret; 3523 return ret;
3363 3524
@@ -3374,6 +3535,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3374 .name = "Marvell 88E6085", 3535 .name = "Marvell 88E6085",
3375 .num_databases = 4096, 3536 .num_databases = 4096,
3376 .num_ports = 10, 3537 .num_ports = 10,
3538 .port_base_addr = 0x10,
3377 .flags = MV88E6XXX_FLAGS_FAMILY_6097, 3539 .flags = MV88E6XXX_FLAGS_FAMILY_6097,
3378 }, 3540 },
3379 3541
@@ -3383,6 +3545,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3383 .name = "Marvell 88E6095/88E6095F", 3545 .name = "Marvell 88E6095/88E6095F",
3384 .num_databases = 256, 3546 .num_databases = 256,
3385 .num_ports = 11, 3547 .num_ports = 11,
3548 .port_base_addr = 0x10,
3386 .flags = MV88E6XXX_FLAGS_FAMILY_6095, 3549 .flags = MV88E6XXX_FLAGS_FAMILY_6095,
3387 }, 3550 },
3388 3551
@@ -3392,6 +3555,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3392 .name = "Marvell 88E6123", 3555 .name = "Marvell 88E6123",
3393 .num_databases = 4096, 3556 .num_databases = 4096,
3394 .num_ports = 3, 3557 .num_ports = 3,
3558 .port_base_addr = 0x10,
3395 .flags = MV88E6XXX_FLAGS_FAMILY_6165, 3559 .flags = MV88E6XXX_FLAGS_FAMILY_6165,
3396 }, 3560 },
3397 3561
@@ -3401,6 +3565,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3401 .name = "Marvell 88E6131", 3565 .name = "Marvell 88E6131",
3402 .num_databases = 256, 3566 .num_databases = 256,
3403 .num_ports = 8, 3567 .num_ports = 8,
3568 .port_base_addr = 0x10,
3404 .flags = MV88E6XXX_FLAGS_FAMILY_6185, 3569 .flags = MV88E6XXX_FLAGS_FAMILY_6185,
3405 }, 3570 },
3406 3571
@@ -3410,6 +3575,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3410 .name = "Marvell 88E6161", 3575 .name = "Marvell 88E6161",
3411 .num_databases = 4096, 3576 .num_databases = 4096,
3412 .num_ports = 6, 3577 .num_ports = 6,
3578 .port_base_addr = 0x10,
3413 .flags = MV88E6XXX_FLAGS_FAMILY_6165, 3579 .flags = MV88E6XXX_FLAGS_FAMILY_6165,
3414 }, 3580 },
3415 3581
@@ -3419,6 +3585,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3419 .name = "Marvell 88E6165", 3585 .name = "Marvell 88E6165",
3420 .num_databases = 4096, 3586 .num_databases = 4096,
3421 .num_ports = 6, 3587 .num_ports = 6,
3588 .port_base_addr = 0x10,
3422 .flags = MV88E6XXX_FLAGS_FAMILY_6165, 3589 .flags = MV88E6XXX_FLAGS_FAMILY_6165,
3423 }, 3590 },
3424 3591
@@ -3428,6 +3595,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3428 .name = "Marvell 88E6171", 3595 .name = "Marvell 88E6171",
3429 .num_databases = 4096, 3596 .num_databases = 4096,
3430 .num_ports = 7, 3597 .num_ports = 7,
3598 .port_base_addr = 0x10,
3431 .flags = MV88E6XXX_FLAGS_FAMILY_6351, 3599 .flags = MV88E6XXX_FLAGS_FAMILY_6351,
3432 }, 3600 },
3433 3601
@@ -3437,6 +3605,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3437 .name = "Marvell 88E6172", 3605 .name = "Marvell 88E6172",
3438 .num_databases = 4096, 3606 .num_databases = 4096,
3439 .num_ports = 7, 3607 .num_ports = 7,
3608 .port_base_addr = 0x10,
3440 .flags = MV88E6XXX_FLAGS_FAMILY_6352, 3609 .flags = MV88E6XXX_FLAGS_FAMILY_6352,
3441 }, 3610 },
3442 3611
@@ -3446,6 +3615,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3446 .name = "Marvell 88E6175", 3615 .name = "Marvell 88E6175",
3447 .num_databases = 4096, 3616 .num_databases = 4096,
3448 .num_ports = 7, 3617 .num_ports = 7,
3618 .port_base_addr = 0x10,
3449 .flags = MV88E6XXX_FLAGS_FAMILY_6351, 3619 .flags = MV88E6XXX_FLAGS_FAMILY_6351,
3450 }, 3620 },
3451 3621
@@ -3455,6 +3625,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3455 .name = "Marvell 88E6176", 3625 .name = "Marvell 88E6176",
3456 .num_databases = 4096, 3626 .num_databases = 4096,
3457 .num_ports = 7, 3627 .num_ports = 7,
3628 .port_base_addr = 0x10,
3458 .flags = MV88E6XXX_FLAGS_FAMILY_6352, 3629 .flags = MV88E6XXX_FLAGS_FAMILY_6352,
3459 }, 3630 },
3460 3631
@@ -3464,6 +3635,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3464 .name = "Marvell 88E6185", 3635 .name = "Marvell 88E6185",
3465 .num_databases = 256, 3636 .num_databases = 256,
3466 .num_ports = 10, 3637 .num_ports = 10,
3638 .port_base_addr = 0x10,
3467 .flags = MV88E6XXX_FLAGS_FAMILY_6185, 3639 .flags = MV88E6XXX_FLAGS_FAMILY_6185,
3468 }, 3640 },
3469 3641
@@ -3473,6 +3645,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3473 .name = "Marvell 88E6240", 3645 .name = "Marvell 88E6240",
3474 .num_databases = 4096, 3646 .num_databases = 4096,
3475 .num_ports = 7, 3647 .num_ports = 7,
3648 .port_base_addr = 0x10,
3476 .flags = MV88E6XXX_FLAGS_FAMILY_6352, 3649 .flags = MV88E6XXX_FLAGS_FAMILY_6352,
3477 }, 3650 },
3478 3651
@@ -3482,6 +3655,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3482 .name = "Marvell 88E6320", 3655 .name = "Marvell 88E6320",
3483 .num_databases = 4096, 3656 .num_databases = 4096,
3484 .num_ports = 7, 3657 .num_ports = 7,
3658 .port_base_addr = 0x10,
3485 .flags = MV88E6XXX_FLAGS_FAMILY_6320, 3659 .flags = MV88E6XXX_FLAGS_FAMILY_6320,
3486 }, 3660 },
3487 3661
@@ -3491,6 +3665,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3491 .name = "Marvell 88E6321", 3665 .name = "Marvell 88E6321",
3492 .num_databases = 4096, 3666 .num_databases = 4096,
3493 .num_ports = 7, 3667 .num_ports = 7,
3668 .port_base_addr = 0x10,
3494 .flags = MV88E6XXX_FLAGS_FAMILY_6320, 3669 .flags = MV88E6XXX_FLAGS_FAMILY_6320,
3495 }, 3670 },
3496 3671
@@ -3500,6 +3675,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3500 .name = "Marvell 88E6350", 3675 .name = "Marvell 88E6350",
3501 .num_databases = 4096, 3676 .num_databases = 4096,
3502 .num_ports = 7, 3677 .num_ports = 7,
3678 .port_base_addr = 0x10,
3503 .flags = MV88E6XXX_FLAGS_FAMILY_6351, 3679 .flags = MV88E6XXX_FLAGS_FAMILY_6351,
3504 }, 3680 },
3505 3681
@@ -3509,6 +3685,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3509 .name = "Marvell 88E6351", 3685 .name = "Marvell 88E6351",
3510 .num_databases = 4096, 3686 .num_databases = 4096,
3511 .num_ports = 7, 3687 .num_ports = 7,
3688 .port_base_addr = 0x10,
3512 .flags = MV88E6XXX_FLAGS_FAMILY_6351, 3689 .flags = MV88E6XXX_FLAGS_FAMILY_6351,
3513 }, 3690 },
3514 3691
@@ -3518,75 +3695,128 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3518 .name = "Marvell 88E6352", 3695 .name = "Marvell 88E6352",
3519 .num_databases = 4096, 3696 .num_databases = 4096,
3520 .num_ports = 7, 3697 .num_ports = 7,
3698 .port_base_addr = 0x10,
3521 .flags = MV88E6XXX_FLAGS_FAMILY_6352, 3699 .flags = MV88E6XXX_FLAGS_FAMILY_6352,
3522 }, 3700 },
3523}; 3701};
3524 3702
3525static const struct mv88e6xxx_info * 3703static const struct mv88e6xxx_info *mv88e6xxx_lookup_info(unsigned int prod_num)
3526mv88e6xxx_lookup_info(unsigned int prod_num, const struct mv88e6xxx_info *table,
3527 unsigned int num)
3528{ 3704{
3529 int i; 3705 int i;
3530 3706
3531 for (i = 0; i < num; ++i) 3707 for (i = 0; i < ARRAY_SIZE(mv88e6xxx_table); ++i)
3532 if (table[i].prod_num == prod_num) 3708 if (mv88e6xxx_table[i].prod_num == prod_num)
3533 return &table[i]; 3709 return &mv88e6xxx_table[i];
3534 3710
3535 return NULL; 3711 return NULL;
3536} 3712}
3537 3713
3538static const char *mv88e6xxx_drv_probe(struct device *dsa_dev, 3714static int mv88e6xxx_detect(struct mv88e6xxx_chip *chip)
3539 struct device *host_dev, int sw_addr,
3540 void **priv)
3541{ 3715{
3542 const struct mv88e6xxx_info *info; 3716 const struct mv88e6xxx_info *info;
3543 struct mv88e6xxx_priv_state *ps;
3544 struct mii_bus *bus;
3545 const char *name;
3546 int id, prod_num, rev; 3717 int id, prod_num, rev;
3547 3718
3548 bus = dsa_host_dev_to_mii_bus(host_dev); 3719 id = mv88e6xxx_reg_read(chip, chip->info->port_base_addr,
3549 if (!bus) 3720 PORT_SWITCH_ID);
3550 return NULL;
3551
3552 id = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
3553 if (id < 0) 3721 if (id < 0)
3554 return NULL; 3722 return id;
3555 3723
3556 prod_num = (id & 0xfff0) >> 4; 3724 prod_num = (id & 0xfff0) >> 4;
3557 rev = id & 0x000f; 3725 rev = id & 0x000f;
3558 3726
3559 info = mv88e6xxx_lookup_info(prod_num, mv88e6xxx_table, 3727 info = mv88e6xxx_lookup_info(prod_num);
3560 ARRAY_SIZE(mv88e6xxx_table));
3561 if (!info) 3728 if (!info)
3729 return -ENODEV;
3730
3731 /* Update the compatible info with the probed one */
3732 chip->info = info;
3733
3734 dev_info(chip->dev, "switch 0x%x detected: %s, revision %u\n",
3735 chip->info->prod_num, chip->info->name, rev);
3736
3737 return 0;
3738}
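
The ID word read from PORT_SWITCH_ID splits into a twelve-bit product number and a four-bit revision, which mv88e6xxx_lookup_info() then matches against the table above. A worked sketch with a hypothetical ID value:

static void example_split_id(int id, int *prod_num, int *rev)
{
	/* e.g. id 0x3521 -> prod_num 0x352 (an 88E6352), rev 1 */
	*prod_num = (id & 0xfff0) >> 4;
	*rev = id & 0x000f;
}
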
3739
3740static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
3741{
3742 struct mv88e6xxx_chip *chip;
3743
3744 chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
3745 if (!chip)
3562 return NULL; 3746 return NULL;
3563 3747
3564 name = info->name; 3748 chip->dev = dev;
3749
3750 mutex_init(&chip->reg_lock);
3751
3752 return chip;
3753}
3754
3755static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
3756 struct mii_bus *bus, int sw_addr)
3757{
3758 /* ADDR[0] pin is unavailable externally and considered zero */
3759 if (sw_addr & 0x1)
3760 return -EINVAL;
3761
3762 if (sw_addr == 0)
3763 chip->smi_ops = &mv88e6xxx_smi_single_chip_ops;
3764 else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_MULTI_CHIP))
3765 chip->smi_ops = &mv88e6xxx_smi_multi_chip_ops;
3766 else
3767 return -EINVAL;
3768
3769 chip->bus = bus;
3770 chip->sw_addr = sw_addr;
3771
3772 return 0;
3773}
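
mv88e6xxx_smi_init() picks the access method once, at probe time; every register access after that goes through the smi_ops vtable (struct mv88e6xxx_ops, declared in the header changes below). A minimal consumption sketch with a hypothetical wrapper:

static int example_smi_read(struct mv88e6xxx_chip *chip,
			    int addr, int reg, u16 *val)
{
	/* Single-chip ops hit the register directly; multi-chip ops
	 * go through the SMI_CMD/SMI_DATA indirection at sw_addr.
	 */
	return chip->smi_ops->read(chip, addr, reg, val);
}
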
3565 3774
3566 ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL); 3775static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
3567 if (!ps) 3776 struct device *host_dev, int sw_addr,
3777 void **priv)
3778{
3779 struct mv88e6xxx_chip *chip;
3780 struct mii_bus *bus;
3781 int err;
3782
3783 bus = dsa_host_dev_to_mii_bus(host_dev);
3784 if (!bus)
3785 return NULL;
3786
3787 chip = mv88e6xxx_alloc_chip(dsa_dev);
3788 if (!chip)
3568 return NULL; 3789 return NULL;
3569 3790
3570 ps->bus = bus; 3791 /* Legacy SMI probing will only support chips similar to 88E6085 */
3571 ps->sw_addr = sw_addr; 3792 chip->info = &mv88e6xxx_table[MV88E6085];
3572 ps->info = info; 3793
3573 mutex_init(&ps->smi_mutex); 3794 err = mv88e6xxx_smi_init(chip, bus, sw_addr);
3795 if (err)
3796 goto free;
3797
3798 err = mv88e6xxx_detect(chip);
3799 if (err)
3800 goto free;
3801
3802 err = mv88e6xxx_mdio_register(chip, NULL);
3803 if (err)
3804 goto free;
3574 3805
3575 *priv = ps; 3806 *priv = chip;
3576 3807
3577 dev_info(&ps->bus->dev, "switch 0x%x probed: %s, revision %u\n", 3808 return chip->info->name;
3578 prod_num, name, rev); 3809free:
3810 devm_kfree(dsa_dev, chip);
3579 3811
3580 return name; 3812 return NULL;
3581} 3813}
3582 3814
3583struct dsa_switch_driver mv88e6xxx_switch_driver = { 3815static struct dsa_switch_driver mv88e6xxx_switch_driver = {
3584 .tag_protocol = DSA_TAG_PROTO_EDSA, 3816 .tag_protocol = DSA_TAG_PROTO_EDSA,
3585 .probe = mv88e6xxx_drv_probe, 3817 .probe = mv88e6xxx_drv_probe,
3586 .setup = mv88e6xxx_setup, 3818 .setup = mv88e6xxx_setup,
3587 .set_addr = mv88e6xxx_set_addr, 3819 .set_addr = mv88e6xxx_set_addr,
3588 .phy_read = mv88e6xxx_phy_read,
3589 .phy_write = mv88e6xxx_phy_write,
3590 .adjust_link = mv88e6xxx_adjust_link, 3820 .adjust_link = mv88e6xxx_adjust_link,
3591 .get_strings = mv88e6xxx_get_strings, 3821 .get_strings = mv88e6xxx_get_strings,
3592 .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, 3822 .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
@@ -3618,64 +3848,74 @@ struct dsa_switch_driver mv88e6xxx_switch_driver = {
3618 .port_fdb_dump = mv88e6xxx_port_fdb_dump, 3848 .port_fdb_dump = mv88e6xxx_port_fdb_dump,
3619}; 3849};
3620 3850
3621int mv88e6xxx_probe(struct mdio_device *mdiodev) 3851static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip,
3852 struct device_node *np)
3622{ 3853{
3623 struct device *dev = &mdiodev->dev; 3854 struct device *dev = chip->dev;
3624 struct device_node *np = dev->of_node;
3625 struct mv88e6xxx_priv_state *ps;
3626 int id, prod_num, rev;
3627 struct dsa_switch *ds; 3855 struct dsa_switch *ds;
3628 u32 eeprom_len;
3629 int err;
3630 3856
3631 ds = devm_kzalloc(dev, sizeof(*ds) + sizeof(*ps), GFP_KERNEL); 3857 ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
3632 if (!ds) 3858 if (!ds)
3633 return -ENOMEM; 3859 return -ENOMEM;
3634 3860
3635 ps = (struct mv88e6xxx_priv_state *)(ds + 1);
3636 ds->priv = ps;
3637 ds->dev = dev; 3861 ds->dev = dev;
3638 ps->dev = dev; 3862 ds->priv = chip;
3639 ps->ds = ds; 3863 ds->drv = &mv88e6xxx_switch_driver;
3640 ps->bus = mdiodev->bus;
3641 ps->sw_addr = mdiodev->addr;
3642 mutex_init(&ps->smi_mutex);
3643 3864
3644 get_device(&ps->bus->dev); 3865 dev_set_drvdata(dev, ds);
3645 3866
3646 ds->drv = &mv88e6xxx_switch_driver; 3867 return dsa_register_switch(ds, np);
3868}
3647 3869
3648 id = mv88e6xxx_reg_read(ps, REG_PORT(0), PORT_SWITCH_ID); 3870static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip)
3649 if (id < 0) 3871{
3650 return id; 3872 dsa_unregister_switch(chip->ds);
3873}
3651 3874
3652 prod_num = (id & 0xfff0) >> 4; 3875static int mv88e6xxx_probe(struct mdio_device *mdiodev)
3653 rev = id & 0x000f; 3876{
3877 struct device *dev = &mdiodev->dev;
3878 struct device_node *np = dev->of_node;
3879 const struct mv88e6xxx_info *compat_info;
3880 struct mv88e6xxx_chip *chip;
3881 u32 eeprom_len;
3882 int err;
3654 3883
3655 ps->info = mv88e6xxx_lookup_info(prod_num, mv88e6xxx_table, 3884 compat_info = of_device_get_match_data(dev);
3656 ARRAY_SIZE(mv88e6xxx_table)); 3885 if (!compat_info)
3657 if (!ps->info) 3886 return -EINVAL;
3658 return -ENODEV;
3659 3887
3660 ps->reset = devm_gpiod_get(&mdiodev->dev, "reset", GPIOD_ASIS); 3888 chip = mv88e6xxx_alloc_chip(dev);
3661 if (IS_ERR(ps->reset)) { 3889 if (!chip)
3662 err = PTR_ERR(ps->reset); 3890 return -ENOMEM;
3663 if (err == -ENOENT) {
3664 /* Optional, so not an error */
3665 ps->reset = NULL;
3666 } else {
3667 return err;
3668 }
3669 }
3670 3891
3671 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM) && 3892 chip->info = compat_info;
3893
3894 err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
3895 if (err)
3896 return err;
3897
3898 err = mv88e6xxx_detect(chip);
3899 if (err)
3900 return err;
3901
3902 chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
3903 if (IS_ERR(chip->reset))
3904 return PTR_ERR(chip->reset);
3905
3906 if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEPROM) &&
3672 !of_property_read_u32(np, "eeprom-length", &eeprom_len)) 3907 !of_property_read_u32(np, "eeprom-length", &eeprom_len))
3673 ps->eeprom_len = eeprom_len; 3908 chip->eeprom_len = eeprom_len;
3674 3909
3675 dev_set_drvdata(dev, ds); 3910 err = mv88e6xxx_mdio_register(chip, np);
3911 if (err)
3912 return err;
3676 3913
3677 dev_info(dev, "switch 0x%x probed: %s, revision %u\n", 3914 err = mv88e6xxx_register_switch(chip, np);
3678 prod_num, ps->info->name, rev); 3915 if (err) {
3916 mv88e6xxx_mdio_unregister(chip);
3917 return err;
3918 }
3679 3919
3680 return 0; 3920 return 0;
3681} 3921}
@@ -3683,13 +3923,17 @@ int mv88e6xxx_probe(struct mdio_device *mdiodev)
3683static void mv88e6xxx_remove(struct mdio_device *mdiodev) 3923static void mv88e6xxx_remove(struct mdio_device *mdiodev)
3684{ 3924{
3685 struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev); 3925 struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
3686 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 3926 struct mv88e6xxx_chip *chip = ds_to_priv(ds);
3687 3927
3688 put_device(&ps->bus->dev); 3928 mv88e6xxx_unregister_switch(chip);
3929 mv88e6xxx_mdio_unregister(chip);
3689} 3930}
3690 3931
3691static const struct of_device_id mv88e6xxx_of_match[] = { 3932static const struct of_device_id mv88e6xxx_of_match[] = {
3692 { .compatible = "marvell,mv88e6085" }, 3933 {
3934 .compatible = "marvell,mv88e6085",
3935 .data = &mv88e6xxx_table[MV88E6085],
3936 },
3693 { /* sentinel */ }, 3937 { /* sentinel */ },
3694}; 3938};
3695 3939
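
The .data pointer added to the match table pairs each compatible with its mv88e6xxx_info, which mv88e6xxx_probe() recovers through of_device_get_match_data() before touching the bus. A usage sketch of that standard OF helper (wrapper name hypothetical):

#include <linux/of_device.h>

static const struct mv88e6xxx_info *example_match_info(struct device *dev)
{
	/* NULL means the matched compatible carried no .data entry. */
	return of_device_get_match_data(dev);
}
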
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
index 36d0e1504de1..83f06620133d 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * net/dsa/mv88e6xxx.h - Marvell 88e6xxx switch chip support 2 * Marvell 88e6xxx common definitions
3 *
3 * Copyright (c) 2008 Marvell Semiconductor 4 * Copyright (c) 2008 Marvell Semiconductor
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
@@ -387,6 +388,12 @@ enum mv88e6xxx_cap {
387 */ 388 */
388 MV88E6XXX_CAP_EEPROM, 389 MV88E6XXX_CAP_EEPROM,
389 390
391 /* Multi-chip Addressing Mode.
392 * Some chips require an indirect SMI access when their SMI device
393 * address is not zero. See SMI_CMD and SMI_DATA.
394 */
395 MV88E6XXX_CAP_MULTI_CHIP,
396
390 /* Port State Filtering for 802.1D Spanning Tree. 397 /* Port State Filtering for 802.1D Spanning Tree.
391 * See PORT_CONTROL_STATE_* values in the PORT_CONTROL register. 398 * See PORT_CONTROL_STATE_* values in the PORT_CONTROL register.
392 */ 399 */
@@ -439,6 +446,7 @@ enum mv88e6xxx_cap {
439#define MV88E6XXX_FLAG_ATU BIT(MV88E6XXX_CAP_ATU) 446#define MV88E6XXX_FLAG_ATU BIT(MV88E6XXX_CAP_ATU)
440#define MV88E6XXX_FLAG_EEE BIT(MV88E6XXX_CAP_EEE) 447#define MV88E6XXX_FLAG_EEE BIT(MV88E6XXX_CAP_EEE)
441#define MV88E6XXX_FLAG_EEPROM BIT(MV88E6XXX_CAP_EEPROM) 448#define MV88E6XXX_FLAG_EEPROM BIT(MV88E6XXX_CAP_EEPROM)
449#define MV88E6XXX_FLAG_MULTI_CHIP BIT(MV88E6XXX_CAP_MULTI_CHIP)
442#define MV88E6XXX_FLAG_PORTSTATE BIT(MV88E6XXX_CAP_PORTSTATE) 450#define MV88E6XXX_FLAG_PORTSTATE BIT(MV88E6XXX_CAP_PORTSTATE)
443#define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU) 451#define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU)
444#define MV88E6XXX_FLAG_PPU_ACTIVE BIT(MV88E6XXX_CAP_PPU_ACTIVE) 452#define MV88E6XXX_FLAG_PPU_ACTIVE BIT(MV88E6XXX_CAP_PPU_ACTIVE)
@@ -452,25 +460,29 @@ enum mv88e6xxx_cap {
452 460
453#define MV88E6XXX_FLAGS_FAMILY_6095 \ 461#define MV88E6XXX_FLAGS_FAMILY_6095 \
454 (MV88E6XXX_FLAG_ATU | \ 462 (MV88E6XXX_FLAG_ATU | \
463 MV88E6XXX_FLAG_MULTI_CHIP | \
455 MV88E6XXX_FLAG_PPU | \ 464 MV88E6XXX_FLAG_PPU | \
456 MV88E6XXX_FLAG_VLANTABLE | \ 465 MV88E6XXX_FLAG_VLANTABLE | \
457 MV88E6XXX_FLAG_VTU) 466 MV88E6XXX_FLAG_VTU)
458 467
459#define MV88E6XXX_FLAGS_FAMILY_6097 \ 468#define MV88E6XXX_FLAGS_FAMILY_6097 \
460 (MV88E6XXX_FLAG_ATU | \ 469 (MV88E6XXX_FLAG_ATU | \
470 MV88E6XXX_FLAG_MULTI_CHIP | \
461 MV88E6XXX_FLAG_PPU | \ 471 MV88E6XXX_FLAG_PPU | \
462 MV88E6XXX_FLAG_STU | \ 472 MV88E6XXX_FLAG_STU | \
463 MV88E6XXX_FLAG_VLANTABLE | \ 473 MV88E6XXX_FLAG_VLANTABLE | \
464 MV88E6XXX_FLAG_VTU) 474 MV88E6XXX_FLAG_VTU)
465 475
466#define MV88E6XXX_FLAGS_FAMILY_6165 \ 476#define MV88E6XXX_FLAGS_FAMILY_6165 \
467 (MV88E6XXX_FLAG_STU | \ 477 (MV88E6XXX_FLAG_MULTI_CHIP | \
478 MV88E6XXX_FLAG_STU | \
468 MV88E6XXX_FLAG_SWITCH_MAC | \ 479 MV88E6XXX_FLAG_SWITCH_MAC | \
469 MV88E6XXX_FLAG_TEMP | \ 480 MV88E6XXX_FLAG_TEMP | \
470 MV88E6XXX_FLAG_VTU) 481 MV88E6XXX_FLAG_VTU)
471 482
472#define MV88E6XXX_FLAGS_FAMILY_6185 \ 483#define MV88E6XXX_FLAGS_FAMILY_6185 \
473 (MV88E6XXX_FLAG_ATU | \ 484 (MV88E6XXX_FLAG_ATU | \
485 MV88E6XXX_FLAG_MULTI_CHIP | \
474 MV88E6XXX_FLAG_PPU | \ 486 MV88E6XXX_FLAG_PPU | \
475 MV88E6XXX_FLAG_VLANTABLE | \ 487 MV88E6XXX_FLAG_VLANTABLE | \
476 MV88E6XXX_FLAG_VTU) 488 MV88E6XXX_FLAG_VTU)
@@ -479,6 +491,7 @@ enum mv88e6xxx_cap {
479 (MV88E6XXX_FLAG_ATU | \ 491 (MV88E6XXX_FLAG_ATU | \
480 MV88E6XXX_FLAG_EEE | \ 492 MV88E6XXX_FLAG_EEE | \
481 MV88E6XXX_FLAG_EEPROM | \ 493 MV88E6XXX_FLAG_EEPROM | \
494 MV88E6XXX_FLAG_MULTI_CHIP | \
482 MV88E6XXX_FLAG_PORTSTATE | \ 495 MV88E6XXX_FLAG_PORTSTATE | \
483 MV88E6XXX_FLAG_PPU_ACTIVE | \ 496 MV88E6XXX_FLAG_PPU_ACTIVE | \
484 MV88E6XXX_FLAG_SMI_PHY | \ 497 MV88E6XXX_FLAG_SMI_PHY | \
@@ -490,6 +503,7 @@ enum mv88e6xxx_cap {
490 503
491#define MV88E6XXX_FLAGS_FAMILY_6351 \ 504#define MV88E6XXX_FLAGS_FAMILY_6351 \
492 (MV88E6XXX_FLAG_ATU | \ 505 (MV88E6XXX_FLAG_ATU | \
506 MV88E6XXX_FLAG_MULTI_CHIP | \
493 MV88E6XXX_FLAG_PORTSTATE | \ 507 MV88E6XXX_FLAG_PORTSTATE | \
494 MV88E6XXX_FLAG_PPU_ACTIVE | \ 508 MV88E6XXX_FLAG_PPU_ACTIVE | \
495 MV88E6XXX_FLAG_SMI_PHY | \ 509 MV88E6XXX_FLAG_SMI_PHY | \
@@ -503,6 +517,7 @@ enum mv88e6xxx_cap {
503 (MV88E6XXX_FLAG_ATU | \ 517 (MV88E6XXX_FLAG_ATU | \
504 MV88E6XXX_FLAG_EEE | \ 518 MV88E6XXX_FLAG_EEE | \
505 MV88E6XXX_FLAG_EEPROM | \ 519 MV88E6XXX_FLAG_EEPROM | \
520 MV88E6XXX_FLAG_MULTI_CHIP | \
506 MV88E6XXX_FLAG_PORTSTATE | \ 521 MV88E6XXX_FLAG_PORTSTATE | \
507 MV88E6XXX_FLAG_PPU_ACTIVE | \ 522 MV88E6XXX_FLAG_PPU_ACTIVE | \
508 MV88E6XXX_FLAG_SMI_PHY | \ 523 MV88E6XXX_FLAG_SMI_PHY | \
@@ -519,6 +534,7 @@ struct mv88e6xxx_info {
519 const char *name; 534 const char *name;
520 unsigned int num_databases; 535 unsigned int num_databases;
521 unsigned int num_ports; 536 unsigned int num_ports;
537 unsigned int port_base_addr;
522 unsigned long flags; 538 unsigned long flags;
523}; 539};
524 540
@@ -541,11 +557,13 @@ struct mv88e6xxx_vtu_stu_entry {
541 u8 data[DSA_MAX_PORTS]; 557 u8 data[DSA_MAX_PORTS];
542}; 558};
543 559
560struct mv88e6xxx_ops;
561
544struct mv88e6xxx_priv_port { 562struct mv88e6xxx_priv_port {
545 struct net_device *bridge_dev; 563 struct net_device *bridge_dev;
546}; 564};
547 565
548struct mv88e6xxx_priv_state { 566struct mv88e6xxx_chip {
549 const struct mv88e6xxx_info *info; 567 const struct mv88e6xxx_info *info;
550 568
551 /* The dsa_switch this private structure is related to */ 569 /* The dsa_switch this private structure is related to */
@@ -554,15 +572,13 @@ struct mv88e6xxx_priv_state {
554 /* The device this structure is associated to */ 572 /* The device this structure is associated to */
555 struct device *dev; 573 struct device *dev;
556 574
557 /* When using multi-chip addressing, this mutex protects 575 /* This mutex protects the access to the switch registers */
558 * access to the indirect access registers. (In single-chip 576 struct mutex reg_lock;
559 * mode, this mutex is effectively useless.)
560 */
561 struct mutex smi_mutex;
562 577
563 /* The MII bus and the address on the bus that is used to 578 /* The MII bus and the address on the bus that is used to
564 * communicate with the switch 579
565 */ 580 */
581 const struct mv88e6xxx_ops *smi_ops;
566 struct mii_bus *bus; 582 struct mii_bus *bus;
567 int sw_addr; 583 int sw_addr;
568 584
@@ -600,6 +616,17 @@ struct mv88e6xxx_priv_state {
600 616
601 /* set to size of eeprom if supported by the switch */ 617 /* set to size of eeprom if supported by the switch */
602 int eeprom_len; 618 int eeprom_len;
619
620 /* Device node for the MDIO bus */
621 struct device_node *mdio_np;
622
623 /* And the MDIO bus itself */
624 struct mii_bus *mdio_bus;
625};
626
627struct mv88e6xxx_ops {
628 int (*read)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
629 int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
603}; 630};
604 631
605enum stat_type { 632enum stat_type {
@@ -615,10 +642,10 @@ struct mv88e6xxx_hw_stat {
615 enum stat_type type; 642 enum stat_type type;
616}; 643};
617 644
618static inline bool mv88e6xxx_has(struct mv88e6xxx_priv_state *ps, 645static inline bool mv88e6xxx_has(struct mv88e6xxx_chip *chip,
619 unsigned long flags) 646 unsigned long flags)
620{ 647{
621 return (ps->info->flags & flags) == flags; 648 return (chip->info->flags & flags) == flags;
622} 649}
623 650
624#endif 651#endif
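
Since mv88e6xxx_has() tests (chip->info->flags & flags) == flags, ORing several flags together asks whether the chip has all of them at once. A usage sketch (helper name hypothetical):

static bool example_has_eeprom_multichip(struct mv88e6xxx_chip *chip)
{
	/* True only if both capability bits are set in info->flags. */
	return mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEPROM |
				   MV88E6XXX_FLAG_MULTI_CHIP);
}
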
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index c89b9aeeceb6..5698f5354c0b 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -84,7 +84,6 @@ static u32 ax_msg_enable;
 struct ax_device {
 	struct mii_bus *mii_bus;
 	struct mdiobb_ctrl bb_ctrl;
-	struct phy_device *phy_dev;
 	void __iomem *addr_memr;
 	u8 reg_memr;
 	int link;
@@ -320,7 +319,7 @@ static void ax_block_output(struct net_device *dev, int count,
 static void ax_handle_link_change(struct net_device *dev)
 {
 	struct ax_device *ax = to_ax_dev(dev);
-	struct phy_device *phy_dev = ax->phy_dev;
+	struct phy_device *phy_dev = dev->phydev;
 	int status_change = 0;
 
 	if (phy_dev->link && ((ax->speed != phy_dev->speed) ||
@@ -369,8 +368,6 @@ static int ax_mii_probe(struct net_device *dev)
 	phy_dev->supported &= PHY_BASIC_FEATURES;
 	phy_dev->advertising = phy_dev->supported;
 
-	ax->phy_dev = phy_dev;
-
 	netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
 		    phy_dev->drv->name, phydev_name(phy_dev), phy_dev->irq);
 
@@ -410,7 +407,7 @@ static int ax_open(struct net_device *dev)
 	ret = ax_mii_probe(dev);
 	if (ret)
 		goto failed_mii_probe;
-	phy_start(ax->phy_dev);
+	phy_start(dev->phydev);
 
 	ret = ax_ei_open(dev);
 	if (ret)
@@ -421,7 +418,7 @@ static int ax_open(struct net_device *dev)
 	return 0;
 
 failed_ax_ei_open:
-	phy_disconnect(ax->phy_dev);
+	phy_disconnect(dev->phydev);
 failed_mii_probe:
 	ax_phy_switch(dev, 0);
 	free_irq(dev->irq, dev);
@@ -442,7 +439,7 @@ static int ax_close(struct net_device *dev)
 
 	/* turn the phy off */
 	ax_phy_switch(dev, 0);
-	phy_disconnect(ax->phy_dev);
+	phy_disconnect(dev->phydev);
 
 	free_irq(dev->irq, dev);
 	return 0;
@@ -450,8 +447,7 @@ static int ax_close(struct net_device *dev)
 
 static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 {
-	struct ax_device *ax = to_ax_dev(dev);
-	struct phy_device *phy_dev = ax->phy_dev;
+	struct phy_device *phy_dev = dev->phydev;
 
 	if (!netif_running(dev))
 		return -EINVAL;
@@ -474,28 +470,6 @@ static void ax_get_drvinfo(struct net_device *dev,
 	strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
 }
 
-static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct ax_device *ax = to_ax_dev(dev);
-	struct phy_device *phy_dev = ax->phy_dev;
-
-	if (!phy_dev)
-		return -ENODEV;
-
-	return phy_ethtool_gset(phy_dev, cmd);
-}
-
-static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct ax_device *ax = to_ax_dev(dev);
-	struct phy_device *phy_dev = ax->phy_dev;
-
-	if (!phy_dev)
-		return -ENODEV;
-
-	return phy_ethtool_sset(phy_dev, cmd);
-}
-
 static u32 ax_get_msglevel(struct net_device *dev)
 {
 	struct ei_device *ei_local = netdev_priv(dev);
@@ -512,12 +486,12 @@ static void ax_set_msglevel(struct net_device *dev, u32 v)
 
 static const struct ethtool_ops ax_ethtool_ops = {
 	.get_drvinfo		= ax_get_drvinfo,
-	.get_settings		= ax_get_settings,
-	.set_settings		= ax_set_settings,
 	.get_link		= ethtool_op_get_link,
 	.get_ts_info		= ethtool_op_get_ts_info,
 	.get_msglevel		= ax_get_msglevel,
 	.set_msglevel		= ax_set_msglevel,
+	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
+	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 };
 
 #ifdef CONFIG_AX88796_93CX6
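The ax88796 change above is the pattern repeated throughout this series: the driver's legacy get_settings/set_settings callbacks (built on struct ethtool_cmd) are deleted and the generic phydev-backed helpers are wired into ethtool_ops instead. A minimal sketch of the resulting table, assuming the PHY has been attached so that net_device->phydev is valid:

	#include <linux/ethtool.h>
	#include <linux/phy.h>

	/* With the PHY reachable via dev->phydev, no per-driver glue is
	 * needed; the core helpers translate to and from the newer
	 * ethtool_link_ksettings representation directly.
	 */
	static const struct ethtool_ops example_ethtool_ops = {
		.get_link		= ethtool_op_get_link,
		.get_link_ksettings	= phy_ethtool_get_link_ksettings,
		.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	};

The helpers themselves return -ENODEV when no PHY is attached, which is why the hand-rolled NULL checks in the removed wrappers are no longer needed.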
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 30defe6c81f2..cd7e2e5f496b 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -440,7 +440,6 @@ struct et131x_adapter {
 	struct net_device *netdev;
 	struct pci_dev *pdev;
 	struct mii_bus *mii_bus;
-	struct phy_device *phydev;
 	struct napi_struct napi;
 
 	/* Flags that indicate current state of the adapter */
@@ -864,7 +863,7 @@ static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
 {
 	int32_t delay = 0;
 	struct mac_regs __iomem *mac = &adapter->regs->mac;
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = adapter->netdev->phydev;
 	u32 cfg1;
 	u32 cfg2;
 	u32 ifctrl;
@@ -1035,7 +1034,7 @@ static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
 static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
 {
 	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = adapter->netdev->phydev;
 	u32 sa_lo;
 	u32 sa_hi = 0;
 	u32 pf_ctrl = 0;
@@ -1230,7 +1229,7 @@ out:
 
 static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
 {
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = adapter->netdev->phydev;
 
 	if (!phydev)
 		return -EIO;
@@ -1311,7 +1310,7 @@ static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter,
 
 static void et1310_config_flow_control(struct et131x_adapter *adapter)
 {
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = adapter->netdev->phydev;
 
 	if (phydev->duplex == DUPLEX_HALF) {
 		adapter->flow = FLOW_NONE;
@@ -1456,7 +1455,7 @@ static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
 static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
 {
 	u16 data;
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = adapter->netdev->phydev;
 
 	et131x_mii_read(adapter, MII_BMCR, &data);
 	data &= ~BMCR_PDOWN;
@@ -1469,7 +1468,7 @@ static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
 static void et131x_xcvr_init(struct et131x_adapter *adapter)
 {
 	u16 lcr2;
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = adapter->netdev->phydev;
 
 	/* Set the LED behavior such that LED 1 indicates speed (off =
 	 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
@@ -2111,7 +2110,7 @@ static int et131x_init_recv(struct et131x_adapter *adapter)
 /* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */
 static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
 {
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = adapter->netdev->phydev;
 
 	/* For version B silicon, we do not use the RxDMA timer for 10 and 100
 	 * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing.
@@ -2426,7 +2425,7 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
 	struct sk_buff *skb = tcb->skb;
 	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
 	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = adapter->netdev->phydev;
 	dma_addr_t dma_addr;
 	struct tx_ring *tx_ring = &adapter->tx_ring;
 
@@ -2791,22 +2790,6 @@ static void et131x_handle_send_pkts(struct et131x_adapter *adapter)
 	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
 }
 
-static int et131x_get_settings(struct net_device *netdev,
-			       struct ethtool_cmd *cmd)
-{
-	struct et131x_adapter *adapter = netdev_priv(netdev);
-
-	return phy_ethtool_gset(adapter->phydev, cmd);
-}
-
-static int et131x_set_settings(struct net_device *netdev,
-			       struct ethtool_cmd *cmd)
-{
-	struct et131x_adapter *adapter = netdev_priv(netdev);
-
-	return phy_ethtool_sset(adapter->phydev, cmd);
-}
-
 static int et131x_get_regs_len(struct net_device *netdev)
 {
 #define ET131X_REGS_LEN 256
@@ -2979,12 +2962,12 @@ static void et131x_get_drvinfo(struct net_device *netdev,
 }
 
 static struct ethtool_ops et131x_ethtool_ops = {
-	.get_settings	= et131x_get_settings,
-	.set_settings	= et131x_set_settings,
 	.get_drvinfo	= et131x_get_drvinfo,
 	.get_regs_len	= et131x_get_regs_len,
 	.get_regs	= et131x_get_regs,
 	.get_link	= ethtool_op_get_link,
+	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
+	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 };
 
 /* et131x_hwaddr_init - set up the MAC Address */
@@ -3098,7 +3081,7 @@ err_out:
 static void et131x_error_timer_handler(unsigned long data)
 {
 	struct et131x_adapter *adapter = (struct et131x_adapter *)data;
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = adapter->netdev->phydev;
 
 	if (et1310_in_phy_coma(adapter)) {
 		/* Bring the device immediately out of coma, to
@@ -3168,7 +3151,7 @@ static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
 static void et131x_adjust_link(struct net_device *netdev)
 {
 	struct et131x_adapter *adapter = netdev_priv(netdev);
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = netdev->phydev;
 
 	if (!phydev)
 		return;
@@ -3287,7 +3270,6 @@ static int et131x_mii_probe(struct net_device *netdev)
 
 	phydev->advertising = phydev->supported;
 	phydev->autoneg = AUTONEG_ENABLE;
-	adapter->phydev = phydev;
 
 	phy_attached_info(phydev);
 
@@ -3323,7 +3305,7 @@ static void et131x_pci_remove(struct pci_dev *pdev)
 
 	unregister_netdev(netdev);
 	netif_napi_del(&adapter->napi);
-	phy_disconnect(adapter->phydev);
+	phy_disconnect(netdev->phydev);
 	mdiobus_unregister(adapter->mii_bus);
 	mdiobus_free(adapter->mii_bus);
 
@@ -3338,20 +3320,16 @@ static void et131x_pci_remove(struct pci_dev *pdev)
 
 static void et131x_up(struct net_device *netdev)
 {
-	struct et131x_adapter *adapter = netdev_priv(netdev);
-
 	et131x_enable_txrx(netdev);
-	phy_start(adapter->phydev);
+	phy_start(netdev->phydev);
 }
 
 static void et131x_down(struct net_device *netdev)
 {
-	struct et131x_adapter *adapter = netdev_priv(netdev);
-
 	/* Save the timestamp for the TX watchdog, prevent a timeout */
 	netif_trans_update(netdev);
 
-	phy_stop(adapter->phydev);
+	phy_stop(netdev->phydev);
 	et131x_disable_txrx(netdev);
 }
 
@@ -3684,12 +3662,10 @@ static int et131x_close(struct net_device *netdev)
 static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
 			int cmd)
 {
-	struct et131x_adapter *adapter = netdev_priv(netdev);
-
-	if (!adapter->phydev)
+	if (!netdev->phydev)
 		return -EINVAL;
 
-	return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
+	return phy_mii_ioctl(netdev->phydev, reqbuf, cmd);
 }
 
 /* et131x_set_packet_filter - Configures the Rx Packet filtering */
@@ -4073,7 +4049,7 @@ out:
 	return rc;
 
 err_phy_disconnect:
-	phy_disconnect(adapter->phydev);
+	phy_disconnect(netdev->phydev);
 err_mdio_unregister:
 	mdiobus_unregister(adapter->mii_bus);
 err_mdio_free:
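et131x drops its cached adapter->phydev in favor of the pointer that phy_connect() already records in struct net_device, so any code holding the netdev can reach the PHY without driver-private state. A minimal sketch of the ioctl path this enables (function name illustrative):

	#include <linux/netdevice.h>
	#include <linux/phy.h>

	static int example_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
	{
		/* dev->phydev is set by phy_connect()/of_phy_connect() and
		 * cleared again by phy_disconnect(), so it doubles as an
		 * "is a PHY attached" test.
		 */
		if (!dev->phydev)
			return -EINVAL;

		return phy_mii_ioctl(dev->phydev, rq, cmd);
	}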
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index de2c4bf5fac4..6ffdff68bfc4 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -77,7 +77,6 @@ struct emac_board_info {
 
 	int emacrx_completed_flag;
 
-	struct phy_device *phy_dev;
 	struct device_node *phy_node;
 	unsigned int link;
 	unsigned int speed;
@@ -115,7 +114,7 @@ static void emac_update_duplex(struct net_device *dev)
 static void emac_handle_link_change(struct net_device *dev)
 {
 	struct emac_board_info *db = netdev_priv(dev);
-	struct phy_device *phydev = db->phy_dev;
+	struct phy_device *phydev = dev->phydev;
 	unsigned long flags;
 	int status_change = 0;
 
@@ -154,21 +153,22 @@ static void emac_handle_link_change(struct net_device *dev)
 static int emac_mdio_probe(struct net_device *dev)
 {
 	struct emac_board_info *db = netdev_priv(dev);
+	struct phy_device *phydev;
 
 	/* to-do: PHY interrupts are currently not supported */
 
 	/* attach the mac to the phy */
-	db->phy_dev = of_phy_connect(db->ndev, db->phy_node,
-				     &emac_handle_link_change, 0,
-				     db->phy_interface);
-	if (!db->phy_dev) {
+	phydev = of_phy_connect(db->ndev, db->phy_node,
+				&emac_handle_link_change, 0,
+				db->phy_interface);
+	if (!phydev) {
 		netdev_err(db->ndev, "could not find the PHY\n");
 		return -ENODEV;
 	}
 
 	/* mask with MAC supported features */
-	db->phy_dev->supported &= PHY_BASIC_FEATURES;
-	db->phy_dev->advertising = db->phy_dev->supported;
+	phydev->supported &= PHY_BASIC_FEATURES;
+	phydev->advertising = phydev->supported;
 
 	db->link = 0;
 	db->speed = 0;
@@ -179,10 +179,7 @@ static int emac_mdio_probe(struct net_device *dev)
 
 static void emac_mdio_remove(struct net_device *dev)
 {
-	struct emac_board_info *db = netdev_priv(dev);
-
-	phy_disconnect(db->phy_dev);
-	db->phy_dev = NULL;
+	phy_disconnect(dev->phydev);
 }
 
 static void emac_reset(struct emac_board_info *db)
@@ -208,8 +205,7 @@ static void emac_inblk_32bit(void __iomem *reg, void *data, int count)
 
 static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-	struct emac_board_info *dm = netdev_priv(dev);
-	struct phy_device *phydev = dm->phy_dev;
+	struct phy_device *phydev = dev->phydev;
 
 	if (!netif_running(dev))
 		return -EINVAL;
@@ -229,33 +225,11 @@ static void emac_get_drvinfo(struct net_device *dev,
 	strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
 }
 
-static int emac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct emac_board_info *dm = netdev_priv(dev);
-	struct phy_device *phydev = dm->phy_dev;
-
-	if (!phydev)
-		return -ENODEV;
-
-	return phy_ethtool_gset(phydev, cmd);
-}
-
-static int emac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct emac_board_info *dm = netdev_priv(dev);
-	struct phy_device *phydev = dm->phy_dev;
-
-	if (!phydev)
-		return -ENODEV;
-
-	return phy_ethtool_sset(phydev, cmd);
-}
-
 static const struct ethtool_ops emac_ethtool_ops = {
 	.get_drvinfo	= emac_get_drvinfo,
-	.get_settings	= emac_get_settings,
-	.set_settings	= emac_set_settings,
 	.get_link	= ethtool_op_get_link,
+	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
+	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 };
 
 static unsigned int emac_setup(struct net_device *ndev)
@@ -744,7 +718,7 @@ static int emac_open(struct net_device *dev)
 		return ret;
 	}
 
-	phy_start(db->phy_dev);
+	phy_start(dev->phydev);
 	netif_start_queue(dev);
 
 	return 0;
@@ -781,7 +755,7 @@ static int emac_stop(struct net_device *ndev)
 	netif_stop_queue(ndev);
 	netif_carrier_off(ndev);
 
-	phy_stop(db->phy_dev);
+	phy_stop(ndev->phydev);
 
 	emac_mdio_remove(ndev);
 
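sun4i-emac keeps its attach/detach lifecycle but stops storing the result: of_phy_connect() binds the PHY named in the device tree and records it in dev->phydev as a side effect, and phy_disconnect() clears it again. A condensed sketch of that lifecycle, with illustrative names and error handling trimmed:

	#include <linux/of_mdio.h>
	#include <linux/phy.h>

	static void example_link_change(struct net_device *dev)
	{
		/* react to dev->phydev->link/speed/duplex changes */
	}

	static int example_open(struct net_device *dev,
				struct device_node *phy_node)
	{
		struct phy_device *phydev;

		/* The core records the connected PHY in dev->phydev. */
		phydev = of_phy_connect(dev, phy_node, example_link_change, 0,
					PHY_INTERFACE_MODE_MII);
		if (!phydev)
			return -ENODEV;

		/* Limit advertisement to what the MAC can do (10/100 here). */
		phydev->supported &= PHY_BASIC_FEATURES;
		phydev->advertising = phydev->supported;

		phy_start(phydev);
		return 0;
	}

	static void example_stop(struct net_device *dev)
	{
		phy_stop(dev->phydev);
		phy_disconnect(dev->phydev);
	}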
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 103c30ddddf7..e0052003d16f 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -473,7 +473,6 @@ struct altera_tse_private {
 	int phy_addr;		/* PHY's MDIO address, -1 for autodetection */
 	phy_interface_t phy_iface;
 	struct mii_bus *mdio;
-	struct phy_device *phydev;
 	int oldspeed;
 	int oldduplex;
 	int oldlink;
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index be72e1e64525..7c367713c3e6 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -233,40 +233,18 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 		buf[i] = csrrd32(priv->mac_dev, i * 4);
 }
 
-static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct altera_tse_private *priv = netdev_priv(dev);
-	struct phy_device *phydev = priv->phydev;
-
-	if (phydev == NULL)
-		return -ENODEV;
-
-	return phy_ethtool_gset(phydev, cmd);
-}
-
-static int tse_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct altera_tse_private *priv = netdev_priv(dev);
-	struct phy_device *phydev = priv->phydev;
-
-	if (phydev == NULL)
-		return -ENODEV;
-
-	return phy_ethtool_sset(phydev, cmd);
-}
-
 static const struct ethtool_ops tse_ethtool_ops = {
 	.get_drvinfo = tse_get_drvinfo,
 	.get_regs_len = tse_reglen,
 	.get_regs = tse_get_regs,
 	.get_link = ethtool_op_get_link,
-	.get_settings = tse_get_settings,
-	.set_settings = tse_set_settings,
 	.get_strings = tse_gstrings,
 	.get_sset_count = tse_sset_count,
 	.get_ethtool_stats = tse_fill_stats,
 	.get_msglevel = tse_get_msglevel,
 	.set_msglevel = tse_set_msglevel,
+	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
 
 void altera_tse_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index f749e4d389eb..49025e99fb0e 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -625,7 +625,7 @@ out:
 static void altera_tse_adjust_link(struct net_device *dev)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct phy_device *phydev = priv->phydev;
+	struct phy_device *phydev = dev->phydev;
 	int new_state = 0;
 
 	/* only change config if there is a link */
@@ -845,7 +845,6 @@ static int init_phy(struct net_device *dev)
 	netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
 		   phydev->mdio.addr, phydev->phy_id, phydev->link);
 
-	priv->phydev = phydev;
 	return 0;
 }
 
@@ -1172,8 +1171,8 @@ static int tse_open(struct net_device *dev)
 
 	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
 
-	if (priv->phydev)
-		phy_start(priv->phydev);
+	if (dev->phydev)
+		phy_start(dev->phydev);
 
 	napi_enable(&priv->napi);
 	netif_start_queue(dev);
@@ -1205,8 +1204,8 @@ static int tse_shutdown(struct net_device *dev)
 	unsigned long int flags;
 
 	/* Stop the PHY */
-	if (priv->phydev)
-		phy_stop(priv->phydev);
+	if (dev->phydev)
+		phy_stop(dev->phydev);
 
 	netif_stop_queue(dev);
 	napi_disable(&priv->napi);
@@ -1545,10 +1544,9 @@ err_free_netdev:
 static int altera_tse_remove(struct platform_device *pdev)
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
-	struct altera_tse_private *priv = netdev_priv(ndev);
 
-	if (priv->phydev)
-		phy_disconnect(priv->phydev);
+	if (ndev->phydev)
+		phy_disconnect(ndev->phydev);
 
 	platform_set_drvdata(pdev, NULL);
 	altera_tse_mdio_destroy(ndev);
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 08a23e6b60e9..dc2c35dce216 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -631,7 +631,7 @@ static void nb8800_mac_config(struct net_device *dev)
 static void nb8800_pause_config(struct net_device *dev)
 {
 	struct nb8800_priv *priv = netdev_priv(dev);
-	struct phy_device *phydev = priv->phydev;
+	struct phy_device *phydev = dev->phydev;
 	u32 rxcr;
 
 	if (priv->pause_aneg) {
@@ -664,7 +664,7 @@ static void nb8800_pause_config(struct net_device *dev)
 static void nb8800_link_reconfigure(struct net_device *dev)
 {
 	struct nb8800_priv *priv = netdev_priv(dev);
-	struct phy_device *phydev = priv->phydev;
+	struct phy_device *phydev = dev->phydev;
 	int change = 0;
 
 	if (phydev->link) {
@@ -690,7 +690,7 @@ static void nb8800_link_reconfigure(struct net_device *dev)
 	}
 
 	if (change)
-		phy_print_status(priv->phydev);
+		phy_print_status(phydev);
 }
 
 static void nb8800_update_mac_addr(struct net_device *dev)
@@ -935,9 +935,10 @@ static int nb8800_dma_stop(struct net_device *dev)
 static void nb8800_pause_adv(struct net_device *dev)
 {
 	struct nb8800_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev = dev->phydev;
 	u32 adv = 0;
 
-	if (!priv->phydev)
+	if (!phydev)
 		return;
 
 	if (priv->pause_rx)
@@ -945,13 +946,14 @@ static void nb8800_pause_adv(struct net_device *dev)
 	if (priv->pause_tx)
 		adv ^= ADVERTISED_Asym_Pause;
 
-	priv->phydev->supported |= adv;
-	priv->phydev->advertising |= adv;
+	phydev->supported |= adv;
+	phydev->advertising |= adv;
 }
 
 static int nb8800_open(struct net_device *dev)
 {
 	struct nb8800_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev;
 	int err;
 
 	/* clear any pending interrupts */
@@ -969,10 +971,10 @@ static int nb8800_open(struct net_device *dev)
 	nb8800_mac_rx(dev, true);
 	nb8800_mac_tx(dev, true);
 
-	priv->phydev = of_phy_connect(dev, priv->phy_node,
-				      nb8800_link_reconfigure, 0,
-				      priv->phy_mode);
-	if (!priv->phydev)
+	phydev = of_phy_connect(dev, priv->phy_node,
+				nb8800_link_reconfigure, 0,
+				priv->phy_mode);
+	if (!phydev)
 		goto err_free_irq;
 
 	nb8800_pause_adv(dev);
@@ -982,7 +984,7 @@ static int nb8800_open(struct net_device *dev)
 	netif_start_queue(dev);
 
 	nb8800_start_rx(dev);
-	phy_start(priv->phydev);
+	phy_start(phydev);
 
 	return 0;
 
@@ -997,8 +999,9 @@ err_free_dma:
 static int nb8800_stop(struct net_device *dev)
 {
 	struct nb8800_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev = dev->phydev;
 
-	phy_stop(priv->phydev);
+	phy_stop(phydev);
 
 	netif_stop_queue(dev);
 	napi_disable(&priv->napi);
@@ -1007,8 +1010,7 @@ static int nb8800_stop(struct net_device *dev)
 	nb8800_mac_rx(dev, false);
 	nb8800_mac_tx(dev, false);
 
-	phy_disconnect(priv->phydev);
-	priv->phydev = NULL;
+	phy_disconnect(phydev);
 
 	free_irq(dev->irq, dev);
 
@@ -1019,9 +1021,7 @@ static int nb8800_stop(struct net_device *dev)
 
 static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-	struct nb8800_priv *priv = netdev_priv(dev);
-
-	return phy_mii_ioctl(priv->phydev, rq, cmd);
+	return phy_mii_ioctl(dev->phydev, rq, cmd);
 }
 
 static const struct net_device_ops nb8800_netdev_ops = {
@@ -1035,34 +1035,14 @@ static const struct net_device_ops nb8800_netdev_ops = {
 	.ndo_validate_addr	= eth_validate_addr,
 };
 
-static int nb8800_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct nb8800_priv *priv = netdev_priv(dev);
-
-	if (!priv->phydev)
-		return -ENODEV;
-
-	return phy_ethtool_gset(priv->phydev, cmd);
-}
-
-static int nb8800_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct nb8800_priv *priv = netdev_priv(dev);
-
-	if (!priv->phydev)
-		return -ENODEV;
-
-	return phy_ethtool_sset(priv->phydev, cmd);
-}
-
 static int nb8800_nway_reset(struct net_device *dev)
 {
-	struct nb8800_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev = dev->phydev;
 
-	if (!priv->phydev)
+	if (!phydev)
 		return -ENODEV;
 
-	return genphy_restart_aneg(priv->phydev);
+	return genphy_restart_aneg(phydev);
 }
 
 static void nb8800_get_pauseparam(struct net_device *dev,
@@ -1079,6 +1059,7 @@ static int nb8800_set_pauseparam(struct net_device *dev,
 				 struct ethtool_pauseparam *pp)
 {
 	struct nb8800_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev = dev->phydev;
 
 	priv->pause_aneg = pp->autoneg;
 	priv->pause_rx = pp->rx_pause;
@@ -1088,8 +1069,8 @@ static int nb8800_set_pauseparam(struct net_device *dev,
 
 	if (!priv->pause_aneg)
 		nb8800_pause_config(dev);
-	else if (priv->phydev)
-		phy_start_aneg(priv->phydev);
+	else if (phydev)
+		phy_start_aneg(phydev);
 
 	return 0;
 }
@@ -1182,8 +1163,6 @@ static void nb8800_get_ethtool_stats(struct net_device *dev,
 }
 
 static const struct ethtool_ops nb8800_ethtool_ops = {
-	.get_settings		= nb8800_get_settings,
-	.set_settings		= nb8800_set_settings,
 	.nway_reset		= nb8800_nway_reset,
 	.get_link		= ethtool_op_get_link,
 	.get_pauseparam		= nb8800_get_pauseparam,
@@ -1191,6 +1170,8 @@ static const struct ethtool_ops nb8800_ethtool_ops = {
 	.get_sset_count		= nb8800_get_sset_count,
 	.get_strings		= nb8800_get_strings,
 	.get_ethtool_stats	= nb8800_get_ethtool_stats,
+	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
+	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 };
 
 static int nb8800_hw_init(struct net_device *dev)
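nb8800's pause advertisement uses the standard encoding of the IEEE 802.3 flow-control bits: ADVERTISED_Pause requests symmetric pause, and ADVERTISED_Asym_Pause is toggled when the rx and tx wishes differ. A stand-alone sketch of the bit logic (a sketch of the convention, not the driver's exact code, since part of nb8800_pause_adv() falls outside the hunks above):

	#include <linux/ethtool.h>
	#include <linux/types.h>

	static u32 pause_adv_bits(bool rx_pause, bool tx_pause)
	{
		u32 adv = 0;

		/* rx pause asks for symmetric pause plus the asymmetric bit... */
		if (rx_pause)
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* ...and tx pause flips Asym_Pause, so rx+tx = Pause only,
		 * rx-only = Pause|Asym_Pause, tx-only = Asym_Pause.
		 */
		if (tx_pause)
			adv ^= ADVERTISED_Asym_Pause;

		return adv;
	}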
diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h
index e5adbc2aac9f..6ec4a956e1e5 100644
--- a/drivers/net/ethernet/aurora/nb8800.h
+++ b/drivers/net/ethernet/aurora/nb8800.h
@@ -284,7 +284,6 @@ struct nb8800_priv {
 
 	struct mii_bus			*mii_bus;
 	struct device_node		*phy_node;
-	struct phy_device		*phydev;
 
 	/* PHY connection type from DT */
 	int				phy_mode;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 18042c2460bd..d74a92e1c27d 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -139,26 +139,6 @@ config BNX2X_SRIOV
 	  Virtualization support in the 578xx and 57712 products. This
 	  allows for virtual function acceleration in virtual environments.
 
-config BNX2X_VXLAN
-	bool "Virtual eXtensible Local Area Network support"
-	default n
-	depends on BNX2X && VXLAN && !(BNX2X=y && VXLAN=m)
-	---help---
-	  This enables hardward offload support for VXLAN protocol over the
-	  NetXtremeII series adapters.
-	  Say Y here if you want to enable hardware offload support for
-	  Virtual eXtensible Local Area Network (VXLAN) in the driver.
-
-config BNX2X_GENEVE
-	bool "Generic Network Virtualization Encapsulation (GENEVE) support"
-	depends on BNX2X && GENEVE && !(BNX2X=y && GENEVE=m)
-	---help---
-	  This allows one to create GENEVE virtual interfaces that provide
-	  Layer 2 Networks over Layer 3 Networks. GENEVE is often used
-	  to tunnel virtual network infrastructure in virtualized environments.
-	  Say Y here if you want to enable hardware offload support for
-	  Generic Network Virtualization Encapsulation (GENEVE) in the driver.
-
 config BGMAC
 	tristate "BCMA bus GBit core support"
 	depends on BCMA && BCMA_HOST_SOC
@@ -186,7 +166,6 @@ config SYSTEMPORT
 config BNXT
 	tristate "Broadcom NetXtreme-C/E support"
 	depends on PCI
-	depends on VXLAN || VXLAN=n
 	select FW_LOADER
 	select LIBCRC32C
 	---help---
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 543bf38105c9..834afbb51aff 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -96,28 +96,6 @@ static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
 }
 
 /* Ethtool operations */
-static int bcm_sysport_set_settings(struct net_device *dev,
-				    struct ethtool_cmd *cmd)
-{
-	struct bcm_sysport_priv *priv = netdev_priv(dev);
-
-	if (!netif_running(dev))
-		return -EINVAL;
-
-	return phy_ethtool_sset(priv->phydev, cmd);
-}
-
-static int bcm_sysport_get_settings(struct net_device *dev,
-				    struct ethtool_cmd *cmd)
-{
-	struct bcm_sysport_priv *priv = netdev_priv(dev);
-
-	if (!netif_running(dev))
-		return -EINVAL;
-
-	return phy_ethtool_gset(priv->phydev, cmd);
-}
-
 static int bcm_sysport_set_rx_csum(struct net_device *dev,
 				   netdev_features_t wanted)
 {
@@ -1127,7 +1105,7 @@ static void bcm_sysport_tx_timeout(struct net_device *dev)
 static void bcm_sysport_adj_link(struct net_device *dev)
 {
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
-	struct phy_device *phydev = priv->phydev;
+	struct phy_device *phydev = dev->phydev;
 	unsigned int changed = 0;
 	u32 cmd_bits = 0, reg;
 
@@ -1182,7 +1160,7 @@ static void bcm_sysport_adj_link(struct net_device *dev)
 		umac_writel(priv, reg, UMAC_CMD);
 	}
 
-	phy_print_status(priv->phydev);
+	phy_print_status(phydev);
 }
 
 static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
@@ -1525,7 +1503,7 @@ static void bcm_sysport_netif_start(struct net_device *dev)
 	/* Enable RX interrupt and TX ring full interrupt */
 	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
 
-	phy_start(priv->phydev);
+	phy_start(dev->phydev);
 
 	/* Enable TX interrupts for the 32 TXQs */
 	intrl2_1_mask_clear(priv, 0xffffffff);
@@ -1546,6 +1524,7 @@ static void rbuf_init(struct bcm_sysport_priv *priv)
 static int bcm_sysport_open(struct net_device *dev)
 {
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev;
 	unsigned int i;
 	int ret;
 
@@ -1570,9 +1549,9 @@ static int bcm_sysport_open(struct net_device *dev)
 	/* Read CRC forward */
 	priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
 
-	priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
-				      0, priv->phy_interface);
-	if (!priv->phydev) {
+	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
+				0, priv->phy_interface);
+	if (!phydev) {
 		netdev_err(dev, "could not attach to PHY\n");
 		return -ENODEV;
 	}
@@ -1650,7 +1629,7 @@ out_free_tx_ring:
out_free_irq0:
 	free_irq(priv->irq0, dev);
 out_phy_disconnect:
-	phy_disconnect(priv->phydev);
+	phy_disconnect(phydev);
 	return ret;
 }
 
@@ -1661,7 +1640,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev)
 	/* stop all software from updating hardware */
 	netif_tx_stop_all_queues(dev);
 	napi_disable(&priv->napi);
-	phy_stop(priv->phydev);
+	phy_stop(dev->phydev);
 
 	/* mask all interrupts */
 	intrl2_0_mask_set(priv, 0xffffffff);
@@ -1708,14 +1687,12 @@ static int bcm_sysport_stop(struct net_device *dev)
 	free_irq(priv->irq1, dev);
 
 	/* Disconnect from PHY */
-	phy_disconnect(priv->phydev);
+	phy_disconnect(dev->phydev);
 
 	return 0;
 }
 
 static struct ethtool_ops bcm_sysport_ethtool_ops = {
-	.get_settings		= bcm_sysport_get_settings,
-	.set_settings		= bcm_sysport_set_settings,
 	.get_drvinfo		= bcm_sysport_get_drvinfo,
 	.get_msglevel		= bcm_sysport_get_msglvl,
 	.set_msglevel		= bcm_sysport_set_msglvl,
@@ -1727,6 +1704,8 @@ static struct ethtool_ops bcm_sysport_ethtool_ops = {
 	.set_wol		= bcm_sysport_set_wol,
 	.get_coalesce		= bcm_sysport_get_coalesce,
 	.set_coalesce		= bcm_sysport_set_coalesce,
+	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
+	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 };
 
 static const struct net_device_ops bcm_sysport_netdev_ops = {
@@ -1929,7 +1908,7 @@ static int bcm_sysport_suspend(struct device *d)
 
 	bcm_sysport_netif_stop(dev);
 
-	phy_suspend(priv->phydev);
+	phy_suspend(dev->phydev);
 
 	netif_device_detach(dev);
 
@@ -2055,7 +2034,7 @@ static int bcm_sysport_resume(struct device *d)
 		goto out_free_rx_ring;
 	}
 
-	phy_resume(priv->phydev);
+	phy_resume(dev->phydev);
 
 	bcm_sysport_netif_start(dev);
 
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index f28bf545d7f4..1c82e3da69a7 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -670,7 +670,6 @@ struct bcm_sysport_priv {
 
 	/* PHY device */
 	struct device_node	*phy_dn;
-	struct phy_device	*phydev;
 	phy_interface_t		phy_interface;
 	int			old_pause;
 	int			old_link;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index a6333d38ecc0..b045dc072c40 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -246,6 +246,8 @@ err_dma_head:
 
 err_drop:
 	dev_kfree_skb(skb);
+	net_dev->stats.tx_dropped++;
+	net_dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
 }
 
@@ -285,6 +287,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
 					 DMA_TO_DEVICE);
 
 		if (slot->skb) {
+			bgmac->net_dev->stats.tx_bytes += slot->skb->len;
+			bgmac->net_dev->stats.tx_packets++;
 			bytes_compl += slot->skb->len;
 			pkts_compl++;
 
@@ -465,6 +469,7 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
 			bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
 				  ring->start);
 			put_page(virt_to_head_page(buf));
+			bgmac->net_dev->stats.rx_errors++;
 			break;
 		}
 
@@ -472,6 +477,8 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
 			bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n",
 				  ring->start);
 			put_page(virt_to_head_page(buf));
+			bgmac->net_dev->stats.rx_length_errors++;
+			bgmac->net_dev->stats.rx_errors++;
 			break;
 		}
 
@@ -482,6 +489,7 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
 		if (unlikely(!skb)) {
 			bgmac_err(bgmac, "build_skb failed\n");
 			put_page(virt_to_head_page(buf));
+			bgmac->net_dev->stats.rx_errors++;
 			break;
 		}
 		skb_put(skb, BGMAC_RX_FRAME_OFFSET +
@@ -491,6 +499,8 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
 
 		skb_checksum_none_assert(skb);
 		skb->protocol = eth_type_trans(skb, bgmac->net_dev);
+		bgmac->net_dev->stats.rx_bytes += len;
+		bgmac->net_dev->stats.rx_packets++;
 		napi_gro_receive(&bgmac->napi, skb);
 		handled++;
 	} while (0);
@@ -1311,7 +1321,7 @@ static int bgmac_open(struct net_device *net_dev)
 	}
 	napi_enable(&bgmac->napi);
 
-	phy_start(bgmac->phy_dev);
+	phy_start(net_dev->phydev);
 
 	netif_start_queue(net_dev);
 
@@ -1324,7 +1334,7 @@ static int bgmac_stop(struct net_device *net_dev)
 
 	netif_carrier_off(net_dev);
 
-	phy_stop(bgmac->phy_dev);
+	phy_stop(net_dev->phydev);
 
 	napi_disable(&bgmac->napi);
 	bgmac_chip_intrs_off(bgmac);
@@ -1362,12 +1372,10 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
 
 static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
 {
-	struct bgmac *bgmac = netdev_priv(net_dev);
-
 	if (!netif_running(net_dev))
 		return -EINVAL;
 
-	return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd);
+	return phy_mii_ioctl(net_dev->phydev, ifr, cmd);
 }
 
 static const struct net_device_ops bgmac_netdev_ops = {
@@ -1384,20 +1392,125 @@ static const struct net_device_ops bgmac_netdev_ops = {
  * ethtool_ops
  **************************************************/
 
-static int bgmac_get_settings(struct net_device *net_dev,
-			      struct ethtool_cmd *cmd)
+struct bgmac_stat {
+	u8 size;
+	u32 offset;
+	const char *name;
+};
+
+static struct bgmac_stat bgmac_get_strings_stats[] = {
+	{ 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" },
+	{ 4, BGMAC_TX_GOOD_PKTS, "tx_good" },
+	{ 8, BGMAC_TX_OCTETS, "tx_octets" },
+	{ 4, BGMAC_TX_PKTS, "tx_pkts" },
+	{ 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" },
+	{ 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" },
+	{ 4, BGMAC_TX_LEN_64, "tx_64" },
+	{ 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" },
+	{ 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" },
+	{ 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" },
+	{ 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" },
+	{ 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" },
+	{ 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" },
+	{ 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" },
+	{ 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" },
+	{ 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" },
+	{ 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" },
+	{ 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" },
+	{ 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" },
+	{ 4, BGMAC_TX_UNDERRUNS, "tx_underruns" },
+	{ 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" },
+	{ 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" },
+	{ 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" },
+	{ 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" },
+	{ 4, BGMAC_TX_LATE_COLS, "tx_late_cols" },
+	{ 4, BGMAC_TX_DEFERED, "tx_defered" },
+	{ 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" },
+	{ 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" },
+	{ 4, BGMAC_TX_UNI_PKTS, "tx_unicast" },
+	{ 4, BGMAC_TX_Q0_PKTS, "tx_q0" },
+	{ 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" },
+	{ 4, BGMAC_TX_Q1_PKTS, "tx_q1" },
+	{ 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" },
+	{ 4, BGMAC_TX_Q2_PKTS, "tx_q2" },
+	{ 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" },
+	{ 4, BGMAC_TX_Q3_PKTS, "tx_q3" },
+	{ 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" },
+	{ 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" },
+	{ 4, BGMAC_RX_GOOD_PKTS, "rx_good" },
+	{ 8, BGMAC_RX_OCTETS, "rx_octets" },
+	{ 4, BGMAC_RX_PKTS, "rx_pkts" },
+	{ 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" },
+	{ 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" },
+	{ 4, BGMAC_RX_LEN_64, "rx_64" },
+	{ 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" },
+	{ 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" },
+	{ 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" },
+	{ 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" },
+	{ 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" },
+	{ 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" },
+	{ 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" },
+	{ 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" },
+	{ 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" },
+	{ 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" },
+	{ 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" },
+	{ 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" },
+	{ 4, BGMAC_RX_MISSED_PKTS, "rx_missed" },
+	{ 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" },
+	{ 4, BGMAC_RX_UNDERSIZE, "rx_undersize" },
+	{ 4, BGMAC_RX_CRC_ERRS, "rx_crc" },
+	{ 4, BGMAC_RX_ALIGN_ERRS, "rx_align" },
+	{ 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" },
+	{ 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" },
+	{ 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" },
+	{ 4, BGMAC_RX_SACHANGES, "rx_sa_changes" },
+	{ 4, BGMAC_RX_UNI_PKTS, "rx_unicast" },
+};
+
+#define BGMAC_STATS_LEN	ARRAY_SIZE(bgmac_get_strings_stats)
+
+static int bgmac_get_sset_count(struct net_device *dev, int string_set)
 {
-	struct bgmac *bgmac = netdev_priv(net_dev);
+	switch (string_set) {
+	case ETH_SS_STATS:
+		return BGMAC_STATS_LEN;
+	}
 
-	return phy_ethtool_gset(bgmac->phy_dev, cmd);
+	return -EOPNOTSUPP;
 }
 
-static int bgmac_set_settings(struct net_device *net_dev,
-			      struct ethtool_cmd *cmd)
+static void bgmac_get_strings(struct net_device *dev, u32 stringset,
+			      u8 *data)
 {
-	struct bgmac *bgmac = netdev_priv(net_dev);
+	int i;
 
-	return phy_ethtool_sset(bgmac->phy_dev, cmd);
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	for (i = 0; i < BGMAC_STATS_LEN; i++)
+		strlcpy(data + i * ETH_GSTRING_LEN,
+			bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
+}
+
+static void bgmac_get_ethtool_stats(struct net_device *dev,
+				    struct ethtool_stats *ss, uint64_t *data)
+{
+	struct bgmac *bgmac = netdev_priv(dev);
+	const struct bgmac_stat *s;
+	unsigned int i;
+	u64 val;
+
+	if (!netif_running(dev))
+		return;
+
+	for (i = 0; i < BGMAC_STATS_LEN; i++) {
+		s = &bgmac_get_strings_stats[i];
+		val = 0;
+		if (s->size == 8)
+			val = (u64)bgmac_read(bgmac, s->offset + 4) << 32;
+		val |= bgmac_read(bgmac, s->offset);
+		data[i] = val;
+	}
 }
 
 static void bgmac_get_drvinfo(struct net_device *net_dev,
@@ -1408,9 +1521,12 @@ static void bgmac_get_drvinfo(struct net_device *net_dev,
 }
 
 static const struct ethtool_ops bgmac_ethtool_ops = {
-	.get_settings		= bgmac_get_settings,
-	.set_settings		= bgmac_set_settings,
+	.get_strings		= bgmac_get_strings,
+	.get_sset_count		= bgmac_get_sset_count,
+	.get_ethtool_stats	= bgmac_get_ethtool_stats,
 	.get_drvinfo		= bgmac_get_drvinfo,
+	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
+	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 };
 
 /**************************************************
@@ -1431,7 +1547,7 @@ static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
 static void bgmac_adjust_link(struct net_device *net_dev)
 {
 	struct bgmac *bgmac = netdev_priv(net_dev);
-	struct phy_device *phy_dev = bgmac->phy_dev;
+	struct phy_device *phy_dev = net_dev->phydev;
 	bool update = false;
 
 	if (phy_dev->link) {
@@ -1475,8 +1591,6 @@ static int bgmac_fixed_phy_register(struct bgmac *bgmac)
 		return err;
 	}
 
-	bgmac->phy_dev = phy_dev;
-
 	return err;
 }
 
@@ -1521,7 +1635,6 @@ static int bgmac_mii_register(struct bgmac *bgmac)
 		err = PTR_ERR(phy_dev);
 		goto err_unregister_bus;
 	}
-	bgmac->phy_dev = phy_dev;
 
 	return err;
 
@@ -1590,6 +1703,7 @@ static int bgmac_probe(struct bcma_device *core)
 	bgmac->net_dev = net_dev;
 	bgmac->core = core;
 	bcma_set_drvdata(core, bgmac);
+	SET_NETDEV_DEV(net_dev, &core->dev);
 
 	/* Defaults */
 	memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
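bgmac_get_ethtool_stats() assembles 64-bit MIB counters from two 32-bit register reads, high word at offset + 4. A stand-alone sketch of that composition (mmio_read32() is a stand-in for the driver's bgmac_read() accessor):

	#include <stdint.h>

	extern uint32_t mmio_read32(uint32_t offset);	/* stand-in accessor */

	static uint64_t read_counter(uint32_t offset, int size)
	{
		uint64_t val = 0;

		/* 8-byte counters expose their high word at offset + 4. */
		if (size == 8)
			val = (uint64_t)mmio_read32(offset + 4) << 32;
		val |= mmio_read32(offset);

		return val;
	}

Note that the two halves are read without a hardware latch, so a counter that carries between the two reads can yield a momentarily skewed sample; for ethtool statistics that is generally tolerated.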
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 9a03c142b742..99beb181f577 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -123,7 +123,7 @@
 #define BGMAC_TX_LEN_1024_TO_1522		0x334
 #define BGMAC_TX_LEN_1523_TO_2047		0x338
 #define BGMAC_TX_LEN_2048_TO_4095		0x33c
-#define BGMAC_TX_LEN_4095_TO_8191		0x340
+#define BGMAC_TX_LEN_4096_TO_8191		0x340
 #define BGMAC_TX_LEN_8192_TO_MAX		0x344
 #define BGMAC_TX_JABBER_PKTS			0x348		/* Error */
 #define BGMAC_TX_OVERSIZE_PKTS			0x34c		/* Error */
@@ -166,7 +166,7 @@
 #define BGMAC_RX_LEN_1024_TO_1522		0x3e4
 #define BGMAC_RX_LEN_1523_TO_2047		0x3e8
 #define BGMAC_RX_LEN_2048_TO_4095		0x3ec
-#define BGMAC_RX_LEN_4095_TO_8191		0x3f0
+#define BGMAC_RX_LEN_4096_TO_8191		0x3f0
 #define BGMAC_RX_LEN_8192_TO_MAX		0x3f4
 #define BGMAC_RX_JABBER_PKTS			0x3f8		/* Error */
 #define BGMAC_RX_OVERSIZE_PKTS			0x3fc		/* Error */
@@ -441,7 +441,6 @@ struct bgmac {
 	struct net_device *net_dev;
 	struct napi_struct napi;
 	struct mii_bus *mii_bus;
-	struct phy_device *phy_dev;
 
 	/* DMA */
 	struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a59d55e25d5f..97e892511666 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -59,9 +59,6 @@
 #include <linux/semaphore.h>
 #include <linux/stringify.h>
 #include <linux/vmalloc.h>
-#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
-#include <net/geneve.h>
-#endif
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 #include "bnx2x_init_ops.h"
@@ -10076,7 +10073,6 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
 	}
 }
 
-#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE)
 static int bnx2x_udp_port_update(struct bnx2x *bp)
 {
 	struct bnx2x_func_switch_update_params *switch_update_params;
@@ -10177,47 +10173,42 @@ static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port,
 	DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n",
 	   type, port);
 }
-#endif
 
-#ifdef CONFIG_BNX2X_VXLAN
-static void bnx2x_add_vxlan_port(struct net_device *netdev,
-				 sa_family_t sa_family, __be16 port)
-{
-	struct bnx2x *bp = netdev_priv(netdev);
-	u16 t_port = ntohs(port);
-
-	__bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
-}
-
-static void bnx2x_del_vxlan_port(struct net_device *netdev,
-				 sa_family_t sa_family, __be16 port)
-{
-	struct bnx2x *bp = netdev_priv(netdev);
-	u16 t_port = ntohs(port);
-
-	__bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
-}
-#endif
-
-#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
-static void bnx2x_add_geneve_port(struct net_device *netdev,
-				  sa_family_t sa_family, __be16 port)
+static void bnx2x_udp_tunnel_add(struct net_device *netdev,
+				 struct udp_tunnel_info *ti)
 {
 	struct bnx2x *bp = netdev_priv(netdev);
-	u16 t_port = ntohs(port);
+	u16 t_port = ntohs(ti->port);
 
-	__bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		__bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
+		break;
+	case UDP_TUNNEL_TYPE_GENEVE:
+		__bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+		break;
+	default:
+		break;
+	}
 }
 
-static void bnx2x_del_geneve_port(struct net_device *netdev,
-				  sa_family_t sa_family, __be16 port)
+static void bnx2x_udp_tunnel_del(struct net_device *netdev,
+				 struct udp_tunnel_info *ti)
 {
 	struct bnx2x *bp = netdev_priv(netdev);
-	u16 t_port = ntohs(port);
+	u16 t_port = ntohs(ti->port);
 
-	__bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		__bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
+		break;
+	case UDP_TUNNEL_TYPE_GENEVE:
+		__bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+		break;
+	default:
+		break;
+	}
 }
-#endif
 
 static int bnx2x_close(struct net_device *dev);
 
@@ -10325,7 +10316,6 @@ sp_rtnl_not_reset:
10325 &bp->sp_rtnl_state)) 10316 &bp->sp_rtnl_state))
10326 bnx2x_update_mng_version(bp); 10317 bnx2x_update_mng_version(bp);
10327 10318
10328#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE)
10329 if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT, 10319 if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
10330 &bp->sp_rtnl_state)) { 10320 &bp->sp_rtnl_state)) {
10331 if (bnx2x_udp_port_update(bp)) { 10321 if (bnx2x_udp_port_update(bp)) {
@@ -10335,20 +10325,14 @@ sp_rtnl_not_reset:
10335 BNX2X_UDP_PORT_MAX); 10325 BNX2X_UDP_PORT_MAX);
10336 } else { 10326 } else {
10337 /* Since we don't store additional port information, 10327 /* Since we don't store additional port information,
10338 * if no port is configured for any feature ask for 10328 * if no ports are configured for any feature, ask for
10339 * information about currently configured ports. 10329 * information about currently configured ports.
10340 */ 10330 */
10341#ifdef CONFIG_BNX2X_VXLAN 10331 if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count &&
10342 if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) 10332 !bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count)
10343 vxlan_get_rx_port(bp->dev); 10333 udp_tunnel_get_rx_info(bp->dev);
10344#endif
10345#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
10346 if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count)
10347 geneve_get_rx_port(bp->dev);
10348#endif
10349 } 10334 }
10350 } 10335 }
10351#endif
10352 10336
10353 /* work which needs rtnl lock not-taken (as it takes the lock itself and 10337 /* work which needs rtnl lock not-taken (as it takes the lock itself and
10354 * can be called from other contexts as well) 10338 * can be called from other contexts as well)
@@ -12551,14 +12535,8 @@ static int bnx2x_open(struct net_device *dev)
12551 if (rc) 12535 if (rc)
12552 return rc; 12536 return rc;
12553 12537
12554#ifdef CONFIG_BNX2X_VXLAN
12555 if (IS_PF(bp))
12556 vxlan_get_rx_port(dev);
12557#endif
12558#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
12559 if (IS_PF(bp)) 12538 if (IS_PF(bp))
12560 geneve_get_rx_port(dev); 12539 udp_tunnel_get_rx_info(dev);
12561#endif
12562 12540
12563 return 0; 12541 return 0;
12564} 12542}
@@ -13045,14 +13023,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
13045 .ndo_get_phys_port_id = bnx2x_get_phys_port_id, 13023 .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
13046 .ndo_set_vf_link_state = bnx2x_set_vf_link_state, 13024 .ndo_set_vf_link_state = bnx2x_set_vf_link_state,
13047 .ndo_features_check = bnx2x_features_check, 13025 .ndo_features_check = bnx2x_features_check,
13048#ifdef CONFIG_BNX2X_VXLAN 13026 .ndo_udp_tunnel_add = bnx2x_udp_tunnel_add,
13049 .ndo_add_vxlan_port = bnx2x_add_vxlan_port, 13027 .ndo_udp_tunnel_del = bnx2x_udp_tunnel_del,
13050 .ndo_del_vxlan_port = bnx2x_del_vxlan_port,
13051#endif
13052#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
13053 .ndo_add_geneve_port = bnx2x_add_geneve_port,
13054 .ndo_del_geneve_port = bnx2x_del_geneve_port,
13055#endif
13056}; 13028};
13057 13029
13058static int bnx2x_set_coherency_mask(struct bnx2x *bp) 13030static int bnx2x_set_coherency_mask(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c777cde85ce4..673f4d62e73e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -37,9 +37,7 @@
37#include <net/udp.h> 37#include <net/udp.h>
38#include <net/checksum.h> 38#include <net/checksum.h>
39#include <net/ip6_checksum.h> 39#include <net/ip6_checksum.h>
40#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE) 40#include <net/udp_tunnel.h>
41#include <net/vxlan.h>
42#endif
43#ifdef CONFIG_NET_RX_BUSY_POLL 41#ifdef CONFIG_NET_RX_BUSY_POLL
44#include <net/busy_poll.h> 42#include <net/busy_poll.h>
45#endif 43#endif
@@ -75,12 +73,22 @@ enum board_idx {
75 BCM57301, 73 BCM57301,
76 BCM57302, 74 BCM57302,
77 BCM57304, 75 BCM57304,
76 BCM57311,
77 BCM57312,
78 BCM57402, 78 BCM57402,
79 BCM57404, 79 BCM57404,
80 BCM57406, 80 BCM57406,
81 BCM57404_NPAR,
82 BCM57412,
83 BCM57414,
84 BCM57416,
85 BCM57417,
86 BCM57414_NPAR,
81 BCM57314, 87 BCM57314,
82 BCM57304_VF, 88 BCM57304_VF,
83 BCM57404_VF, 89 BCM57404_VF,
90 BCM57414_VF,
91 BCM57314_VF,
84}; 92};
85 93
86/* indexed by enum above */ 94/* indexed by enum above */
@@ -90,25 +98,45 @@ static const struct {
90 { "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" }, 98 { "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" },
91 { "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" }, 99 { "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
92 { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" }, 100 { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
101 { "Broadcom BCM57311 NetXtreme-C Single-port 10Gb Ethernet" },
102 { "Broadcom BCM57312 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
93 { "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" }, 103 { "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
94 { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" }, 104 { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
95 { "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" }, 105 { "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
106 { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
107 { "Broadcom BCM57412 NetXtreme-E Dual-port 10Gb Ethernet" },
108 { "Broadcom BCM57414 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
109 { "Broadcom BCM57416 NetXtreme-E Dual-port 10GBase-T Ethernet" },
110 { "Broadcom BCM57417 NetXtreme-E Dual-port 10GBase-T Ethernet" },
111 { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
96 { "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" }, 112 { "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
97 { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" }, 113 { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
98 { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" }, 114 { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
115 { "Broadcom BCM57414 NetXtreme-E Ethernet Virtual Function" },
116 { "Broadcom BCM57314 NetXtreme-E Ethernet Virtual Function" },
99}; 117};
100 118
101static const struct pci_device_id bnxt_pci_tbl[] = { 119static const struct pci_device_id bnxt_pci_tbl[] = {
102 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, 120 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
103 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, 121 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
104 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 }, 122 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
123 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
124 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
105 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, 125 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
106 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, 126 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
107 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, 127 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
128 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57404_NPAR },
129 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
130 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
131 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
132 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
133 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57414_NPAR },
108 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 }, 134 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
109#ifdef CONFIG_BNXT_SRIOV 135#ifdef CONFIG_BNXT_SRIOV
110 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF }, 136 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
111 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF }, 137 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
138 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = BCM57414_VF },
139 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = BCM57314_VF },
112#endif 140#endif
113 { 0 } 141 { 0 }
114}; 142};
@@ -125,12 +153,14 @@ static const u16 bnxt_async_events_arr[] = {
125 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, 153 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
126 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, 154 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
127 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, 155 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
156 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
128 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, 157 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
129}; 158};
130 159
131static bool bnxt_vf_pciid(enum board_idx idx) 160static bool bnxt_vf_pciid(enum board_idx idx)
132{ 161{
133 return (idx == BCM57304_VF || idx == BCM57404_VF); 162 return (idx == BCM57304_VF || idx == BCM57404_VF ||
163 idx == BCM57314_VF || idx == BCM57414_VF);
134} 164}
135 165
136#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) 166#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -920,6 +950,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
920 } 950 }
921 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); 951 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
922 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); 952 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
953 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
923 954
924 rxr->rx_prod = NEXT_RX(prod); 955 rxr->rx_prod = NEXT_RX(prod);
925 cons = NEXT_RX(cons); 956 cons = NEXT_RX(cons);
@@ -938,32 +969,102 @@ static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
938 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); 969 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
939} 970}
940 971
972static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
973 int payload_off, int tcp_ts,
974 struct sk_buff *skb)
975{
976#ifdef CONFIG_INET
977 struct tcphdr *th;
978 int len, nw_off;
979 u16 outer_ip_off, inner_ip_off, inner_mac_off;
980 u32 hdr_info = tpa_info->hdr_info;
981 bool loopback = false;
982
983 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
984 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
985 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
986
987 /* If the packet is an internal loopback packet, the offsets will
988 * have an extra 4 bytes.
989 */
990 if (inner_mac_off == 4) {
991 loopback = true;
992 } else if (inner_mac_off > 4) {
993 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
994 ETH_HLEN - 2));
995
 996 /* We only support inner IPv4/IPv6. If we don't see the
997 * correct protocol ID, it must be a loopback packet where
998 * the offsets are off by 4.
999 */
 1000 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1001 loopback = true;
1002 }
1003 if (loopback) {
1004 /* internal loopback packet, subtract all offsets by 4 */
1005 inner_ip_off -= 4;
1006 inner_mac_off -= 4;
1007 outer_ip_off -= 4;
1008 }
1009
1010 nw_off = inner_ip_off - ETH_HLEN;
1011 skb_set_network_header(skb, nw_off);
1012 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1013 struct ipv6hdr *iph = ipv6_hdr(skb);
1014
1015 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1016 len = skb->len - skb_transport_offset(skb);
1017 th = tcp_hdr(skb);
1018 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1019 } else {
1020 struct iphdr *iph = ip_hdr(skb);
1021
1022 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1023 len = skb->len - skb_transport_offset(skb);
1024 th = tcp_hdr(skb);
1025 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1026 }
1027
1028 if (inner_mac_off) { /* tunnel */
1029 struct udphdr *uh = NULL;
1030 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1031 ETH_HLEN - 2));
1032
1033 if (proto == htons(ETH_P_IP)) {
1034 struct iphdr *iph = (struct iphdr *)skb->data;
1035
1036 if (iph->protocol == IPPROTO_UDP)
1037 uh = (struct udphdr *)(iph + 1);
1038 } else {
1039 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1040
1041 if (iph->nexthdr == IPPROTO_UDP)
1042 uh = (struct udphdr *)(iph + 1);
1043 }
1044 if (uh) {
1045 if (uh->check)
1046 skb_shinfo(skb)->gso_type |=
1047 SKB_GSO_UDP_TUNNEL_CSUM;
1048 else
1049 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1050 }
1051 }
1052#endif
1053 return skb;
1054}
1055
941#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr)) 1056#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
942#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) 1057#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
943 1058
944static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info, 1059static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
945 struct rx_tpa_end_cmp *tpa_end, 1060 int payload_off, int tcp_ts,
946 struct rx_tpa_end_cmp_ext *tpa_end1,
947 struct sk_buff *skb) 1061 struct sk_buff *skb)
948{ 1062{
949#ifdef CONFIG_INET 1063#ifdef CONFIG_INET
950 struct tcphdr *th; 1064 struct tcphdr *th;
951 int payload_off, tcp_opt_len = 0; 1065 int len, nw_off, tcp_opt_len = 0;
952 int len, nw_off;
953 u16 segs;
954
955 segs = TPA_END_TPA_SEGS(tpa_end);
956 if (segs == 1)
957 return skb;
958 1066
959 NAPI_GRO_CB(skb)->count = segs; 1067 if (tcp_ts)
960 skb_shinfo(skb)->gso_size =
961 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
962 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
963 payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
964 RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
965 RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
966 if (TPA_END_GRO_TS(tpa_end))
967 tcp_opt_len = 12; 1068 tcp_opt_len = 12;
968 1069
969 if (tpa_info->gso_type == SKB_GSO_TCPV4) { 1070 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
@@ -1020,6 +1121,32 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
1020 return skb; 1121 return skb;
1021} 1122}
1022 1123
1124static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1125 struct bnxt_tpa_info *tpa_info,
1126 struct rx_tpa_end_cmp *tpa_end,
1127 struct rx_tpa_end_cmp_ext *tpa_end1,
1128 struct sk_buff *skb)
1129{
1130#ifdef CONFIG_INET
1131 int payload_off;
1132 u16 segs;
1133
1134 segs = TPA_END_TPA_SEGS(tpa_end);
1135 if (segs == 1)
1136 return skb;
1137
1138 NAPI_GRO_CB(skb)->count = segs;
1139 skb_shinfo(skb)->gso_size =
1140 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1141 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1142 payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1143 RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
1144 RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
1145 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1146#endif
1147 return skb;
1148}
1149
1023static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, 1150static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1024 struct bnxt_napi *bnapi, 1151 struct bnxt_napi *bnapi,
1025 u32 *raw_cons, 1152 u32 *raw_cons,
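The refactored bnxt_gro_skb() keeps the generic TPA segment bookkeeping and hands the header fix-up to a per-chip handler stashed in bp->gro_func, which bnxt_init_one() selects once by chip family. A small self-contained model of that probe-time function-pointer dispatch, with hypothetical names and device IDs borrowed from the ID table above:

#include <stdio.h>
#include <stdint.h>

/* Illustrative model: pick the per-chip handler once at probe time,
 * then take one indirect call on the fast path with no chip test.
 */
struct pkt { int payload_off; };

typedef void (*gro_func_t)(struct pkt *p);

static void gro_func_5730x(struct pkt *p)
{
	printf("5730x fixup, payload_off=%d\n", p->payload_off);
}

static void gro_func_5731x(struct pkt *p)
{
	printf("5731x fixup, payload_off=%d\n", p->payload_off);
}

struct dev {
	uint16_t chip_num;
	gro_func_t gro_func;
};

static void probe(struct dev *d)
{
	/* default to the older family, override for the newer one */
	d->gro_func = gro_func_5730x;
	if (d->chip_num == 0x16ce || d->chip_num == 0x16cf ||
	    d->chip_num == 0x16df)
		d->gro_func = gro_func_5731x;
}

int main(void)
{
	struct dev d = { .chip_num = 0x16df };
	struct pkt p = { .payload_off = 66 };

	probe(&d);
	d.gro_func(&p);		/* fast path: no per-packet chip check */
	return 0;
}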
@@ -1130,7 +1257,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1130 } 1257 }
1131 1258
1132 if (TPA_END_GRO(tpa_end)) 1259 if (TPA_END_GRO(tpa_end))
1133 skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb); 1260 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1134 1261
1135 return skb; 1262 return skb;
1136} 1263}
@@ -1358,6 +1485,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
1358 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); 1485 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1359 break; 1486 break;
1360 } 1487 }
1488 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
1489 if (BNXT_PF(bp))
1490 goto async_event_process_exit;
1491 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1492 break;
1361 default: 1493 default:
1362 netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n", 1494 netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
1363 event_id); 1495 event_id);
@@ -2262,7 +2394,7 @@ static void bnxt_set_tpa_flags(struct bnxt *bp)
2262 bp->flags &= ~BNXT_FLAG_TPA; 2394 bp->flags &= ~BNXT_FLAG_TPA;
2263 if (bp->dev->features & NETIF_F_LRO) 2395 if (bp->dev->features & NETIF_F_LRO)
2264 bp->flags |= BNXT_FLAG_LRO; 2396 bp->flags |= BNXT_FLAG_LRO;
2265 if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0)) 2397 if (bp->dev->features & NETIF_F_GRO)
2266 bp->flags |= BNXT_FLAG_GRO; 2398 bp->flags |= BNXT_FLAG_GRO;
2267} 2399}
2268 2400
@@ -3277,6 +3409,7 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
3277 unsigned int ring = 0, grp_idx; 3409 unsigned int ring = 0, grp_idx;
3278 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3410 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3279 struct hwrm_vnic_cfg_input req = {0}; 3411 struct hwrm_vnic_cfg_input req = {0};
3412 u16 def_vlan = 0;
3280 3413
3281 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); 3414 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
3282 /* Only RSS support for now TBD: COS & LB */ 3415 /* Only RSS support for now TBD: COS & LB */
@@ -3297,7 +3430,11 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
3297 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + 3430 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
3298 VLAN_HLEN); 3431 VLAN_HLEN);
3299 3432
3300 if (bp->flags & BNXT_FLAG_STRIP_VLAN) 3433#ifdef CONFIG_BNXT_SRIOV
3434 if (BNXT_VF(bp))
3435 def_vlan = bp->vf.vlan;
3436#endif
3437 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
3301 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 3438 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
3302 3439
3303 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3440 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -3836,6 +3973,39 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
3836 return 0; 3973 return 0;
3837} 3974}
3838 3975
3976static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
3977{
3978 struct hwrm_func_qcfg_input req = {0};
3979 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3980 int rc;
3981
3982 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
3983 req.fid = cpu_to_le16(0xffff);
3984 mutex_lock(&bp->hwrm_cmd_lock);
3985 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3986 if (rc)
3987 goto func_qcfg_exit;
3988
3989#ifdef CONFIG_BNXT_SRIOV
3990 if (BNXT_VF(bp)) {
3991 struct bnxt_vf_info *vf = &bp->vf;
3992
3993 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
3994 }
3995#endif
3996 switch (resp->port_partition_type) {
3997 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
3998 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
3999 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
4000 bp->port_partition_type = resp->port_partition_type;
4001 break;
4002 }
4003
4004func_qcfg_exit:
4005 mutex_unlock(&bp->hwrm_cmd_lock);
4006 return rc;
4007}
4008
3839int bnxt_hwrm_func_qcaps(struct bnxt *bp) 4009int bnxt_hwrm_func_qcaps(struct bnxt *bp)
3840{ 4010{
3841 int rc = 0; 4011 int rc = 0;
@@ -3990,6 +4160,8 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
3990 if (resp->hwrm_intf_maj >= 1) 4160 if (resp->hwrm_intf_maj >= 1)
3991 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 4161 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
3992 4162
4163 bp->chip_num = le16_to_cpu(resp->chip_num);
4164
3993hwrm_ver_get_exit: 4165hwrm_ver_get_exit:
3994 mutex_unlock(&bp->hwrm_cmd_lock); 4166 mutex_unlock(&bp->hwrm_cmd_lock);
3995 return rc; 4167 return rc;
@@ -4230,6 +4402,11 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
4230 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 4402 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
4231 rc); 4403 rc);
4232 4404
4405 if (BNXT_VF(bp)) {
4406 bnxt_hwrm_func_qcfg(bp);
4407 netdev_update_features(bp->dev);
4408 }
4409
4233 return 0; 4410 return 0;
4234 4411
4235err_out: 4412err_out:
@@ -4644,6 +4821,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
4644 int rc = 0; 4821 int rc = 0;
4645 struct hwrm_port_phy_qcaps_input req = {0}; 4822 struct hwrm_port_phy_qcaps_input req = {0};
4646 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 4823 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4824 struct bnxt_link_info *link_info = &bp->link_info;
4647 4825
4648 if (bp->hwrm_spec_code < 0x10201) 4826 if (bp->hwrm_spec_code < 0x10201)
4649 return 0; 4827 return 0;
@@ -4666,6 +4844,8 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
4666 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 4844 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
4667 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 4845 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
4668 } 4846 }
4847 link_info->support_auto_speeds =
4848 le16_to_cpu(resp->supported_speeds_auto_mode);
4669 4849
4670hwrm_phy_qcaps_exit: 4850hwrm_phy_qcaps_exit:
4671 mutex_unlock(&bp->hwrm_cmd_lock); 4851 mutex_unlock(&bp->hwrm_cmd_lock);
@@ -4923,7 +5103,7 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
4923{ 5103{
4924 struct hwrm_port_phy_cfg_input req = {0}; 5104 struct hwrm_port_phy_cfg_input req = {0};
4925 5105
4926 if (BNXT_VF(bp)) 5106 if (!BNXT_SINGLE_PF(bp))
4927 return 0; 5107 return 0;
4928 5108
4929 if (pci_num_vf(bp->pdev)) 5109 if (pci_num_vf(bp->pdev))
@@ -5073,15 +5253,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
5073 netdev_warn(bp->dev, "failed to update phy settings\n"); 5253 netdev_warn(bp->dev, "failed to update phy settings\n");
5074 } 5254 }
5075 5255
5076 if (irq_re_init) { 5256 if (irq_re_init)
5077#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE) 5257 udp_tunnel_get_rx_info(bp->dev);
5078 vxlan_get_rx_port(bp->dev);
5079#endif
5080 if (!bnxt_hwrm_tunnel_dst_port_alloc(
5081 bp, htons(0x17c1),
5082 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
5083 bp->nge_port_cnt = 1;
5084 }
5085 5258
5086 set_bit(BNXT_STATE_OPEN, &bp->state); 5259 set_bit(BNXT_STATE_OPEN, &bp->state);
5087 bnxt_enable_int(bp); 5260 bnxt_enable_int(bp);
@@ -5469,7 +5642,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
5469 features |= NETIF_F_HW_VLAN_CTAG_RX | 5642 features |= NETIF_F_HW_VLAN_CTAG_RX |
5470 NETIF_F_HW_VLAN_STAG_RX; 5643 NETIF_F_HW_VLAN_STAG_RX;
5471 } 5644 }
5472 5645#ifdef CONFIG_BNXT_SRIOV
5646 if (BNXT_VF(bp)) {
5647 if (bp->vf.vlan) {
5648 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
5649 NETIF_F_HW_VLAN_STAG_RX);
5650 }
5651 }
5652#endif
5473 return features; 5653 return features;
5474} 5654}
5475 5655
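The new bnxt_fix_features() hunk withdraws the VLAN RX offload bits when the PF enforces a default VLAN on a VF, since the tag then belongs to the hardware rather than the stack. A toy model of that masking, with stand-in flag values rather than the real NETIF_F_* bits:

#include <stdio.h>
#include <stdint.h>

#define F_VLAN_CTAG_RX	(1u << 0)	/* illustrative stand-ins for NETIF_F_* */
#define F_VLAN_STAG_RX	(1u << 1)

static uint32_t fix_features(uint32_t features, int is_vf, uint16_t vf_vlan)
{
	/* a VF with a PF-enforced default VLAN cannot offer VLAN RX offloads */
	if (is_vf && vf_vlan)
		features &= ~(F_VLAN_CTAG_RX | F_VLAN_STAG_RX);
	return features;
}

int main(void)
{
	uint32_t f = F_VLAN_CTAG_RX | F_VLAN_STAG_RX;

	printf("PF:           0x%x\n", fix_features(f, 0, 0));
	printf("VF, vlan 100: 0x%x\n", fix_features(f, 1, 100));
	return 0;
}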
@@ -5585,9 +5765,10 @@ static void bnxt_dbg_dump_states(struct bnxt *bp)
5585 } 5765 }
5586} 5766}
5587 5767
5588static void bnxt_reset_task(struct bnxt *bp) 5768static void bnxt_reset_task(struct bnxt *bp, bool silent)
5589{ 5769{
5590 bnxt_dbg_dump_states(bp); 5770 if (!silent)
5771 bnxt_dbg_dump_states(bp);
5591 if (netif_running(bp->dev)) { 5772 if (netif_running(bp->dev)) {
5592 bnxt_close_nic(bp, false, false); 5773 bnxt_close_nic(bp, false, false);
5593 bnxt_open_nic(bp, false, false); 5774 bnxt_open_nic(bp, false, false);
@@ -5638,6 +5819,23 @@ bnxt_restart_timer:
5638 mod_timer(&bp->timer, jiffies + bp->current_interval); 5819 mod_timer(&bp->timer, jiffies + bp->current_interval);
5639} 5820}
5640 5821
5822/* Only called from bnxt_sp_task() */
5823static void bnxt_reset(struct bnxt *bp, bool silent)
5824{
5825 /* bnxt_reset_task() calls bnxt_close_nic() which waits
5826 * for BNXT_STATE_IN_SP_TASK to clear.
5827 * If there is a parallel dev_close(), bnxt_close() may be holding
5828 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
5829 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
5830 */
5831 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5832 rtnl_lock();
5833 if (test_bit(BNXT_STATE_OPEN, &bp->state))
5834 bnxt_reset_task(bp, silent);
5835 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5836 rtnl_unlock();
5837}
5838
5641static void bnxt_cfg_ntp_filters(struct bnxt *); 5839static void bnxt_cfg_ntp_filters(struct bnxt *);
5642 5840
5643static void bnxt_sp_task(struct work_struct *work) 5841static void bnxt_sp_task(struct work_struct *work)
@@ -5674,16 +5872,20 @@ static void bnxt_sp_task(struct work_struct *work)
5674 bnxt_hwrm_tunnel_dst_port_free( 5872 bnxt_hwrm_tunnel_dst_port_free(
5675 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 5873 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
5676 } 5874 }
5677 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) { 5875 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
5678 /* bnxt_reset_task() calls bnxt_close_nic() which waits 5876 bnxt_hwrm_tunnel_dst_port_alloc(
5679 * for BNXT_STATE_IN_SP_TASK to clear. 5877 bp, bp->nge_port,
5680 */ 5878 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
5681 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5682 rtnl_lock();
5683 bnxt_reset_task(bp);
5684 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5685 rtnl_unlock();
5686 } 5879 }
5880 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
5881 bnxt_hwrm_tunnel_dst_port_free(
5882 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
5883 }
5884 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
5885 bnxt_reset(bp, false);
5886
5887 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
5888 bnxt_reset(bp, true);
5687 5889
5688 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) 5890 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
5689 bnxt_get_port_module_status(bp); 5891 bnxt_get_port_module_status(bp);
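bnxt_sp_task() consumes one-shot event bits with test_and_clear_bit(), so an event raised from any context -- an NDO, a timer, an async firmware completion -- is coalesced and handled at most once per work run. A single-threaded sketch of that consume loop, with the kernel's atomic bitops replaced by plain mask arithmetic:

#include <stdio.h>

enum { EV_VXLAN_ADD, EV_GENEVE_ADD, EV_RESET, EV_RESET_SILENT };

static unsigned long sp_event;

/* model of test_and_clear_bit(): report-and-consume in one step */
static int test_and_clear(int bit, unsigned long *word)
{
	int was_set = (*word >> bit) & 1;

	*word &= ~(1ul << bit);
	return was_set;
}

static void sp_task(void)
{
	if (test_and_clear(EV_VXLAN_ADD, &sp_event))
		printf("alloc VXLAN dst port\n");
	if (test_and_clear(EV_GENEVE_ADD, &sp_event))
		printf("alloc GENEVE dst port\n");
	if (test_and_clear(EV_RESET, &sp_event))
		printf("reset (verbose)\n");
	if (test_and_clear(EV_RESET_SILENT, &sp_event))
		printf("reset (silent)\n");
}

int main(void)
{
	sp_event |= 1ul << EV_GENEVE_ADD;
	sp_event |= 1ul << EV_RESET_SILENT;
	sp_task();	/* handles both pending events */
	sp_task();	/* nothing left to do */
	return 0;
}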
@@ -6048,47 +6250,83 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
6048 6250
6049#endif /* CONFIG_RFS_ACCEL */ 6251#endif /* CONFIG_RFS_ACCEL */
6050 6252
6051static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, 6253static void bnxt_udp_tunnel_add(struct net_device *dev,
6052 __be16 port) 6254 struct udp_tunnel_info *ti)
6053{ 6255{
6054 struct bnxt *bp = netdev_priv(dev); 6256 struct bnxt *bp = netdev_priv(dev);
6055 6257
6056 if (!netif_running(dev)) 6258 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
6057 return; 6259 return;
6058 6260
6059 if (sa_family != AF_INET6 && sa_family != AF_INET) 6261 if (!netif_running(dev))
6060 return; 6262 return;
6061 6263
6062 if (bp->vxlan_port_cnt && bp->vxlan_port != port) 6264 switch (ti->type) {
6063 return; 6265 case UDP_TUNNEL_TYPE_VXLAN:
6266 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
6267 return;
6064 6268
6065 bp->vxlan_port_cnt++; 6269 bp->vxlan_port_cnt++;
6066 if (bp->vxlan_port_cnt == 1) { 6270 if (bp->vxlan_port_cnt == 1) {
6067 bp->vxlan_port = port; 6271 bp->vxlan_port = ti->port;
6068 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); 6272 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
6069 schedule_work(&bp->sp_task); 6273 schedule_work(&bp->sp_task);
6274 }
6275 break;
6276 case UDP_TUNNEL_TYPE_GENEVE:
6277 if (bp->nge_port_cnt && bp->nge_port != ti->port)
6278 return;
6279
6280 bp->nge_port_cnt++;
6281 if (bp->nge_port_cnt == 1) {
6282 bp->nge_port = ti->port;
6283 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
6284 }
6285 break;
6286 default:
6287 return;
6070 } 6288 }
6289
6290 schedule_work(&bp->sp_task);
6071} 6291}
6072 6292
6073static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, 6293static void bnxt_udp_tunnel_del(struct net_device *dev,
6074 __be16 port) 6294 struct udp_tunnel_info *ti)
6075{ 6295{
6076 struct bnxt *bp = netdev_priv(dev); 6296 struct bnxt *bp = netdev_priv(dev);
6077 6297
6078 if (!netif_running(dev)) 6298 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
6079 return; 6299 return;
6080 6300
6081 if (sa_family != AF_INET6 && sa_family != AF_INET) 6301 if (!netif_running(dev))
6082 return; 6302 return;
6083 6303
6084 if (bp->vxlan_port_cnt && bp->vxlan_port == port) { 6304 switch (ti->type) {
6305 case UDP_TUNNEL_TYPE_VXLAN:
6306 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
6307 return;
6085 bp->vxlan_port_cnt--; 6308 bp->vxlan_port_cnt--;
6086 6309
6087 if (bp->vxlan_port_cnt == 0) { 6310 if (bp->vxlan_port_cnt != 0)
6088 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); 6311 return;
6089 schedule_work(&bp->sp_task); 6312
6090 } 6313 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
6314 break;
6315 case UDP_TUNNEL_TYPE_GENEVE:
6316 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
6317 return;
6318 bp->nge_port_cnt--;
6319
6320 if (bp->nge_port_cnt != 0)
6321 return;
6322
6323 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
6324 break;
6325 default:
6326 return;
6091 } 6327 }
6328
6329 schedule_work(&bp->sp_task);
6092} 6330}
6093 6331
6094static const struct net_device_ops bnxt_netdev_ops = { 6332static const struct net_device_ops bnxt_netdev_ops = {
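The add/del handlers above track one offloaded port per tunnel protocol with a small reference count: the first add latches the port and schedules the HWRM allocation, repeat adds of the same port only bump the count, and only the final delete schedules the free. A compact model of those semantics for a single protocol, with userspace stand-ins for the driver fields:

#include <stdio.h>
#include <stdint.h>

static uint16_t vxlan_port;
static uint8_t vxlan_port_cnt;

static void tunnel_add(uint16_t port)
{
	if (vxlan_port_cnt && vxlan_port != port)
		return;			/* only one offloaded port supported */
	if (++vxlan_port_cnt == 1) {
		vxlan_port = port;
		printf("schedule HWRM alloc for port %u\n", port);
	}
}

static void tunnel_del(uint16_t port)
{
	if (!vxlan_port_cnt || vxlan_port != port)
		return;			/* not the port we offloaded */
	if (--vxlan_port_cnt == 0)
		printf("schedule HWRM free for port %u\n", port);
}

int main(void)
{
	tunnel_add(4789);	/* first user: alloc */
	tunnel_add(4789);	/* second user: count only */
	tunnel_del(4789);	/* one user left: keep */
	tunnel_del(4789);	/* last user: free */
	return 0;
}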
@@ -6119,8 +6357,8 @@ static const struct net_device_ops bnxt_netdev_ops = {
6119#ifdef CONFIG_RFS_ACCEL 6357#ifdef CONFIG_RFS_ACCEL
6120 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 6358 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
6121#endif 6359#endif
6122 .ndo_add_vxlan_port = bnxt_add_vxlan_port, 6360 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
6123 .ndo_del_vxlan_port = bnxt_del_vxlan_port, 6361 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
6124#ifdef CONFIG_NET_RX_BUSY_POLL 6362#ifdef CONFIG_NET_RX_BUSY_POLL
6125 .ndo_busy_poll = bnxt_busy_poll, 6363 .ndo_busy_poll = bnxt_busy_poll,
6126#endif 6364#endif
@@ -6169,6 +6407,12 @@ static int bnxt_probe_phy(struct bnxt *bp)
6169 return rc; 6407 return rc;
6170 } 6408 }
6171 6409
6410 /* Older firmware does not have supported_auto_speeds, so assume
6411 * that all supported speeds can be autonegotiated.
6412 */
6413 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
6414 link_info->support_auto_speeds = link_info->support_speeds;
6415
6172 /* initialize the ethtool settings copy with NVM settings */ 6416
6173 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 6417 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
6174 link_info->autoneg = BNXT_AUTONEG_SPEED; 6418 link_info->autoneg = BNXT_AUTONEG_SPEED;
@@ -6342,7 +6586,13 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6342 goto init_err; 6586 goto init_err;
6343 6587
6344 mutex_init(&bp->hwrm_cmd_lock); 6588 mutex_init(&bp->hwrm_cmd_lock);
6345 bnxt_hwrm_ver_get(bp); 6589 rc = bnxt_hwrm_ver_get(bp);
6590 if (rc)
6591 goto init_err;
6592
6593 bp->gro_func = bnxt_gro_func_5730x;
6594 if (BNXT_CHIP_NUM_57X1X(bp->chip_num))
6595 bp->gro_func = bnxt_gro_func_5731x;
6346 6596
6347 rc = bnxt_hwrm_func_drv_rgtr(bp); 6597 rc = bnxt_hwrm_func_drv_rgtr(bp);
6348 if (rc) 6598 if (rc)
@@ -6365,6 +6615,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6365 goto init_err; 6615 goto init_err;
6366 } 6616 }
6367 6617
6618 bnxt_hwrm_func_qcfg(bp);
6619
6368 bnxt_set_tpa_flags(bp); 6620 bnxt_set_tpa_flags(bp);
6369 bnxt_set_ring_params(bp); 6621 bnxt_set_ring_params(bp);
6370 if (BNXT_PF(bp)) 6622 if (BNXT_PF(bp))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 2824d65b2e35..927ece9c408a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -298,13 +298,14 @@ struct rx_tpa_start_cmp_ext {
298 #define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC (0x1 << 1) 298 #define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC (0x1 << 1)
299 #define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2) 299 #define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2)
300 #define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) 300 #define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
301 #define RX_TPA_START_CMP_FLAGS2_IP_TYPE (0x1 << 8)
301 302
302 __le32 rx_tpa_start_cmp_metadata; 303 __le32 rx_tpa_start_cmp_metadata;
303 __le32 rx_tpa_start_cmp_cfa_code_v2; 304 __le32 rx_tpa_start_cmp_cfa_code_v2;
304 #define RX_TPA_START_CMP_V2 (0x1 << 0) 305 #define RX_TPA_START_CMP_V2 (0x1 << 0)
305 #define RX_TPA_START_CMP_CFA_CODE (0xffff << 16) 306 #define RX_TPA_START_CMP_CFA_CODE (0xffff << 16)
306 #define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16 307 #define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16
307 __le32 rx_tpa_start_cmp_unused5; 308 __le32 rx_tpa_start_cmp_hdr_info;
308}; 309};
309 310
310struct rx_tpa_end_cmp { 311struct rx_tpa_end_cmp {
@@ -584,6 +585,19 @@ struct bnxt_tpa_info {
584 u32 metadata; 585 u32 metadata;
585 enum pkt_hash_types hash_type; 586 enum pkt_hash_types hash_type;
586 u32 rss_hash; 587 u32 rss_hash;
588 u32 hdr_info;
589
590#define BNXT_TPA_L4_SIZE(hdr_info) \
591 (((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32)
592
593#define BNXT_TPA_INNER_L3_OFF(hdr_info) \
594 (((hdr_info) >> 18) & 0x1ff)
595
596#define BNXT_TPA_INNER_L2_OFF(hdr_info) \
597 (((hdr_info) >> 9) & 0x1ff)
598
599#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
600 ((hdr_info) & 0x1ff)
587}; 601};
588 602
589struct bnxt_rx_ring_info { 603struct bnxt_rx_ring_info {
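rx_tpa_start_cmp_hdr_info packs four header offsets into one 32-bit word, and the BNXT_TPA_* macros above slice it apart. A runnable check of that bit layout, using the same shifts and masks:

#include <stdio.h>
#include <stdint.h>

/* Same bit layout as the BNXT_TPA_* macros in bnxt.h:
 *   [31:27] L4 header size (0 means 32)
 *   [26:18] inner L3 offset
 *   [17:9]  inner L2 offset
 *   [8:0]   outer L3 offset
 */
#define TPA_L4_SIZE(h)      (((h) & 0xf8000000) ? ((h) >> 27) : 32)
#define TPA_INNER_L3_OFF(h) (((h) >> 18) & 0x1ff)
#define TPA_INNER_L2_OFF(h) (((h) >> 9) & 0x1ff)
#define TPA_OUTER_L3_OFF(h) ((h) & 0x1ff)

int main(void)
{
	/* build a word with known offsets, then read them back */
	uint32_t h = (5u << 27) | (50u << 18) | (36u << 9) | 14u;

	printf("l4 size %u, inner l3 %u, inner l2 %u, outer l3 %u\n",
	       TPA_L4_SIZE(h), TPA_INNER_L3_OFF(h),
	       TPA_INNER_L2_OFF(h), TPA_OUTER_L3_OFF(h));
	return 0;
}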
@@ -835,6 +849,7 @@ struct bnxt_link_info {
835#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 849#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
836#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 850#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
837#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 851#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
852 u16 support_auto_speeds;
838 u16 lp_auto_link_speeds; 853 u16 lp_auto_link_speeds;
839 u16 force_link_speed; 854 u16 force_link_speed;
840 u32 preemphasis; 855 u32 preemphasis;
@@ -873,6 +888,44 @@ struct bnxt {
873 void __iomem *bar2; 888 void __iomem *bar2;
874 889
875 u32 reg_base; 890 u32 reg_base;
891 u16 chip_num;
892#define CHIP_NUM_57301 0x16c8
893#define CHIP_NUM_57302 0x16c9
894#define CHIP_NUM_57304 0x16ca
895#define CHIP_NUM_57402 0x16d0
896#define CHIP_NUM_57404 0x16d1
897#define CHIP_NUM_57406 0x16d2
898
899#define CHIP_NUM_57311 0x16ce
900#define CHIP_NUM_57312 0x16cf
901#define CHIP_NUM_57314 0x16df
902#define CHIP_NUM_57412 0x16d6
903#define CHIP_NUM_57414 0x16d7
904#define CHIP_NUM_57416 0x16d8
905#define CHIP_NUM_57417 0x16d9
906
907#define BNXT_CHIP_NUM_5730X(chip_num) \
908 ((chip_num) >= CHIP_NUM_57301 && \
909 (chip_num) <= CHIP_NUM_57304)
910
911#define BNXT_CHIP_NUM_5740X(chip_num) \
912 ((chip_num) >= CHIP_NUM_57402 && \
913 (chip_num) <= CHIP_NUM_57406)
914
915#define BNXT_CHIP_NUM_5731X(chip_num) \
916 ((chip_num) == CHIP_NUM_57311 || \
917 (chip_num) == CHIP_NUM_57312 || \
918 (chip_num) == CHIP_NUM_57314)
919
920#define BNXT_CHIP_NUM_5741X(chip_num) \
921 ((chip_num) >= CHIP_NUM_57412 && \
922 (chip_num) <= CHIP_NUM_57417)
923
924#define BNXT_CHIP_NUM_57X0X(chip_num) \
925 (BNXT_CHIP_NUM_5730X(chip_num) || BNXT_CHIP_NUM_5740X(chip_num))
926
927#define BNXT_CHIP_NUM_57X1X(chip_num) \
928 (BNXT_CHIP_NUM_5731X(chip_num) || BNXT_CHIP_NUM_5741X(chip_num))
876 929
877 struct net_device *dev; 930 struct net_device *dev;
878 struct pci_dev *pdev; 931 struct pci_dev *pdev;
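chip_num is the PCI device ID reported by firmware, and the family macros above bucket it so callers such as the gro_func selection can test a whole family in one expression. A quick self-contained classification demo using a subset of the IDs:

#include <stdio.h>

#define CHIP_NUM_57301 0x16c8
#define CHIP_NUM_57304 0x16ca
#define CHIP_NUM_57311 0x16ce
#define CHIP_NUM_57312 0x16cf
#define CHIP_NUM_57314 0x16df
#define CHIP_NUM_57412 0x16d6
#define CHIP_NUM_57417 0x16d9

#define CHIP_5730X(n) ((n) >= CHIP_NUM_57301 && (n) <= CHIP_NUM_57304)
#define CHIP_5731X(n) ((n) == CHIP_NUM_57311 || (n) == CHIP_NUM_57312 || \
		       (n) == CHIP_NUM_57314)
#define CHIP_5741X(n) ((n) >= CHIP_NUM_57412 && (n) <= CHIP_NUM_57417)
#define CHIP_57X1X(n) (CHIP_5731X(n) || CHIP_5741X(n))

int main(void)
{
	unsigned int ids[] = { 0x16c9, 0x16cf, 0x16d7 };

	for (unsigned int i = 0; i < 3; i++)
		printf("0x%x -> %s\n", ids[i],
		       CHIP_57X1X(ids[i]) ? "57x1x (new GRO path)"
					  : "57x0x (legacy GRO path)");
	return 0;
}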
@@ -907,12 +960,17 @@ struct bnxt {
907 960
908#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF)) 961#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
909#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF) 962#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
963#define BNXT_NPAR(bp) ((bp)->port_partition_type)
964#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp))
910 965
911 struct bnxt_napi **bnapi; 966 struct bnxt_napi **bnapi;
912 967
913 struct bnxt_rx_ring_info *rx_ring; 968 struct bnxt_rx_ring_info *rx_ring;
914 struct bnxt_tx_ring_info *tx_ring; 969 struct bnxt_tx_ring_info *tx_ring;
915 970
971 struct sk_buff * (*gro_func)(struct bnxt_tpa_info *, int, int,
972 struct sk_buff *);
973
916 u32 rx_buf_size; 974 u32 rx_buf_size;
917 u32 rx_buf_use_size; /* useable size */ 975 u32 rx_buf_use_size; /* useable size */
918 u32 rx_ring_size; 976 u32 rx_ring_size;
@@ -991,8 +1049,10 @@ struct bnxt {
991 __be16 vxlan_port; 1049 __be16 vxlan_port;
992 u8 vxlan_port_cnt; 1050 u8 vxlan_port_cnt;
993 __le16 vxlan_fw_dst_port_id; 1051 __le16 vxlan_fw_dst_port_id;
1052 __be16 nge_port;
994 u8 nge_port_cnt; 1053 u8 nge_port_cnt;
995 __le16 nge_fw_dst_port_id; 1054 __le16 nge_fw_dst_port_id;
1055 u8 port_partition_type;
996 1056
997 u16 rx_coal_ticks; 1057 u16 rx_coal_ticks;
998 u16 rx_coal_ticks_irq; 1058 u16 rx_coal_ticks_irq;
@@ -1018,6 +1078,9 @@ struct bnxt {
1018#define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8 1078#define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8
1019#define BNXT_PERIODIC_STATS_SP_EVENT 9 1079#define BNXT_PERIODIC_STATS_SP_EVENT 9
1020#define BNXT_HWRM_PORT_MODULE_SP_EVENT 10 1080#define BNXT_HWRM_PORT_MODULE_SP_EVENT 10
1081#define BNXT_RESET_TASK_SILENT_SP_EVENT 11
1082#define BNXT_GENEVE_ADD_PORT_SP_EVENT 12
1083#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13
1021 1084
1022 struct bnxt_pf_info pf; 1085 struct bnxt_pf_info pf;
1023#ifdef CONFIG_BNXT_SRIOV 1086#ifdef CONFIG_BNXT_SRIOV
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index a38cb047b540..d7ab2d7982c2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -628,7 +628,66 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
628 return speed_mask; 628 return speed_mask;
629} 629}
630 630
631static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info) 631#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
632{ \
633 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB) \
634 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
635 100baseT_Full); \
636 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB) \
637 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
638 1000baseT_Full); \
639 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB) \
640 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
641 10000baseT_Full); \
642 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB) \
643 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
644 25000baseCR_Full); \
645 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB) \
646 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
647 40000baseCR4_Full);\
648 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \
649 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
650 50000baseCR2_Full);\
651 if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \
652 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
653 Pause); \
654 if (!((fw_pause) & BNXT_LINK_PAUSE_TX)) \
655 ethtool_link_ksettings_add_link_mode( \
656 lk_ksettings, name, Asym_Pause);\
657 } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) { \
658 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
659 Asym_Pause); \
660 } \
661}
662
663#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name) \
664{ \
665 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
666 100baseT_Full) || \
667 ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
668 100baseT_Half)) \
669 (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB; \
670 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
671 1000baseT_Full) || \
672 ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
673 1000baseT_Half)) \
674 (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB; \
675 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
676 10000baseT_Full)) \
677 (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB; \
678 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
679 25000baseCR_Full)) \
680 (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB; \
681 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
682 40000baseCR4_Full)) \
683 (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB; \
684 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
685 50000baseCR2_Full)) \
686 (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \
687}
688
689static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
690 struct ethtool_link_ksettings *lk_ksettings)
632{ 691{
633 u16 fw_speeds = link_info->auto_link_speeds; 692 u16 fw_speeds = link_info->auto_link_speeds;
634 u8 fw_pause = 0; 693 u8 fw_pause = 0;
@@ -636,10 +695,11 @@ static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
636 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 695 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
637 fw_pause = link_info->auto_pause_setting; 696 fw_pause = link_info->auto_pause_setting;
638 697
639 return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); 698 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
640} 699}
641 700
642static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info) 701static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
702 struct ethtool_link_ksettings *lk_ksettings)
643{ 703{
644 u16 fw_speeds = link_info->lp_auto_link_speeds; 704 u16 fw_speeds = link_info->lp_auto_link_speeds;
645 u8 fw_pause = 0; 705 u8 fw_pause = 0;
@@ -647,16 +707,24 @@ static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info)
647 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 707 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
648 fw_pause = link_info->lp_pause; 708 fw_pause = link_info->lp_pause;
649 709
650 return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); 710 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
711 lp_advertising);
651} 712}
652 713
653static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info) 714static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
715 struct ethtool_link_ksettings *lk_ksettings)
654{ 716{
655 u16 fw_speeds = link_info->support_speeds; 717 u16 fw_speeds = link_info->support_speeds;
656 u32 supported;
657 718
658 supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 719 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
659 return supported | SUPPORTED_Pause | SUPPORTED_Asym_Pause; 720
721 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
722 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
723 Asym_Pause);
724
725 if (link_info->support_auto_speeds)
726 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
727 Autoneg);
660} 728}
661 729
662u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) 730u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
@@ -683,65 +751,62 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
683 } 751 }
684} 752}
685 753
686static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 754static int bnxt_get_link_ksettings(struct net_device *dev,
755 struct ethtool_link_ksettings *lk_ksettings)
687{ 756{
688 struct bnxt *bp = netdev_priv(dev); 757 struct bnxt *bp = netdev_priv(dev);
689 struct bnxt_link_info *link_info = &bp->link_info; 758 struct bnxt_link_info *link_info = &bp->link_info;
690 u16 ethtool_speed; 759 struct ethtool_link_settings *base = &lk_ksettings->base;
760 u32 ethtool_speed;
691 761
692 cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info); 762 ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
693 763 bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
694 if (link_info->auto_link_speeds)
695 cmd->supported |= SUPPORTED_Autoneg;
696 764
765 ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
697 if (link_info->autoneg) { 766 if (link_info->autoneg) {
698 cmd->advertising = 767 bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
699 bnxt_fw_to_ethtool_advertised_spds(link_info); 768 ethtool_link_ksettings_add_link_mode(lk_ksettings,
700 cmd->advertising |= ADVERTISED_Autoneg; 769 advertising, Autoneg);
701 cmd->autoneg = AUTONEG_ENABLE; 770 base->autoneg = AUTONEG_ENABLE;
702 if (link_info->phy_link_status == BNXT_LINK_LINK) 771 if (link_info->phy_link_status == BNXT_LINK_LINK)
703 cmd->lp_advertising = 772 bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
704 bnxt_fw_to_ethtool_lp_adv(link_info);
705 ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); 773 ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
706 if (!netif_carrier_ok(dev)) 774 if (!netif_carrier_ok(dev))
707 cmd->duplex = DUPLEX_UNKNOWN; 775 base->duplex = DUPLEX_UNKNOWN;
708 else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) 776 else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
709 cmd->duplex = DUPLEX_FULL; 777 base->duplex = DUPLEX_FULL;
710 else 778 else
711 cmd->duplex = DUPLEX_HALF; 779 base->duplex = DUPLEX_HALF;
712 } else { 780 } else {
713 cmd->autoneg = AUTONEG_DISABLE; 781 base->autoneg = AUTONEG_DISABLE;
714 cmd->advertising = 0;
715 ethtool_speed = 782 ethtool_speed =
716 bnxt_fw_to_ethtool_speed(link_info->req_link_speed); 783 bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
717 cmd->duplex = DUPLEX_HALF; 784 base->duplex = DUPLEX_HALF;
718 if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL) 785 if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
719 cmd->duplex = DUPLEX_FULL; 786 base->duplex = DUPLEX_FULL;
720 } 787 }
721 ethtool_cmd_speed_set(cmd, ethtool_speed); 788 base->speed = ethtool_speed;
722 789
723 cmd->port = PORT_NONE; 790 base->port = PORT_NONE;
724 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { 791 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
725 cmd->port = PORT_TP; 792 base->port = PORT_TP;
726 cmd->supported |= SUPPORTED_TP; 793 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
727 cmd->advertising |= ADVERTISED_TP; 794 TP);
795 ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
796 TP);
728 } else { 797 } else {
729 cmd->supported |= SUPPORTED_FIBRE; 798 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
730 cmd->advertising |= ADVERTISED_FIBRE; 799 FIBRE);
800 ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
801 FIBRE);
731 802
732 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC) 803 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
733 cmd->port = PORT_DA; 804 base->port = PORT_DA;
734 else if (link_info->media_type == 805 else if (link_info->media_type ==
735 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE) 806 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
736 cmd->port = PORT_FIBRE; 807 base->port = PORT_FIBRE;
737 } 808 }
738 809 base->phy_address = link_info->phy_addr;
739 if (link_info->transceiver ==
740 PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL)
741 cmd->transceiver = XCVR_INTERNAL;
742 else
743 cmd->transceiver = XCVR_EXTERNAL;
744 cmd->phy_address = link_info->phy_addr;
745 810
746 return 0; 811 return 0;
747} 812}
@@ -815,37 +880,25 @@ u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
815 return fw_speed_mask; 880 return fw_speed_mask;
816} 881}
817 882
818static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 883static int bnxt_set_link_ksettings(struct net_device *dev,
884 const struct ethtool_link_ksettings *lk_ksettings)
819{ 885{
820 int rc = 0;
821 struct bnxt *bp = netdev_priv(dev); 886 struct bnxt *bp = netdev_priv(dev);
822 struct bnxt_link_info *link_info = &bp->link_info; 887 struct bnxt_link_info *link_info = &bp->link_info;
888 const struct ethtool_link_settings *base = &lk_ksettings->base;
823 u32 speed, fw_advertising = 0; 889 u32 speed, fw_advertising = 0;
824 bool set_pause = false; 890 bool set_pause = false;
891 int rc = 0;
825 892
826 if (BNXT_VF(bp)) 893 if (!BNXT_SINGLE_PF(bp))
827 return rc; 894 return -EOPNOTSUPP;
828
829 if (cmd->autoneg == AUTONEG_ENABLE) {
830 u32 supported_spds = bnxt_fw_to_ethtool_support_spds(link_info);
831 895
832 if (cmd->advertising & ~(supported_spds | ADVERTISED_Autoneg | 896 if (base->autoneg == AUTONEG_ENABLE) {
833 ADVERTISED_TP | ADVERTISED_FIBRE)) { 897 BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
834 netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n", 898 advertising);
835 cmd->advertising);
836 rc = -EINVAL;
837 goto set_setting_exit;
838 }
839 fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising);
840 if (fw_advertising & ~link_info->support_speeds) {
841 netdev_err(dev, "Advertising parameters are not supported! (adv: 0x%x)\n",
842 cmd->advertising);
843 rc = -EINVAL;
844 goto set_setting_exit;
845 }
846 link_info->autoneg |= BNXT_AUTONEG_SPEED; 899 link_info->autoneg |= BNXT_AUTONEG_SPEED;
847 if (!fw_advertising) 900 if (!fw_advertising)
848 link_info->advertising = link_info->support_speeds; 901 link_info->advertising = link_info->support_auto_speeds;
849 else 902 else
850 link_info->advertising = fw_advertising; 903 link_info->advertising = fw_advertising;
851 /* any change to autoneg will cause link change, therefore the 904 /* any change to autoneg will cause link change, therefore the
@@ -863,16 +916,12 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
863 rc = -EINVAL; 916 rc = -EINVAL;
864 goto set_setting_exit; 917 goto set_setting_exit;
865 } 918 }
866 /* TODO: currently don't support half duplex */ 919 if (base->duplex == DUPLEX_HALF) {
867 if (cmd->duplex == DUPLEX_HALF) {
868 netdev_err(dev, "HALF DUPLEX is not supported!\n"); 920 netdev_err(dev, "HALF DUPLEX is not supported!\n");
869 rc = -EINVAL; 921 rc = -EINVAL;
870 goto set_setting_exit; 922 goto set_setting_exit;
871 } 923 }
872 /* If received a request for an unknown duplex, assume full*/ 924 speed = base->speed;
873 if (cmd->duplex == DUPLEX_UNKNOWN)
874 cmd->duplex = DUPLEX_FULL;
875 speed = ethtool_cmd_speed(cmd);
876 fw_speed = bnxt_get_fw_speed(dev, speed); 925 fw_speed = bnxt_get_fw_speed(dev, speed);
877 if (!fw_speed) { 926 if (!fw_speed) {
878 rc = -EINVAL; 927 rc = -EINVAL;
@@ -911,7 +960,7 @@ static int bnxt_set_pauseparam(struct net_device *dev,
911 struct bnxt *bp = netdev_priv(dev); 960 struct bnxt *bp = netdev_priv(dev);
912 struct bnxt_link_info *link_info = &bp->link_info; 961 struct bnxt_link_info *link_info = &bp->link_info;
913 962
914 if (BNXT_VF(bp)) 963 if (!BNXT_SINGLE_PF(bp))
915 return rc; 964 return rc;
916 965
917 if (epause->autoneg) { 966 if (epause->autoneg) {
@@ -1433,7 +1482,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
1433 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 1482 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
1434 int rc = 0; 1483 int rc = 0;
1435 1484
1436 if (BNXT_VF(bp)) 1485 if (!BNXT_SINGLE_PF(bp))
1437 return 0; 1486 return 0;
1438 1487
1439 if (!(bp->flags & BNXT_FLAG_EEE_CAP)) 1488 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
@@ -1618,8 +1667,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
1618} 1667}
1619 1668
1620const struct ethtool_ops bnxt_ethtool_ops = { 1669const struct ethtool_ops bnxt_ethtool_ops = {
1621 .get_settings = bnxt_get_settings, 1670 .get_link_ksettings = bnxt_get_link_ksettings,
1622 .set_settings = bnxt_set_settings, 1671 .set_link_ksettings = bnxt_set_link_ksettings,
1623 .get_pauseparam = bnxt_get_pauseparam, 1672 .get_pauseparam = bnxt_get_pauseparam,
1624 .set_pauseparam = bnxt_set_pauseparam, 1673 .set_pauseparam = bnxt_set_pauseparam,
1625 .get_drvinfo = bnxt_get_drvinfo, 1674 .get_drvinfo = bnxt_get_drvinfo,
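The get_settings/set_settings to link_ksettings conversion in this file swaps the old 32-bit SUPPORTED_*/ADVERTISED_* masks for per-mode bitmaps, which is why the speed translation became a series of add_link_mode() calls instead of OR-ing into a u32. A minimal model of the firmware-mask-to-link-mode direction, with illustrative bit values in place of the real BNXT_LINK_SPEED_MSK_* definitions:

#include <stdio.h>
#include <stdint.h>

/* Firmware advertises speeds as a bitmask; ethtool_link_ksettings wants
 * one bit per named link mode. Bits below are stand-ins for
 * BNXT_LINK_SPEED_MSK_*.
 */
#define FW_SPD_1GB	(1u << 0)
#define FW_SPD_10GB	(1u << 1)
#define FW_SPD_25GB	(1u << 2)

static void fw_to_link_modes(uint16_t fw_speeds)
{
	if (fw_speeds & FW_SPD_1GB)
		printf("advertising: 1000baseT/Full\n");
	if (fw_speeds & FW_SPD_10GB)
		printf("advertising: 10000baseT/Full\n");
	if (fw_speeds & FW_SPD_25GB)
		printf("advertising: 25000baseCR/Full\n");
}

int main(void)
{
	fw_to_link_modes(FW_SPD_10GB | FW_SPD_25GB);
	return 0;
}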
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 363884dd9e8a..50d2007a2640 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -143,6 +143,9 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
143 u16 vlan_tag; 143 u16 vlan_tag;
144 int rc; 144 int rc;
145 145
146 if (bp->hwrm_spec_code < 0x10201)
147 return -ENOTSUPP;
148
146 rc = bnxt_vf_ndo_prep(bp, vf_id); 149 rc = bnxt_vf_ndo_prep(bp, vf_id);
147 if (rc) 150 if (rc)
148 return rc; 151 return rc;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index cb07d95e3dd9..89c0cfa9719f 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -304,7 +304,7 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
304static void macb_handle_link_change(struct net_device *dev) 304static void macb_handle_link_change(struct net_device *dev)
305{ 305{
306 struct macb *bp = netdev_priv(dev); 306 struct macb *bp = netdev_priv(dev);
307 struct phy_device *phydev = bp->phy_dev; 307 struct phy_device *phydev = dev->phydev;
308 unsigned long flags; 308 unsigned long flags;
309 int status_change = 0; 309 int status_change = 0;
310 310
@@ -414,7 +414,6 @@ static int macb_mii_probe(struct net_device *dev)
414 bp->link = 0; 414 bp->link = 0;
415 bp->speed = 0; 415 bp->speed = 0;
416 bp->duplex = -1; 416 bp->duplex = -1;
417 bp->phy_dev = phydev;
418 417
419 return 0; 418 return 0;
420} 419}
@@ -1886,7 +1885,7 @@ static int macb_open(struct net_device *dev)
1886 netif_carrier_off(dev); 1885 netif_carrier_off(dev);
1887 1886
 1888 /* if the phy is not yet registered, retry later */ 1887
1889 if (!bp->phy_dev) 1888 if (!dev->phydev)
1890 return -EAGAIN; 1889 return -EAGAIN;
1891 1890
1892 /* RX buffers initialization */ 1891 /* RX buffers initialization */
@@ -1905,7 +1904,7 @@ static int macb_open(struct net_device *dev)
1905 macb_init_hw(bp); 1904 macb_init_hw(bp);
1906 1905
1907 /* schedule a link state check */ 1906 /* schedule a link state check */
1908 phy_start(bp->phy_dev); 1907 phy_start(dev->phydev);
1909 1908
1910 netif_tx_start_all_queues(dev); 1909 netif_tx_start_all_queues(dev);
1911 1910
@@ -1920,8 +1919,8 @@ static int macb_close(struct net_device *dev)
1920 netif_tx_stop_all_queues(dev); 1919 netif_tx_stop_all_queues(dev);
1921 napi_disable(&bp->napi); 1920 napi_disable(&bp->napi);
1922 1921
1923 if (bp->phy_dev) 1922 if (dev->phydev)
1924 phy_stop(bp->phy_dev); 1923 phy_stop(dev->phydev);
1925 1924
1926 spin_lock_irqsave(&bp->lock, flags); 1925 spin_lock_irqsave(&bp->lock, flags);
1927 macb_reset_hw(bp); 1926 macb_reset_hw(bp);
@@ -2092,28 +2091,6 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
2092 return nstat; 2091 return nstat;
2093} 2092}
2094 2093
2095static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2096{
2097 struct macb *bp = netdev_priv(dev);
2098 struct phy_device *phydev = bp->phy_dev;
2099
2100 if (!phydev)
2101 return -ENODEV;
2102
2103 return phy_ethtool_gset(phydev, cmd);
2104}
2105
2106static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2107{
2108 struct macb *bp = netdev_priv(dev);
2109 struct phy_device *phydev = bp->phy_dev;
2110
2111 if (!phydev)
2112 return -ENODEV;
2113
2114 return phy_ethtool_sset(phydev, cmd);
2115}
2116
2117static int macb_get_regs_len(struct net_device *netdev) 2094static int macb_get_regs_len(struct net_device *netdev)
2118{ 2095{
2119 return MACB_GREGS_NBR * sizeof(u32); 2096 return MACB_GREGS_NBR * sizeof(u32);
@@ -2186,19 +2163,17 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2186} 2163}
2187 2164
2188static const struct ethtool_ops macb_ethtool_ops = { 2165static const struct ethtool_ops macb_ethtool_ops = {
2189 .get_settings = macb_get_settings,
2190 .set_settings = macb_set_settings,
2191 .get_regs_len = macb_get_regs_len, 2166 .get_regs_len = macb_get_regs_len,
2192 .get_regs = macb_get_regs, 2167 .get_regs = macb_get_regs,
2193 .get_link = ethtool_op_get_link, 2168 .get_link = ethtool_op_get_link,
2194 .get_ts_info = ethtool_op_get_ts_info, 2169 .get_ts_info = ethtool_op_get_ts_info,
2195 .get_wol = macb_get_wol, 2170 .get_wol = macb_get_wol,
2196 .set_wol = macb_set_wol, 2171 .set_wol = macb_set_wol,
2172 .get_link_ksettings = phy_ethtool_get_link_ksettings,
2173 .set_link_ksettings = phy_ethtool_set_link_ksettings,
2197}; 2174};
2198 2175
2199static const struct ethtool_ops gem_ethtool_ops = { 2176static const struct ethtool_ops gem_ethtool_ops = {
2200 .get_settings = macb_get_settings,
2201 .set_settings = macb_set_settings,
2202 .get_regs_len = macb_get_regs_len, 2177 .get_regs_len = macb_get_regs_len,
2203 .get_regs = macb_get_regs, 2178 .get_regs = macb_get_regs,
2204 .get_link = ethtool_op_get_link, 2179 .get_link = ethtool_op_get_link,
@@ -2206,12 +2181,13 @@ static const struct ethtool_ops gem_ethtool_ops = {
2206 .get_ethtool_stats = gem_get_ethtool_stats, 2181 .get_ethtool_stats = gem_get_ethtool_stats,
2207 .get_strings = gem_get_ethtool_strings, 2182 .get_strings = gem_get_ethtool_strings,
2208 .get_sset_count = gem_get_sset_count, 2183 .get_sset_count = gem_get_sset_count,
2184 .get_link_ksettings = phy_ethtool_get_link_ksettings,
2185 .set_link_ksettings = phy_ethtool_set_link_ksettings,
2209}; 2186};
2210 2187
2211static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2188static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2212{ 2189{
2213 struct macb *bp = netdev_priv(dev); 2190 struct phy_device *phydev = dev->phydev;
2214 struct phy_device *phydev = bp->phy_dev;
2215 2191
2216 if (!netif_running(dev)) 2192 if (!netif_running(dev))
2217 return -EINVAL; 2193 return -EINVAL;
@@ -2570,7 +2546,7 @@ static int at91ether_open(struct net_device *dev)
2570 MACB_BIT(HRESP)); 2546 MACB_BIT(HRESP));
2571 2547
2572 /* schedule a link state check */ 2548 /* schedule a link state check */
2573 phy_start(lp->phy_dev); 2549 phy_start(dev->phydev);
2574 2550
2575 netif_start_queue(dev); 2551 netif_start_queue(dev);
2576 2552
@@ -3010,7 +2986,7 @@ static int macb_probe(struct platform_device *pdev)
3010 if (err) 2986 if (err)
3011 goto err_out_free_netdev; 2987 goto err_out_free_netdev;
3012 2988
3013 phydev = bp->phy_dev; 2989 phydev = dev->phydev;
3014 2990
3015 netif_carrier_off(dev); 2991 netif_carrier_off(dev);
3016 2992
@@ -3029,7 +3005,7 @@ static int macb_probe(struct platform_device *pdev)
3029 return 0; 3005 return 0;
3030 3006
3031err_out_unregister_mdio: 3007err_out_unregister_mdio:
3032 phy_disconnect(bp->phy_dev); 3008 phy_disconnect(dev->phydev);
3033 mdiobus_unregister(bp->mii_bus); 3009 mdiobus_unregister(bp->mii_bus);
3034 mdiobus_free(bp->mii_bus); 3010 mdiobus_free(bp->mii_bus);
3035 3011
@@ -3057,8 +3033,8 @@ static int macb_remove(struct platform_device *pdev)
3057 3033
3058 if (dev) { 3034 if (dev) {
3059 bp = netdev_priv(dev); 3035 bp = netdev_priv(dev);
3060 if (bp->phy_dev) 3036 if (dev->phydev)
3061 phy_disconnect(bp->phy_dev); 3037 phy_disconnect(dev->phydev);
3062 mdiobus_unregister(bp->mii_bus); 3038 mdiobus_unregister(bp->mii_bus);
3063 mdiobus_free(bp->mii_bus); 3039 mdiobus_free(bp->mii_bus);
3064 3040
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 8a13824ef802..36893d8958d4 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -823,7 +823,6 @@ struct macb {
823 struct macb_or_gem_ops macbgem_ops; 823 struct macb_or_gem_ops macbgem_ops;
824 824
825 struct mii_bus *mii_bus; 825 struct mii_bus *mii_bus;
826 struct phy_device *phy_dev;
827 int link; 826 int link;
828 int speed; 827 int speed;
829 int duplex; 828 int duplex;
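
[Annotation] The macb changes are mechanical but follow a common pattern: phy_connect() already stores the attached PHY in net_device->phydev, so the driver-private bp->phy_dev copy was redundant, and removing it lets the generic phy_ethtool_{get,set}_link_ksettings helpers be wired directly into the ethtool ops. A hedged sketch of the idiom, with illustrative names rather than macb's:

	#include <linux/err.h>
	#include <linux/netdevice.h>
	#include <linux/phy.h>

	static void example_adjust_link(struct net_device *dev)
	{
		struct phy_device *phydev = dev->phydev; /* set by phy_connect() */

		if (phydev->link)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);
	}

	static int example_attach_phy(struct net_device *dev, const char *bus_id)
	{
		struct phy_device *phydev;

		phydev = phy_connect(dev, bus_id, example_adjust_link,
				     PHY_INTERFACE_MODE_RGMII);
		if (IS_ERR(phydev))
			return PTR_ERR(phydev);

		/* no private copy needed: dev->phydev now points at the PHY */
		return 0;
	}
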
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index 8ad7425f89bf..d35864ada9a3 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -367,7 +367,8 @@ void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
367 367
368void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) 368void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
369{ 369{
370 u32 mask, i, loop = HZ; 370 int i;
371 u32 mask, loop = HZ;
371 u32 d32; 372 u32 d32;
372 373
373 /* Reset the Enable bits for Input Queues. */ 374 /* Reset the Enable bits for Input Queues. */
@@ -376,7 +377,7 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
376 octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask); 377 octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);
377 378
378 /* Wait until hardware indicates that the queues are out of reset. */ 379 /* Wait until hardware indicates that the queues are out of reset. */
379 mask = oct->io_qmask.iq; 380 mask = (u32)oct->io_qmask.iq;
380 d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ); 381 d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
381 while (((d32 & mask) != mask) && loop--) { 382 while (((d32 & mask) != mask) && loop--) {
382 d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ); 383 d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
@@ -384,8 +385,8 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
384 } 385 }
385 386
386 /* Reset the doorbell register for each Input queue. */ 387 /* Reset the doorbell register for each Input queue. */
387 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { 388 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
388 if (!(oct->io_qmask.iq & (1UL << i))) 389 if (!(oct->io_qmask.iq & (1ULL << i)))
389 continue; 390 continue;
390 octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF); 391 octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
391 d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i)); 392 d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
@@ -398,7 +399,7 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
398 399
399 /* Wait until hardware indicates that the queues are out of reset. */ 400 /* Wait until hardware indicates that the queues are out of reset. */
400 loop = HZ; 401 loop = HZ;
401 mask = oct->io_qmask.oq; 402 mask = (u32)oct->io_qmask.oq;
402 d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ); 403 d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
403 while (((d32 & mask) != mask) && loop--) { 404 while (((d32 & mask) != mask) && loop--) {
404 d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ); 405 d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
@@ -408,8 +409,8 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
408 409
409 /* Reset the doorbell register for each Output queue. */ 410 /* Reset the doorbell register for each Output queue. */
410 /* for (i = 0; i < oct->num_oqs; i++) { */ 411 /* for (i = 0; i < oct->num_oqs; i++) { */
411 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { 412 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
412 if (!(oct->io_qmask.oq & (1UL << i))) 413 if (!(oct->io_qmask.oq & (1ULL << i)))
413 continue; 414 continue;
414 octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF); 415 octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
415 d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i)); 416 d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));
@@ -429,16 +430,16 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
429 430
430void lio_cn6xxx_reinit_regs(struct octeon_device *oct) 431void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
431{ 432{
432 u32 i; 433 int i;
433 434
434 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { 435 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
435 if (!(oct->io_qmask.iq & (1UL << i))) 436 if (!(oct->io_qmask.iq & (1ULL << i)))
436 continue; 437 continue;
437 oct->fn_list.setup_iq_regs(oct, i); 438 oct->fn_list.setup_iq_regs(oct, i);
438 } 439 }
439 440
440 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { 441 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
441 if (!(oct->io_qmask.oq & (1UL << i))) 442 if (!(oct->io_qmask.oq & (1ULL << i)))
442 continue; 443 continue;
443 oct->fn_list.setup_oq_regs(oct, i); 444 oct->fn_list.setup_oq_regs(oct, i);
444 } 445 }
@@ -450,8 +451,8 @@ void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
450 oct->fn_list.enable_io_queues(oct); 451 oct->fn_list.enable_io_queues(oct);
451 452
452 /* for (i = 0; i < oct->num_oqs; i++) { */ 453 /* for (i = 0; i < oct->num_oqs; i++) { */
453 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { 454 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
454 if (!(oct->io_qmask.oq & (1UL << i))) 455 if (!(oct->io_qmask.oq & (1ULL << i)))
455 continue; 456 continue;
456 writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg); 457 writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg);
457 } 458 }
@@ -495,8 +496,7 @@ u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
495} 496}
496 497
497u32 498u32
498lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)), 499lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
499 struct octeon_instr_queue *iq)
500{ 500{
501 u32 new_idx = readl(iq->inst_cnt_reg); 501 u32 new_idx = readl(iq->inst_cnt_reg);
502 502
@@ -557,7 +557,8 @@ lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
557int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct) 557int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
558{ 558{
559 struct octeon_droq *droq; 559 struct octeon_droq *droq;
560 u32 oq_no, pkt_count, droq_time_mask, droq_mask, droq_int_enb; 560 int oq_no;
561 u32 pkt_count, droq_time_mask, droq_mask, droq_int_enb;
561 u32 droq_cnt_enb, droq_cnt_mask; 562 u32 droq_cnt_enb, droq_cnt_mask;
562 563
563 droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB); 564 droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
@@ -573,8 +574,8 @@ int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
573 oct->droq_intr = 0; 574 oct->droq_intr = 0;
574 575
575 /* for (oq_no = 0; oq_no < oct->num_oqs; oq_no++) { */ 576 /* for (oq_no = 0; oq_no < oct->num_oqs; oq_no++) { */
576 for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) { 577 for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) {
577 if (!(droq_mask & (1 << oq_no))) 578 if (!(droq_mask & (1ULL << oq_no)))
578 continue; 579 continue;
579 580
580 droq = oct->droq[oq_no]; 581 droq = oct->droq[oq_no];
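
[Annotation] Two orthogonal fixes repeat through these liquidio hunks: MAX_OCTEON_INSTR_QUEUES/MAX_OCTEON_OUTPUT_QUEUES become per-device, function-like macros, and the queue-mask tests switch from 1UL to 1ULL. The second matters because io_qmask is 64 bits wide: on a 32-bit kernel 1UL is a 32-bit value, so shifting it by a queue index of 32 or more is undefined behavior and silently loses the high half of the mask. A minimal illustration:

	#include <linux/types.h>

	/* 64-bit-safe queue-mask test: 1ULL keeps the shift in a 64-bit
	 * type even on 32-bit builds, where 1UL << 32 is undefined.
	 */
	static bool example_queue_enabled(u64 qmask, unsigned int q)
	{
		return !!(qmask & (1ULL << q));
	}
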
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
index f77918779355..fe2932cb7ed8 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
@@ -91,8 +91,7 @@ void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
91void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask); 91void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask);
92u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx); 92u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx);
93u32 93u32
94lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)), 94lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq);
95 struct octeon_instr_queue *iq);
96void lio_cn6xxx_enable_interrupt(void *chip); 95void lio_cn6xxx_enable_interrupt(void *chip);
97void lio_cn6xxx_disable_interrupt(void *chip); 96void lio_cn6xxx_disable_interrupt(void *chip);
98void cn6xxx_get_pcie_qlmport(struct octeon_device *oct); 97void cn6xxx_get_pcie_qlmport(struct octeon_device *oct);
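
[Annotation] The header change mirrors the definition in cn66xx_device.c: the octeon_device argument was never used, so it is dropped from the op and the __attribute__((unused)) annotation disappears with it. A sketch of the slimmed shape, using stand-in types rather than the driver's:

	#include <linux/io.h>
	#include <linux/types.h>

	struct example_iq {
		void __iomem *inst_cnt_reg;  /* stand-in for octeon_instr_queue */
	};

	/* Reads the device-maintained instruction count; no device pointer
	 * is needed once the queue carries its own register mapping.
	 */
	static u32 example_update_read_index(struct example_iq *iq)
	{
		return readl(iq->inst_cnt_reg);
	}
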
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 245c063ed4db..03bfa9771e4d 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -40,6 +40,8 @@
40#include "cn68xx_device.h" 40#include "cn68xx_device.h"
41#include "liquidio_image.h" 41#include "liquidio_image.h"
42 42
43static int octnet_get_link_stats(struct net_device *netdev);
44
43struct oct_mdio_cmd_context { 45struct oct_mdio_cmd_context {
44 int octeon_id; 46 int octeon_id;
45 wait_queue_head_t wc; 47 wait_queue_head_t wc;
@@ -71,34 +73,120 @@ enum {
71 INTERFACE_MODE_RXAUI, 73 INTERFACE_MODE_RXAUI,
72 INTERFACE_MODE_QSGMII, 74 INTERFACE_MODE_QSGMII,
73 INTERFACE_MODE_AGL, 75 INTERFACE_MODE_AGL,
76 INTERFACE_MODE_XLAUI,
77 INTERFACE_MODE_XFI,
78 INTERFACE_MODE_10G_KR,
79 INTERFACE_MODE_40G_KR4,
80 INTERFACE_MODE_MIXED,
74}; 81};
75 82
76#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0])) 83#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
77#define OCT_ETHTOOL_REGDUMP_LEN 4096 84#define OCT_ETHTOOL_REGDUMP_LEN 4096
78#define OCT_ETHTOOL_REGSVER 1 85#define OCT_ETHTOOL_REGSVER 1
79 86
87/* statistics of PF */
88static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
89 "rx_packets",
90 "tx_packets",
91 "rx_bytes",
92 "tx_bytes",
93 "rx_errors", /*jabber_err+l2_err+frame_err */
94 "tx_errors", /*fw_err_pko+fw_err_link+fw_err_drop */
95 "rx_dropped", /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd
96 *+st->fromwire.dmac_drop + st->fromwire.fw_err_drop
97 */
98 "tx_dropped",
99
100 "tx_total_sent",
101 "tx_total_fwd",
102 "tx_err_pko",
103 "tx_err_link",
104 "tx_err_drop",
105
106 "tx_tso",
107 "tx_tso_packets",
108 "tx_tso_err",
109
110 "mac_tx_total_pkts",
111 "mac_tx_total_bytes",
112 "mac_tx_mcast_pkts",
113 "mac_tx_bcast_pkts",
114 "mac_tx_ctl_packets", /*oct->link_stats.fromhost.ctl_sent */
115 "mac_tx_total_collisions",
116 "mac_tx_one_collision",
117 "mac_tx_multi_collison",
118 "mac_tx_max_collision_fail",
119 "mac_tx_max_deferal_fail",
120 "mac_tx_fifo_err",
121 "mac_tx_runts",
122
123 "rx_total_rcvd",
124 "rx_total_fwd",
125 "rx_jabber_err",
126 "rx_l2_err",
127 "rx_frame_err",
128 "rx_err_pko",
129 "rx_err_link",
130 "rx_err_drop",
131
132 "rx_lro_pkts",
133 "rx_lro_bytes",
134 "rx_total_lro",
135
136 "rx_lro_aborts",
137 "rx_lro_aborts_port",
138 "rx_lro_aborts_seq",
139 "rx_lro_aborts_tsval",
140 "rx_lro_aborts_timer",
141 "rx_fwd_rate",
142
143 "mac_rx_total_rcvd",
144 "mac_rx_bytes",
145 "mac_rx_total_bcst",
146 "mac_rx_total_mcst",
147 "mac_rx_runts",
148 "mac_rx_ctl_packets",
149 "mac_rx_fifo_err",
150 "mac_rx_dma_drop",
151 "mac_rx_fcs_err",
152
153 "link_state_changes",
154};
155
156/* statistics of host tx queue */
80static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = { 157static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
81 "Instr posted", 158 "packets", /*oct->instr_queue[iq_no]->stats.tx_done*/
82 "Instr processed", 159 "bytes", /*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/
83 "Instr dropped", 160 "dropped",
84 "Bytes Sent", 161 "iq_busy",
85 "Sgentry_sent", 162 "sgentry_sent",
86 "Inst cntreg", 163
87 "Tx done", 164 "fw_instr_posted",
88 "Tx Iq busy", 165 "fw_instr_processed",
89 "Tx dropped", 166 "fw_instr_dropped",
90 "Tx bytes", 167 "fw_bytes_sent",
168
169 "tso",
170 "txq_restart",
91}; 171};
92 172
173/* statistics of host rx queue */
93static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = { 174static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
94 "OQ Pkts Received", 175 "packets", /*oct->droq[oq_no]->stats.rx_pkts_received */
95 "OQ Bytes Received", 176 "bytes", /*oct->droq[oq_no]->stats.rx_bytes_received */
96 "Dropped no dispatch", 177 "dropped", /*oct->droq[oq_no]->stats.rx_dropped+
97 "Dropped nomem", 178 *oct->droq[oq_no]->stats.dropped_nodispatch+
98 "Dropped toomany", 179 *oct->droq[oq_no]->stats.dropped_toomany+
99 "Stack RX cnt", 180 *oct->droq[oq_no]->stats.dropped_nomem
100 "Stack RX Bytes", 181 */
101 "RX dropped", 182 "dropped_nomem",
183 "dropped_toomany",
184 "fw_dropped",
185 "fw_pkts_received",
186 "fw_bytes_received",
187 "fw_dropped_nodispatch",
188
189 "buffer_alloc_failure",
102}; 190};
103 191
104#define OCTNIC_NCMD_AUTONEG_ON 0x1 192#define OCTNIC_NCMD_AUTONEG_ON 0x1
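
[Annotation] The renamed statistics above follow the now-conventional ethtool layout: one ETH_GSTRING_LEN-sized slot per counter, with per-queue counters prefixed "tx-<q>-" or "rx-<q>-" so userspace tools can group them. Roughly how such per-queue names get emitted; this helper is illustrative, not the driver's code:

	#include <linux/ethtool.h>
	#include <linux/kernel.h>

	static char *example_emit_queue_strings(char *data, int q,
						const char strings[][ETH_GSTRING_LEN],
						int n)
	{
		int j;

		for (j = 0; j < n; j++) {
			snprintf(data, ETH_GSTRING_LEN, "tx-%d-%s",
				 q, strings[j]);
			data += ETH_GSTRING_LEN;	/* fixed-width slots */
		}
		return data;
	}
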
@@ -112,8 +200,9 @@ static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
112 200
113 linfo = &lio->linfo; 201 linfo = &lio->linfo;
114 202
115 if (linfo->link.s.interface == INTERFACE_MODE_XAUI || 203 if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
116 linfo->link.s.interface == INTERFACE_MODE_RXAUI) { 204 linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
205 linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
117 ecmd->port = PORT_FIBRE; 206 ecmd->port = PORT_FIBRE;
118 ecmd->supported = 207 ecmd->supported =
119 (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE | 208 (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
@@ -124,10 +213,11 @@ static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
124 ecmd->autoneg = AUTONEG_DISABLE; 213 ecmd->autoneg = AUTONEG_DISABLE;
125 214
126 } else { 215 } else {
127 dev_err(&oct->pci_dev->dev, "Unknown link interface reported\n"); 216 dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
217 linfo->link.s.if_mode);
128 } 218 }
129 219
130 if (linfo->link.s.status) { 220 if (linfo->link.s.link_up) {
131 ethtool_cmd_speed_set(ecmd, linfo->link.s.speed); 221 ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
132 ecmd->duplex = linfo->link.s.duplex; 222 ecmd->duplex = linfo->link.s.duplex;
133 } else { 223 } else {
@@ -222,23 +312,20 @@ static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
222 struct lio *lio = GET_LIO(netdev); 312 struct lio *lio = GET_LIO(netdev);
223 struct octeon_device *oct = lio->oct_dev; 313 struct octeon_device *oct = lio->oct_dev;
224 struct octnic_ctrl_pkt nctrl; 314 struct octnic_ctrl_pkt nctrl;
225 struct octnic_ctrl_params nparams;
226 int ret = 0; 315 int ret = 0;
227 316
228 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 317 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
229 318
230 nctrl.ncmd.u64 = 0; 319 nctrl.ncmd.u64 = 0;
231 nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS; 320 nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
232 nctrl.ncmd.s.param1 = lio->linfo.ifidx; 321 nctrl.ncmd.s.param1 = addr;
233 nctrl.ncmd.s.param2 = addr; 322 nctrl.ncmd.s.param2 = val;
234 nctrl.ncmd.s.param3 = val; 323 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
235 nctrl.wait_time = 100; 324 nctrl.wait_time = 100;
236 nctrl.netpndev = (u64)netdev; 325 nctrl.netpndev = (u64)netdev;
237 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 326 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
238 327
239 nparams.resp_order = OCTEON_RESP_ORDERED; 328 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
240
241 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
242 if (ret < 0) { 329 if (ret < 0) {
243 dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n"); 330 dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
244 return -EINVAL; 331 return -EINVAL;
@@ -303,9 +390,10 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
303 mdio_cmd->mdio_addr = loc; 390 mdio_cmd->mdio_addr = loc;
304 if (op) 391 if (op)
305 mdio_cmd->value1 = *value; 392 mdio_cmd->value1 = *value;
306 mdio_cmd->value2 = lio->linfo.ifidx;
307 octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8); 393 octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
308 394
395 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
396
309 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45, 397 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
310 0, 0, 0); 398 0, 0, 0);
311 399
@@ -317,7 +405,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
317 405
318 retval = octeon_send_soft_command(oct_dev, sc); 406 retval = octeon_send_soft_command(oct_dev, sc);
319 407
320 if (retval) { 408 if (retval == IQ_SEND_FAILED) {
321 dev_err(&oct_dev->pci_dev->dev, 409 dev_err(&oct_dev->pci_dev->dev,
322 "octnet_mdio45_access instruction failed status: %x\n", 410 "octnet_mdio45_access instruction failed status: %x\n",
323 retval); 411 retval);
@@ -503,10 +591,10 @@ static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
503 if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) { 591 if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
504 if (msglvl & NETIF_MSG_HW) 592 if (msglvl & NETIF_MSG_HW)
505 liquidio_set_feature(netdev, 593 liquidio_set_feature(netdev,
506 OCTNET_CMD_VERBOSE_ENABLE); 594 OCTNET_CMD_VERBOSE_ENABLE, 0);
507 else 595 else
508 liquidio_set_feature(netdev, 596 liquidio_set_feature(netdev,
509 OCTNET_CMD_VERBOSE_DISABLE); 597 OCTNET_CMD_VERBOSE_DISABLE, 0);
510 } 598 }
511 599
512 lio->msg_enable = msglvl; 600 lio->msg_enable = msglvl;
@@ -518,8 +606,13 @@ lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
518 /* Notes: Not supporting any auto negotiation in these 606 /* Notes: Not supporting any auto negotiation in these
519 * drivers. Just report pause frame support. 607 * drivers. Just report pause frame support.
520 */ 608 */
521 pause->tx_pause = 1; 609 struct lio *lio = GET_LIO(netdev);
522 pause->rx_pause = 1; /* TODO: Need to support RX pause frame!!. */ 610 struct octeon_device *oct = lio->oct_dev;
611
612 pause->autoneg = 0;
613
614 pause->tx_pause = oct->tx_pause;
615 pause->rx_pause = oct->rx_pause;
523} 616}
524 617
525static void 618static void
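
[Annotation] lio_get_pauseparam() now reports the device's cached pause state instead of hardcoded ones, and explicitly clears autoneg since pause autonegotiation is not supported. The general reporting shape, with illustrative names; the real driver keeps this state in struct octeon_device:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	struct example_pause_state {
		u32 rx_pause;
		u32 tx_pause;
	};

	static void example_get_pauseparam(struct net_device *netdev,
					   struct ethtool_pauseparam *pause)
	{
		struct example_pause_state *st = netdev_priv(netdev);

		pause->autoneg = 0;		/* pause autoneg not supported */
		pause->rx_pause = st->rx_pause;	/* mirror cached device state */
		pause->tx_pause = st->tx_pause;
	}
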
@@ -528,51 +621,245 @@ lio_get_ethtool_stats(struct net_device *netdev,
528{ 621{
529 struct lio *lio = GET_LIO(netdev); 622 struct lio *lio = GET_LIO(netdev);
530 struct octeon_device *oct_dev = lio->oct_dev; 623 struct octeon_device *oct_dev = lio->oct_dev;
624 struct net_device_stats *netstats = &netdev->stats;
531 int i = 0, j; 625 int i = 0, j;
532 626
533 for (j = 0; j < MAX_OCTEON_INSTR_QUEUES; j++) { 627 netdev->netdev_ops->ndo_get_stats(netdev);
534 if (!(oct_dev->io_qmask.iq & (1UL << j))) 628 octnet_get_link_stats(netdev);
629
630 /*sum of oct->droq[oq_no]->stats->rx_pkts_received */
631 data[i++] = CVM_CAST64(netstats->rx_packets);
632 /*sum of oct->instr_queue[iq_no]->stats.tx_done */
633 data[i++] = CVM_CAST64(netstats->tx_packets);
634 /*sum of oct->droq[oq_no]->stats->rx_bytes_received */
635 data[i++] = CVM_CAST64(netstats->rx_bytes);
636 /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
637 data[i++] = CVM_CAST64(netstats->tx_bytes);
638 data[i++] = CVM_CAST64(netstats->rx_errors);
639 data[i++] = CVM_CAST64(netstats->tx_errors);
640 /*sum of oct->droq[oq_no]->stats->rx_dropped +
641 *oct->droq[oq_no]->stats->dropped_nodispatch +
642 *oct->droq[oq_no]->stats->dropped_toomany +
643 *oct->droq[oq_no]->stats->dropped_nomem
644 */
645 data[i++] = CVM_CAST64(netstats->rx_dropped);
646 /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
647 data[i++] = CVM_CAST64(netstats->tx_dropped);
648
649 /*data[i++] = CVM_CAST64(stats->multicast); */
650 /*data[i++] = CVM_CAST64(stats->collisions); */
651
652 /* firmware tx stats */
653 /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
654 *fromhost.fw_total_sent
655 */
656 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
657 /*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */
658 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
659 /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
660 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
661 /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
662 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
663 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
664 *fw_err_drop
665 */
666 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);
667
668 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
669 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
670 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
671 *fw_tso_fwd
672 */
673 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
674 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
675 *fw_err_tso
676 */
677 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
678
679 /* mac tx statistics */
680 /*CVMX_BGXX_CMRX_TX_STAT5 */
681 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
682 /*CVMX_BGXX_CMRX_TX_STAT4 */
683 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
684 /*CVMX_BGXX_CMRX_TX_STAT15 */
685 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
686 /*CVMX_BGXX_CMRX_TX_STAT14 */
687 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
688 /*CVMX_BGXX_CMRX_TX_STAT17 */
689 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
690 /*CVMX_BGXX_CMRX_TX_STAT0 */
691 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
692 /*CVMX_BGXX_CMRX_TX_STAT3 */
693 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
694 /*CVMX_BGXX_CMRX_TX_STAT2 */
695 data[i++] =
696 CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
697 /*CVMX_BGXX_CMRX_TX_STAT0 */
698 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
699 /*CVMX_BGXX_CMRX_TX_STAT1 */
700 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
701 /*CVMX_BGXX_CMRX_TX_STAT16 */
702 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
703 /*CVMX_BGXX_CMRX_TX_STAT6 */
704 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);
705
706 /* RX firmware stats */
707 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
708 *fw_total_rcvd
709 */
710 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
711 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
712 *fw_total_fwd
713 */
714 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
715 /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
716 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
717 /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
718 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
719 /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
720 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
721 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
722 *fw_err_pko
723 */
724 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
725 /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
726 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
727 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
728 *fromwire.fw_err_drop
729 */
730 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);
731
732 /* LRO */
733 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
734 *fw_lro_pkts
735 */
736 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
737 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
738 *fw_lro_octs
739 */
740 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
741 /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
742 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
743 /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
744 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
745 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
746 *fw_lro_aborts_port
747 */
748 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
749 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
750 *fw_lro_aborts_seq
751 */
752 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
753 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
754 *fw_lro_aborts_tsval
755 */
756 data[i++] =
757 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
758 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
759 *fw_lro_aborts_timer
760 */
761 /* intrmod: packet forward rate */
762 data[i++] =
763 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
764 /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
765 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);
766
767 /* mac: link-level stats */
768 /*CVMX_BGXX_CMRX_RX_STAT0 */
769 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
770 /*CVMX_BGXX_CMRX_RX_STAT1 */
771 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
772 /*CVMX_PKI_STATX_STAT5 */
773 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
774 /*CVMX_PKI_STATX_STAT5 */
775 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
776 /*wqe->word2.err_code or wqe->word2.err_level */
777 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
778 /*CVMX_BGXX_CMRX_RX_STAT2 */
779 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
780 /*CVMX_BGXX_CMRX_RX_STAT6 */
781 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
782 /*CVMX_BGXX_CMRX_RX_STAT4 */
783 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
784 /*wqe->word2.err_code or wqe->word2.err_level */
785 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
786 /*lio->link_changes*/
787 data[i++] = CVM_CAST64(lio->link_changes);
788
789 /* TX -- lio_update_stats(lio); */
790 for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
791 if (!(oct_dev->io_qmask.iq & (1ULL << j)))
535 continue; 792 continue;
793 /*packets to network port*/
794 /*# of packets tx to network */
795 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
796 /*# of bytes tx to network */
536 data[i++] = 797 data[i++] =
537 CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted); 798 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
538 data[i++] = 799 /*# of packets dropped */
539 CVM_CAST64(
540 oct_dev->instr_queue[j]->stats.instr_processed);
541 data[i++] = 800 data[i++] =
542 CVM_CAST64( 801 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
543 oct_dev->instr_queue[j]->stats.instr_dropped); 802 /*# of tx fails due to queue full */
544 data[i++] = 803 data[i++] =
545 CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent); 804 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
805 /*XXX gather entries sent */
546 data[i++] = 806 data[i++] =
547 CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent); 807 CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
808
809 /*instruction to firmware: data and control */
810 /*# of instructions to the queue */
548 data[i++] = 811 data[i++] =
549 readl(oct_dev->instr_queue[j]->inst_cnt_reg); 812 CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
550 data[i++] = 813 /*# of instructions processed */
551 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done); 814 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
552 data[i++] = 815 stats.instr_processed);
553 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy); 816 /*# of instructions could not be processed */
817 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
818 stats.instr_dropped);
819 /*bytes sent through the queue */
554 data[i++] = 820 data[i++] =
555 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped); 821 CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
822
823 /*tso request*/
824 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
825 /*txq restart*/
556 data[i++] = 826 data[i++] =
557 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes); 827 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
558 } 828 }
559 829
560 /* for (j = 0; j < oct_dev->num_oqs; j++){ */ 830 /* RX */
561 for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES; j++) { 831 /* for (j = 0; j < oct_dev->num_oqs; j++) { */
562 if (!(oct_dev->io_qmask.oq & (1UL << j))) 832 for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
833 if (!(oct_dev->io_qmask.oq & (1ULL << j)))
563 continue; 834 continue;
564 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received); 835
565 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received); 836 /*packets send to TCP/IP network stack */
566 data[i++] = 837 /*# of packets to network stack */
567 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
568 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
569 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
570 data[i++] = 838 data[i++] =
571 CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received); 839 CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
840 /*# of bytes to network stack */
572 data[i++] = 841 data[i++] =
573 CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received); 842 CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
843 /*# of packets dropped */
844 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
845 oct_dev->droq[j]->stats.dropped_toomany +
846 oct_dev->droq[j]->stats.rx_dropped);
847 data[i++] =
848 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
849 data[i++] =
850 CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
574 data[i++] = 851 data[i++] =
575 CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped); 852 CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
853
854 /*control and data path*/
855 data[i++] =
856 CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
857 data[i++] =
858 CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
859 data[i++] =
860 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
861 data[i++] =
862 CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
576 } 863 }
577} 864}
578 865
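
[Annotation] The rewritten lio_get_ethtool_stats() only works because ethtool's three stats callbacks form an implicit contract: get_sset_count() sizes the arrays, get_strings() names slot i, and get_ethtool_stats() must fill slot i with the matching value, in exactly the same order. A minimal self-consistent trio (illustrative, not liquidio's):

	#include <linux/errno.h>
	#include <linux/ethtool.h>
	#include <linux/kernel.h>
	#include <linux/netdevice.h>
	#include <linux/string.h>

	static const char example_strings[][ETH_GSTRING_LEN] = {
		"rx_packets",
		"tx_packets",
	};

	static int example_get_sset_count(struct net_device *dev, int sset)
	{
		return sset == ETH_SS_STATS ? ARRAY_SIZE(example_strings)
					    : -EOPNOTSUPP;
	}

	static void example_get_strings(struct net_device *dev, u32 sset,
					u8 *data)
	{
		if (sset == ETH_SS_STATS)
			memcpy(data, example_strings, sizeof(example_strings));
	}

	static void example_get_ethtool_stats(struct net_device *dev,
					      struct ethtool_stats *stats,
					      u64 *data)
	{
		/* slot order must match example_strings exactly */
		data[0] = dev->stats.rx_packets;
		data[1] = dev->stats.tx_packets;
	}
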
@@ -581,26 +868,43 @@ static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
581 struct lio *lio = GET_LIO(netdev); 868 struct lio *lio = GET_LIO(netdev);
582 struct octeon_device *oct_dev = lio->oct_dev; 869 struct octeon_device *oct_dev = lio->oct_dev;
583 int num_iq_stats, num_oq_stats, i, j; 870 int num_iq_stats, num_oq_stats, i, j;
871 int num_stats;
584 872
585 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings); 873 switch (stringset) {
586 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { 874 case ETH_SS_STATS:
587 if (!(oct_dev->io_qmask.iq & (1UL << i))) 875 num_stats = ARRAY_SIZE(oct_stats_strings);
588 continue; 876 for (j = 0; j < num_stats; j++) {
589 for (j = 0; j < num_iq_stats; j++) { 877 sprintf(data, "%s", oct_stats_strings[j]);
590 sprintf(data, "IQ%d %s", i, oct_iq_stats_strings[j]);
591 data += ETH_GSTRING_LEN; 878 data += ETH_GSTRING_LEN;
592 } 879 }
593 }
594 880
595 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings); 881 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
596 /* for (i = 0; i < oct_dev->num_oqs; i++) { */ 882 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
597 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { 883 if (!(oct_dev->io_qmask.iq & (1ULL << i)))
598 if (!(oct_dev->io_qmask.oq & (1UL << i))) 884 continue;
599 continue; 885 for (j = 0; j < num_iq_stats; j++) {
600 for (j = 0; j < num_oq_stats; j++) { 886 sprintf(data, "tx-%d-%s", i,
601 sprintf(data, "OQ%d %s", i, oct_droq_stats_strings[j]); 887 oct_iq_stats_strings[j]);
602 data += ETH_GSTRING_LEN; 888 data += ETH_GSTRING_LEN;
889 }
603 } 890 }
891
892 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
893 /* for (i = 0; i < oct_dev->num_oqs; i++) { */
894 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
895 if (!(oct_dev->io_qmask.oq & (1ULL << i)))
896 continue;
897 for (j = 0; j < num_oq_stats; j++) {
898 sprintf(data, "rx-%d-%s", i,
899 oct_droq_stats_strings[j]);
900 data += ETH_GSTRING_LEN;
901 }
902 }
903 break;
904
905 default:
906 netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
907 break;
604 } 908 }
605} 909}
606 910
@@ -609,8 +913,14 @@ static int lio_get_sset_count(struct net_device *netdev, int sset)
609 struct lio *lio = GET_LIO(netdev); 913 struct lio *lio = GET_LIO(netdev);
610 struct octeon_device *oct_dev = lio->oct_dev; 914 struct octeon_device *oct_dev = lio->oct_dev;
611 915
612 return (ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs) + 916 switch (sset) {
613 (ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs); 917 case ETH_SS_STATS:
918 return (ARRAY_SIZE(oct_stats_strings) +
919 ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
920 ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
921 default:
922 return -EOPNOTSUPP;
923 }
614} 924}
615 925
616static int lio_get_intr_coalesce(struct net_device *netdev, 926static int lio_get_intr_coalesce(struct net_device *netdev,
@@ -618,50 +928,50 @@ static int lio_get_intr_coalesce(struct net_device *netdev,
618{ 928{
619 struct lio *lio = GET_LIO(netdev); 929 struct lio *lio = GET_LIO(netdev);
620 struct octeon_device *oct = lio->oct_dev; 930 struct octeon_device *oct = lio->oct_dev;
621 struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
622 struct octeon_instr_queue *iq; 931 struct octeon_instr_queue *iq;
623 struct oct_intrmod_cfg *intrmod_cfg; 932 struct oct_intrmod_cfg *intrmod_cfg;
624 933
625 intrmod_cfg = &oct->intrmod; 934 intrmod_cfg = &oct->intrmod;
626 935
627 switch (oct->chip_id) { 936 switch (oct->chip_id) {
628 /* case OCTEON_CN73XX: Todo */
629 /* break; */
630 case OCTEON_CN68XX: 937 case OCTEON_CN68XX:
631 case OCTEON_CN66XX: 938 case OCTEON_CN66XX: {
632 if (!intrmod_cfg->intrmod_enable) { 939 struct octeon_cn6xxx *cn6xxx =
940 (struct octeon_cn6xxx *)oct->chip;
941
942 if (!intrmod_cfg->rx_enable) {
633 intr_coal->rx_coalesce_usecs = 943 intr_coal->rx_coalesce_usecs =
634 CFG_GET_OQ_INTR_TIME(cn6xxx->conf); 944 CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
635 intr_coal->rx_max_coalesced_frames = 945 intr_coal->rx_max_coalesced_frames =
636 CFG_GET_OQ_INTR_PKT(cn6xxx->conf); 946 CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
637 } else {
638 intr_coal->use_adaptive_rx_coalesce =
639 intrmod_cfg->intrmod_enable;
640 intr_coal->rate_sample_interval =
641 intrmod_cfg->intrmod_check_intrvl;
642 intr_coal->pkt_rate_high =
643 intrmod_cfg->intrmod_maxpkt_ratethr;
644 intr_coal->pkt_rate_low =
645 intrmod_cfg->intrmod_minpkt_ratethr;
646 intr_coal->rx_max_coalesced_frames_high =
647 intrmod_cfg->intrmod_maxcnt_trigger;
648 intr_coal->rx_coalesce_usecs_high =
649 intrmod_cfg->intrmod_maxtmr_trigger;
650 intr_coal->rx_coalesce_usecs_low =
651 intrmod_cfg->intrmod_mintmr_trigger;
652 intr_coal->rx_max_coalesced_frames_low =
653 intrmod_cfg->intrmod_mincnt_trigger;
654 } 947 }
655 948
656 iq = oct->instr_queue[lio->linfo.txpciq[0]]; 949 iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
657 intr_coal->tx_max_coalesced_frames = iq->fill_threshold; 950 intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
658 break; 951 break;
659 952 }
660 default: 953 default:
661 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); 954 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
662 return -EINVAL; 955 return -EINVAL;
663 } 956 }
664 957 if (intrmod_cfg->rx_enable) {
958 intr_coal->use_adaptive_rx_coalesce =
959 intrmod_cfg->rx_enable;
960 intr_coal->rate_sample_interval =
961 intrmod_cfg->check_intrvl;
962 intr_coal->pkt_rate_high =
963 intrmod_cfg->maxpkt_ratethr;
964 intr_coal->pkt_rate_low =
965 intrmod_cfg->minpkt_ratethr;
966 intr_coal->rx_max_coalesced_frames_high =
967 intrmod_cfg->rx_maxcnt_trigger;
968 intr_coal->rx_coalesce_usecs_high =
969 intrmod_cfg->rx_maxtmr_trigger;
970 intr_coal->rx_coalesce_usecs_low =
971 intrmod_cfg->rx_mintmr_trigger;
972 intr_coal->rx_max_coalesced_frames_low =
973 intrmod_cfg->rx_mincnt_trigger;
974 }
665 return 0; 975 return 0;
666} 976}
667 977
@@ -681,19 +991,20 @@ static void octnet_intrmod_callback(struct octeon_device *oct_dev,
681 else 991 else
682 dev_info(&oct_dev->pci_dev->dev, 992 dev_info(&oct_dev->pci_dev->dev,
683 "Rx-Adaptive Interrupt moderation enabled:%llx\n", 993 "Rx-Adaptive Interrupt moderation enabled:%llx\n",
684 oct_dev->intrmod.intrmod_enable); 994 oct_dev->intrmod.rx_enable);
685 995
686 octeon_free_soft_command(oct_dev, sc); 996 octeon_free_soft_command(oct_dev, sc);
687} 997}
688 998
689/* Configure interrupt moderation parameters */ 999/* Configure interrupt moderation parameters */
690static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg) 1000static int octnet_set_intrmod_cfg(struct lio *lio,
1001 struct oct_intrmod_cfg *intr_cfg)
691{ 1002{
692 struct octeon_soft_command *sc; 1003 struct octeon_soft_command *sc;
693 struct oct_intrmod_cmd *cmd; 1004 struct oct_intrmod_cmd *cmd;
694 struct oct_intrmod_cfg *cfg; 1005 struct oct_intrmod_cfg *cfg;
695 int retval; 1006 int retval;
696 struct octeon_device *oct_dev = (struct octeon_device *)oct; 1007 struct octeon_device *oct_dev = lio->oct_dev;
697 1008
698 /* Alloc soft command */ 1009 /* Alloc soft command */
699 sc = (struct octeon_soft_command *) 1010 sc = (struct octeon_soft_command *)
@@ -714,6 +1025,8 @@ static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
714 cmd->cfg = cfg; 1025 cmd->cfg = cfg;
715 cmd->oct_dev = oct_dev; 1026 cmd->oct_dev = oct_dev;
716 1027
1028 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1029
717 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, 1030 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
718 OPCODE_NIC_INTRMOD_CFG, 0, 0, 0); 1031 OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
719 1032
@@ -722,7 +1035,7 @@ static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
722 sc->wait_time = 1000; 1035 sc->wait_time = 1000;
723 1036
724 retval = octeon_send_soft_command(oct_dev, sc); 1037 retval = octeon_send_soft_command(oct_dev, sc);
725 if (retval) { 1038 if (retval == IQ_SEND_FAILED) {
726 octeon_free_soft_command(oct_dev, sc); 1039 octeon_free_soft_command(oct_dev, sc);
727 return -EINVAL; 1040 return -EINVAL;
728 } 1041 }
@@ -730,9 +1043,158 @@ static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
730 return 0; 1043 return 0;
731} 1044}
732 1045
1046void
1047octnet_nic_stats_callback(struct octeon_device *oct_dev,
1048 u32 status, void *ptr)
1049{
1050 struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1051 struct oct_nic_stats_resp *resp = (struct oct_nic_stats_resp *)
1052 sc->virtrptr;
1053 struct oct_nic_stats_ctrl *ctrl = (struct oct_nic_stats_ctrl *)
1054 sc->ctxptr;
1055 struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
1056 struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
1057
1058 struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
1059 struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
1060
1061 if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
1062 octeon_swap_8B_data((u64 *)&resp->stats,
1063 (sizeof(struct oct_link_stats)) >> 3);
1064
1065 /* RX link-level stats */
1066 rstats->total_rcvd = rsp_rstats->total_rcvd;
1067 rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
1068 rstats->total_bcst = rsp_rstats->total_bcst;
1069 rstats->total_mcst = rsp_rstats->total_mcst;
1070 rstats->runts = rsp_rstats->runts;
1071 rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
1072 /* Accounts for over/under-run of buffers */
1073 rstats->fifo_err = rsp_rstats->fifo_err;
1074 rstats->dmac_drop = rsp_rstats->dmac_drop;
1075 rstats->fcs_err = rsp_rstats->fcs_err;
1076 rstats->jabber_err = rsp_rstats->jabber_err;
1077 rstats->l2_err = rsp_rstats->l2_err;
1078 rstats->frame_err = rsp_rstats->frame_err;
1079
1080 /* RX firmware stats */
1081 rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
1082 rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
1083 rstats->fw_err_pko = rsp_rstats->fw_err_pko;
1084 rstats->fw_err_link = rsp_rstats->fw_err_link;
1085 rstats->fw_err_drop = rsp_rstats->fw_err_drop;
1086 /* Number of packets that are LROed */
1087 rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
1088 /* Number of octets that are LROed */
1089 rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
1090 /* Number of LRO packets formed */
1091 rstats->fw_total_lro = rsp_rstats->fw_total_lro;
1092 /* Number of times lRO of packet aborted */
1093 rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
1094 rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
1095 rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
1096 rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
1097 rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
1098 /* intrmod: packet forward rate */
1099 rstats->fwd_rate = rsp_rstats->fwd_rate;
1100
1101 /* TX link-level stats */
1102 tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
1103 tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
1104 tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
1105 tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
1106 tstats->ctl_sent = rsp_tstats->ctl_sent;
1107 /* Packets sent after one collision*/
1108 tstats->one_collision_sent = rsp_tstats->one_collision_sent;
1109 /* Packets sent after multiple collision*/
1110 tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
1111 /* Packets not sent due to max collisions */
1112 tstats->max_collision_fail = rsp_tstats->max_collision_fail;
1113 /* Packets not sent due to max deferrals */
1114 tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
1115 /* Accounts for over/under-run of buffers */
1116 tstats->fifo_err = rsp_tstats->fifo_err;
1117 tstats->runts = rsp_tstats->runts;
1118 /* Total number of collisions detected */
1119 tstats->total_collisions = rsp_tstats->total_collisions;
1120
1121 /* firmware stats */
1122 tstats->fw_total_sent = rsp_tstats->fw_total_sent;
1123 tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
1124 tstats->fw_err_pko = rsp_tstats->fw_err_pko;
1125 tstats->fw_err_link = rsp_tstats->fw_err_link;
1126 tstats->fw_err_drop = rsp_tstats->fw_err_drop;
1127 tstats->fw_tso = rsp_tstats->fw_tso;
1128 tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
1129 tstats->fw_err_tso = rsp_tstats->fw_err_tso;
1130 resp->status = 1;
1131 } else {
1132 resp->status = -1;
1133 }
1134 complete(&ctrl->complete);
1135}
1136
1137/* Configure interrupt moderation parameters */
1138static int octnet_get_link_stats(struct net_device *netdev)
1139{
1140 struct lio *lio = GET_LIO(netdev);
1141 struct octeon_device *oct_dev = lio->oct_dev;
1142
1143 struct octeon_soft_command *sc;
1144 struct oct_nic_stats_ctrl *ctrl;
1145 struct oct_nic_stats_resp *resp;
1146
1147 int retval;
1148
1149 /* Alloc soft command */
1150 sc = (struct octeon_soft_command *)
1151 octeon_alloc_soft_command(oct_dev,
1152 0,
1153 sizeof(struct oct_nic_stats_resp),
1154 sizeof(struct octnic_ctrl_pkt));
1155
1156 if (!sc)
1157 return -ENOMEM;
1158
1159 resp = (struct oct_nic_stats_resp *)sc->virtrptr;
1160 memset(resp, 0, sizeof(struct oct_nic_stats_resp));
1161
1162 ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
1163 memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
1164 ctrl->netdev = netdev;
1165 init_completion(&ctrl->complete);
1166
1167 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1168
1169 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1170 OPCODE_NIC_PORT_STATS, 0, 0, 0);
1171
1172 sc->callback = octnet_nic_stats_callback;
1173 sc->callback_arg = sc;
1174 sc->wait_time = 500; /*in milli seconds*/
1175
1176 retval = octeon_send_soft_command(oct_dev, sc);
1177 if (retval == IQ_SEND_FAILED) {
1178 octeon_free_soft_command(oct_dev, sc);
1179 return -EINVAL;
1180 }
1181
1182 wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
1183
1184 if (resp->status != 1) {
1185 octeon_free_soft_command(oct_dev, sc);
1186
1187 return -EINVAL;
1188 }
1189
1190 octeon_free_soft_command(oct_dev, sc);
1191
1192 return 0;
1193}
1194
733/* Enable/Disable auto interrupt Moderation */ 1195/* Enable/Disable auto interrupt Moderation */
734static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce 1196static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
735 *intr_coal, int adaptive) 1197 *intr_coal)
736{ 1198{
737 int ret = 0; 1199 int ret = 0;
738 struct octeon_device *oct = lio->oct_dev; 1200 struct octeon_device *oct = lio->oct_dev;
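
[Annotation] octnet_get_link_stats() above is another instance of the driver's soft-command idiom: allocate a command, point its response and context buffers at driver structures, send it asynchronously, then block on a completion that the callback signals once the response has been byte-swapped and copied. The skeleton of that idiom, stripped of the octeon specifics; all names here are illustrative:

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	struct example_req {
		struct completion done;
		int status;
	};

	/* Runs in the response path: publish the result, then wake the waiter. */
	static void example_callback(struct example_req *req, int status)
	{
		req->status = status;
		complete(&req->done);
	}

	static int example_issue_and_wait(struct example_req *req)
	{
		init_completion(&req->done);

		/* ...hand the command to the device queue here... */

		if (!wait_for_completion_timeout(&req->done,
						 msecs_to_jiffies(1000)))
			return -ETIMEDOUT;	/* callback never fired */

		return req->status;
	}
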
@@ -740,59 +1202,73 @@ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
740 1202
741 intrmod_cfg = &oct->intrmod; 1203 intrmod_cfg = &oct->intrmod;
742 1204
743 if (adaptive) { 1205 if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) {
744 if (intr_coal->rate_sample_interval) 1206 if (intr_coal->rate_sample_interval)
745 intrmod_cfg->intrmod_check_intrvl = 1207 intrmod_cfg->check_intrvl =
746 intr_coal->rate_sample_interval; 1208 intr_coal->rate_sample_interval;
747 else 1209 else
748 intrmod_cfg->intrmod_check_intrvl = 1210 intrmod_cfg->check_intrvl =
749 LIO_INTRMOD_CHECK_INTERVAL; 1211 LIO_INTRMOD_CHECK_INTERVAL;
750 1212
751 if (intr_coal->pkt_rate_high) 1213 if (intr_coal->pkt_rate_high)
752 intrmod_cfg->intrmod_maxpkt_ratethr = 1214 intrmod_cfg->maxpkt_ratethr =
753 intr_coal->pkt_rate_high; 1215 intr_coal->pkt_rate_high;
754 else 1216 else
755 intrmod_cfg->intrmod_maxpkt_ratethr = 1217 intrmod_cfg->maxpkt_ratethr =
756 LIO_INTRMOD_MAXPKT_RATETHR; 1218 LIO_INTRMOD_MAXPKT_RATETHR;
757 1219
758 if (intr_coal->pkt_rate_low) 1220 if (intr_coal->pkt_rate_low)
759 intrmod_cfg->intrmod_minpkt_ratethr = 1221 intrmod_cfg->minpkt_ratethr =
760 intr_coal->pkt_rate_low; 1222 intr_coal->pkt_rate_low;
761 else 1223 else
762 intrmod_cfg->intrmod_minpkt_ratethr = 1224 intrmod_cfg->minpkt_ratethr =
763 LIO_INTRMOD_MINPKT_RATETHR; 1225 LIO_INTRMOD_MINPKT_RATETHR;
764 1226 }
1227 if (oct->intrmod.rx_enable) {
765 if (intr_coal->rx_max_coalesced_frames_high) 1228 if (intr_coal->rx_max_coalesced_frames_high)
766 intrmod_cfg->intrmod_maxcnt_trigger = 1229 intrmod_cfg->rx_maxcnt_trigger =
767 intr_coal->rx_max_coalesced_frames_high; 1230 intr_coal->rx_max_coalesced_frames_high;
768 else 1231 else
769 intrmod_cfg->intrmod_maxcnt_trigger = 1232 intrmod_cfg->rx_maxcnt_trigger =
770 LIO_INTRMOD_MAXCNT_TRIGGER; 1233 LIO_INTRMOD_RXMAXCNT_TRIGGER;
771 1234
772 if (intr_coal->rx_coalesce_usecs_high) 1235 if (intr_coal->rx_coalesce_usecs_high)
773 intrmod_cfg->intrmod_maxtmr_trigger = 1236 intrmod_cfg->rx_maxtmr_trigger =
774 intr_coal->rx_coalesce_usecs_high; 1237 intr_coal->rx_coalesce_usecs_high;
775 else 1238 else
776 intrmod_cfg->intrmod_maxtmr_trigger = 1239 intrmod_cfg->rx_maxtmr_trigger =
777 LIO_INTRMOD_MAXTMR_TRIGGER; 1240 LIO_INTRMOD_RXMAXTMR_TRIGGER;
778 1241
779 if (intr_coal->rx_coalesce_usecs_low) 1242 if (intr_coal->rx_coalesce_usecs_low)
780 intrmod_cfg->intrmod_mintmr_trigger = 1243 intrmod_cfg->rx_mintmr_trigger =
781 intr_coal->rx_coalesce_usecs_low; 1244 intr_coal->rx_coalesce_usecs_low;
782 else 1245 else
783 intrmod_cfg->intrmod_mintmr_trigger = 1246 intrmod_cfg->rx_mintmr_trigger =
784 LIO_INTRMOD_MINTMR_TRIGGER; 1247 LIO_INTRMOD_RXMINTMR_TRIGGER;
785 1248
786 if (intr_coal->rx_max_coalesced_frames_low) 1249 if (intr_coal->rx_max_coalesced_frames_low)
787 intrmod_cfg->intrmod_mincnt_trigger = 1250 intrmod_cfg->rx_mincnt_trigger =
788 intr_coal->rx_max_coalesced_frames_low; 1251 intr_coal->rx_max_coalesced_frames_low;
789 else 1252 else
790 intrmod_cfg->intrmod_mincnt_trigger = 1253 intrmod_cfg->rx_mincnt_trigger =
791 LIO_INTRMOD_MINCNT_TRIGGER; 1254 LIO_INTRMOD_RXMINCNT_TRIGGER;
1255 }
1256 if (oct->intrmod.tx_enable) {
1257 if (intr_coal->tx_max_coalesced_frames_high)
1258 intrmod_cfg->tx_maxcnt_trigger =
1259 intr_coal->tx_max_coalesced_frames_high;
1260 else
1261 intrmod_cfg->tx_maxcnt_trigger =
1262 LIO_INTRMOD_TXMAXCNT_TRIGGER;
1263 if (intr_coal->tx_max_coalesced_frames_low)
1264 intrmod_cfg->tx_mincnt_trigger =
1265 intr_coal->tx_max_coalesced_frames_low;
1266 else
1267 intrmod_cfg->tx_mincnt_trigger =
1268 LIO_INTRMOD_TXMINCNT_TRIGGER;
792 } 1269 }
793 1270
794 intrmod_cfg->intrmod_enable = adaptive; 1271 ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
795 ret = octnet_set_intrmod_cfg(oct, intrmod_cfg);
796 1272
797 return ret; 1273 return ret;
798} 1274}
@@ -800,51 +1276,79 @@ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
800static int 1276static int
801oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal) 1277oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
802{ 1278{
803 int ret;
804 struct octeon_device *oct = lio->oct_dev; 1279 struct octeon_device *oct = lio->oct_dev;
805 struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
806 u32 rx_max_coalesced_frames; 1280 u32 rx_max_coalesced_frames;
807 1281
808 if (!intr_coal->rx_max_coalesced_frames)
809 rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
810 else
811 rx_max_coalesced_frames = intr_coal->rx_max_coalesced_frames;
812
813 /* Disable adaptive interrupt modulation */
814 ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
815 if (ret)
816 return ret;
817
818 /* Config Cnt based interrupt values */ 1282 /* Config Cnt based interrupt values */
819 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS, 1283 switch (oct->chip_id) {
820 rx_max_coalesced_frames); 1284 case OCTEON_CN68XX:
821 CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames); 1285 case OCTEON_CN66XX: {
1286 struct octeon_cn6xxx *cn6xxx =
1287 (struct octeon_cn6xxx *)oct->chip;
1288
1289 if (!intr_coal->rx_max_coalesced_frames)
1290 rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
1291 else
1292 rx_max_coalesced_frames =
1293 intr_coal->rx_max_coalesced_frames;
1294 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
1295 rx_max_coalesced_frames);
1296 CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
1297 break;
1298 }
1299 default:
1300 return -EINVAL;
1301 }
822 return 0; 1302 return 0;
823} 1303}
824 1304
825static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce 1305static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
826 *intr_coal) 1306 *intr_coal)
827{ 1307{
828 int ret;
829 struct octeon_device *oct = lio->oct_dev; 1308 struct octeon_device *oct = lio->oct_dev;
830 struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
831 u32 time_threshold, rx_coalesce_usecs; 1309 u32 time_threshold, rx_coalesce_usecs;
832 1310
833 if (!intr_coal->rx_coalesce_usecs) 1311 /* Config Time based interrupt values */
834 rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME; 1312 switch (oct->chip_id) {
835 else 1313 case OCTEON_CN68XX:
836 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; 1314 case OCTEON_CN66XX: {
1315 struct octeon_cn6xxx *cn6xxx =
1316 (struct octeon_cn6xxx *)oct->chip;
1317 if (!intr_coal->rx_coalesce_usecs)
1318 rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
1319 else
1320 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
837 1321
838 /* Disable adaptive interrupt modulation */ 1322 time_threshold = lio_cn6xxx_get_oq_ticks(oct,
839 ret = oct_cfg_adaptive_intr(lio, intr_coal, 0); 1323 rx_coalesce_usecs);
840 if (ret) 1324 octeon_write_csr(oct,
841 return ret; 1325 CN6XXX_SLI_OQ_INT_LEVEL_TIME,
1326 time_threshold);
842 1327
843 /* Config Time based interrupt values */ 1328 CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
844 time_threshold = lio_cn6xxx_get_oq_ticks(oct, rx_coalesce_usecs); 1329 break;
845 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold); 1330 }
846 CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs); 1331 default:
1332 return -EINVAL;
1333 }
1334
1335 return 0;
1336}
847 1337
1338static int
1339oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
1340 __attribute__((unused)))
1341{
1342 struct octeon_device *oct = lio->oct_dev;
1343
1344 /* Config Cnt based interrupt values */
1345 switch (oct->chip_id) {
1346 case OCTEON_CN68XX:
1347 case OCTEON_CN66XX:
1348 break;
1349 default:
1350 return -EINVAL;
1351 }
848 return 0; 1352 return 0;
849} 1353}
850 1354
@@ -855,59 +1359,48 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
855 int ret; 1359 int ret;
856 struct octeon_device *oct = lio->oct_dev; 1360 struct octeon_device *oct = lio->oct_dev;
857 u32 j, q_no; 1361 u32 j, q_no;
1362 int db_max, db_min;
858 1363
859 if ((intr_coal->tx_max_coalesced_frames >= CN6XXX_DB_MIN) && 1364 switch (oct->chip_id) {
860 (intr_coal->tx_max_coalesced_frames <= CN6XXX_DB_MAX)) { 1365 case OCTEON_CN68XX:
861 for (j = 0; j < lio->linfo.num_txpciq; j++) { 1366 case OCTEON_CN66XX:
862 q_no = lio->linfo.txpciq[j]; 1367 db_min = CN6XXX_DB_MIN;
863 oct->instr_queue[q_no]->fill_threshold = 1368 db_max = CN6XXX_DB_MAX;
864 intr_coal->tx_max_coalesced_frames; 1369 if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
1370 (intr_coal->tx_max_coalesced_frames <= db_max)) {
1371 for (j = 0; j < lio->linfo.num_txpciq; j++) {
1372 q_no = lio->linfo.txpciq[j].s.q_no;
1373 oct->instr_queue[q_no]->fill_threshold =
1374 intr_coal->tx_max_coalesced_frames;
1375 }
1376 } else {
1377 dev_err(&oct->pci_dev->dev,
1378 "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
1379 intr_coal->tx_max_coalesced_frames, db_min,
1380 db_max);
1381 return -EINVAL;
865 } 1382 }
866 } else { 1383 break;
867 dev_err(&oct->pci_dev->dev, 1384 default:
868 "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
869 intr_coal->tx_max_coalesced_frames, CN6XXX_DB_MIN,
870 CN6XXX_DB_MAX);
871 return -EINVAL; 1385 return -EINVAL;
872 } 1386 }
873 1387
874 /* User requested adaptive-rx on */ 1388 oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
875 if (intr_coal->use_adaptive_rx_coalesce) { 1389 oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
876 ret = oct_cfg_adaptive_intr(lio, intr_coal, 1);
877 if (ret)
878 goto ret_intrmod;
879 }
880 1390
881 /* User requested adaptive-rx off and rx coalesce */ 1391 ret = oct_cfg_adaptive_intr(lio, intr_coal);
882 if ((intr_coal->rx_coalesce_usecs) && 1392
883 (!intr_coal->use_adaptive_rx_coalesce)) { 1393 if (!intr_coal->use_adaptive_rx_coalesce) {
884 ret = oct_cfg_rx_intrtime(lio, intr_coal); 1394 ret = oct_cfg_rx_intrtime(lio, intr_coal);
885 if (ret) 1395 if (ret)
886 goto ret_intrmod; 1396 goto ret_intrmod;
887 }
888 1397
889 /* User requested adaptive-rx off and rx max coalesced frames */
890 if ((intr_coal->rx_max_coalesced_frames) &&
891 (!intr_coal->use_adaptive_rx_coalesce)) {
892 ret = oct_cfg_rx_intrcnt(lio, intr_coal); 1398 ret = oct_cfg_rx_intrcnt(lio, intr_coal);
893 if (ret) 1399 if (ret)
894 goto ret_intrmod; 1400 goto ret_intrmod;
895 } 1401 }
896 1402 if (!intr_coal->use_adaptive_tx_coalesce) {
897 /* User requested adaptive-rx off, so use default coalesce params */ 1403 ret = oct_cfg_tx_intrcnt(lio, intr_coal);
898 if ((!intr_coal->rx_max_coalesced_frames) &&
899 (!intr_coal->use_adaptive_rx_coalesce) &&
900 (!intr_coal->rx_coalesce_usecs)) {
901 dev_info(&oct->pci_dev->dev,
902 "Turning off adaptive-rx interrupt moderation\n");
903 dev_info(&oct->pci_dev->dev,
904 "Using RX Coalesce Default values rx_coalesce_usecs:%d rx_max_coalesced_frames:%d\n",
905 CN6XXX_OQ_INTR_TIME, CN6XXX_OQ_INTR_PKT);
906 ret = oct_cfg_rx_intrtime(lio, intr_coal);
907 if (ret)
908 goto ret_intrmod;
909
910 ret = oct_cfg_rx_intrcnt(lio, intr_coal);
911 if (ret) 1404 if (ret)
912 goto ret_intrmod; 1405 goto ret_intrmod;
913 } 1406 }
@@ -923,23 +1416,28 @@ static int lio_get_ts_info(struct net_device *netdev,
923 struct lio *lio = GET_LIO(netdev); 1416 struct lio *lio = GET_LIO(netdev);
924 1417
925 info->so_timestamping = 1418 info->so_timestamping =
1419#ifdef PTP_HARDWARE_TIMESTAMPING
926 SOF_TIMESTAMPING_TX_HARDWARE | 1420 SOF_TIMESTAMPING_TX_HARDWARE |
927 SOF_TIMESTAMPING_TX_SOFTWARE |
928 SOF_TIMESTAMPING_RX_HARDWARE | 1421 SOF_TIMESTAMPING_RX_HARDWARE |
1422 SOF_TIMESTAMPING_RAW_HARDWARE |
1423 SOF_TIMESTAMPING_TX_SOFTWARE |
1424#endif
929 SOF_TIMESTAMPING_RX_SOFTWARE | 1425 SOF_TIMESTAMPING_RX_SOFTWARE |
930 SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; 1426 SOF_TIMESTAMPING_SOFTWARE;
931 1427
932 if (lio->ptp_clock) 1428 if (lio->ptp_clock)
933 info->phc_index = ptp_clock_index(lio->ptp_clock); 1429 info->phc_index = ptp_clock_index(lio->ptp_clock);
934 else 1430 else
935 info->phc_index = -1; 1431 info->phc_index = -1;
936 1432
1433#ifdef PTP_HARDWARE_TIMESTAMPING
937 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 1434 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
938 1435
939 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 1436 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
940 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | 1437 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
941 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 1438 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
942 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 1439 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
1440#endif
943 1441
944 return 0; 1442 return 0;
945} 1443}
@@ -950,7 +1448,6 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
950 struct octeon_device *oct = lio->oct_dev; 1448 struct octeon_device *oct = lio->oct_dev;
951 struct oct_link_info *linfo; 1449 struct oct_link_info *linfo;
952 struct octnic_ctrl_pkt nctrl; 1450 struct octnic_ctrl_pkt nctrl;
953 struct octnic_ctrl_params nparams;
954 int ret = 0; 1451 int ret = 0;
955 1452
956 /* get the link info */ 1453 /* get the link info */
@@ -965,12 +1462,14 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
965 ecmd->duplex != DUPLEX_FULL))) 1462 ecmd->duplex != DUPLEX_FULL)))
966 return -EINVAL; 1463 return -EINVAL;
967 1464
968 /* Ethtool Support is not provided for XAUI and RXAUI Interfaces 1465 /* Ethtool Support is not provided for XAUI, RXAUI, and XFI Interfaces
969 * as they operate at fixed Speed and Duplex settings 1466 * as they operate at fixed Speed and Duplex settings
970 */ 1467 */
971 if (linfo->link.s.interface == INTERFACE_MODE_XAUI || 1468 if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
972 linfo->link.s.interface == INTERFACE_MODE_RXAUI) { 1469 linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
973 dev_info(&oct->pci_dev->dev, "XAUI IFs settings cannot be modified.\n"); 1470 linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
1471 dev_info(&oct->pci_dev->dev,
1472 "Autonegotiation, duplex and speed settings cannot be modified.\n");
974 return -EINVAL; 1473 return -EINVAL;
975 } 1474 }
976 1475
@@ -978,9 +1477,9 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
978 1477
979 nctrl.ncmd.u64 = 0; 1478 nctrl.ncmd.u64 = 0;
980 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS; 1479 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
1480 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
981 nctrl.wait_time = 1000; 1481 nctrl.wait_time = 1000;
982 nctrl.netpndev = (u64)netdev; 1482 nctrl.netpndev = (u64)netdev;
983 nctrl.ncmd.s.param1 = lio->linfo.ifidx;
984 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1483 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
985 1484
986 /* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex 1485 /* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex
@@ -990,19 +1489,17 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
990 /* Autoneg ON */ 1489 /* Autoneg ON */
991 nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON | 1490 nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
992 OCTNIC_NCMD_AUTONEG_ON; 1491 OCTNIC_NCMD_AUTONEG_ON;
993 nctrl.ncmd.s.param2 = ecmd->advertising; 1492 nctrl.ncmd.s.param1 = ecmd->advertising;
994 } else { 1493 } else {
995 /* Autoneg OFF */ 1494 /* Autoneg OFF */
996 nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON; 1495 nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;
997 1496
998 nctrl.ncmd.s.param3 = ecmd->duplex; 1497 nctrl.ncmd.s.param2 = ecmd->duplex;
999 1498
1000 nctrl.ncmd.s.param2 = ecmd->speed; 1499 nctrl.ncmd.s.param1 = ecmd->speed;
1001 } 1500 }
1002 1501
1003 nparams.resp_order = OCTEON_RESP_ORDERED; 1502 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1004
1005 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
1006 if (ret < 0) { 1503 if (ret < 0) {
1007 dev_err(&oct->pci_dev->dev, "Failed to set settings\n"); 1504 dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
1008 return -1; 1505 return -1;
@@ -1186,6 +1683,23 @@ static void lio_get_regs(struct net_device *dev,
1186 } 1683 }
1187} 1684}
1188 1685
1686static u32 lio_get_priv_flags(struct net_device *netdev)
1687{
1688 struct lio *lio = GET_LIO(netdev);
1689
1690 return lio->oct_dev->priv_flags;
1691}
1692
1693static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
1694{
1695 struct lio *lio = GET_LIO(netdev);
1696 bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
1697
1698 lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
1699 intr_by_tx_bytes);
1700 return 0;
1701}
1702
1189static const struct ethtool_ops lio_ethtool_ops = { 1703static const struct ethtool_ops lio_ethtool_ops = {
1190 .get_settings = lio_get_settings, 1704 .get_settings = lio_get_settings,
1191 .get_link = ethtool_op_get_link, 1705 .get_link = ethtool_op_get_link,
@@ -1207,6 +1721,8 @@ static const struct ethtool_ops lio_ethtool_ops = {
1207 .set_settings = lio_set_settings, 1721 .set_settings = lio_set_settings,
1208 .get_coalesce = lio_get_intr_coalesce, 1722 .get_coalesce = lio_get_intr_coalesce,
1209 .set_coalesce = lio_set_intr_coalesce, 1723 .set_coalesce = lio_set_intr_coalesce,
1724 .get_priv_flags = lio_get_priv_flags,
1725 .set_priv_flags = lio_set_priv_flags,
1210 .get_ts_info = lio_get_ts_info, 1726 .get_ts_info = lio_get_ts_info,
1211}; 1727};
1212 1728
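Note: for the new lio_get_priv_flags()/lio_set_priv_flags() pair to surface in ethtool --show-priv-flags, a driver also exposes one name per flag bit via get_sset_count()/get_strings(). A hedged sketch of that wiring; the string below is illustrative, not necessarily what this driver registers:

	static const char example_priv_flags[][ETH_GSTRING_LEN] = {
		"tx-interrupt-by-bytes",	/* bit OCT_PRIV_FLAG_TX_BYTES */
	};

	static int example_get_sset_count(struct net_device *netdev, int sset)
	{
		return (sset == ETH_SS_PRIV_FLAGS) ?
		       ARRAY_SIZE(example_priv_flags) : -EOPNOTSUPP;
	}

	static void example_get_strings(struct net_device *netdev, u32 sset, u8 *data)
	{
		if (sset == ETH_SS_PRIV_FLAGS)
			memcpy(data, example_priv_flags, sizeof(example_priv_flags));
	}

With that in place, ethtool --set-priv-flags <iface> tx-interrupt-by-bytes on lands in lio_set_priv_flags() with the corresponding bit set.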
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 8de79ae63231..1a584ebde42c 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -72,6 +72,9 @@ MODULE_PARM_DESC(console_bitmask,
72 72
73#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 73#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
74 74
75#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
76 (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
77
75static int debug = -1; 78static int debug = -1;
76module_param(debug, int, 0644); 79module_param(debug, int, 0644);
77MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); 80MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
@@ -84,6 +87,8 @@ static int conf_type;
84module_param(conf_type, int, 0); 87module_param(conf_type, int, 0);
85MODULE_PARM_DESC(conf_type, "select octeon configuration 0 default 1 ovs"); 88MODULE_PARM_DESC(conf_type, "select octeon configuration 0 default 1 ovs");
86 89
90static int ptp_enable = 1;
91
87/* Bit mask values for lio->ifstate */ 92/* Bit mask values for lio->ifstate */
88#define LIO_IFSTATE_DROQ_OPS 0x01 93#define LIO_IFSTATE_DROQ_OPS 0x01
89#define LIO_IFSTATE_REGISTERED 0x02 94#define LIO_IFSTATE_REGISTERED 0x02
@@ -166,6 +171,8 @@ struct octnic_gather {
166 * received from the IP layer. 171 * received from the IP layer.
167 */ 172 */
168 struct octeon_sg_entry *sg; 173 struct octeon_sg_entry *sg;
174
175 u64 sg_dma_ptr;
169}; 176};
170 177
171/** This structure is used by NIC driver to store information required 178/** This structure is used by NIC driver to store information required
@@ -220,8 +227,8 @@ static void octeon_droq_bh(unsigned long pdev)
220 (struct octeon_device_priv *)oct->priv; 227 (struct octeon_device_priv *)oct->priv;
221 228
222 /* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */ 229 /* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
223 for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES; q_no++) { 230 for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
224 if (!(oct->io_qmask.oq & (1UL << q_no))) 231 if (!(oct->io_qmask.oq & (1ULL << q_no)))
225 continue; 232 continue;
226 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], 233 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
227 MAX_PACKET_BUDGET); 234 MAX_PACKET_BUDGET);
@@ -241,8 +248,8 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
241 do { 248 do {
242 pending_pkts = 0; 249 pending_pkts = 0;
243 250
244 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { 251 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
245 if (!(oct->io_qmask.oq & (1UL << i))) 252 if (!(oct->io_qmask.oq & (1ULL << i)))
246 continue; 253 continue;
247 pkt_cnt += octeon_droq_check_hw_for_pkts(oct, 254 pkt_cnt += octeon_droq_check_hw_for_pkts(oct,
248 oct->droq[i]); 255 oct->droq[i]);
@@ -361,7 +368,7 @@ static int wait_for_pending_requests(struct octeon_device *oct)
361 [OCTEON_ORDERED_SC_LIST].pending_req_count); 368 [OCTEON_ORDERED_SC_LIST].pending_req_count);
362 if (pcount) 369 if (pcount)
363 schedule_timeout_uninterruptible(HZ / 10); 370 schedule_timeout_uninterruptible(HZ / 10);
364 else 371 else
365 break; 372 break;
366 } 373 }
367 374
@@ -392,10 +399,10 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
392 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 399 dev_err(&oct->pci_dev->dev, "There were pending requests\n");
393 400
394 /* Force all requests waiting to be fetched by OCTEON to complete. */ 401 /* Force all requests waiting to be fetched by OCTEON to complete. */
395 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { 402 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
396 struct octeon_instr_queue *iq; 403 struct octeon_instr_queue *iq;
397 404
398 if (!(oct->io_qmask.iq & (1UL << i))) 405 if (!(oct->io_qmask.iq & (1ULL << i)))
399 continue; 406 continue;
400 iq = oct->instr_queue[i]; 407 iq = oct->instr_queue[i];
401 408
@@ -405,7 +412,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
405 iq->octeon_read_index = iq->host_write_index; 412 iq->octeon_read_index = iq->host_write_index;
406 iq->stats.instr_processed += 413 iq->stats.instr_processed +=
407 atomic_read(&iq->instr_pending); 414 atomic_read(&iq->instr_pending);
408 lio_process_iq_request_list(oct, iq); 415 lio_process_iq_request_list(oct, iq, 0);
409 spin_unlock_bh(&iq->lock); 416 spin_unlock_bh(&iq->lock);
410 } 417 }
411 } 418 }
@@ -678,12 +685,24 @@ static inline void txqs_start(struct net_device *netdev)
678 */ 685 */
679static inline void txqs_wake(struct net_device *netdev) 686static inline void txqs_wake(struct net_device *netdev)
680{ 687{
688 struct lio *lio = GET_LIO(netdev);
689
681 if (netif_is_multiqueue(netdev)) { 690 if (netif_is_multiqueue(netdev)) {
682 int i; 691 int i;
683 692
684 for (i = 0; i < netdev->num_tx_queues; i++) 693 for (i = 0; i < netdev->num_tx_queues; i++) {
685 netif_wake_subqueue(netdev, i); 694 int qno = lio->linfo.txpciq[i %
695 (lio->linfo.num_txpciq)].s.q_no;
696
697 if (__netif_subqueue_stopped(netdev, i)) {
698 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
699 tx_restart, 1);
700 netif_wake_subqueue(netdev, i);
701 }
702 }
686 } else { 703 } else {
704 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
705 tx_restart, 1);
687 netif_wake_queue(netdev); 706 netif_wake_queue(netdev);
688 } 707 }
689} 708}
@@ -705,7 +724,7 @@ static void start_txq(struct net_device *netdev)
705{ 724{
706 struct lio *lio = GET_LIO(netdev); 725 struct lio *lio = GET_LIO(netdev);
707 726
708 if (lio->linfo.link.s.status) { 727 if (lio->linfo.link.s.link_up) {
709 txqs_start(netdev); 728 txqs_start(netdev);
710 return; 729 return;
711 } 730 }
@@ -752,16 +771,23 @@ static inline int check_txq_status(struct lio *lio)
752 771
753 /* check each sub-queue state */ 772 /* check each sub-queue state */
754 for (q = 0; q < numqs; q++) { 773 for (q = 0; q < numqs; q++) {
755 iq = lio->linfo.txpciq[q & (lio->linfo.num_txpciq - 1)]; 774 iq = lio->linfo.txpciq[q %
775 (lio->linfo.num_txpciq)].s.q_no;
756 if (octnet_iq_is_full(lio->oct_dev, iq)) 776 if (octnet_iq_is_full(lio->oct_dev, iq))
757 continue; 777 continue;
758 wake_q(lio->netdev, q); 778 if (__netif_subqueue_stopped(lio->netdev, q)) {
759 ret_val++; 779 wake_q(lio->netdev, q);
780 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
781 tx_restart, 1);
782 ret_val++;
783 }
760 } 784 }
761 } else { 785 } else {
762 if (octnet_iq_is_full(lio->oct_dev, lio->txq)) 786 if (octnet_iq_is_full(lio->oct_dev, lio->txq))
763 return 0; 787 return 0;
764 wake_q(lio->netdev, lio->txq); 788 wake_q(lio->netdev, lio->txq);
789 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
790 tx_restart, 1);
765 ret_val = 1; 791 ret_val = 1;
766 } 792 }
767 return ret_val; 793 return ret_val;
@@ -787,64 +813,116 @@ static inline struct list_head *list_delete_head(struct list_head *root)
787} 813}
788 814
789/** 815/**
790 * \brief Delete gather list 816 * \brief Delete gather lists
791 * @param lio per-network private data 817 * @param lio per-network private data
792 */ 818 */
793static void delete_glist(struct lio *lio) 819static void delete_glists(struct lio *lio)
794{ 820{
795 struct octnic_gather *g; 821 struct octnic_gather *g;
822 int i;
796 823
797 do { 824 if (!lio->glist)
798 g = (struct octnic_gather *) 825 return;
799 list_delete_head(&lio->glist); 826
800 if (g) { 827 for (i = 0; i < lio->linfo.num_txpciq; i++) {
801 if (g->sg) 828 do {
802 kfree((void *)((unsigned long)g->sg - 829 g = (struct octnic_gather *)
803 g->adjust)); 830 list_delete_head(&lio->glist[i]);
804 kfree(g); 831 if (g) {
805 } 832 if (g->sg) {
806 } while (g); 833 dma_unmap_single(&lio->oct_dev->
834 pci_dev->dev,
835 g->sg_dma_ptr,
836 g->sg_size,
837 DMA_TO_DEVICE);
838 kfree((void *)((unsigned long)g->sg -
839 g->adjust));
840 }
841 kfree(g);
842 }
843 } while (g);
844 }
845
846 kfree((void *)lio->glist);
807} 847}
808 848
809/** 849/**
810 * \brief Setup gather list 850 * \brief Setup gather lists
811 * @param lio per-network private data 851 * @param lio per-network private data
812 */ 852 */
813static int setup_glist(struct lio *lio) 853static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
814{ 854{
815 int i; 855 int i, j;
816 struct octnic_gather *g; 856 struct octnic_gather *g;
817 857
818 INIT_LIST_HEAD(&lio->glist); 858 lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
859 GFP_KERNEL);
860 if (!lio->glist_lock)
861 return 1;
862
863 lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
864 GFP_KERNEL);
865 if (!lio->glist) {
866 kfree((void *)lio->glist_lock);
867 return 1;
868 }
819 869
820 for (i = 0; i < lio->tx_qsize; i++) { 870 for (i = 0; i < num_iqs; i++) {
821 g = kzalloc(sizeof(*g), GFP_KERNEL); 871 int numa_node = cpu_to_node(i % num_online_cpus());
822 if (!g)
823 break;
824 872
825 g->sg_size = 873 spin_lock_init(&lio->glist_lock[i]);
826 ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
827 874
828 g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL); 875 INIT_LIST_HEAD(&lio->glist[i]);
829 if (!g->sg) { 876
830 kfree(g); 877 for (j = 0; j < lio->tx_qsize; j++) {
831 break; 878 g = kzalloc_node(sizeof(*g), GFP_KERNEL,
879 numa_node);
880 if (!g)
881 g = kzalloc(sizeof(*g), GFP_KERNEL);
882 if (!g)
883 break;
884
885 g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
886 OCT_SG_ENTRY_SIZE);
887
888 g->sg = kmalloc_node(g->sg_size + 8,
889 GFP_KERNEL, numa_node);
890 if (!g->sg)
891 g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
892 if (!g->sg) {
893 kfree(g);
894 break;
895 }
896
897 /* The gather component should be aligned on 64-bit
898 * boundary
899 */
900 if (((unsigned long)g->sg) & 7) {
901 g->adjust = 8 - (((unsigned long)g->sg) & 7);
902 g->sg = (struct octeon_sg_entry *)
903 ((unsigned long)g->sg + g->adjust);
904 }
905 g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
906 g->sg, g->sg_size,
907 DMA_TO_DEVICE);
908 if (dma_mapping_error(&oct->pci_dev->dev,
909 g->sg_dma_ptr)) {
910 kfree((void *)((unsigned long)g->sg -
911 g->adjust));
912 kfree(g);
913 break;
914 }
915
916 list_add_tail(&g->list, &lio->glist[i]);
832 } 917 }
833 918
834 /* The gather component should be aligned on 64-bit boundary */ 919 if (j != lio->tx_qsize) {
835 if (((unsigned long)g->sg) & 7) { 920 delete_glists(lio);
836 g->adjust = 8 - (((unsigned long)g->sg) & 7); 921 return 1;
837 g->sg = (struct octeon_sg_entry *)
838 ((unsigned long)g->sg + g->adjust);
839 } 922 }
840 list_add_tail(&g->list, &lio->glist);
841 } 923 }
842 924
843 if (i == lio->tx_qsize) 925 return 0;
844 return 0;
845
846 delete_glist(lio);
847 return 1;
848} 926}
849 927
850/** 928/**
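Note: setup_glists()/delete_glists() above give each scatter/gather table a map-once lifetime: dma_map_single() at allocation, dma_sync_single_for_cpu() when a completion hands the table back to the host (see free_netsgbuf() later in this patch), and dma_unmap_single() only at teardown. Condensed, with error paths elided:

	/* setup, once per gather table */
	g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev, g->sg,
				       g->sg_size, DMA_TO_DEVICE);
	if (dma_mapping_error(&oct->pci_dev->dev, g->sg_dma_ptr))
		goto err;	/* free g->sg and g */

	/* per tx completion: make the table coherent for the CPU again */
	dma_sync_single_for_cpu(&oct->pci_dev->dev, g->sg_dma_ptr,
				g->sg_size, DMA_TO_DEVICE);

	/* teardown, once */
	dma_unmap_single(&oct->pci_dev->dev, g->sg_dma_ptr,
			 g->sg_size, DMA_TO_DEVICE);

The payoff is one IOMMU map/unmap per queue lifetime instead of one per transmitted skb.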
@@ -858,7 +936,7 @@ static void print_link_info(struct net_device *netdev)
858 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) { 936 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
859 struct oct_link_info *linfo = &lio->linfo; 937 struct oct_link_info *linfo = &lio->linfo;
860 938
861 if (linfo->link.s.status) { 939 if (linfo->link.s.link_up) {
862 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", 940 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
863 linfo->link.s.speed, 941 linfo->link.s.speed,
864 (linfo->link.s.duplex) ? "Full" : "Half"); 942 (linfo->link.s.duplex) ? "Full" : "Half");
@@ -880,13 +958,15 @@ static inline void update_link_status(struct net_device *netdev,
880 union oct_link_status *ls) 958 union oct_link_status *ls)
881{ 959{
882 struct lio *lio = GET_LIO(netdev); 960 struct lio *lio = GET_LIO(netdev);
961 int changed = (lio->linfo.link.u64 != ls->u64);
883 962
884 if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { 963 lio->linfo.link.u64 = ls->u64;
885 lio->linfo.link.u64 = ls->u64;
886 964
965 if ((lio->intf_open) && (changed)) {
887 print_link_info(netdev); 966 print_link_info(netdev);
967 lio->link_changes++;
888 968
889 if (lio->linfo.link.s.status) { 969 if (lio->linfo.link.s.link_up) {
890 netif_carrier_on(netdev); 970 netif_carrier_on(netdev);
891 /* start_txq(netdev); */ 971 /* start_txq(netdev); */
892 txqs_wake(netdev); 972 txqs_wake(netdev);
@@ -897,6 +977,42 @@ static inline void update_link_status(struct net_device *netdev,
897 } 977 }
898} 978}
899 979
980/* Runs in interrupt context. */
981static void update_txq_status(struct octeon_device *oct, int iq_num)
982{
983 struct net_device *netdev;
984 struct lio *lio;
985 struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
986
987 /*octeon_update_iq_read_idx(oct, iq);*/
988
989 netdev = oct->props[iq->ifidx].netdev;
990
991 /* This is needed because the first IQ does not have
992 * a netdev associated with it.
993 */
994 if (!netdev)
995 return;
996
997 lio = GET_LIO(netdev);
998 if (netif_is_multiqueue(netdev)) {
999 if (__netif_subqueue_stopped(netdev, iq->q_index) &&
1000 lio->linfo.link.s.link_up &&
1001 (!octnet_iq_is_full(oct, iq_num))) {
1002 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
1003 tx_restart, 1);
1004 netif_wake_subqueue(netdev, iq->q_index);
1005 } else {
1006 if (!octnet_iq_is_full(oct, lio->txq)) {
1007 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
1008 lio->txq,
1009 tx_restart, 1);
1010 wake_q(netdev, lio->txq);
1011 }
1012 }
1013 }
1014}
1015
900/** 1016/**
901 * \brief Droq packet processor scheduler 1017
902 * @param oct octeon device 1018 * @param oct octeon device
@@ -910,8 +1026,9 @@ void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
910 struct octeon_droq *droq; 1026 struct octeon_droq *droq;
911 1027
912 if (oct->int_status & OCT_DEV_INTR_PKT_DATA) { 1028 if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
913 for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) { 1029 for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
914 if (!(oct->droq_intr & (1 << oq_no))) 1030 oq_no++) {
1031 if (!(oct->droq_intr & (1ULL << oq_no)))
915 continue; 1032 continue;
916 1033
917 droq = oct->droq[oq_no]; 1034 droq = oct->droq[oq_no];
@@ -1022,6 +1139,9 @@ static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1022 return -ENOMEM; 1139 return -ENOMEM;
1023 } 1140 }
1024 1141
1142 oct_dev->rx_pause = 1;
1143 oct_dev->tx_pause = 1;
1144
1025 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); 1145 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
1026 1146
1027 return 0; 1147 return 0;
@@ -1087,19 +1207,13 @@ static void octeon_destroy_resources(struct octeon_device *oct)
1087 if (oct->flags & LIO_FLAG_MSI_ENABLED) 1207 if (oct->flags & LIO_FLAG_MSI_ENABLED)
1088 pci_disable_msi(oct->pci_dev); 1208 pci_disable_msi(oct->pci_dev);
1089 1209
1090 /* Soft reset the octeon device before exiting */
1091 oct->fn_list.soft_reset(oct);
1092
1093 /* Disable the device, releasing the PCI INT */
1094 pci_disable_device(oct->pci_dev);
1095
1096 /* fallthrough */ 1210 /* fallthrough */
1097 case OCT_DEV_IN_RESET: 1211 case OCT_DEV_IN_RESET:
1098 case OCT_DEV_DROQ_INIT_DONE: 1212 case OCT_DEV_DROQ_INIT_DONE:
1099 /*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/ 1213 /*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
1100 mdelay(100); 1214 mdelay(100);
1101 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { 1215 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
1102 if (!(oct->io_qmask.oq & (1UL << i))) 1216 if (!(oct->io_qmask.oq & (1ULL << i)))
1103 continue; 1217 continue;
1104 octeon_delete_droq(oct, i); 1218 octeon_delete_droq(oct, i);
1105 } 1219 }
@@ -1126,8 +1240,8 @@ static void octeon_destroy_resources(struct octeon_device *oct)
1126 1240
1127 /* fallthrough */ 1241 /* fallthrough */
1128 case OCT_DEV_INSTR_QUEUE_INIT_DONE: 1242 case OCT_DEV_INSTR_QUEUE_INIT_DONE:
1129 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { 1243 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1130 if (!(oct->io_qmask.iq & (1UL << i))) 1244 if (!(oct->io_qmask.iq & (1ULL << i)))
1131 continue; 1245 continue;
1132 octeon_delete_instr_queue(oct, i); 1246 octeon_delete_instr_queue(oct, i);
1133 } 1247 }
@@ -1139,11 +1253,18 @@ static void octeon_destroy_resources(struct octeon_device *oct)
1139 1253
1140 /* fallthrough */ 1254 /* fallthrough */
1141 case OCT_DEV_PCI_MAP_DONE: 1255 case OCT_DEV_PCI_MAP_DONE:
1256
1257 /* Soft reset the octeon device before exiting */
1258 oct->fn_list.soft_reset(oct);
1259
1142 octeon_unmap_pci_barx(oct, 0); 1260 octeon_unmap_pci_barx(oct, 0);
1143 octeon_unmap_pci_barx(oct, 1); 1261 octeon_unmap_pci_barx(oct, 1);
1144 1262
1145 /* fallthrough */ 1263 /* fallthrough */
1146 case OCT_DEV_BEGIN_STATE: 1264 case OCT_DEV_BEGIN_STATE:
1265 /* Disable the device, releasing the PCI INT */
1266 pci_disable_device(oct->pci_dev);
1267
1147 /* Nothing to be done here either */ 1268 /* Nothing to be done here either */
1148 break; 1269 break;
1149 } /* end switch(oct->status) */ 1270 } /* end switch(oct->status) */
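Note: octeon_destroy_resources() is a staged teardown; each case undoes one init stage and falls through, so a probe that failed partway unwinds from exactly where it stopped. The reordering above runs the soft reset at the OCT_DEV_PCI_MAP_DONE stage, while BAR0/BAR1 are still mapped, and defers pci_disable_device() to the final stage. The idiom, reduced to a sketch:

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_DROQ_INIT_DONE:
		/* delete output queues ... */
		/* fallthrough */
	case OCT_DEV_PCI_MAP_DONE:
		oct->fn_list.soft_reset(oct);	/* needs mapped BARs */
		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);
		/* fallthrough */
	case OCT_DEV_BEGIN_STATE:
		pci_disable_device(oct->pci_dev);	/* release PCI INT last */
		break;
	}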
@@ -1159,18 +1280,15 @@ static void octeon_destroy_resources(struct octeon_device *oct)
1159static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) 1280static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
1160{ 1281{
1161 struct octnic_ctrl_pkt nctrl; 1282 struct octnic_ctrl_pkt nctrl;
1162 struct octnic_ctrl_params nparams;
1163 1283
1164 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1284 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1165 1285
1166 nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL; 1286 nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL;
1167 nctrl.ncmd.s.param1 = lio->linfo.ifidx; 1287 nctrl.ncmd.s.param1 = start_stop;
1168 nctrl.ncmd.s.param2 = start_stop; 1288 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1169 nctrl.netpndev = (u64)lio->netdev; 1289 nctrl.netpndev = (u64)lio->netdev;
1170 1290
1171 nparams.resp_order = OCTEON_RESP_NORESPONSE; 1291 if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl) < 0)
1172
1173 if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams) < 0)
1174 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); 1292 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
1175} 1293}
1176 1294
@@ -1186,6 +1304,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1186{ 1304{
1187 struct net_device *netdev = oct->props[ifidx].netdev; 1305 struct net_device *netdev = oct->props[ifidx].netdev;
1188 struct lio *lio; 1306 struct lio *lio;
1307 struct napi_struct *napi, *n;
1189 1308
1190 if (!netdev) { 1309 if (!netdev) {
1191 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", 1310 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
@@ -1202,13 +1321,22 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1202 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) 1321 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1203 txqs_stop(netdev); 1322 txqs_stop(netdev);
1204 1323
1324 if (oct->props[lio->ifidx].napi_enabled == 1) {
1325 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1326 napi_disable(napi);
1327
1328 oct->props[lio->ifidx].napi_enabled = 0;
1329 }
1330
1205 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) 1331 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1206 unregister_netdev(netdev); 1332 unregister_netdev(netdev);
1207 1333
1208 delete_glist(lio); 1334 delete_glists(lio);
1209 1335
1210 free_netdev(netdev); 1336 free_netdev(netdev);
1211 1337
1338 oct->props[ifidx].gmxport = -1;
1339
1212 oct->props[ifidx].netdev = NULL; 1340 oct->props[ifidx].netdev = NULL;
1213} 1341}
1214 1342
@@ -1227,10 +1355,15 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
1227 return 1; 1355 return 1;
1228 } 1356 }
1229 1357
1358 spin_lock_bh(&oct->cmd_resp_wqlock);
1359 oct->cmd_resp_state = OCT_DRV_OFFLINE;
1360 spin_unlock_bh(&oct->cmd_resp_wqlock);
1361
1230 for (i = 0; i < oct->ifcount; i++) { 1362 for (i = 0; i < oct->ifcount; i++) {
1231 lio = GET_LIO(oct->props[i].netdev); 1363 lio = GET_LIO(oct->props[i].netdev);
1232 for (j = 0; j < lio->linfo.num_rxpciq; j++) 1364 for (j = 0; j < lio->linfo.num_rxpciq; j++)
1233 octeon_unregister_droq_ops(oct, lio->linfo.rxpciq[j]); 1365 octeon_unregister_droq_ops(oct,
1366 lio->linfo.rxpciq[j].s.q_no);
1234 } 1367 }
1235 1368
1236 for (i = 0; i < oct->ifcount; i++) 1369 for (i = 0; i < oct->ifcount; i++)
@@ -1274,6 +1407,7 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
1274{ 1407{
1275 u32 dev_id, rev_id; 1408 u32 dev_id, rev_id;
1276 int ret = 1; 1409 int ret = 1;
1410 char *s;
1277 1411
1278 pci_read_config_dword(oct->pci_dev, 0, &dev_id); 1412 pci_read_config_dword(oct->pci_dev, 0, &dev_id);
1279 pci_read_config_dword(oct->pci_dev, 8, &rev_id); 1413 pci_read_config_dword(oct->pci_dev, 8, &rev_id);
@@ -1283,22 +1417,27 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
1283 case OCTEON_CN68XX_PCIID: 1417 case OCTEON_CN68XX_PCIID:
1284 oct->chip_id = OCTEON_CN68XX; 1418 oct->chip_id = OCTEON_CN68XX;
1285 ret = lio_setup_cn68xx_octeon_device(oct); 1419 ret = lio_setup_cn68xx_octeon_device(oct);
1420 s = "CN68XX";
1286 break; 1421 break;
1287 1422
1288 case OCTEON_CN66XX_PCIID: 1423 case OCTEON_CN66XX_PCIID:
1289 oct->chip_id = OCTEON_CN66XX; 1424 oct->chip_id = OCTEON_CN66XX;
1290 ret = lio_setup_cn66xx_octeon_device(oct); 1425 ret = lio_setup_cn66xx_octeon_device(oct);
1426 s = "CN66XX";
1291 break; 1427 break;
1428
1292 default: 1429 default:
1430 s = "?";
1293 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n", 1431 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
1294 dev_id); 1432 dev_id);
1295 } 1433 }
1296 1434
1297 if (!ret) 1435 if (!ret)
1298 dev_info(&oct->pci_dev->dev, "CN68XX PASS%d.%d %s\n", 1436 dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
1299 OCTEON_MAJOR_REV(oct), 1437 OCTEON_MAJOR_REV(oct),
1300 OCTEON_MINOR_REV(oct), 1438 OCTEON_MINOR_REV(oct),
1301 octeon_get_conf(oct)->card_name); 1439 octeon_get_conf(oct)->card_name,
1440 LIQUIDIO_VERSION);
1302 1441
1303 return ret; 1442 return ret;
1304} 1443}
@@ -1326,6 +1465,16 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
1326 return 0; 1465 return 0;
1327} 1466}
1328 1467
1468static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
1469{
1470 int q = 0;
1471
1472 if (netif_is_multiqueue(lio->netdev))
1473 q = skb->queue_mapping % lio->linfo.num_txpciq;
1474
1475 return q;
1476}
1477
1329/** 1478/**
1330 * \brief Check Tx queue state for a given network buffer 1479 * \brief Check Tx queue state for a given network buffer
1331 * @param lio per-network private data 1480 * @param lio per-network private data
@@ -1337,14 +1486,19 @@ static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
1337 1486
1338 if (netif_is_multiqueue(lio->netdev)) { 1487 if (netif_is_multiqueue(lio->netdev)) {
1339 q = skb->queue_mapping; 1488 q = skb->queue_mapping;
1340 iq = lio->linfo.txpciq[(q & (lio->linfo.num_txpciq - 1))]; 1489 iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
1341 } else { 1490 } else {
1342 iq = lio->txq; 1491 iq = lio->txq;
1492 q = iq;
1343 } 1493 }
1344 1494
1345 if (octnet_iq_is_full(lio->oct_dev, iq)) 1495 if (octnet_iq_is_full(lio->oct_dev, iq))
1346 return 0; 1496 return 0;
1347 wake_q(lio->netdev, q); 1497
1498 if (__netif_subqueue_stopped(lio->netdev, q)) {
1499 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
1500 wake_q(lio->netdev, q);
1501 }
1348 return 1; 1502 return 1;
1349} 1503}
1350 1504
@@ -1367,7 +1521,7 @@ static void free_netbuf(void *buf)
1367 1521
1368 check_txq_state(lio, skb); 1522 check_txq_state(lio, skb);
1369 1523
1370 recv_buffer_free((struct sk_buff *)skb); 1524 tx_buffer_free(skb);
1371} 1525}
1372 1526
1373/** 1527/**
@@ -1380,7 +1534,7 @@ static void free_netsgbuf(void *buf)
1380 struct sk_buff *skb; 1534 struct sk_buff *skb;
1381 struct lio *lio; 1535 struct lio *lio;
1382 struct octnic_gather *g; 1536 struct octnic_gather *g;
1383 int i, frags; 1537 int i, frags, iq;
1384 1538
1385 finfo = (struct octnet_buf_free_info *)buf; 1539 finfo = (struct octnet_buf_free_info *)buf;
1386 skb = finfo->skb; 1540 skb = finfo->skb;
@@ -1402,17 +1556,17 @@ static void free_netsgbuf(void *buf)
1402 i++; 1556 i++;
1403 } 1557 }
1404 1558
1405 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1559 dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
1406 finfo->dptr, g->sg_size, 1560 g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
1407 DMA_TO_DEVICE);
1408 1561
1409 spin_lock(&lio->lock); 1562 iq = skb_iq(lio, skb);
1410 list_add_tail(&g->list, &lio->glist); 1563 spin_lock(&lio->glist_lock[iq]);
1411 spin_unlock(&lio->lock); 1564 list_add_tail(&g->list, &lio->glist[iq]);
1565 spin_unlock(&lio->glist_lock[iq]);
1412 1566
1413 check_txq_state(lio, skb); /* mq support: sub-queue state check */ 1567 check_txq_state(lio, skb); /* mq support: sub-queue state check */
1414 1568
1415 recv_buffer_free((struct sk_buff *)skb); 1569 tx_buffer_free(skb);
1416} 1570}
1417 1571
1418/** 1572/**
@@ -1426,7 +1580,7 @@ static void free_netsgbuf_with_resp(void *buf)
1426 struct sk_buff *skb; 1580 struct sk_buff *skb;
1427 struct lio *lio; 1581 struct lio *lio;
1428 struct octnic_gather *g; 1582 struct octnic_gather *g;
1429 int i, frags; 1583 int i, frags, iq;
1430 1584
1431 sc = (struct octeon_soft_command *)buf; 1585 sc = (struct octeon_soft_command *)buf;
1432 skb = (struct sk_buff *)sc->callback_arg; 1586 skb = (struct sk_buff *)sc->callback_arg;
@@ -1450,13 +1604,14 @@ static void free_netsgbuf_with_resp(void *buf)
1450 i++; 1604 i++;
1451 } 1605 }
1452 1606
1453 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1607 dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
1454 finfo->dptr, g->sg_size, 1608 g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
1455 DMA_TO_DEVICE); 1609
1610 iq = skb_iq(lio, skb);
1456 1611
1457 spin_lock(&lio->lock); 1612 spin_lock(&lio->glist_lock[iq]);
1458 list_add_tail(&g->list, &lio->glist); 1613 list_add_tail(&g->list, &lio->glist[iq]);
1459 spin_unlock(&lio->lock); 1614 spin_unlock(&lio->glist_lock[iq]);
1460 1615
1461 /* Don't free the skb yet */ 1616 /* Don't free the skb yet */
1462 1617
@@ -1657,6 +1812,7 @@ static int load_firmware(struct octeon_device *oct)
1657 if (ret) { 1812 if (ret) {
1658 dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.", 1813 dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.",
1659 fw_name); 1814 fw_name);
1815 release_firmware(fw);
1660 return ret; 1816 return ret;
1661 } 1817 }
1662 1818
@@ -1726,6 +1882,9 @@ static void if_cfg_callback(struct octeon_device *oct,
1726 CVM_CAST64(resp->status)); 1882 CVM_CAST64(resp->status));
1727 ACCESS_ONCE(ctx->cond) = 1; 1883 ACCESS_ONCE(ctx->cond) = 1;
1728 1884
1885 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
1886 resp->cfg_info.liquidio_firmware_version);
1887
1729 /* This barrier is required to be sure that the response has been 1888 /* This barrier is required to be sure that the response has been
1730 * written fully before waking up the handler 1889 * written fully before waking up the handler
1731 */ 1890 */
@@ -1743,14 +1902,13 @@ static void if_cfg_callback(struct octeon_device *oct,
1743static u16 select_q(struct net_device *dev, struct sk_buff *skb, 1902static u16 select_q(struct net_device *dev, struct sk_buff *skb,
1744 void *accel_priv, select_queue_fallback_t fallback) 1903 void *accel_priv, select_queue_fallback_t fallback)
1745{ 1904{
1746 int qindex; 1905 u32 qindex = 0;
1747 struct lio *lio; 1906 struct lio *lio;
1748 1907
1749 lio = GET_LIO(dev); 1908 lio = GET_LIO(dev);
1750 /* select queue on chosen queue_mapping or core */ 1909 qindex = skb_tx_hash(dev, skb);
1751 qindex = skb_rx_queue_recorded(skb) ? 1910
1752 skb_get_rx_queue(skb) : smp_processor_id(); 1911 return (u16)(qindex % (lio->linfo.num_txpciq));
1753 return (u16)(qindex & (lio->linfo.num_txpciq - 1));
1754} 1912}
1755 1913
1756/** Routine to push packets arriving on Octeon interface up to network layer. 1914
@@ -1759,26 +1917,28 @@ static u16 select_q(struct net_device *dev, struct sk_buff *skb,
1759 * @param len - size of total data received. 1917 * @param len - size of total data received.
1760 * @param rh - Control header associated with the packet 1918 * @param rh - Control header associated with the packet
1761 * @param param - additional control data with the packet 1919 * @param param - additional control data with the packet
1920 * @param arg - farg registered in droq_ops
1762 */ 1921 */
1763static void 1922static void
1764liquidio_push_packet(u32 octeon_id, 1923liquidio_push_packet(u32 octeon_id,
1765 void *skbuff, 1924 void *skbuff,
1766 u32 len, 1925 u32 len,
1767 union octeon_rh *rh, 1926 union octeon_rh *rh,
1768 void *param) 1927 void *param,
1928 void *arg)
1769{ 1929{
1770 struct napi_struct *napi = param; 1930 struct napi_struct *napi = param;
1771 struct octeon_device *oct = lio_get_device(octeon_id);
1772 struct sk_buff *skb = (struct sk_buff *)skbuff; 1931 struct sk_buff *skb = (struct sk_buff *)skbuff;
1773 struct skb_shared_hwtstamps *shhwtstamps; 1932 struct skb_shared_hwtstamps *shhwtstamps;
1774 u64 ns; 1933 u64 ns;
1775 struct net_device *netdev = 1934 u16 vtag = 0;
1776 (struct net_device *)oct->props[rh->r_dh.link].netdev; 1935 struct net_device *netdev = (struct net_device *)arg;
1777 struct octeon_droq *droq = container_of(param, struct octeon_droq, 1936 struct octeon_droq *droq = container_of(param, struct octeon_droq,
1778 napi); 1937 napi);
1779 if (netdev) { 1938 if (netdev) {
1780 int packet_was_received; 1939 int packet_was_received;
1781 struct lio *lio = GET_LIO(netdev); 1940 struct lio *lio = GET_LIO(netdev);
1941 struct octeon_device *oct = lio->oct_dev;
1782 1942
1783 /* Do not proceed if the interface is not in RUNNING state. */ 1943 /* Do not proceed if the interface is not in RUNNING state. */
1784 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) { 1944 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
@@ -1789,21 +1949,54 @@ liquidio_push_packet(u32 octeon_id,
1789 1949
1790 skb->dev = netdev; 1950 skb->dev = netdev;
1791 1951
1792 if (rh->r_dh.has_hwtstamp) { 1952 skb_record_rx_queue(skb, droq->q_no);
1793 /* timestamp is included from the hardware at the 1953 if (likely(len > MIN_SKB_SIZE)) {
1794 * beginning of the packet. 1954 struct octeon_skb_page_info *pg_info;
1795 */ 1955 unsigned char *va;
1796 if (ifstate_check(lio, 1956
1797 LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) { 1957 pg_info = ((struct octeon_skb_page_info *)(skb->cb));
1798 /* Nanoseconds are in the first 64-bits 1958 if (pg_info->page) {
1799 * of the packet. 1959 /* For Paged allocation use the frags */
1960 va = page_address(pg_info->page) +
1961 pg_info->page_offset;
1962 memcpy(skb->data, va, MIN_SKB_SIZE);
1963 skb_put(skb, MIN_SKB_SIZE);
1964 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1965 pg_info->page,
1966 pg_info->page_offset +
1967 MIN_SKB_SIZE,
1968 len - MIN_SKB_SIZE,
1969 LIO_RXBUFFER_SZ);
1970 }
1971 } else {
1972 struct octeon_skb_page_info *pg_info =
1973 ((struct octeon_skb_page_info *)(skb->cb));
1974 skb_copy_to_linear_data(skb, page_address(pg_info->page)
1975 + pg_info->page_offset, len);
1976 skb_put(skb, len);
1977 put_page(pg_info->page);
1978 }
1979
1980 if (((oct->chip_id == OCTEON_CN66XX) ||
1981 (oct->chip_id == OCTEON_CN68XX)) &&
1982 ptp_enable) {
1983 if (rh->r_dh.has_hwtstamp) {
1984 /* timestamp is included from the hardware at
1985 * the beginning of the packet.
1800 */ 1986 */
1801 memcpy(&ns, (skb->data), sizeof(ns)); 1987 if (ifstate_check
1802 shhwtstamps = skb_hwtstamps(skb); 1988 (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
1803 shhwtstamps->hwtstamp = 1989 /* Nanoseconds are in the first 64-bits
1804 ns_to_ktime(ns + lio->ptp_adjust); 1990 * of the packet.
1991 */
1992 memcpy(&ns, (skb->data), sizeof(ns));
1993 shhwtstamps = skb_hwtstamps(skb);
1994 shhwtstamps->hwtstamp =
1995 ns_to_ktime(ns +
1996 lio->ptp_adjust);
1997 }
1998 skb_pull(skb, sizeof(ns));
1805 } 1999 }
1806 skb_pull(skb, sizeof(ns));
1807 } 2000 }
1808 2001
1809 skb->protocol = eth_type_trans(skb, skb->dev); 2002 skb->protocol = eth_type_trans(skb, skb->dev);
@@ -1815,6 +2008,16 @@ liquidio_push_packet(u32 octeon_id,
1815 else 2008 else
1816 skb->ip_summed = CHECKSUM_NONE; 2009 skb->ip_summed = CHECKSUM_NONE;
1817 2010
2011 /* inbound VLAN tag */
2012 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
2013 (rh->r_dh.vlan != 0)) {
2014 u16 vid = rh->r_dh.vlan;
2015 u16 priority = rh->r_dh.priority;
2016
2017 vtag = priority << 13 | vid;
2018 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
2019 }
2020
1818 packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP; 2021 packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;
1819 2022
1820 if (packet_was_received) { 2023 if (packet_was_received) {
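Note: the inbound-VLAN hunk above packs the 802.1Q tag control information by hand: PCP (priority) in bits 15-13, DEI in bit 12 (implicitly zero here), VID in bits 11-0. Restated with an explicit, defensive VID mask:

	u16 vid = rh->r_dh.vlan;		/* 12-bit VLAN ID */
	u16 prio = rh->r_dh.priority;		/* 3-bit PCP */
	u16 vtag = (u16)(prio << 13) | (vid & 0x0fff);

	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);

napi_gro_receive() then delivers the skb with the tag already recorded in skb->vlan_tci.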
@@ -1869,39 +2072,6 @@ static void liquidio_napi_drv_callback(void *arg)
1869} 2072}
1870 2073
1871/** 2074/**
1872 * \brief Main NAPI poll function
1873 * @param droq octeon output queue
1874 * @param budget maximum number of items to process
1875 */
1876static int liquidio_napi_do_rx(struct octeon_droq *droq, int budget)
1877{
1878 int work_done;
1879 struct lio *lio = GET_LIO(droq->napi.dev);
1880 struct octeon_device *oct = lio->oct_dev;
1881
1882 work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
1883 POLL_EVENT_PROCESS_PKTS,
1884 budget);
1885 if (work_done < 0) {
1886 netif_info(lio, rx_err, lio->netdev,
1887 "Receive work_done < 0, rxq:%d\n", droq->q_no);
1888 goto octnet_napi_finish;
1889 }
1890
1891 if (work_done > budget)
1892 dev_err(&oct->pci_dev->dev, ">>>> %s work_done: %d budget: %d\n",
1893 __func__, work_done, budget);
1894
1895 return work_done;
1896
1897octnet_napi_finish:
1898 napi_complete(&droq->napi);
1899 octeon_process_droq_poll_cmd(oct, droq->q_no, POLL_EVENT_ENABLE_INTR,
1900 0);
1901 return 0;
1902}
1903
1904/**
1905 * \brief Entry point for NAPI polling 2075 * \brief Entry point for NAPI polling
1906 * @param napi NAPI structure 2076 * @param napi NAPI structure
1907 * @param budget maximum number of items to process 2077 * @param budget maximum number of items to process
@@ -1910,19 +2080,41 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
1910{ 2080{
1911 struct octeon_droq *droq; 2081 struct octeon_droq *droq;
1912 int work_done; 2082 int work_done;
2083 int tx_done = 0, iq_no;
2084 struct octeon_instr_queue *iq;
2085 struct octeon_device *oct;
1913 2086
1914 droq = container_of(napi, struct octeon_droq, napi); 2087 droq = container_of(napi, struct octeon_droq, napi);
2088 oct = droq->oct_dev;
2089 iq_no = droq->q_no;
2090 /* Handle Droq descriptors */
2091 work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
2092 POLL_EVENT_PROCESS_PKTS,
2093 budget);
1915 2094
1916 work_done = liquidio_napi_do_rx(droq, budget); 2095 /* Flush the instruction queue */
2096 iq = oct->instr_queue[iq_no];
2097 if (iq) {
2098 /* Process iq buffers with in the budget limits */
2099 tx_done = octeon_flush_iq(oct, iq, 1, budget);
2100 /* Update iq read-index rather than waiting for next interrupt.
2101 * Return back if tx_done is false.
2102 */
2103 update_txq_status(oct, iq_no);
2104 /*tx_done = (iq->flush_index == iq->octeon_read_index);*/
2105 } else {
2106 dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
2107 __func__, iq_no);
2108 }
1917 2109
1918 if (work_done < budget) { 2110 if ((work_done < budget) && (tx_done)) {
1919 napi_complete(napi); 2111 napi_complete(napi);
1920 octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, 2112 octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
1921 POLL_EVENT_ENABLE_INTR, 0); 2113 POLL_EVENT_ENABLE_INTR, 0);
1922 return 0; 2114 return 0;
1923 } 2115 }
1924 2116
1925 return work_done; 2117 return (!tx_done) ? (budget) : (work_done);
1926} 2118}
1927 2119
1928/** 2120/**
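Note: the reworked liquidio_napi_poll() above couples rx and tx service in one poll: NAPI completes (and device interrupts are re-enabled) only when rx work stayed under budget and octeon_flush_iq() reported the instruction queue drained; otherwise the full budget is returned so the core keeps polling. The control flow, with hypothetical process_rx()/flush_tx() helpers standing in for the driver calls:

	static int example_napi_poll(struct napi_struct *napi, int budget)
	{
		int rx_done = process_rx(napi, budget);	/* <= budget packets */
		bool tx_done = flush_tx(napi);		/* true once IQ drained */

		if (rx_done < budget && tx_done) {
			napi_complete(napi);
			reenable_interrupts(napi);	/* hypothetical */
			return rx_done;
		}
		return tx_done ? rx_done : budget;	/* poll again */
	}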
@@ -1935,10 +2127,10 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
1935 * are for ingress packets. 2127 * are for ingress packets.
1936 */ 2128 */
1937static inline int setup_io_queues(struct octeon_device *octeon_dev, 2129static inline int setup_io_queues(struct octeon_device *octeon_dev,
1938 struct net_device *net_device) 2130 int ifidx)
1939{ 2131{
1940 static int first_time = 1; 2132 struct octeon_droq_ops droq_ops;
1941 static struct octeon_droq_ops droq_ops; 2133 struct net_device *netdev;
1942 static int cpu_id; 2134 static int cpu_id;
1943 static int cpu_id_modulus; 2135 static int cpu_id_modulus;
1944 struct octeon_droq *droq; 2136 struct octeon_droq *droq;
@@ -1947,23 +2139,26 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
1947 struct lio *lio; 2139 struct lio *lio;
1948 int num_tx_descs; 2140 int num_tx_descs;
1949 2141
1950 lio = GET_LIO(net_device); 2142 netdev = octeon_dev->props[ifidx].netdev;
1951 if (first_time) { 2143
1952 first_time = 0; 2144 lio = GET_LIO(netdev);
1953 memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
1954 2145
1955 droq_ops.fptr = liquidio_push_packet; 2146 memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
1956 2147
1957 droq_ops.poll_mode = 1; 2148 droq_ops.fptr = liquidio_push_packet;
1958 droq_ops.napi_fn = liquidio_napi_drv_callback; 2149 droq_ops.farg = (void *)netdev;
1959 cpu_id = 0; 2150
1960 cpu_id_modulus = num_present_cpus(); 2151 droq_ops.poll_mode = 1;
1961 } 2152 droq_ops.napi_fn = liquidio_napi_drv_callback;
2153 cpu_id = 0;
2154 cpu_id_modulus = num_present_cpus();
1962 2155
1963 /* set up DROQs. */ 2156 /* set up DROQs. */
1964 for (q = 0; q < lio->linfo.num_rxpciq; q++) { 2157 for (q = 0; q < lio->linfo.num_rxpciq; q++) {
1965 q_no = lio->linfo.rxpciq[q]; 2158 q_no = lio->linfo.rxpciq[q].s.q_no;
1966 2159 dev_dbg(&octeon_dev->pci_dev->dev,
2160 "setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
2161 q, q_no);
1967 retval = octeon_setup_droq(octeon_dev, q_no, 2162 retval = octeon_setup_droq(octeon_dev, q_no,
1968 CFG_GET_NUM_RX_DESCS_NIC_IF 2163 CFG_GET_NUM_RX_DESCS_NIC_IF
1969 (octeon_get_conf(octeon_dev), 2164 (octeon_get_conf(octeon_dev),
@@ -1980,7 +2175,11 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
1980 2175
1981 droq = octeon_dev->droq[q_no]; 2176 droq = octeon_dev->droq[q_no];
1982 napi = &droq->napi; 2177 napi = &droq->napi;
1983 netif_napi_add(net_device, napi, liquidio_napi_poll, 64); 2178 dev_dbg(&octeon_dev->pci_dev->dev,
2179 "netif_napi_add netdev:%llx oct:%llx\n",
2180 (u64)netdev,
2181 (u64)octeon_dev);
2182 netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
1984 2183
1985 /* designate a CPU for this droq */ 2184 /* designate a CPU for this droq */
1986 droq->cpu_id = cpu_id; 2185 droq->cpu_id = cpu_id;
@@ -1996,9 +2195,9 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
1996 num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf 2195 num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
1997 (octeon_dev), 2196 (octeon_dev),
1998 lio->ifidx); 2197 lio->ifidx);
1999 retval = octeon_setup_iq(octeon_dev, lio->linfo.txpciq[q], 2198 retval = octeon_setup_iq(octeon_dev, ifidx, q,
2000 num_tx_descs, 2199 lio->linfo.txpciq[q], num_tx_descs,
2001 netdev_get_tx_queue(net_device, q)); 2200 netdev_get_tx_queue(netdev, q));
2002 if (retval) { 2201 if (retval) {
2003 dev_err(&octeon_dev->pci_dev->dev, 2202 dev_err(&octeon_dev->pci_dev->dev,
2004 " %s : Runtime IQ(TxQ) creation failed.\n", 2203 " %s : Runtime IQ(TxQ) creation failed.\n",
@@ -2036,7 +2235,8 @@ static inline void setup_tx_poll_fn(struct net_device *netdev)
2036 struct lio *lio = GET_LIO(netdev); 2235 struct lio *lio = GET_LIO(netdev);
2037 struct octeon_device *oct = lio->oct_dev; 2236 struct octeon_device *oct = lio->oct_dev;
2038 2237
2039 lio->txq_status_wq.wq = create_workqueue("txq-status"); 2238 lio->txq_status_wq.wq = alloc_workqueue("txq-status",
2239 WQ_MEM_RECLAIM, 0);
2040 if (!lio->txq_status_wq.wq) { 2240 if (!lio->txq_status_wq.wq) {
2041 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n"); 2241 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
2042 return; 2242 return;
@@ -2048,6 +2248,14 @@ static inline void setup_tx_poll_fn(struct net_device *netdev)
2048 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); 2248 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
2049} 2249}
2050 2250
2251static inline void cleanup_tx_poll_fn(struct net_device *netdev)
2252{
2253 struct lio *lio = GET_LIO(netdev);
2254
2255 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
2256 destroy_workqueue(lio->txq_status_wq.wq);
2257}
2258
2051/** 2259/**
2052 * \brief Net device open for LiquidIO 2260 * \brief Net device open for LiquidIO
2053 * @param netdev network device 2261 * @param netdev network device
@@ -2058,17 +2266,22 @@ static int liquidio_open(struct net_device *netdev)
2058 struct octeon_device *oct = lio->oct_dev; 2266 struct octeon_device *oct = lio->oct_dev;
2059 struct napi_struct *napi, *n; 2267 struct napi_struct *napi, *n;
2060 2268
2061 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 2269 if (oct->props[lio->ifidx].napi_enabled == 0) {
2062 napi_enable(napi); 2270 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
2271 napi_enable(napi);
2272
2273 oct->props[lio->ifidx].napi_enabled = 1;
2274 }
2063 2275
2064 oct_ptp_open(netdev); 2276 oct_ptp_open(netdev);
2065 2277
2066 ifstate_set(lio, LIO_IFSTATE_RUNNING); 2278 ifstate_set(lio, LIO_IFSTATE_RUNNING);
2279
2067 setup_tx_poll_fn(netdev); 2280 setup_tx_poll_fn(netdev);
2281
2068 start_txq(netdev); 2282 start_txq(netdev);
2069 2283
2070 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); 2284 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
2071 try_module_get(THIS_MODULE);
2072 2285
2073 /* tell Octeon to start forwarding packets to host */ 2286 /* tell Octeon to start forwarding packets to host */
2074 send_rx_ctrl_cmd(lio, 1); 2287 send_rx_ctrl_cmd(lio, 1);
@@ -2088,39 +2301,35 @@ static int liquidio_open(struct net_device *netdev)
2088 */ 2301 */
2089static int liquidio_stop(struct net_device *netdev) 2302static int liquidio_stop(struct net_device *netdev)
2090{ 2303{
2091 struct napi_struct *napi, *n;
2092 struct lio *lio = GET_LIO(netdev); 2304 struct lio *lio = GET_LIO(netdev);
2093 struct octeon_device *oct = lio->oct_dev; 2305 struct octeon_device *oct = lio->oct_dev;
2094 2306
2095 netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); 2307 ifstate_reset(lio, LIO_IFSTATE_RUNNING);
2308
2309 netif_tx_disable(netdev);
2310
2096 /* Inform that netif carrier is down */ 2311 /* Inform that netif carrier is down */
2312 netif_carrier_off(netdev);
2097 lio->intf_open = 0; 2313 lio->intf_open = 0;
2098 lio->linfo.link.s.status = 0; 2314 lio->linfo.link.s.link_up = 0;
2315 lio->link_changes++;
2099 2316
2100 netif_carrier_off(netdev); 2317 /* Pause for a moment and wait for Octeon to flush out (to the wire) any
2318 * egress packets that are in-flight.
2319 */
2320 set_current_state(TASK_INTERRUPTIBLE);
2321 schedule_timeout(msecs_to_jiffies(100));
2101 2322
2102 /* tell Octeon to stop forwarding packets to host */ 2323 /* Now it should be safe to tell Octeon that nic interface is down. */
2103 send_rx_ctrl_cmd(lio, 0); 2324 send_rx_ctrl_cmd(lio, 0);
2104 2325
2105 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work); 2326 cleanup_tx_poll_fn(netdev);
2106 flush_workqueue(lio->txq_status_wq.wq);
2107 destroy_workqueue(lio->txq_status_wq.wq);
2108 2327
2109 if (lio->ptp_clock) { 2328 if (lio->ptp_clock) {
2110 ptp_clock_unregister(lio->ptp_clock); 2329 ptp_clock_unregister(lio->ptp_clock);
2111 lio->ptp_clock = NULL; 2330 lio->ptp_clock = NULL;
2112 } 2331 }
2113 2332
2114 ifstate_reset(lio, LIO_IFSTATE_RUNNING);
2115
2116 /* This is a hack that allows DHCP to continue working. */
2117 set_bit(__LINK_STATE_START, &lio->netdev->state);
2118
2119 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
2120 napi_disable(napi);
2121
2122 txqs_stop(netdev);
2123
2124 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); 2333 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
2125 module_put(THIS_MODULE); 2334 module_put(THIS_MODULE);
2126 2335
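Note: the 100 ms drain added to liquidio_stop() is the open-coded form of a standard helper; assuming an interruptible sleep is acceptable there, it is equivalent to

	schedule_timeout_interruptible(msecs_to_jiffies(100));

Disabling the tx queues first and only then sending the OCTNET_CMD_RX_CTL stop gives in-flight egress packets time to reach the wire before the firmware is told the interface is down.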
@@ -2181,12 +2390,31 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
2181 netdev->name); 2390 netdev->name);
2182 break; 2391 break;
2183 2392
2393 case OCTNET_CMD_ENABLE_VLAN_FILTER:
2394 dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
2395 netdev->name);
2396 break;
2397
2398 case OCTNET_CMD_ADD_VLAN_FILTER:
2399 dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
2400 netdev->name, nctrl->ncmd.s.param1);
2401 break;
2402
2403 case OCTNET_CMD_DEL_VLAN_FILTER:
2404 dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
2405 netdev->name, nctrl->ncmd.s.param1);
2406 break;
2407
2184 case OCTNET_CMD_SET_SETTINGS: 2408 case OCTNET_CMD_SET_SETTINGS:
2185 dev_info(&oct->pci_dev->dev, "%s settings changed\n", 2409 dev_info(&oct->pci_dev->dev, "%s settings changed\n",
2186 netdev->name); 2410 netdev->name);
2187 2411
2188 break; 2412 break;
2189 2413
2414 case OCTNET_CMD_SET_FLOW_CTL:
2415 netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
2416 break;
2417
2190 default: 2418 default:
2191 dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__, 2419 dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
2192 nctrl->ncmd.s.cmd); 2420 nctrl->ncmd.s.cmd);
@@ -2235,7 +2463,6 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
2235 struct lio *lio = GET_LIO(netdev); 2463 struct lio *lio = GET_LIO(netdev);
2236 struct octeon_device *oct = lio->oct_dev; 2464 struct octeon_device *oct = lio->oct_dev;
2237 struct octnic_ctrl_pkt nctrl; 2465 struct octnic_ctrl_pkt nctrl;
2238 struct octnic_ctrl_params nparams;
2239 struct netdev_hw_addr *ha; 2466 struct netdev_hw_addr *ha;
2240 u64 *mc; 2467 u64 *mc;
2241 int ret, i; 2468 int ret, i;
@@ -2246,10 +2473,10 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
2246 /* Create a ctrl pkt command to be sent to core app. */ 2473 /* Create a ctrl pkt command to be sent to core app. */
2247 nctrl.ncmd.u64 = 0; 2474 nctrl.ncmd.u64 = 0;
2248 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 2475 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
2249 nctrl.ncmd.s.param1 = lio->linfo.ifidx; 2476 nctrl.ncmd.s.param1 = get_new_flags(netdev);
2250 nctrl.ncmd.s.param2 = get_new_flags(netdev); 2477 nctrl.ncmd.s.param2 = mc_count;
2251 nctrl.ncmd.s.param3 = mc_count;
2252 nctrl.ncmd.s.more = mc_count; 2478 nctrl.ncmd.s.more = mc_count;
2479 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2253 nctrl.netpndev = (u64)netdev; 2480 nctrl.netpndev = (u64)netdev;
2254 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2481 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2255 2482
@@ -2270,9 +2497,7 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
2270 */ 2497 */
2271 nctrl.wait_time = 0; 2498 nctrl.wait_time = 0;
2272 2499
2273 nparams.resp_order = OCTEON_RESP_NORESPONSE; 2500 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2274
2275 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
2276 if (ret < 0) { 2501 if (ret < 0) {
2277 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", 2502 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2278 ret); 2503 ret);
@@ -2290,19 +2515,17 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
 	struct octeon_device *oct = lio->oct_dev;
 	struct sockaddr *addr = (struct sockaddr *)p;
 	struct octnic_ctrl_pkt nctrl;
-	struct octnic_ctrl_params nparams;
 
-	if ((!is_valid_ether_addr(addr->sa_data)) ||
-	    (ifstate_check(lio, LIO_IFSTATE_RUNNING)))
+	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
 
 	nctrl.ncmd.u64 = 0;
 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
-	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
-	nctrl.ncmd.s.param2 = 0;
+	nctrl.ncmd.s.param1 = 0;
 	nctrl.ncmd.s.more = 1;
+	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 	nctrl.netpndev = (u64)netdev;
 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 	nctrl.wait_time = 100;
@@ -2311,9 +2534,7 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
 	/* The MAC Address is presented in network byte order. */
 	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
 
-	nparams.resp_order = OCTEON_RESP_ORDERED;
-
-	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 	if (ret < 0) {
 		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
 		return -ENOMEM;
@@ -2341,7 +2562,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
 	oct = lio->oct_dev;
 
 	for (i = 0; i < lio->linfo.num_txpciq; i++) {
-		iq_no = lio->linfo.txpciq[i];
+		iq_no = lio->linfo.txpciq[i].s.q_no;
 		iq_stats = &oct->instr_queue[iq_no]->stats;
 		pkts += iq_stats->tx_done;
 		drop += iq_stats->tx_dropped;
@@ -2357,7 +2578,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
 	bytes = 0;
 
 	for (i = 0; i < lio->linfo.num_rxpciq; i++) {
-		oq_no = lio->linfo.rxpciq[i];
+		oq_no = lio->linfo.rxpciq[i].s.q_no;
 		oq_stats = &oct->droq[oq_no]->stats;
 		pkts += oq_stats->rx_pkts_received;
 		drop += (oq_stats->rx_dropped +
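The `.s.q_no` accessors above reflect the new per-queue descriptors this patch introduces in liquidio_common.h further down: each txpciq/rxpciq entry becomes a 64-bit word with decoded bitfields rather than a bare u8 queue number. A minimal host-side sketch of that union-overlay pattern (field names and values here are illustrative, not the driver's exact layout):

    #include <stdint.h>
    #include <stdio.h>

    /* The raw 64-bit word from firmware and its decoded fields share
     * storage, so the host copies the u64 and then reads fields.
     */
    union txpciq_sketch {
    	uint64_t u64;
    	struct {
    		uint64_t q_no:8;      /* hardware queue number */
    		uint64_t port:8;      /* output port */
    		uint64_t reserved:48;
    	} s;
    };

    int main(void)
    {
    	union txpciq_sketch q;

    	q.u64 = 0;      /* raw word as it would arrive from firmware */
    	q.s.q_no = 5;
    	q.s.port = 2;
    	printf("queue %u on port %u (raw 0x%llx)\n",
    	       (unsigned)q.s.q_no, (unsigned)q.s.port,
    	       (unsigned long long)q.u64);
    	return 0;
    }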
@@ -2383,7 +2604,6 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
 	struct lio *lio = GET_LIO(netdev);
 	struct octeon_device *oct = lio->oct_dev;
 	struct octnic_ctrl_pkt nctrl;
-	struct octnic_ctrl_params nparams;
 	int max_frm_size = new_mtu + OCTNET_FRM_HEADER_SIZE;
 	int ret = 0;
 
@@ -2403,15 +2623,13 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
 
 	nctrl.ncmd.u64 = 0;
 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
-	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
-	nctrl.ncmd.s.param2 = new_mtu;
+	nctrl.ncmd.s.param1 = new_mtu;
+	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 	nctrl.wait_time = 100;
 	nctrl.netpndev = (u64)netdev;
 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 
-	nparams.resp_order = OCTEON_RESP_ORDERED;
-
-	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 	if (ret < 0) {
 		dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
 		return -1;
@@ -2536,7 +2754,7 @@ static void handle_timestamp(struct octeon_device *oct,
 	}
 
 	octeon_free_soft_command(oct, sc);
-	recv_buffer_free(skb);
+	tx_buffer_free(skb);
 }
 
 /** \brief Send a data packet that will be timestamped
@@ -2551,10 +2769,9 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
 {
 	int retval;
 	struct octeon_soft_command *sc;
-	struct octeon_instr_ih *ih;
-	struct octeon_instr_rdp *rdp;
 	struct lio *lio;
 	int ring_doorbell;
+	u32 len;
 
 	lio = finfo->lio;
 
@@ -2576,14 +2793,13 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
 	sc->callback_arg = finfo->skb;
 	sc->iq_no = ndata->q_no;
 
-	ih = (struct octeon_instr_ih *)&sc->cmd.ih;
-	rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+	len = (u32)((struct octeon_instr_ih2 *)(&sc->cmd.cmd2.ih2))->dlengsz;
 
 	ring_doorbell = !xmit_more;
 	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
-				     sc, ih->dlengsz, ndata->reqtype);
+				     sc, len, ndata->reqtype);
 
-	if (retval) {
+	if (retval == IQ_SEND_FAILED) {
 		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
 			retval);
 		octeon_free_soft_command(oct, sc);
@@ -2594,68 +2810,6 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
 	return retval;
 }
 
-static inline int is_ipv4(struct sk_buff *skb)
-{
-	return (skb->protocol == htons(ETH_P_IP)) &&
-	       (ip_hdr(skb)->version == 4);
-}
-
-static inline int is_vlan(struct sk_buff *skb)
-{
-	return skb->protocol == htons(ETH_P_8021Q);
-}
-
-static inline int is_ip_fragmented(struct sk_buff *skb)
-{
-	/* The Don't fragment and Reserved flag fields are ignored.
-	 * IP is fragmented if
-	 * - the More fragments bit is set (indicating this IP is a fragment
-	 * with more to follow; the current offset could be 0 ).
-	 * - ths offset field is non-zero.
-	 */
-	return (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ? 1 : 0;
-}
-
-static inline int is_ipv6(struct sk_buff *skb)
-{
-	return (skb->protocol == htons(ETH_P_IPV6)) &&
-	       (ipv6_hdr(skb)->version == 6);
-}
-
-static inline int is_with_extn_hdr(struct sk_buff *skb)
-{
-	return (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP) &&
-	       (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP);
-}
-
-static inline int is_tcpudp(struct sk_buff *skb)
-{
-	return (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
-	       (ip_hdr(skb)->protocol == IPPROTO_UDP);
-}
-
-static inline u32 get_ipv4_5tuple_tag(struct sk_buff *skb)
-{
-	u32 tag;
-	struct iphdr *iphdr = ip_hdr(skb);
-
-	tag = crc32(0, &iphdr->protocol, 1);
-	tag = crc32(tag, (u8 *)&iphdr->saddr, 8);
-	tag = crc32(tag, skb_transport_header(skb), 4);
-	return tag;
-}
-
-static inline u32 get_ipv6_5tuple_tag(struct sk_buff *skb)
-{
-	u32 tag;
-	struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
-
-	tag = crc32(0, &ipv6hdr->nexthdr, 1);
-	tag = crc32(tag, (u8 *)&ipv6hdr->saddr, 32);
-	tag = crc32(tag, skb_transport_header(skb), 4);
-	return tag;
-}
-
 /** \brief Transmit networks packets to the Octeon interface
  * @param skbuff skbuff struct to be passed to network layer.
  * @param netdev pointer to network device
@@ -2670,18 +2824,22 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 	struct octnic_data_pkt ndata;
 	struct octeon_device *oct;
 	struct oct_iq_stats *stats;
-	int cpu = 0, status = 0;
+	struct octeon_instr_irh *irh;
+	union tx_info *tx_info;
+	int status = 0;
 	int q_idx = 0, iq_no = 0;
-	int xmit_more;
+	int xmit_more, j;
+	u64 dptr = 0;
 	u32 tag = 0;
 
 	lio = GET_LIO(netdev);
 	oct = lio->oct_dev;
 
 	if (netif_is_multiqueue(netdev)) {
-		cpu = skb->queue_mapping;
-		q_idx = (cpu & (lio->linfo.num_txpciq - 1));
-		iq_no = lio->linfo.txpciq[q_idx];
+		q_idx = skb->queue_mapping;
+		q_idx = (q_idx % (lio->linfo.num_txpciq));
+		tag = q_idx;
+		iq_no = lio->linfo.txpciq[q_idx].s.q_no;
 	} else {
 		iq_no = lio->txq;
 	}
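The hunk above also changes how the stack's queue_mapping is reduced onto the driver's queues: the old `cpu & (num_txpciq - 1)` is a correct reduction only when the queue count is a power of two (for other counts some queues can never be selected), while `%` covers every queue. A standalone illustration (hypothetical helper, not driver code):

    #include <assert.h>

    /* Map an arbitrary queue_mapping onto one of n queues.  Masking
     * with (n - 1) only works when n is a power of two: with n = 6,
     * x & 5 can never yield 2 or 3, so two queues go unused.  Modulo
     * distributes over all n queues for any n >= 1.
     */
    static unsigned int pick_queue(unsigned int queue_mapping, unsigned int n)
    {
    	return queue_mapping % n;
    }

    int main(void)
    {
    	assert(pick_queue(7, 6) == 1);   /* (7 & 5) would give 5 */
    	assert(pick_queue(5, 6) == 5);
    	return 0;
    }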
@@ -2692,11 +2850,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 	 * transmitted.
 	 */
 	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
-	    (!lio->linfo.link.s.status) ||
+	    (!lio->linfo.link.s.link_up) ||
 	    (skb->len <= 0)) {
 		netif_info(lio, tx_err, lio->netdev,
 			   "Transmit failed link_status : %d\n",
-			   lio->linfo.link.s.status);
+			   lio->linfo.link.s.link_up);
 		goto lio_xmit_failed;
 	}
 
@@ -2739,53 +2897,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 	ndata.datasize = skb->len;
 
 	cmdsetup.u64 = 0;
-	cmdsetup.s.ifidx = lio->linfo.ifidx;
-
-	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		if (is_ipv4(skb) && !is_ip_fragmented(skb) && is_tcpudp(skb)) {
-			tag = get_ipv4_5tuple_tag(skb);
-
-			cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1;
-
-			if (ip_hdr(skb)->ihl > 5)
-				cmdsetup.s.ipv4opts_ipv6exthdr =
-					OCT_PKT_PARAM_IPV4OPTS;
-
-		} else if (is_ipv6(skb)) {
-			tag = get_ipv6_5tuple_tag(skb);
-
-			cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1;
-
-			if (is_with_extn_hdr(skb))
-				cmdsetup.s.ipv4opts_ipv6exthdr =
-					OCT_PKT_PARAM_IPV6EXTHDR;
-
-		} else if (is_vlan(skb)) {
-			if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto
-				== htons(ETH_P_IP) &&
-				!is_ip_fragmented(skb) && is_tcpudp(skb)) {
-				tag = get_ipv4_5tuple_tag(skb);
-
-				cmdsetup.s.cksum_offset =
-					sizeof(struct vlan_ethhdr) + 1;
-
-				if (ip_hdr(skb)->ihl > 5)
-					cmdsetup.s.ipv4opts_ipv6exthdr =
-						OCT_PKT_PARAM_IPV4OPTS;
-
-			} else if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto
-				== htons(ETH_P_IPV6)) {
-				tag = get_ipv6_5tuple_tag(skb);
-
-				cmdsetup.s.cksum_offset =
-					sizeof(struct vlan_ethhdr) + 1;
-
-				if (is_with_extn_hdr(skb))
-					cmdsetup.s.ipv4opts_ipv6exthdr =
-						OCT_PKT_PARAM_IPV6EXTHDR;
-			}
-		}
-	}
+	cmdsetup.s.iq_no = iq_no;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		cmdsetup.s.transport_csum = 1;
+
 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 		cmdsetup.s.timestamp = 1;
@@ -2793,20 +2909,20 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	if (skb_shinfo(skb)->nr_frags == 0) {
 		cmdsetup.s.u.datasize = skb->len;
-		octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag);
+		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
 		/* Offload checksum calculation for TCP/UDP packets */
-		ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev,
-						skb->data,
-						skb->len,
-						DMA_TO_DEVICE);
-		if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) {
+		dptr = dma_map_single(&oct->pci_dev->dev,
+				      skb->data,
+				      skb->len,
+				      DMA_TO_DEVICE);
+		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
 				__func__);
 			return NETDEV_TX_BUSY;
 		}
 
-		finfo->dptr = ndata.cmd.dptr;
-
+		ndata.cmd.cmd2.dptr = dptr;
+		finfo->dptr = dptr;
 		ndata.reqtype = REQTYPE_NORESP_NET;
 
 	} else {
@@ -2814,9 +2930,10 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 		struct skb_frag_struct *frag;
 		struct octnic_gather *g;
 
-		spin_lock(&lio->lock);
-		g = (struct octnic_gather *)list_delete_head(&lio->glist);
-		spin_unlock(&lio->lock);
+		spin_lock(&lio->glist_lock[q_idx]);
+		g = (struct octnic_gather *)
+			list_delete_head(&lio->glist[q_idx]);
+		spin_unlock(&lio->glist_lock[q_idx]);
 
 		if (!g) {
 			netif_info(lio, tx_err, lio->netdev,
@@ -2826,7 +2943,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 		cmdsetup.s.gather = 1;
 		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
-		octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag);
+		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
 
 		memset(g->sg, 0, g->sg_size);
 
@@ -2853,36 +2970,52 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 					     frag->size,
 					     DMA_TO_DEVICE);
 
+			if (dma_mapping_error(&oct->pci_dev->dev,
+					      g->sg[i >> 2].ptr[i & 3])) {
+				dma_unmap_single(&oct->pci_dev->dev,
+						 g->sg[0].ptr[0],
+						 skb->len - skb->data_len,
+						 DMA_TO_DEVICE);
+				for (j = 1; j < i; j++) {
+					frag = &skb_shinfo(skb)->frags[j - 1];
+					dma_unmap_page(&oct->pci_dev->dev,
+						       g->sg[j >> 2].ptr[j & 3],
+						       frag->size,
+						       DMA_TO_DEVICE);
+				}
+				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
+					__func__);
+				return NETDEV_TX_BUSY;
+			}
+
 			add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
 			i++;
 		}
 
-		ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev,
-						g->sg, g->sg_size,
-						DMA_TO_DEVICE);
-		if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) {
-			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
-				__func__);
-			dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0],
-					 skb->len - skb->data_len,
-					 DMA_TO_DEVICE);
-			return NETDEV_TX_BUSY;
-		}
+		dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
+					   g->sg_size, DMA_TO_DEVICE);
+		dptr = g->sg_dma_ptr;
 
-		finfo->dptr = ndata.cmd.dptr;
+		ndata.cmd.cmd2.dptr = dptr;
+		finfo->dptr = dptr;
 		finfo->g = g;
 
 		ndata.reqtype = REQTYPE_NORESP_NET_SG;
 	}
 
-	if (skb_shinfo(skb)->gso_size) {
-		struct octeon_instr_irh *irh =
-			(struct octeon_instr_irh *)&ndata.cmd.irh;
-		union tx_info *tx_info = (union tx_info *)&ndata.cmd.ossp[0];
+	irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
+	tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
 
-		irh->len = 1; /* to indicate that ossp[0] contains tx_info */
+	if (skb_shinfo(skb)->gso_size) {
 		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
 		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
+		stats->tx_gso++;
+	}
+
+	/* HW insert VLAN tag */
+	if (skb_vlan_tag_present(skb)) {
+		irh->priority = skb_vlan_tag_get(skb) >> 13;
+		irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
 	}
 
 	xmit_more = skb->xmit_more;
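The new gather-path error handling above follows the usual DMA unwind discipline: when mapping fragment i fails, everything mapped so far is released in reverse order, the linear part via dma_unmap_single() and fragments 1..i-1 via dma_unmap_page(). A condensed, runnable sketch of the pattern, with toy stand-ins for the DMA API:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-ins for dma_map_single()/dma_map_page(): mapping
     * fragment FAIL_AT fails, everything else succeeds.  mapped[]
     * tracks state so the unwind can be verified.
     */
    #define NFRAGS  4
    #define FAIL_AT 2

    static bool mapped_linear;
    static bool mapped[NFRAGS];

    static bool map_frag(int i)
    {
    	if (i == FAIL_AT)
    		return false;
    	mapped[i] = true;
    	return true;
    }

    static int map_all(void)
    {
    	int i, j;

    	mapped_linear = true;          /* dma_map_single() succeeded */

    	for (i = 0; i < NFRAGS; i++) {
    		if (!map_frag(i)) {
    			/* Unwind in reverse: fragments first ... */
    			for (j = i - 1; j >= 0; j--)
    				mapped[j] = false;
    			/* ... then the linear mapping. */
    			mapped_linear = false;
    			return -1;
    		}
    	}
    	return 0;
    }

    int main(void)
    {
    	int i;

    	if (map_all() < 0)
    		printf("mapping failed, unwound cleanly\n");
    	for (i = 0; i < NFRAGS; i++)
    		if (mapped[i] || mapped_linear)
    			printf("leak at %d!\n", i);
    	return 0;
    }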
@@ -2901,7 +3034,10 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	netif_trans_update(netdev);
 
-	stats->tx_done++;
+	if (skb_shinfo(skb)->gso_size)
+		stats->tx_done += skb_shinfo(skb)->gso_segs;
+	else
+		stats->tx_done++;
 	stats->tx_tot_bytes += skb->len;
 
 	return NETDEV_TX_OK;
@@ -2910,9 +3046,10 @@ lio_xmit_failed:
 	stats->tx_dropped++;
 	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
 		   iq_no, stats->tx_dropped);
-	dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
-			 ndata.datasize, DMA_TO_DEVICE);
-	recv_buffer_free(skb);
+	if (dptr)
+		dma_unmap_single(&oct->pci_dev->dev, dptr,
+				 ndata.datasize, DMA_TO_DEVICE);
+	tx_buffer_free(skb);
 	return NETDEV_TX_OK;
 }
 
@@ -2932,27 +3069,79 @@ static void liquidio_tx_timeout(struct net_device *netdev)
 	txqs_wake(netdev);
 }
 
-int liquidio_set_feature(struct net_device *netdev, int cmd)
+static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
+				    __be16 proto __attribute__((unused)),
+				    u16 vid)
 {
 	struct lio *lio = GET_LIO(netdev);
 	struct octeon_device *oct = lio->oct_dev;
 	struct octnic_ctrl_pkt nctrl;
-	struct octnic_ctrl_params nparams;
 	int ret = 0;
 
 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
 
 	nctrl.ncmd.u64 = 0;
-	nctrl.ncmd.s.cmd = cmd;
-	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
-	nctrl.ncmd.s.param2 = OCTNIC_LROIPV4 | OCTNIC_LROIPV6;
+	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
+	nctrl.ncmd.s.param1 = vid;
+	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+	nctrl.wait_time = 100;
+	nctrl.netpndev = (u64)netdev;
+	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+	if (ret < 0) {
+		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+			ret);
+	}
+
+	return ret;
+}
+
+static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
+				     __be16 proto __attribute__((unused)),
+				     u16 vid)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	struct octnic_ctrl_pkt nctrl;
+	int ret = 0;
+
+	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+	nctrl.ncmd.u64 = 0;
+	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
+	nctrl.ncmd.s.param1 = vid;
+	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 	nctrl.wait_time = 100;
 	nctrl.netpndev = (u64)netdev;
 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 
-	nparams.resp_order = OCTEON_RESP_NORESPONSE;
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+	if (ret < 0) {
+		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
+			ret);
+	}
+	return ret;
+}
+
+int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	struct octnic_ctrl_pkt nctrl;
+	int ret = 0;
+
+	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+	nctrl.ncmd.u64 = 0;
+	nctrl.ncmd.s.cmd = cmd;
+	nctrl.ncmd.s.param1 = param1;
+	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+	nctrl.wait_time = 100;
+	nctrl.netpndev = (u64)netdev;
+	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 
-	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 	if (ret < 0) {
 		dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
 			ret);
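Together with the transmit-path change earlier (irh->priority / irh->vlan), these handlers complete the VLAN offload story: the filter table is programmed per-VID through control packets, while on transmit the 16-bit TCI returned by skb_vlan_tag_get() is split so hardware can insert the tag. The decomposition is standard 802.1Q: the top three bits are the PCP priority, the low twelve the VLAN ID, and the shift by 13 discards the DEI bit in between. A small standalone check:

    #include <assert.h>
    #include <stdint.h>

    /* 802.1Q TCI layout: PCP(3) | DEI(1) | VID(12). */
    static void split_tci(uint16_t tci, uint8_t *prio, uint16_t *vid)
    {
    	*prio = tci >> 13;     /* top 3 bits; DEI dropped */
    	*vid = tci & 0xfff;    /* low 12 bits */
    }

    int main(void)
    {
    	uint8_t prio;
    	uint16_t vid;

    	split_tci((5u << 13) | 100, &prio, &vid); /* PCP 5, VID 100 */
    	assert(prio == 5 && vid == 100);
    	return 0;
    }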
@@ -3008,10 +3197,12 @@ static int liquidio_set_features(struct net_device *netdev,
 		return 0;
 
 	if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
-		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE);
+		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
+				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
 	else if (!(features & NETIF_F_LRO) &&
 		 (lio->dev_capability & NETIF_F_LRO))
-		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE);
+		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
+				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
 
 	return 0;
 }
@@ -3024,6 +3215,9 @@ static struct net_device_ops lionetdevops = {
 	.ndo_set_mac_address = liquidio_set_mac,
 	.ndo_set_rx_mode = liquidio_set_mcast_list,
 	.ndo_tx_timeout = liquidio_tx_timeout,
+
+	.ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
 	.ndo_change_mtu = liquidio_change_mtu,
 	.ndo_do_ioctl = liquidio_ioctl,
 	.ndo_fix_features = liquidio_fix_features,
@@ -3082,24 +3276,27 @@ static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
 {
 	struct octeon_device *oct = (struct octeon_device *)buf;
 	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
-	int ifidx = 0;
+	int gmxport = 0;
 	union oct_link_status *ls;
 	int i;
 
-	if ((recv_pkt->buffer_size[0] != sizeof(*ls)) ||
-	    (recv_pkt->rh.r_nic_info.ifidx > oct->ifcount)) {
+	if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
 		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
 			recv_pkt->buffer_size[0],
-			recv_pkt->rh.r_nic_info.ifidx);
+			recv_pkt->rh.r_nic_info.gmxport);
 		goto nic_info_err;
 	}
 
-	ifidx = recv_pkt->rh.r_nic_info.ifidx;
+	gmxport = recv_pkt->rh.r_nic_info.gmxport;
 	ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
 
 	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
-
-	update_link_status(oct->props[ifidx].netdev, ls);
+	for (i = 0; i < oct->ifcount; i++) {
+		if (oct->props[i].gmxport == gmxport) {
+			update_link_status(oct->props[i].netdev, ls);
+			break;
+		}
+	}
 
 nic_info_err:
 	for (i = 0; i < recv_pkt->buffer_count; i++)
@@ -3125,13 +3322,13 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 	struct liquidio_if_cfg_context *ctx;
 	struct liquidio_if_cfg_resp *resp;
 	struct octdev_props *props;
-	int retval, num_iqueues, num_oqueues, q_no;
-	u64 q_mask;
+	int retval, num_iqueues, num_oqueues;
 	int num_cpus = num_online_cpus();
 	union oct_nic_if_cfg if_cfg;
 	unsigned int base_queue;
 	unsigned int gmx_port_id;
 	u32 resp_size, ctx_size;
+	u32 ifidx_or_pfnum;
 
 	/* This is to handle link status changes */
 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
@@ -3167,13 +3364,14 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 			CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i);
 		gmx_port_id =
 			CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i);
+		ifidx_or_pfnum = i;
 		if (num_iqueues > num_cpus)
 			num_iqueues = num_cpus;
 		if (num_oqueues > num_cpus)
 			num_oqueues = num_cpus;
 		dev_dbg(&octeon_dev->pci_dev->dev,
 			"requesting config for interface %d, iqs %d, oqs %d\n",
			ifidx_or_pfnum, num_iqueues, num_oqueues);
 		ACCESS_ONCE(ctx->cond) = 0;
 		ctx->octeon_id = lio_get_device_id(octeon_dev);
 		init_waitqueue_head(&ctx->wc);
@@ -3183,8 +3381,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 		if_cfg.s.num_oqueues = num_oqueues;
 		if_cfg.s.base_queue = base_queue;
 		if_cfg.s.gmx_port_id = gmx_port_id;
+
+		sc->iq_no = 0;
+
 		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
-					    OPCODE_NIC_IF_CFG, i,
+					    OPCODE_NIC_IF_CFG, 0,
 					    if_cfg.u64, 0);
 
 		sc->callback = if_cfg_callback;
@@ -3192,7 +3393,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 		sc->wait_time = 1000;
 
 		retval = octeon_send_soft_command(octeon_dev, sc);
-		if (retval) {
+		if (retval == IQ_SEND_FAILED) {
 			dev_err(&octeon_dev->pci_dev->dev,
 				"iq/oq config failed status: %x\n",
 				retval);
@@ -3234,8 +3435,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 			goto setup_nic_dev_fail;
 		}
 
-		props = &octeon_dev->props[i];
-		props->netdev = netdev;
+		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
 
 		if (num_iqueues > 1)
 			lionetdevops.ndo_select_queue = select_q;
@@ -3249,23 +3449,21 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 
 		memset(lio, 0, sizeof(struct lio));
 
-		lio->linfo.ifidx = resp->cfg_info.ifidx;
-		lio->ifidx = resp->cfg_info.ifidx;
+		lio->ifidx = ifidx_or_pfnum;
+
+		props = &octeon_dev->props[i];
+		props->gmxport = resp->cfg_info.linfo.gmxport;
+		props->netdev = netdev;
 
 		lio->linfo.num_rxpciq = num_oqueues;
 		lio->linfo.num_txpciq = num_iqueues;
-		q_mask = resp->cfg_info.oqmask;
-		/* q_mask is 0-based and already verified mask is nonzero */
 		for (j = 0; j < num_oqueues; j++) {
-			q_no = __ffs64(q_mask);
-			q_mask &= (~(1UL << q_no));
-			lio->linfo.rxpciq[j] = q_no;
+			lio->linfo.rxpciq[j].u64 =
+				resp->cfg_info.linfo.rxpciq[j].u64;
 		}
-		q_mask = resp->cfg_info.iqmask;
 		for (j = 0; j < num_iqueues; j++) {
-			q_no = __ffs64(q_mask);
-			q_mask &= (~(1UL << q_no));
-			lio->linfo.txpciq[j] = q_no;
+			lio->linfo.txpciq[j].u64 =
+				resp->cfg_info.linfo.txpciq[j].u64;
 		}
 		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
 		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
@@ -3274,16 +3472,25 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 
 		lio->dev_capability = NETIF_F_HIGHDMA
 				      | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
 				      | NETIF_F_SG | NETIF_F_RXCSUM
-				      | NETIF_F_TSO | NETIF_F_TSO6
-				      | NETIF_F_LRO;
+				      | NETIF_F_GRO
+				      | NETIF_F_TSO | NETIF_F_TSO6
+				      | NETIF_F_LRO;
 		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
 
-		netdev->features = lio->dev_capability;
 		netdev->vlan_features = lio->dev_capability;
+		/* Add any unchangeable hw features */
+		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
+				       NETIF_F_HW_VLAN_CTAG_RX |
+				       NETIF_F_HW_VLAN_CTAG_TX;
+
+		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
 
 		netdev->hw_features = lio->dev_capability;
+		/* HW_VLAN_RX and HW_VLAN_FILTER are always on */
+		netdev->hw_features = netdev->hw_features &
+				      ~NETIF_F_HW_VLAN_CTAG_RX;
 
 		/* Point to the properties for octeon device to which this
 		 * interface belongs.
@@ -3291,7 +3498,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 		lio->oct_dev = octeon_dev;
 		lio->octprops = props;
 		lio->netdev = netdev;
-		spin_lock_init(&lio->lock);
 
 		dev_dbg(&octeon_dev->pci_dev->dev,
 			"if%d gmx: %d hw_addr: 0x%llx\n", i,
@@ -3306,23 +3512,22 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 
 		ether_addr_copy(netdev->dev_addr, mac);
 
-		if (setup_io_queues(octeon_dev, netdev)) {
+		/* By default all interfaces on a single Octeon use the same
+		 * tx and rx queues
+		 */
+		lio->txq = lio->linfo.txpciq[0].s.q_no;
+		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
+		if (setup_io_queues(octeon_dev, i)) {
 			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
 			goto setup_nic_dev_fail;
 		}
 
 		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
 
-		/* By default all interfaces on a single Octeon uses the same
-		 * tx and rx queues
-		 */
-		lio->txq = lio->linfo.txpciq[0];
-		lio->rxq = lio->linfo.rxpciq[0];
-
 		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
 		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
 
-		if (setup_glist(lio)) {
+		if (setup_glists(octeon_dev, lio, num_iqueues)) {
 			dev_err(&octeon_dev->pci_dev->dev,
 				"Gather list allocation failed\n");
 			goto setup_nic_dev_fail;
@@ -3330,11 +3535,17 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 
 		/* Register ethtool support */
 		liquidio_set_ethtool_ops(netdev);
+		octeon_dev->priv_flags = 0x0;
+
+		if (netdev->features & NETIF_F_LRO)
+			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
+					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
 
-		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE);
+		liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);
 
 		if ((debug != -1) && (debug & NETIF_MSG_HW))
-			liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE);
+			liquidio_set_feature(netdev,
+					     OCTNET_CMD_VERBOSE_ENABLE, 0);
 
 		/* Register the network device with the OS */
 		if (register_netdev(netdev)) {
@@ -3346,13 +3557,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
 			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
 		netif_carrier_off(netdev);
-
-		if (lio->linfo.link.s.status) {
-			netif_carrier_on(netdev);
-			start_txq(netdev);
-		} else {
-			netif_carrier_off(netdev);
-		}
+		lio->link_changes++;
 
 		ifstate_set(lio, LIO_IFSTATE_REGISTERED);
 
@@ -3386,7 +3591,7 @@ setup_nic_dev_fail:
 static int liquidio_init_nic_module(struct octeon_device *oct)
 {
 	struct oct_intrmod_cfg *intrmod_cfg;
-	int retval = 0;
+	int i, retval = 0;
 	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
 
 	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
@@ -3400,6 +3605,9 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
 	memset(oct->props, 0,
 	       sizeof(struct octdev_props) * num_nic_ports);
 
+	for (i = 0; i < MAX_OCTEON_LINKS; i++)
+		oct->props[i].gmxport = -1;
+
 	retval = setup_nic_devices(oct);
 	if (retval) {
 		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
@@ -3410,15 +3618,19 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
 
 	/* Initialize interrupt moderation params */
 	intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
-	intrmod_cfg->intrmod_enable = 1;
-	intrmod_cfg->intrmod_check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
-	intrmod_cfg->intrmod_maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
-	intrmod_cfg->intrmod_minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
-	intrmod_cfg->intrmod_maxcnt_trigger = LIO_INTRMOD_MAXCNT_TRIGGER;
-	intrmod_cfg->intrmod_maxtmr_trigger = LIO_INTRMOD_MAXTMR_TRIGGER;
-	intrmod_cfg->intrmod_mintmr_trigger = LIO_INTRMOD_MINTMR_TRIGGER;
-	intrmod_cfg->intrmod_mincnt_trigger = LIO_INTRMOD_MINCNT_TRIGGER;
-
+	intrmod_cfg->rx_enable = 1;
+	intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
+	intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
+	intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
+	intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
+	intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
+	intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
+	intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
+	intrmod_cfg->tx_enable = 1;
+	intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
+	intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
+	intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
+	intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
 	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
 
 	return retval;
@@ -3481,6 +3693,7 @@ static void nic_starter(struct work_struct *work)
 static int octeon_device_init(struct octeon_device *octeon_dev)
 {
 	int j, ret;
+	char bootcmd[] = "\n";
 	struct octeon_device_priv *oct_priv =
 		(struct octeon_device_priv *)octeon_dev->priv;
 	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
@@ -3592,14 +3805,19 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
 
 	dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
 
-	if (ddr_timeout == 0) {
-		dev_info(&octeon_dev->pci_dev->dev,
-			 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
-	}
+	if (ddr_timeout == 0)
+		dev_info(&octeon_dev->pci_dev->dev, "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
 
 	schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
 
 	/* Wait for the octeon to initialize DDR after the soft-reset. */
+	while (ddr_timeout == 0) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (schedule_timeout(HZ / 10)) {
+			/* user probably pressed Control-C */
+			return 1;
+		}
+	}
 	ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
 	if (ret) {
 		dev_err(&octeon_dev->pci_dev->dev,
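When ddr_timeout is 0 the new code parks in the canonical interruptible-sleep loop: set TASK_INTERRUPTIBLE, sleep a tick at a time, and treat a nonzero schedule_timeout() return (the sleep was cut short, typically by a signal) as the operator's request to abort. A user-space analogue of the same wait-until-interrupted shape, assuming a POSIX environment:

    #include <signal.h>
    #include <stdio.h>
    #include <time.h>

    static volatile sig_atomic_t interrupted;

    static void on_int(int sig)
    {
    	(void)sig;
    	interrupted = 1;
    }

    /* Poll a condition every 100 ms until it becomes true or the user
     * interrupts us, mirroring the driver's schedule_timeout() loop.
     */
    static int wait_for(volatile int *cond)
    {
    	struct timespec ts = { 0, 100 * 1000 * 1000 };

    	while (!*cond) {
    		if (interrupted)
    			return -1;   /* user probably pressed Ctrl-C */
    		nanosleep(&ts, NULL);
    	}
    	return 0;
    }

    int main(void)
    {
    	volatile int ready = 0;   /* never set: waits until Ctrl-C */

    	signal(SIGINT, on_int);
    	printf("waiting (Ctrl-C aborts)...\n");
    	return wait_for(&ready) ? 1 : 0;
    }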
@@ -3613,6 +3831,9 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
 		return 1;
 	}
 
+	/* Divert uboot to take commands from host instead. */
+	ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
+
 	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
 	ret = octeon_init_consoles(octeon_dev);
 	if (ret) {
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 0ac347ccc8ba..5aa01f427d4a 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -30,11 +30,10 @@
 
 #include "octeon_config.h"
 
-#define LIQUIDIO_VERSION        "1.1.9"
-#define LIQUIDIO_MAJOR_VERSION  1
-#define LIQUIDIO_MINOR_VERSION  1
-#define LIQUIDIO_MICRO_VERSION  9
-
+#define LIQUIDIO_BASE_VERSION   "1.4"
+#define LIQUIDIO_MICRO_VERSION  ".1"
+#define LIQUIDIO_PACKAGE        ""
+#define LIQUIDIO_VERSION        "1.4.1"
 #define CONTROL_IQ 0
 /** Tag types used by Octeon cores in its work. */
 enum octeon_tag_type {
@@ -174,9 +173,11 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
 /*------------------------- End Scatter/Gather ---------------------------*/
 
 #define OCTNET_FRM_PTP_HEADER_SIZE 8
-#define OCTNET_FRM_HEADER_SIZE 30 /* PTP timestamp + VLAN + Ethernet */
 
-#define OCTNET_MIN_FRM_SIZE (64 + OCTNET_FRM_PTP_HEADER_SIZE)
+#define OCTNET_FRM_HEADER_SIZE 22 /* VLAN + Ethernet */
+
+#define OCTNET_MIN_FRM_SIZE 64
+
 #define OCTNET_MAX_FRM_SIZE (16000 + OCTNET_FRM_HEADER_SIZE)
 
 #define OCTNET_DEFAULT_FRM_SIZE (1500 + OCTNET_FRM_HEADER_SIZE)
@@ -212,6 +213,10 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
 #define OCTNET_CMD_VERBOSE_ENABLE 0x14
 #define OCTNET_CMD_VERBOSE_DISABLE 0x15
 
+#define OCTNET_CMD_ENABLE_VLAN_FILTER 0x16
+#define OCTNET_CMD_ADD_VLAN_FILTER 0x17
+#define OCTNET_CMD_DEL_VLAN_FILTER 0x18
+
 /* RX(packets coming from wire) Checksum verification flags */
 /* TCP/UDP csum */
 #define CNNIC_L4SUM_VERIFIED 0x1
@@ -258,19 +263,19 @@ union octnet_cmd {
 
 		u64 more:6; /* How many udd words follow the command */
 
-		u64 param1:29;
+		u64 reserved:29;
 
-		u64 param2:16;
+		u64 param1:16;
 
-		u64 param3:8;
+		u64 param2:8;
 
 #else
 
-		u64 param3:8;
+		u64 param2:8;
 
-		u64 param2:16;
+		u64 param1:16;
 
-		u64 param1:29;
+		u64 reserved:29;
 
 		u64 more:6;
 
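The param renumbering above has to be written twice because these unions spell the field list out forwards for big-endian and backwards for little-endian: C leaves bitfield allocation order to the ABI, so the driver mirrors the declaration under __BIG_ENDIAN_BITFIELD to keep the wire layout stable. A host-side sketch of the idiom with a compile-time size check; the field widths are taken from the hunk above, with cmd assumed to fill the remaining 5 bits:

    #include <stdint.h>

    /* Mirrored declaration so that, on either ABI, `cmd` occupies the
     * most significant bits of the word and `param2` the least.
     */
    union cmd_sketch {
    	uint64_t u64;
    	struct {
    #ifdef __BIG_ENDIAN_BITFIELD
    		uint64_t cmd:5;
    		uint64_t more:6;
    		uint64_t reserved:29;
    		uint64_t param1:16;
    		uint64_t param2:8;
    #else
    		uint64_t param2:8;
    		uint64_t param1:16;
    		uint64_t reserved:29;
    		uint64_t more:6;
    		uint64_t cmd:5;
    #endif
    	} s;
    };

    /* Both branches must cover all 64 bits. */
    _Static_assert(sizeof(union cmd_sketch) == 8, "layout must fill 64 bits");

    int main(void)
    {
    	union cmd_sketch c = { .u64 = 0 };

    	c.s.param1 = 42;
    	return (int)c.s.param1 - 42;   /* exits 0 */
    }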
@@ -283,8 +288,140 @@ union octnet_cmd {
 
 #define OCTNET_CMD_SIZE (sizeof(union octnet_cmd))
 
+/* Instruction Header (DPI - CN23xx) - for OCTEON-III models */
+struct octeon_instr_ih3 {
+#ifdef __BIG_ENDIAN_BITFIELD
+
+	/** Reserved3 */
+	u64 reserved3:1;
+
+	/** Gather indicator 1=gather*/
+	u64 gather:1;
+
+	/** Data length OR no. of entries in gather list */
+	u64 dlengsz:14;
+
+	/** Front Data size */
+	u64 fsz:6;
+
+	/** Reserved2 */
+	u64 reserved2:4;
+
+	/** PKI port kind - PKIND */
+	u64 pkind:6;
+
+	/** Reserved1 */
+	u64 reserved1:32;
+
+#else
+	/** Reserved1 */
+	u64 reserved1:32;
+
+	/** PKI port kind - PKIND */
+	u64 pkind:6;
+
+	/** Reserved2 */
+	u64 reserved2:4;
+
+	/** Front Data size */
+	u64 fsz:6;
+
+	/** Data length OR no. of entries in gather list */
+	u64 dlengsz:14;
+
+	/** Gather indicator 1=gather*/
+	u64 gather:1;
+
+	/** Reserved3 */
+	u64 reserved3:1;
+
+#endif
+};
+
+/* Optional PKI Instruction Header (PKI IH) - for OCTEON CN23XX models */
+/** BIG ENDIAN format. */
+struct octeon_instr_pki_ih3 {
+#ifdef __BIG_ENDIAN_BITFIELD
+
+	/** Wider bit */
+	u64 w:1;
+
+	/** Raw mode indicator 1 = RAW */
+	u64 raw:1;
+
+	/** Use Tag */
+	u64 utag:1;
+
+	/** Use QPG */
+	u64 uqpg:1;
+
+	/** Reserved2 */
+	u64 reserved2:1;
+
+	/** Parse Mode */
+	u64 pm:3;
+
+	/** Skip Length */
+	u64 sl:8;
+
+	/** Use Tag Type */
+	u64 utt:1;
+
+	/** Tag type */
+	u64 tagtype:2;
+
+	/** Reserved1 */
+	u64 reserved1:2;
+
+	/** QPG Value */
+	u64 qpg:11;
+
+	/** Tag Value */
+	u64 tag:32;
+
+#else
+
+	/** Tag Value */
+	u64 tag:32;
+
+	/** QPG Value */
+	u64 qpg:11;
+
+	/** Reserved1 */
+	u64 reserved1:2;
+
+	/** Tag type */
+	u64 tagtype:2;
+
+	/** Use Tag Type */
+	u64 utt:1;
+
+	/** Skip Length */
+	u64 sl:8;
+
+	/** Parse Mode */
+	u64 pm:3;
+
+	/** Reserved2 */
+	u64 reserved2:1;
+
+	/** Use QPG */
+	u64 uqpg:1;
+
+	/** Use Tag */
+	u64 utag:1;
+
+	/** Raw mode indicator 1 = RAW */
+	u64 raw:1;
+
+	/** Wider bit */
+	u64 w:1;
+#endif
+
+};
+
 /** Instruction Header */
-struct octeon_instr_ih {
+struct octeon_instr_ih2 {
 #ifdef __BIG_ENDIAN_BITFIELD
 	/** Raw mode indicator 1 = RAW */
 	u64 raw:1;
@@ -348,15 +485,15 @@ struct octeon_instr_irh {
 	u64 opcode:4;
 	u64 rflag:1;
 	u64 subcode:7;
-	u64 len:3;
-	u64 rid:13;
-	u64 reserved:4;
+	u64 vlan:12;
+	u64 priority:3;
+	u64 reserved:5;
 	u64 ossp:32; /* opcode/subcode specific parameters */
 #else
 	u64 ossp:32; /* opcode/subcode specific parameters */
-	u64 reserved:4;
-	u64 rid:13;
-	u64 len:3;
+	u64 reserved:5;
+	u64 priority:3;
+	u64 vlan:12;
 	u64 subcode:7;
 	u64 rflag:1;
 	u64 opcode:4;
@@ -383,28 +520,27 @@ union octeon_rh {
 	struct {
 		u64 opcode:4;
 		u64 subcode:8;
 		u64 len:3; /** additional 64-bit words */
-		u64 rid:13; /** request id in response to pkt sent by host */
-		u64 reserved:4;
-		u64 ossp:32; /** opcode/subcode specific parameters */
+		u64 reserved:17;
+		u64 ossp:32; /** opcode/subcode specific parameters */
 	} r;
 	struct {
 		u64 opcode:4;
 		u64 subcode:8;
 		u64 len:3; /** additional 64-bit words */
-		u64 rid:13; /** request id in response to pkt sent by host */
-		u64 extra:24;
-		u64 link:8;
+		u64 extra:28;
+		u64 vlan:12;
+		u64 priority:3;
 		u64 csum_verified:3; /** checksum verified. */
 		u64 has_hwtstamp:1; /** Has hardware timestamp. 1 = yes. */
 	} r_dh;
 	struct {
 		u64 opcode:4;
 		u64 subcode:8;
 		u64 len:3; /** additional 64-bit words */
-		u64 rid:13; /** request id in response to pkt sent by host */
+		u64 reserved:11;
 		u64 num_gmx_ports:8;
-		u64 max_nic_ports:8;
+		u64 max_nic_ports:10;
 		u64 app_cap_flags:4;
 		u64 app_mode:16;
 	} r_core_drv_init;
@@ -412,17 +548,15 @@ union octeon_rh {
 		u64 opcode:4;
 		u64 subcode:8;
 		u64 len:3; /** additional 64-bit words */
-		u64 rid:13;
-		u64 reserved:4;
+		u64 reserved:8;
 		u64 extra:25;
-		u64 ifidx:7;
+		u64 gmxport:16;
 	} r_nic_info;
 #else
 	u64 u64;
 	struct {
 		u64 ossp:32; /** opcode/subcode specific parameters */
-		u64 reserved:4;
-		u64 rid:13; /** req id in response to pkt sent by host */
+		u64 reserved:17;
 		u64 len:3; /** additional 64-bit words */
 		u64 subcode:8;
 		u64 opcode:4;
@@ -430,9 +564,9 @@ union octeon_rh {
 	struct {
 		u64 has_hwtstamp:1; /** 1 = has hwtstamp */
 		u64 csum_verified:3; /** checksum verified. */
-		u64 link:8;
-		u64 extra:24;
-		u64 rid:13; /** req id in response to pkt sent by host */
+		u64 priority:3;
+		u64 vlan:12;
+		u64 extra:28;
 		u64 len:3; /** additional 64-bit words */
 		u64 subcode:8;
 		u64 opcode:4;
@@ -440,18 +574,17 @@ union octeon_rh {
 	struct {
 		u64 app_mode:16;
 		u64 app_cap_flags:4;
-		u64 max_nic_ports:8;
+		u64 max_nic_ports:10;
 		u64 num_gmx_ports:8;
-		u64 rid:13;
+		u64 reserved:11;
 		u64 len:3; /** additional 64-bit words */
 		u64 subcode:8;
 		u64 opcode:4;
 	} r_core_drv_init;
 	struct {
-		u64 ifidx:7;
+		u64 gmxport:16;
 		u64 extra:25;
-		u64 reserved:4;
-		u64 rid:13;
+		u64 reserved:8;
 		u64 len:3; /** additional 64-bit words */
 		u64 subcode:8;
 		u64 opcode:4;
@@ -461,30 +594,25 @@ union octeon_rh {
 
 #define OCT_RH_SIZE (sizeof(union octeon_rh))
 
-#define OCT_PKT_PARAM_IPV4OPTS 1
-#define OCT_PKT_PARAM_IPV6EXTHDR 2
-
 union octnic_packet_params {
 	u32 u32;
 	struct {
 #ifdef __BIG_ENDIAN_BITFIELD
-		u32 reserved:6;
+		u32 reserved:24;
+		u32 ip_csum:1; /* Perform IP header checksum(s) */
+		/* Perform Outer transport header checksum */
+		u32 transport_csum:1;
+		/* Find tunnel, and perform transport csum. */
 		u32 tnl_csum:1;
-		u32 ip_csum:1;
-		u32 ipv4opts_ipv6exthdr:2;
-		u32 ipsec_ops:4;
-		u32 tsflag:1;
-		u32 csoffset:9;
-		u32 ifidx:8;
+		u32 tsflag:1; /* Timestamp this packet */
+		u32 ipsec_ops:4; /* IPsec operation */
 #else
-		u32 ifidx:8;
-		u32 csoffset:9;
-		u32 tsflag:1;
 		u32 ipsec_ops:4;
-		u32 ipv4opts_ipv6exthdr:2;
+		u32 tsflag:1;
 		u32 tnl_csum:1;
+		u32 transport_csum:1;
+		u32 ip_csum:1;
+		u32 reserved:24;
 #endif
 	} s;
 };
@@ -496,56 +624,94 @@ union oct_link_status {
 	struct {
 #ifdef __BIG_ENDIAN_BITFIELD
 		u64 duplex:8;
-		u64 status:8;
 		u64 mtu:16;
 		u64 speed:16;
+		u64 link_up:1;
 		u64 autoneg:1;
-		u64 interface:4;
+		u64 if_mode:5;
 		u64 pause:1;
-		u64 reserved:10;
+		u64 reserved:16;
 #else
-		u64 reserved:10;
+		u64 reserved:16;
 		u64 pause:1;
-		u64 interface:4;
+		u64 if_mode:5;
 		u64 autoneg:1;
+		u64 link_up:1;
 		u64 speed:16;
 		u64 mtu:16;
-		u64 status:8;
 		u64 duplex:8;
 #endif
 	} s;
 };
 
+/** The txpciq info passed to host from the firmware */
+
+union oct_txpciq {
+	u64 u64;
+
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		u64 q_no:8;
+		u64 port:8;
+		u64 pkind:6;
+		u64 use_qpg:1;
+		u64 qpg:11;
+		u64 reserved:30;
+#else
+		u64 reserved:30;
+		u64 qpg:11;
+		u64 use_qpg:1;
+		u64 pkind:6;
+		u64 port:8;
+		u64 q_no:8;
+#endif
+	} s;
+};
+
+/** The rxpciq info passed to host from the firmware */
+
+union oct_rxpciq {
+	u64 u64;
+
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		u64 q_no:8;
+		u64 reserved:56;
+#else
+		u64 reserved:56;
+		u64 q_no:8;
+#endif
+	} s;
+};
+
 /** Information for a OCTEON ethernet interface shared between core & host. */
 struct oct_link_info {
 	union oct_link_status link;
 	u64 hw_addr;
 
 #ifdef __BIG_ENDIAN_BITFIELD
-	u16 gmxport;
-	u8 rsvd[3];
-	u8 num_txpciq;
-	u8 num_rxpciq;
-	u8 ifidx;
+	u64 gmxport:16;
+	u64 rsvd:32;
+	u64 num_txpciq:8;
+	u64 num_rxpciq:8;
 #else
-	u8 ifidx;
-	u8 num_rxpciq;
-	u8 num_txpciq;
-	u8 rsvd[3];
-	u16 gmxport;
+	u64 num_rxpciq:8;
+	u64 num_txpciq:8;
+	u64 rsvd:32;
+	u64 gmxport:16;
 #endif
 
-	u8 txpciq[MAX_IOQS_PER_NICIF];
-	u8 rxpciq[MAX_IOQS_PER_NICIF];
+	union oct_txpciq txpciq[MAX_IOQS_PER_NICIF];
+	union oct_rxpciq rxpciq[MAX_IOQS_PER_NICIF];
 };
 
 #define OCT_LINK_INFO_SIZE (sizeof(struct oct_link_info))
 
 struct liquidio_if_cfg_info {
-	u64 ifidx;
 	u64 iqmask; /** mask for IQs enabled for the port */
 	u64 oqmask; /** mask for OQs enabled for the port */
 	struct oct_link_info linfo; /** initial link information */
+	char liquidio_firmware_version[32];
 };
 
 /** Stats for each NIC port in RX direction. */
@@ -570,10 +736,16 @@ struct nic_rx_stats {
 	u64 fw_err_pko;
 	u64 fw_err_link;
 	u64 fw_err_drop;
+
+	/* LRO */
 	u64 fw_lro_pkts; /* Number of packets that are LROed */
 	u64 fw_lro_octs; /* Number of octets that are LROed */
 	u64 fw_total_lro; /* Number of LRO packets formed */
 	u64 fw_lro_aborts; /* Number of times LRO of packet aborted */
+	u64 fw_lro_aborts_port;
+	u64 fw_lro_aborts_seq;
+	u64 fw_lro_aborts_tsval;
+	u64 fw_lro_aborts_timer;
 	/* intrmod: packet forward rate */
 	u64 fwd_rate;
 };
@@ -597,9 +769,13 @@ struct nic_tx_stats {
 	/* firmware stats */
 	u64 fw_total_sent;
 	u64 fw_total_fwd;
+	u64 fw_total_fwd_bytes;
 	u64 fw_err_pko;
 	u64 fw_err_link;
 	u64 fw_err_drop;
+	u64 fw_err_tso;
+	u64 fw_tso; /* number of tso requests */
+	u64 fw_tso_fwd; /* number of packets segmented in tso */
 };
 
 struct oct_link_stats {
@@ -630,23 +806,44 @@ struct oct_mdio_cmd {
630 806
631#define OCT_LINK_STATS_SIZE (sizeof(struct oct_link_stats)) 807#define OCT_LINK_STATS_SIZE (sizeof(struct oct_link_stats))
632 808
809/* intrmod: max. packet rate threshold */
810#define LIO_INTRMOD_MAXPKT_RATETHR 196608
811/* intrmod: min. packet rate threshold */
812#define LIO_INTRMOD_MINPKT_RATETHR 9216
813/* intrmod: max. packets to trigger interrupt */
814#define LIO_INTRMOD_RXMAXCNT_TRIGGER 384
815/* intrmod: min. packets to trigger interrupt */
816#define LIO_INTRMOD_RXMINCNT_TRIGGER 1
817/* intrmod: max. time to trigger interrupt */
818#define LIO_INTRMOD_RXMAXTMR_TRIGGER 128
819/* 66xx:intrmod: min. time to trigger interrupt
820 * (value of 1 is optimum for TCP_RR)
821 */
822#define LIO_INTRMOD_RXMINTMR_TRIGGER 1
823
824/* intrmod: max. packets to trigger interrupt */
825#define LIO_INTRMOD_TXMAXCNT_TRIGGER 64
826/* intrmod: min. packets to trigger interrupt */
827#define LIO_INTRMOD_TXMINCNT_TRIGGER 0
828
829/* intrmod: poll interval in seconds */
633#define LIO_INTRMOD_CHECK_INTERVAL 1 830#define LIO_INTRMOD_CHECK_INTERVAL 1
634#define LIO_INTRMOD_MAXPKT_RATETHR 196608 /* max pkt rate threshold */
635#define LIO_INTRMOD_MINPKT_RATETHR 9216 /* min pkt rate threshold */
636#define LIO_INTRMOD_MAXCNT_TRIGGER 384 /* max pkts to trigger interrupt */
637#define LIO_INTRMOD_MINCNT_TRIGGER 1 /* min pkts to trigger interrupt */
638#define LIO_INTRMOD_MAXTMR_TRIGGER 128 /* max time to trigger interrupt */
639#define LIO_INTRMOD_MINTMR_TRIGGER 32 /* min time to trigger interrupt */
640 831
641struct oct_intrmod_cfg { 832struct oct_intrmod_cfg {
642 u64 intrmod_enable; 833 u64 rx_enable;
643 u64 intrmod_check_intrvl; 834 u64 tx_enable;
644 u64 intrmod_maxpkt_ratethr; 835 u64 check_intrvl;
645 u64 intrmod_minpkt_ratethr; 836 u64 maxpkt_ratethr;
646 u64 intrmod_maxcnt_trigger; 837 u64 minpkt_ratethr;
647 u64 intrmod_maxtmr_trigger; 838 u64 rx_maxcnt_trigger;
648 u64 intrmod_mincnt_trigger; 839 u64 rx_mincnt_trigger;
649 u64 intrmod_mintmr_trigger; 840 u64 rx_maxtmr_trigger;
841 u64 rx_mintmr_trigger;
842 u64 tx_mincnt_trigger;
843 u64 tx_maxcnt_trigger;
844 u64 rx_frames;
845 u64 tx_frames;
846 u64 rx_usecs;
650}; 847};
651 848
652#define BASE_QUEUE_NOT_REQUESTED 65535 849#define BASE_QUEUE_NOT_REQUESTED 65535
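
The renamed rx_/tx_ fields of oct_intrmod_cfg line up one-to-one with the threshold macros introduced above. An illustrative default initialization (the enable values are assumptions, not taken from this patch):

	static const struct oct_intrmod_cfg intrmod_defaults = {
		.rx_enable         = 1,
		.tx_enable         = 1,
		.check_intrvl      = LIO_INTRMOD_CHECK_INTERVAL,
		.maxpkt_ratethr    = LIO_INTRMOD_MAXPKT_RATETHR,
		.minpkt_ratethr    = LIO_INTRMOD_MINPKT_RATETHR,
		.rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER,
		.rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER,
		.rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER,
		.rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER,
		.tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER,
		.tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER,
	};
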
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index 62a8dd5cd3dc..4b8c948400be 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -37,7 +37,7 @@
37/* Maximum octeon devices defined as MAX_OCTEON_NICIF to support 37/* Maximum octeon devices defined as MAX_OCTEON_NICIF to support
38 * multiple(<= MAX_OCTEON_NICIF) Miniports 38 * multiple(<= MAX_OCTEON_NICIF) Miniports
39 */ 39 */
40#define MAX_OCTEON_NICIF 32 40#define MAX_OCTEON_NICIF 128
41#define MAX_OCTEON_DEVICES MAX_OCTEON_NICIF 41#define MAX_OCTEON_DEVICES MAX_OCTEON_NICIF
42#define MAX_OCTEON_LINKS MAX_OCTEON_NICIF 42#define MAX_OCTEON_LINKS MAX_OCTEON_NICIF
43#define MAX_OCTEON_MULTICAST_ADDR 32 43#define MAX_OCTEON_MULTICAST_ADDR 32
@@ -135,7 +135,7 @@
135#define CFG_GET_IS_SLI_BP_ON(cfg) ((cfg)->misc.enable_sli_oq_bp) 135#define CFG_GET_IS_SLI_BP_ON(cfg) ((cfg)->misc.enable_sli_oq_bp)
136 136
137/* Max IOQs per OCTEON Link */ 137/* Max IOQs per OCTEON Link */
138#define MAX_IOQS_PER_NICIF 32 138#define MAX_IOQS_PER_NICIF 64
139 139
140enum lio_card_type { 140enum lio_card_type {
141 LIO_210SV = 0, /* Two port, 66xx */ 141 LIO_210SV = 0, /* Two port, 66xx */
@@ -416,9 +416,11 @@ struct octeon_config {
416#define DISPATCH_LIST_SIZE BIT(OPCODE_MASK_BITS) 416#define DISPATCH_LIST_SIZE BIT(OPCODE_MASK_BITS)
417 417
418/* Maximum number of Octeon Instruction (command) queues */ 418/* Maximum number of Octeon Instruction (command) queues */
419#define MAX_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES 419#define MAX_OCTEON_INSTR_QUEUES(oct) CN6XXX_MAX_INPUT_QUEUES
420/* Maximum number of Octeon Output queues */
421#define MAX_OCTEON_OUTPUT_QUEUES(oct) CN6XXX_MAX_OUTPUT_QUEUES
420 422
421/* Maximum number of Octeon Instruction (command) queues */ 423#define MAX_POSSIBLE_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES
422#define MAX_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES 424#define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES
423 425
424#endif /* __OCTEON_CONFIG_H__ */ 426#endif /* __OCTEON_CONFIG_H__ */
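
MAX_OCTEON_INSTR_QUEUES and MAX_OCTEON_OUTPUT_QUEUES now take the device pointer; for CN6XXX the argument is unused, but callers are already shaped so a later chip can report a different per-device limit. A sketch of the resulting caller pattern (the per-queue action is hypothetical):

	static void for_each_enabled_iq(struct octeon_device *oct,
					void (*fn)(struct octeon_device *, u32))
	{
		u32 q;

		for (q = 0; q < MAX_OCTEON_INSTR_QUEUES(oct); q++)
			if (oct->io_qmask.iq & (1ULL << q))
				fn(oct, q);
	}
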
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 8e23e3fad662..337220721632 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -19,7 +19,6 @@
19* This file may also be available under a different license from Cavium. 19* This file may also be available under a different license from Cavium.
20* Contact Cavium, Inc. for more information 20* Contact Cavium, Inc. for more information
21**********************************************************************/ 21**********************************************************************/
22#include <linux/version.h>
23#include <linux/types.h> 22#include <linux/types.h>
24#include <linux/list.h> 23#include <linux/list.h>
25#include <linux/interrupt.h> 24#include <linux/interrupt.h>
@@ -550,17 +549,19 @@ static char *get_oct_app_string(u32 app_mode)
550 return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START]; 549 return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
551} 550}
552 551
552u8 fbuf[4 * 1024 * 1024];
553
553int octeon_download_firmware(struct octeon_device *oct, const u8 *data, 554int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
554 size_t size) 555 size_t size)
555{ 556{
556 int ret = 0; 557 int ret = 0;
557 u8 *p; 558 u8 *p = fbuf;
558 u8 *buffer;
559 u32 crc32_result; 559 u32 crc32_result;
560 u64 load_addr; 560 u64 load_addr;
561 u32 image_len; 561 u32 image_len;
562 struct octeon_firmware_file_header *h; 562 struct octeon_firmware_file_header *h;
563 u32 i; 563 u32 i, rem, base_len = strlen(LIQUIDIO_BASE_VERSION);
564 char *base;
564 565
565 if (size < sizeof(struct octeon_firmware_file_header)) { 566 if (size < sizeof(struct octeon_firmware_file_header)) {
566 dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n", 567 dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
@@ -576,19 +577,26 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
576 return -EINVAL; 577 return -EINVAL;
577 } 578 }
578 579
579 crc32_result = 580 crc32_result = crc32((unsigned int)~0, data,
580 crc32(~0, data, 581 sizeof(struct octeon_firmware_file_header) -
581 sizeof(struct octeon_firmware_file_header) - 582 sizeof(u32)) ^ ~0U;
582 sizeof(u32)) ^ ~0U;
583 if (crc32_result != be32_to_cpu(h->crc32)) { 583 if (crc32_result != be32_to_cpu(h->crc32)) {
584 dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n", 584 dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
585 crc32_result, be32_to_cpu(h->crc32)); 585 crc32_result, be32_to_cpu(h->crc32));
586 return -EINVAL; 586 return -EINVAL;
587 } 587 }
588 588
589 if (memcmp(LIQUIDIO_VERSION, h->version, strlen(LIQUIDIO_VERSION))) { 589 if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) {
590 dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s, got %s.\n", 590 dev_err(&oct->pci_dev->dev, "Unmatched firmware package type. Expected %s, got %s.\n",
591 LIQUIDIO_VERSION, h->version); 591 LIQUIDIO_PACKAGE, h->version);
592 return -EINVAL;
593 }
594
595 base = h->version + strlen(LIQUIDIO_PACKAGE);
596 ret = memcmp(LIQUIDIO_BASE_VERSION, base, base_len);
597 if (ret) {
598 dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n",
599 LIQUIDIO_BASE_VERSION, base);
592 return -EINVAL; 600 return -EINVAL;
593 } 601 }
594 602
@@ -602,56 +610,56 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
602 snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s", 610 snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
603 h->version); 611 h->version);
604 612
605 buffer = kmemdup(data, size, GFP_KERNEL); 613 data += sizeof(struct octeon_firmware_file_header);
606 if (!buffer)
607 return -ENOMEM;
608
609 p = buffer + sizeof(struct octeon_firmware_file_header);
610 614
615 dev_info(&oct->pci_dev->dev, "%s: Loading %d images\n", __func__,
616 be32_to_cpu(h->num_images));
611 /* load all images */ 617 /* load all images */
612 for (i = 0; i < be32_to_cpu(h->num_images); i++) { 618 for (i = 0; i < be32_to_cpu(h->num_images); i++) {
613 load_addr = be64_to_cpu(h->desc[i].addr); 619 load_addr = be64_to_cpu(h->desc[i].addr);
614 image_len = be32_to_cpu(h->desc[i].len); 620 image_len = be32_to_cpu(h->desc[i].len);
615 621
616 /* validate the image */ 622 dev_info(&oct->pci_dev->dev, "Loading firmware %d at %llx\n",
617 crc32_result = crc32(~0, p, image_len) ^ ~0U; 623 image_len, load_addr);
618 if (crc32_result != be32_to_cpu(h->desc[i].crc32)) {
619 dev_err(&oct->pci_dev->dev,
620 "Firmware CRC mismatch in image %d (0x%08x != 0x%08x).\n",
621 i, crc32_result,
622 be32_to_cpu(h->desc[i].crc32));
623 ret = -EINVAL;
624 goto done_downloading;
625 }
626 624
627 /* download the image */ 625 /* Write in 4MB chunks */
628 octeon_pci_write_core_mem(oct, load_addr, p, image_len); 626 rem = image_len;
629 627
630 p += image_len; 628 while (rem) {
631 dev_dbg(&oct->pci_dev->dev, 629 if (rem < (4 * 1024 * 1024))
632 "Downloaded image %d (%d bytes) to address 0x%016llx\n", 630 size = rem;
633 i, image_len, load_addr); 631 else
632 size = 4 * 1024 * 1024;
633
634 memcpy(p, data, size);
635
636 /* download the image */
637 octeon_pci_write_core_mem(oct, load_addr, p, (u32)size);
638
639 data += size;
640 rem -= (u32)size;
641 load_addr += size;
642 }
634 } 643 }
644 dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n",
645 h->bootcmd);
635 646
636 /* Invoke the bootcmd */ 647 /* Invoke the bootcmd */
637 ret = octeon_console_send_cmd(oct, h->bootcmd, 50); 648 ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
638 649
639done_downloading: 650 return 0;
640 kfree(buffer);
641
642 return ret;
643} 651}
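
The rewritten loop stages each image through a fixed buffer so no single octeon_pci_write_core_mem() call exceeds 4 MB. The pattern in isolation, as a sketch (staging buffer supplied by the caller, helper name illustrative):

	#define FW_CHUNK_SZ (4 * 1024 * 1024)

	static void write_core_mem_chunked(struct octeon_device *oct, u64 addr,
					   const u8 *src, u32 len, u8 *staging)
	{
		while (len) {
			u32 n = min_t(u32, len, FW_CHUNK_SZ);

			memcpy(staging, src, n);
			octeon_pci_write_core_mem(oct, addr, staging, n);
			src  += n;
			addr += n;
			len  -= n;
		}
	}
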
644 652
645void octeon_free_device_mem(struct octeon_device *oct) 653void octeon_free_device_mem(struct octeon_device *oct)
646{ 654{
647 u32 i; 655 u32 i;
648 656
649 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { 657 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
650 /* could check mask as well */ 658 /* could check mask as well */
651 vfree(oct->droq[i]); 659 vfree(oct->droq[i]);
652 } 660 }
653 661
654 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { 662 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
655 /* could check mask as well */ 663 /* could check mask as well */
656 vfree(oct->instr_queue[i]); 664 vfree(oct->instr_queue[i]);
657 } 665 }
@@ -735,55 +743,65 @@ struct octeon_device *octeon_allocate_device(u32 pci_id,
735 octeon_device[oct_idx] = oct; 743 octeon_device[oct_idx] = oct;
736 744
737 oct->octeon_id = oct_idx; 745 oct->octeon_id = oct_idx;
738 snprintf((oct->device_name), sizeof(oct->device_name), 746 snprintf(oct->device_name, sizeof(oct->device_name),
739 "LiquidIO%d", (oct->octeon_id)); 747 "LiquidIO%d", (oct->octeon_id));
740 748
741 return oct; 749 return oct;
742} 750}
743 751
752/* this function is only for setting up the first queue */
744int octeon_setup_instr_queues(struct octeon_device *oct) 753int octeon_setup_instr_queues(struct octeon_device *oct)
745{ 754{
746 u32 i, num_iqs = 0; 755 u32 num_iqs = 0;
747 u32 num_descs = 0; 756 u32 num_descs = 0;
757 u32 iq_no = 0;
758 union oct_txpciq txpciq;
759 int numa_node = cpu_to_node(iq_no % num_online_cpus());
748 760
761 num_iqs = 1;
749 /* this causes queue 0 to be default queue */ 762 /* this causes queue 0 to be default queue */
750 if (OCTEON_CN6XXX(oct)) { 763 if (OCTEON_CN6XXX(oct))
751 num_iqs = 1;
752 num_descs = 764 num_descs =
753 CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf)); 765 CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
754 }
755 766
756 oct->num_iqs = 0; 767 oct->num_iqs = 0;
757 768
758 for (i = 0; i < num_iqs; i++) { 769 oct->instr_queue[0] = vmalloc_node(sizeof(*oct->instr_queue[0]),
759 oct->instr_queue[i] = 770 numa_node);
771 if (!oct->instr_queue[0])
772 oct->instr_queue[0] =
760 vmalloc(sizeof(struct octeon_instr_queue)); 773 vmalloc(sizeof(struct octeon_instr_queue));
761 if (!oct->instr_queue[i]) 774 if (!oct->instr_queue[0])
762 return 1; 775 return 1;
763 776 memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue));
764 memset(oct->instr_queue[i], 0, 777 oct->instr_queue[0]->q_index = 0;
765 sizeof(struct octeon_instr_queue)); 778 oct->instr_queue[0]->app_ctx = (void *)(size_t)0;
766 779 oct->instr_queue[0]->ifidx = 0;
767 oct->instr_queue[i]->app_ctx = (void *)(size_t)i; 780 txpciq.u64 = 0;
768 if (octeon_init_instr_queue(oct, i, num_descs)) 781 txpciq.s.q_no = iq_no;
769 return 1; 782 txpciq.s.use_qpg = 0;
770 783 txpciq.s.qpg = 0;
771 oct->num_iqs++; 784 if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
785 /* prevent memory leak */
786 vfree(oct->instr_queue[0]);
787 return 1;
772 } 788 }
773 789
790 oct->num_iqs++;
774 return 0; 791 return 0;
775} 792}
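
The setup path now tries memory local to the queue's CPU first and only falls back to any node if that fails. The idiom, extracted as a sketch (failure convention of returning NULL; the caller maps that to this driver's "1 on failure"):

	static void *alloc_queue_mem(u32 q_no, size_t size)
	{
		int node = cpu_to_node(q_no % num_online_cpus());
		void *p = vmalloc_node(size, node);

		if (!p)
			p = vmalloc(size);   /* fall back to any node */
		if (p)
			memset(p, 0, size);
		return p;
	}
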
776 793
777int octeon_setup_output_queues(struct octeon_device *oct) 794int octeon_setup_output_queues(struct octeon_device *oct)
778{ 795{
779 u32 i, num_oqs = 0; 796 u32 num_oqs = 0;
780 u32 num_descs = 0; 797 u32 num_descs = 0;
781 u32 desc_size = 0; 798 u32 desc_size = 0;
799 u32 oq_no = 0;
800 int numa_node = cpu_to_node(oq_no % num_online_cpus());
782 801
802 num_oqs = 1;
783 /* this causes queue 0 to be default queue */ 803 /* this causes queue 0 to be default queue */
784 if (OCTEON_CN6XXX(oct)) { 804 if (OCTEON_CN6XXX(oct)) {
785 /* CFG_GET_OQ_MAX_BASE_Q(CHIP_FIELD(oct, cn6xxx, conf)); */
786 num_oqs = 1;
787 num_descs = 805 num_descs =
788 CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf)); 806 CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
789 desc_size = 807 desc_size =
@@ -791,19 +809,15 @@ int octeon_setup_output_queues(struct octeon_device *oct)
791 } 809 }
792 810
793 oct->num_oqs = 0; 811 oct->num_oqs = 0;
812 oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node);
813 if (!oct->droq[0])
814 oct->droq[0] = vmalloc(sizeof(*oct->droq[0]));
815 if (!oct->droq[0])
816 return 1;
794 817
795 for (i = 0; i < num_oqs; i++) { 818 if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL))
796 oct->droq[i] = vmalloc(sizeof(*oct->droq[i])); 819 return 1;
797 if (!oct->droq[i]) 820 oct->num_oqs++;
798 return 1;
799
800 memset(oct->droq[i], 0, sizeof(struct octeon_droq));
801
802 if (octeon_init_droq(oct, i, num_descs, desc_size, NULL))
803 return 1;
804
805 oct->num_oqs++;
806 }
807 821
808 return 0; 822 return 0;
809} 823}
@@ -1152,8 +1166,8 @@ core_drv_init_err:
1152int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no) 1166int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
1153 1167
1154{ 1168{
1155 if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES) && 1169 if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) &&
1156 (oct->io_qmask.iq & (1UL << q_no))) 1170 (oct->io_qmask.iq & (1ULL << q_no)))
1157 return oct->instr_queue[q_no]->max_count; 1171 return oct->instr_queue[q_no]->max_count;
1158 1172
1159 return -1; 1173 return -1;
@@ -1161,8 +1175,8 @@ int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
1161 1175
1162int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no) 1176int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
1163{ 1177{
1164 if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES) && 1178 if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) &&
1165 (oct->io_qmask.oq & (1UL << q_no))) 1179 (oct->io_qmask.oq & (1ULL << q_no)))
1166 return oct->droq[q_no]->max_count; 1180 return oct->droq[q_no]->max_count;
1167 return -1; 1181 return -1;
1168} 1182}
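
The switch from 1UL to 1ULL matters because the io_qmask fields are now u64: on a 32-bit kernel, unsigned long is 32 bits wide, so shifting 1UL by a q_no of 32 or more is undefined behavior. Side by side:

	u64 mask = 0;

	mask |= 1UL  << 40;   /* broken on 32-bit: shift exceeds type width */
	mask |= 1ULL << 40;   /* correct: the shift is done in 64 bits */
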
@@ -1253,10 +1267,10 @@ void lio_pci_writeq(struct octeon_device *oct,
1253int octeon_mem_access_ok(struct octeon_device *oct) 1267int octeon_mem_access_ok(struct octeon_device *oct)
1254{ 1268{
1255 u64 access_okay = 0; 1269 u64 access_okay = 0;
1270 u64 lmc0_reset_ctl;
1256 1271
1257 /* Check to make sure a DDR interface is enabled */ 1272 /* Check to make sure a DDR interface is enabled */
1258 u64 lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL); 1273 lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
1259
1260 access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK); 1274 access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
1261 1275
1262 return access_okay ? 0 : 1; 1276 return access_okay ? 0 : 1;
@@ -1270,9 +1284,6 @@ int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
1270 if (!timeout) 1284 if (!timeout)
1271 return ret; 1285 return ret;
1272 1286
1273 while (*timeout == 0)
1274 schedule_timeout_uninterruptible(HZ / 10);
1275
1276 for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout)); 1287 for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout));
1277 ms += HZ / 10) { 1288 ms += HZ / 10) {
1278 ret = octeon_mem_access_ok(oct); 1289 ret = octeon_mem_access_ok(oct);
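
With the unconditional pre-wait removed, the remaining loop already covers *timeout == 0: the condition "(*timeout == 0) || (ms <= *timeout)" never expires in that case, so the function polls until DDR is ready, while a nonzero *timeout bounds the wait. The loop's shape, sketched (ordering of the sleep and the check is assumed from context):

	/* poll octeon_mem_access_ok() every HZ/10 jiffies;
	 * *timeout == 0 means "wait forever", else bound by *timeout
	 */
	for (ms = 0; ret && (!*timeout || ms <= *timeout); ms += HZ / 10) {
		schedule_timeout_uninterruptible(HZ / 10);
		ret = octeon_mem_access_ok(oct);   /* 0 == DDR up */
	}
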
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 36e1f85df8c4..b4e566dea008 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -152,9 +152,9 @@ struct octeon_mmio {
152#define MAX_OCTEON_MAPS 32 152#define MAX_OCTEON_MAPS 32
153 153
154struct octeon_io_enable { 154struct octeon_io_enable {
155 u32 iq; 155 u64 iq;
156 u32 oq; 156 u64 oq;
157 u32 iq64B; 157 u64 iq64B;
158}; 158};
159 159
160struct octeon_reg_list { 160struct octeon_reg_list {
@@ -204,8 +204,7 @@ struct octeon_fn_list {
204 void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int); 204 void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int);
205 void (*bar1_idx_write)(struct octeon_device *, u32, u32); 205 void (*bar1_idx_write)(struct octeon_device *, u32, u32);
206 u32 (*bar1_idx_read)(struct octeon_device *, u32); 206 u32 (*bar1_idx_read)(struct octeon_device *, u32);
207 u32 (*update_iq_read_idx)(struct octeon_device *, 207 u32 (*update_iq_read_idx)(struct octeon_instr_queue *);
208 struct octeon_instr_queue *);
209 208
210 void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32); 209 void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32);
211 void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32); 210 void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32);
@@ -267,6 +266,8 @@ struct octdev_props {
267 /* Each interface in the Octeon device has a network 266 /* Each interface in the Octeon device has a network
268 * device pointer (used for OS specific calls). 267 * device pointer (used for OS specific calls).
269 */ 268 */
269 int napi_enabled;
270 int gmxport;
270 struct net_device *netdev; 271 struct net_device *netdev;
271}; 272};
272 273
@@ -324,7 +325,8 @@ struct octeon_device {
324 struct octeon_sc_buffer_pool sc_buf_pool; 325 struct octeon_sc_buffer_pool sc_buf_pool;
325 326
326 /** The input instruction queues */ 327 /** The input instruction queues */
327 struct octeon_instr_queue *instr_queue[MAX_OCTEON_INSTR_QUEUES]; 328 struct octeon_instr_queue *instr_queue
329 [MAX_POSSIBLE_OCTEON_INSTR_QUEUES];
328 330
329 /** The doubly-linked list of instruction response */ 331 /** The doubly-linked list of instruction response */
330 struct octeon_response_list response_list[MAX_RESPONSE_LISTS]; 332 struct octeon_response_list response_list[MAX_RESPONSE_LISTS];
@@ -332,7 +334,7 @@ struct octeon_device {
332 u32 num_oqs; 334 u32 num_oqs;
333 335
334 /** The DROQ output queues */ 336 /** The DROQ output queues */
335 struct octeon_droq *droq[MAX_OCTEON_OUTPUT_QUEUES]; 337 struct octeon_droq *droq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES];
336 338
337 struct octeon_io_enable io_qmask; 339 struct octeon_io_enable io_qmask;
338 340
@@ -381,15 +383,29 @@ struct octeon_device {
381 383
382 struct cavium_wq dma_comp_wq; 384 struct cavium_wq dma_comp_wq;
383 385
384 struct cavium_wq check_db_wq[MAX_OCTEON_INSTR_QUEUES]; 386 /** Lock for dma response list */
387 spinlock_t cmd_resp_wqlock;
388 u32 cmd_resp_state;
389
390 struct cavium_wq check_db_wq[MAX_POSSIBLE_OCTEON_INSTR_QUEUES];
385 391
386 struct cavium_wk nic_poll_work; 392 struct cavium_wk nic_poll_work;
387 393
388 struct cavium_wk console_poll_work[MAX_OCTEON_MAPS]; 394 struct cavium_wk console_poll_work[MAX_OCTEON_MAPS];
389 395
390 void *priv; 396 void *priv;
397
398 int rx_pause;
399 int tx_pause;
400
401 struct oct_link_stats link_stats; /* statistics from firmware */
402
403 /* private flags to control driver-specific features through ethtool */
404 u32 priv_flags;
391}; 405};
392 406
407#define OCT_DRV_ONLINE 1
408#define OCT_DRV_OFFLINE 2
393#define OCTEON_CN6XXX(oct) ((oct->chip_id == OCTEON_CN66XX) || \ 409#define OCTEON_CN6XXX(oct) ((oct->chip_id == OCTEON_CN66XX) || \
394 (oct->chip_id == OCTEON_CN68XX)) 410 (oct->chip_id == OCTEON_CN68XX))
395#define CHIP_FIELD(oct, TYPE, field) \ 411#define CHIP_FIELD(oct, TYPE, field) \
@@ -646,4 +662,17 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
646 */ 662 */
647struct octeon_config *octeon_get_conf(struct octeon_device *oct); 663struct octeon_config *octeon_get_conf(struct octeon_device *oct);
648 664
665/* LiquidIO driver private flags */
666enum {
667 OCT_PRIV_FLAG_TX_BYTES = 0, /* Tx interrupts by pending byte count */
668};
669
670static inline void lio_set_priv_flag(struct octeon_device *octdev, u32 flag,
671 u32 val)
672{
673 if (val)
674 octdev->priv_flags |= (0x1 << flag);
675 else
676 octdev->priv_flags &= ~(0x1 << flag);
677}
649#endif 678#endif
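
Only the setter is added here; a matching getter (hypothetical, not part of this patch) shows the intended read side of priv_flags:

	static inline u32 lio_get_priv_flag(struct octeon_device *octdev, u32 flag)
	{
		return (octdev->priv_flags >> flag) & 0x1;
	}

	/* e.g.: if (lio_get_priv_flag(oct, OCT_PRIV_FLAG_TX_BYTES)) ... */
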
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 174072b3740b..d9bb2f7e0836 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -151,22 +151,26 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
151 struct octeon_droq *droq) 151 struct octeon_droq *droq)
152{ 152{
153 u32 i; 153 u32 i;
154 struct octeon_skb_page_info *pg_info;
154 155
155 for (i = 0; i < droq->max_count; i++) { 156 for (i = 0; i < droq->max_count; i++) {
156 if (droq->recv_buf_list[i].buffer) { 157 pg_info = &droq->recv_buf_list[i].pg_info;
157 if (droq->desc_ring) { 158
158 lio_unmap_ring_info(oct->pci_dev, 159 if (pg_info->dma)
159 (u64)droq-> 160 lio_unmap_ring(oct->pci_dev,
160 desc_ring[i].info_ptr, 161 (u64)pg_info->dma);
161 OCT_DROQ_INFO_SIZE); 162 pg_info->dma = 0;
162 lio_unmap_ring(oct->pci_dev, 163
163 (u64)droq->desc_ring[i]. 164 if (pg_info->page)
164 buffer_ptr, 165 recv_buffer_destroy(droq->recv_buf_list[i].buffer,
165 droq->buffer_size); 166 pg_info);
166 } 167
167 recv_buffer_free(droq->recv_buf_list[i].buffer); 168 if (droq->desc_ring && droq->desc_ring[i].info_ptr)
168 droq->recv_buf_list[i].buffer = NULL; 169 lio_unmap_ring_info(oct->pci_dev,
169 } 170 (u64)droq->
171 desc_ring[i].info_ptr,
172 OCT_DROQ_INFO_SIZE);
173 droq->recv_buf_list[i].buffer = NULL;
170 } 174 }
171 175
172 octeon_droq_reset_indices(droq); 176 octeon_droq_reset_indices(droq);
@@ -181,11 +185,12 @@ octeon_droq_setup_ring_buffers(struct octeon_device *oct,
181 struct octeon_droq_desc *desc_ring = droq->desc_ring; 185 struct octeon_droq_desc *desc_ring = droq->desc_ring;
182 186
183 for (i = 0; i < droq->max_count; i++) { 187 for (i = 0; i < droq->max_count; i++) {
184 buf = recv_buffer_alloc(oct, droq->q_no, droq->buffer_size); 188 buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info);
185 189
186 if (!buf) { 190 if (!buf) {
187 dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n", 191 dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
188 __func__); 192 __func__);
193 droq->stats.rx_alloc_failure++;
189 return -ENOMEM; 194 return -ENOMEM;
190 } 195 }
191 196
@@ -197,9 +202,7 @@ octeon_droq_setup_ring_buffers(struct octeon_device *oct,
197 /* map ring buffers into memory */ 202 /* map ring buffers into memory */
198 desc_ring[i].info_ptr = lio_map_ring_info(droq, i); 203 desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
199 desc_ring[i].buffer_ptr = 204 desc_ring[i].buffer_ptr =
200 lio_map_ring(oct->pci_dev, 205 lio_map_ring(droq->recv_buf_list[i].buffer);
201 droq->recv_buf_list[i].buffer,
202 droq->buffer_size);
203 } 206 }
204 207
205 octeon_droq_reset_indices(droq); 208 octeon_droq_reset_indices(droq);
@@ -242,6 +245,8 @@ int octeon_init_droq(struct octeon_device *oct,
242 struct octeon_droq *droq; 245 struct octeon_droq *droq;
243 u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0; 246 u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
244 u32 c_pkts_per_intr = 0, c_refill_threshold = 0; 247 u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
248 int orig_node = dev_to_node(&oct->pci_dev->dev);
249 int numa_node = cpu_to_node(q_no % num_online_cpus());
245 250
246 dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); 251 dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
247 252
@@ -261,15 +266,23 @@ int octeon_init_droq(struct octeon_device *oct,
261 struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf); 266 struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
262 267
263 c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x); 268 c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
264 c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x); 269 c_refill_threshold =
270 (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
271 } else {
272 return 1;
265 } 273 }
266 274
267 droq->max_count = c_num_descs; 275 droq->max_count = c_num_descs;
268 droq->buffer_size = c_buf_size; 276 droq->buffer_size = c_buf_size;
269 277
270 desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE; 278 desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
279 set_dev_node(&oct->pci_dev->dev, numa_node);
271 droq->desc_ring = lio_dma_alloc(oct, desc_ring_size, 280 droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
272 (dma_addr_t *)&droq->desc_ring_dma); 281 (dma_addr_t *)&droq->desc_ring_dma);
282 set_dev_node(&oct->pci_dev->dev, orig_node);
283 if (!droq->desc_ring)
284 droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
285 (dma_addr_t *)&droq->desc_ring_dma);
273 286
274 if (!droq->desc_ring) { 287 if (!droq->desc_ring) {
275 dev_err(&oct->pci_dev->dev, 288 dev_err(&oct->pci_dev->dev,
@@ -283,12 +296,11 @@ int octeon_init_droq(struct octeon_device *oct,
283 droq->max_count); 296 droq->max_count);
284 297
285 droq->info_list = 298 droq->info_list =
286 cnnic_alloc_aligned_dma(oct->pci_dev, 299 cnnic_numa_alloc_aligned_dma((droq->max_count *
287 (droq->max_count * OCT_DROQ_INFO_SIZE), 300 OCT_DROQ_INFO_SIZE),
288 &droq->info_alloc_size, 301 &droq->info_alloc_size,
289 &droq->info_base_addr, 302 &droq->info_base_addr,
290 &droq->info_list_dma); 303 numa_node);
291
292 if (!droq->info_list) { 304 if (!droq->info_list) {
293 dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n"); 305 dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
294 lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE), 306 lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
@@ -297,7 +309,12 @@ int octeon_init_droq(struct octeon_device *oct,
297 } 309 }
298 310
299 droq->recv_buf_list = (struct octeon_recv_buffer *) 311 droq->recv_buf_list = (struct octeon_recv_buffer *)
300 vmalloc(droq->max_count * 312 vmalloc_node(droq->max_count *
313 OCT_DROQ_RECVBUF_SIZE,
314 numa_node);
315 if (!droq->recv_buf_list)
316 droq->recv_buf_list = (struct octeon_recv_buffer *)
317 vmalloc(droq->max_count *
301 OCT_DROQ_RECVBUF_SIZE); 318 OCT_DROQ_RECVBUF_SIZE);
302 if (!droq->recv_buf_list) { 319 if (!droq->recv_buf_list) {
303 dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n"); 320 dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
@@ -320,7 +337,7 @@ int octeon_init_droq(struct octeon_device *oct,
320 /* For 56xx Pass1, this function won't be called, so no checks. */ 337 /* For 56xx Pass1, this function won't be called, so no checks. */
321 oct->fn_list.setup_oq_regs(oct, q_no); 338 oct->fn_list.setup_oq_regs(oct, q_no);
322 339
323 oct->io_qmask.oq |= (1 << q_no); 340 oct->io_qmask.oq |= (1ULL << q_no);
324 341
325 return 0; 342 return 0;
326 343
@@ -358,6 +375,7 @@ static inline struct octeon_recv_info *octeon_create_recv_info(
358 struct octeon_recv_pkt *recv_pkt; 375 struct octeon_recv_pkt *recv_pkt;
359 struct octeon_recv_info *recv_info; 376 struct octeon_recv_info *recv_info;
360 u32 i, bytes_left; 377 u32 i, bytes_left;
378 struct octeon_skb_page_info *pg_info;
361 379
362 info = &droq->info_list[idx]; 380 info = &droq->info_list[idx];
363 381
@@ -375,9 +393,14 @@ static inline struct octeon_recv_info *octeon_create_recv_info(
375 bytes_left = (u32)info->length; 393 bytes_left = (u32)info->length;
376 394
377 while (buf_cnt) { 395 while (buf_cnt) {
378 lio_unmap_ring(octeon_dev->pci_dev, 396 {
379 (u64)droq->desc_ring[idx].buffer_ptr, 397 pg_info = &droq->recv_buf_list[idx].pg_info;
380 droq->buffer_size); 398
399 lio_unmap_ring(octeon_dev->pci_dev,
400 (u64)pg_info->dma);
401 pg_info->page = NULL;
402 pg_info->dma = 0;
403 }
381 404
382 recv_pkt->buffer_size[i] = 405 recv_pkt->buffer_size[i] =
383 (bytes_left >= 406 (bytes_left >=
@@ -449,6 +472,7 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
449 void *buf = NULL; 472 void *buf = NULL;
450 u8 *data; 473 u8 *data;
451 u32 desc_refilled = 0; 474 u32 desc_refilled = 0;
475 struct octeon_skb_page_info *pg_info;
452 476
453 desc_ring = droq->desc_ring; 477 desc_ring = droq->desc_ring;
454 478
@@ -458,13 +482,22 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
458 * the buffer, else allocate. 482 * the buffer, else allocate.
459 */ 483 */
460 if (!droq->recv_buf_list[droq->refill_idx].buffer) { 484 if (!droq->recv_buf_list[droq->refill_idx].buffer) {
461 buf = recv_buffer_alloc(octeon_dev, droq->q_no, 485 pg_info =
462 droq->buffer_size); 486 &droq->recv_buf_list[droq->refill_idx].pg_info;
487 /* Either recycle the existing pages or go for
488 * new page alloc
489 */
490 if (pg_info->page)
491 buf = recv_buffer_reuse(octeon_dev, pg_info);
492 else
493 buf = recv_buffer_alloc(octeon_dev, pg_info);
463 /* If a buffer could not be allocated, no point in 494 /* If a buffer could not be allocated, no point in
464 * continuing 495 * continuing
465 */ 496 */
466 if (!buf) 497 if (!buf) {
498 droq->stats.rx_alloc_failure++;
467 break; 499 break;
500 }
468 droq->recv_buf_list[droq->refill_idx].buffer = 501 droq->recv_buf_list[droq->refill_idx].buffer =
469 buf; 502 buf;
470 data = get_rbd(buf); 503 data = get_rbd(buf);
@@ -476,11 +509,8 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
476 droq->recv_buf_list[droq->refill_idx].data = data; 509 droq->recv_buf_list[droq->refill_idx].data = data;
477 510
478 desc_ring[droq->refill_idx].buffer_ptr = 511 desc_ring[droq->refill_idx].buffer_ptr =
479 lio_map_ring(octeon_dev->pci_dev, 512 lio_map_ring(droq->recv_buf_list[droq->
480 droq->recv_buf_list[droq-> 513 refill_idx].buffer);
481 refill_idx].buffer,
482 droq->buffer_size);
483
484 /* Reset any previous values in the length field. */ 514 /* Reset any previous values in the length field. */
485 droq->info_list[droq->refill_idx].length = 0; 515 droq->info_list[droq->refill_idx].length = 0;
486 516
@@ -586,6 +616,8 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
586 for (pkt = 0; pkt < pkt_count; pkt++) { 616 for (pkt = 0; pkt < pkt_count; pkt++) {
587 u32 pkt_len = 0; 617 u32 pkt_len = 0;
588 struct sk_buff *nicbuf = NULL; 618 struct sk_buff *nicbuf = NULL;
619 struct octeon_skb_page_info *pg_info;
620 void *buf;
589 621
590 info = &droq->info_list[droq->read_idx]; 622 info = &droq->info_list[droq->read_idx];
591 octeon_swap_8B_data((u64 *)info, 2); 623 octeon_swap_8B_data((u64 *)info, 2);
@@ -605,7 +637,6 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
605 rh = &info->rh; 637 rh = &info->rh;
606 638
607 total_len += (u32)info->length; 639 total_len += (u32)info->length;
608
609 if (OPCODE_SLOW_PATH(rh)) { 640 if (OPCODE_SLOW_PATH(rh)) {
610 u32 buf_cnt; 641 u32 buf_cnt;
611 642
@@ -614,50 +645,44 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
614 droq->refill_count += buf_cnt; 645 droq->refill_count += buf_cnt;
615 } else { 646 } else {
616 if (info->length <= droq->buffer_size) { 647 if (info->length <= droq->buffer_size) {
617 lio_unmap_ring(oct->pci_dev,
618 (u64)droq->desc_ring[
619 droq->read_idx].buffer_ptr,
620 droq->buffer_size);
621 pkt_len = (u32)info->length; 648 pkt_len = (u32)info->length;
622 nicbuf = droq->recv_buf_list[ 649 nicbuf = droq->recv_buf_list[
623 droq->read_idx].buffer; 650 droq->read_idx].buffer;
651 pg_info = &droq->recv_buf_list[
652 droq->read_idx].pg_info;
653 if (recv_buffer_recycle(oct, pg_info))
654 pg_info->page = NULL;
624 droq->recv_buf_list[droq->read_idx].buffer = 655 droq->recv_buf_list[droq->read_idx].buffer =
625 NULL; 656 NULL;
626 INCR_INDEX_BY1(droq->read_idx, droq->max_count); 657 INCR_INDEX_BY1(droq->read_idx, droq->max_count);
627 skb_put(nicbuf, pkt_len);
628 droq->refill_count++; 658 droq->refill_count++;
629 } else { 659 } else {
630 nicbuf = octeon_fast_packet_alloc(oct, droq, 660 nicbuf = octeon_fast_packet_alloc((u32)
631 droq->q_no,
632 (u32)
633 info->length); 661 info->length);
634 pkt_len = 0; 662 pkt_len = 0;
635 /* nicbuf allocation can fail. We'll handle it 663 /* nicbuf allocation can fail. We'll handle it
636 * inside the loop. 664 * inside the loop.
637 */ 665 */
638 while (pkt_len < info->length) { 666 while (pkt_len < info->length) {
639 int cpy_len; 667 int cpy_len, idx = droq->read_idx;
640 668
641 cpy_len = ((pkt_len + 669 cpy_len = ((pkt_len + droq->buffer_size)
642 droq->buffer_size) > 670 > info->length) ?
643 info->length) ?
644 ((u32)info->length - pkt_len) : 671 ((u32)info->length - pkt_len) :
645 droq->buffer_size; 672 droq->buffer_size;
646 673
647 if (nicbuf) { 674 if (nicbuf) {
648 lio_unmap_ring(oct->pci_dev,
649 (u64)
650 droq->desc_ring
651 [droq->read_idx].
652 buffer_ptr,
653 droq->
654 buffer_size);
655 octeon_fast_packet_next(droq, 675 octeon_fast_packet_next(droq,
656 nicbuf, 676 nicbuf,
657 cpy_len, 677 cpy_len,
658 droq-> 678 idx);
659 read_idx 679 buf = droq->recv_buf_list[idx].
660 ); 680 buffer;
681 recv_buffer_fast_free(buf);
682 droq->recv_buf_list[idx].buffer
683 = NULL;
684 } else {
685 droq->stats.rx_alloc_failure++;
661 } 686 }
662 687
663 pkt_len += cpy_len; 688 pkt_len += cpy_len;
@@ -668,12 +693,14 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
668 } 693 }
669 694
670 if (nicbuf) { 695 if (nicbuf) {
671 if (droq->ops.fptr) 696 if (droq->ops.fptr) {
672 droq->ops.fptr(oct->octeon_id, 697 droq->ops.fptr(oct->octeon_id,
673 nicbuf, pkt_len, 698 nicbuf, pkt_len,
674 rh, &droq->napi); 699 rh, &droq->napi,
675 else 700 droq->ops.farg);
701 } else {
676 recv_buffer_free(nicbuf); 702 recv_buffer_free(nicbuf);
703 }
677 } 704 }
678 } 705 }
679 706
@@ -681,16 +708,16 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
681 int desc_refilled = octeon_droq_refill(oct, droq); 708 int desc_refilled = octeon_droq_refill(oct, droq);
682 709
683 /* Flush the droq descriptor data to memory to be sure 710 /* Flush the droq descriptor data to memory to be sure
684 * that when we update the credits the data in memory 711 * that when we update the credits the data in memory
685 * is accurate. 712 * is accurate.
686 */ 713 */
687 wmb(); 714 wmb();
688 writel((desc_refilled), droq->pkts_credit_reg); 715 writel((desc_refilled), droq->pkts_credit_reg);
689 /* make sure mmio write completes */ 716 /* make sure mmio write completes */
690 mmiowb(); 717 mmiowb();
691 } 718 }
692 719
693 } /* for ( each packet )... */ 720 } /* for (each packet)... */
694 721
695 /* Increment refill_count by the number of buffers processed. */ 722 /* Increment refill_count by the number of buffers processed. */
696 droq->stats.pkts_received += pkt; 723 droq->stats.pkts_received += pkt;
@@ -937,6 +964,7 @@ int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
937 spin_lock_irqsave(&droq->lock, flags); 964 spin_lock_irqsave(&droq->lock, flags);
938 965
939 droq->ops.fptr = NULL; 966 droq->ops.fptr = NULL;
967 droq->ops.farg = NULL;
940 droq->ops.drop_on_max = 0; 968 droq->ops.drop_on_max = 0;
941 969
942 spin_unlock_irqrestore(&droq->lock, flags); 970 spin_unlock_irqrestore(&droq->lock, flags);
@@ -949,6 +977,7 @@ int octeon_create_droq(struct octeon_device *oct,
949 u32 desc_size, void *app_ctx) 977 u32 desc_size, void *app_ctx)
950{ 978{
951 struct octeon_droq *droq; 979 struct octeon_droq *droq;
980 int numa_node = cpu_to_node(q_no % num_online_cpus());
952 981
953 if (oct->droq[q_no]) { 982 if (oct->droq[q_no]) {
954 dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n", 983 dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
@@ -957,7 +986,9 @@ int octeon_create_droq(struct octeon_device *oct,
957 } 986 }
958 987
959 /* Allocate the DS for the new droq. */ 988 /* Allocate the DS for the new droq. */
960 droq = vmalloc(sizeof(*droq)); 989 droq = vmalloc_node(sizeof(*droq), numa_node);
990 if (!droq)
991 droq = vmalloc(sizeof(*droq));
961 if (!droq) 992 if (!droq)
962 goto create_droq_fail; 993 goto create_droq_fail;
963 memset(droq, 0, sizeof(struct octeon_droq)); 994 memset(droq, 0, sizeof(struct octeon_droq));
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index 7940ccee12d9..1ca9c4f05702 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -65,6 +65,17 @@ struct octeon_droq_info {
65 65
66#define OCT_DROQ_INFO_SIZE (sizeof(struct octeon_droq_info)) 66#define OCT_DROQ_INFO_SIZE (sizeof(struct octeon_droq_info))
67 67
68struct octeon_skb_page_info {
69 /* DMA address for the page */
70 dma_addr_t dma;
71
 72 /** Page for the rx DMA */
73 struct page *page;
74
75 /** which offset into page */
76 unsigned int page_offset;
77};
78
68/** Pointer to data buffer. 79/** Pointer to data buffer.
69 * Driver keeps a pointer to the data buffer that it made available to 80 * Driver keeps a pointer to the data buffer that it made available to
70 * the Octeon device. Since the descriptor ring keeps physical (bus) 81 * the Octeon device. Since the descriptor ring keeps physical (bus)
@@ -77,6 +88,9 @@ struct octeon_recv_buffer {
77 88
78 /** Data in the packet buffer. */ 89 /** Data in the packet buffer. */
79 u8 *data; 90 u8 *data;
91
92 /** pg_info **/
93 struct octeon_skb_page_info pg_info;
80}; 94};
81 95
82#define OCT_DROQ_RECVBUF_SIZE (sizeof(struct octeon_recv_buffer)) 96#define OCT_DROQ_RECVBUF_SIZE (sizeof(struct octeon_recv_buffer))
@@ -106,6 +120,10 @@ struct oct_droq_stats {
106 120
107 /** Num of Packets dropped due to receive path failures. */ 121 /** Num of Packets dropped due to receive path failures. */
108 u64 rx_dropped; 122 u64 rx_dropped;
123
124 /** Num of failures of recv_buffer_alloc() */
125 u64 rx_alloc_failure;
126
109}; 127};
110 128
111#define POLL_EVENT_INTR_ARRIVED 1 129#define POLL_EVENT_INTR_ARRIVED 1
@@ -213,7 +231,8 @@ struct octeon_droq_ops {
213 * data in the buffer. The receive header gives the port 231 * data in the buffer. The receive header gives the port
214 * number to the caller. Function pointer is set by caller. 232 * number to the caller. Function pointer is set by caller.
215 */ 233 */
216 void (*fptr)(u32, void *, u32, union octeon_rh *, void *); 234 void (*fptr)(u32, void *, u32, union octeon_rh *, void *, void *);
235 void *farg;
217 236
218 /* This function will be called by the driver for all NAPI related 237 /* This function will be called by the driver for all NAPI related
219 * events. The first param is the octeon id. The second param is the 238 * events. The first param is the octeon id. The second param is the
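
With farg stored next to fptr, a caller registers its callback and private pointer together and gets the pointer back as the callback's last argument. A sketch (callback name and argument choice are illustrative):

	static int setup_rx_cb(struct octeon_device *oct, u32 q_no,
			       struct net_device *netdev)
	{
		struct octeon_droq_ops ops;

		memset(&ops, 0, sizeof(ops));
		ops.fptr = my_rx_callback; /* (octeon_id, buf, len, rh, napi, farg) */
		ops.farg = netdev;         /* handed back as the last parameter */
		return octeon_register_droq_ops(oct, q_no, &ops);
	}
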
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 592fe49b589d..caa2b4f30717 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -65,6 +65,10 @@ struct oct_iq_stats {
 65 u64 tx_iq_busy;/**< Num of times this IQ was found to be full. */ 65 u64 tx_iq_busy;/**< Num of times this IQ was found to be full. */
 66 u64 tx_dropped;/**< Num of pkts dropped due to xmit-path errors. */ 66 u64 tx_dropped;/**< Num of pkts dropped due to xmit-path errors. */
 67 u64 tx_tot_bytes;/**< Total count of bytes sent to the network. */ 67 u64 tx_tot_bytes;/**< Total count of bytes sent to the network. */
68 u64 tx_gso; /* count of tso */
69 u64 tx_dmamap_fail;
70 u64 tx_restart;
71 /*u64 tx_timeout_count;*/
68}; 72};
69 73
70#define OCT_IQ_STATS_SIZE (sizeof(struct oct_iq_stats)) 74#define OCT_IQ_STATS_SIZE (sizeof(struct oct_iq_stats))
@@ -75,14 +79,22 @@ struct oct_iq_stats {
75 * a Octeon device has one such structure to represent it. 79 * a Octeon device has one such structure to represent it.
76*/ 80*/
77struct octeon_instr_queue { 81struct octeon_instr_queue {
82 struct octeon_device *oct_dev;
83
78 /** A spinlock to protect access to the input ring. */ 84 /** A spinlock to protect access to the input ring. */
79 spinlock_t lock; 85 spinlock_t lock;
80 86
87 /** A spinlock to protect while posting on the ring. */
88 spinlock_t post_lock;
89
90 /** A spinlock to protect access to the input ring.*/
91 spinlock_t iq_flush_running_lock;
92
81 /** Flag that indicates if the queue uses 64 byte commands. */ 93 /** Flag that indicates if the queue uses 64 byte commands. */
82 u32 iqcmd_64B:1; 94 u32 iqcmd_64B:1;
83 95
84 /** Queue Number. */ 96 /** Queue info. */
85 u32 iq_no:5; 97 union oct_txpciq txpciq;
86 98
87 u32 rsvd:17; 99 u32 rsvd:17;
88 100
@@ -147,6 +159,13 @@ struct octeon_instr_queue {
147 159
148 /** Application context */ 160 /** Application context */
149 void *app_ctx; 161 void *app_ctx;
162
163 /* network stack queue index */
164 int q_index;
165
166 /*os ifidx associated with this queue */
167 int ifidx;
168
150}; 169};
151 170
152/*---------------------- INSTRUCTION FORMAT ----------------------------*/ 171/*---------------------- INSTRUCTION FORMAT ----------------------------*/
@@ -176,12 +195,12 @@ struct octeon_instr_32B {
176/** 64-byte instruction format. 195/** 64-byte instruction format.
177 * Format of instruction for a 64-byte mode input queue. 196 * Format of instruction for a 64-byte mode input queue.
178 */ 197 */
179struct octeon_instr_64B { 198struct octeon_instr2_64B {
180 /** Pointer where the input data is available. */ 199 /** Pointer where the input data is available. */
181 u64 dptr; 200 u64 dptr;
182 201
183 /** Instruction Header. */ 202 /** Instruction Header. */
184 u64 ih; 203 u64 ih2;
185 204
186 /** Input Request Header. */ 205 /** Input Request Header. */
187 u64 irh; 206 u64 irh;
@@ -198,14 +217,44 @@ struct octeon_instr_64B {
198 u64 rptr; 217 u64 rptr;
199 218
200 u64 reserved; 219 u64 reserved;
220};
221
222struct octeon_instr3_64B {
223 /** Pointer where the input data is available. */
224 u64 dptr;
225
226 /** Instruction Header. */
227 u64 ih3;
228
229 /** Instruction Header. */
230 u64 pki_ih3;
231
232 /** Input Request Header. */
233 u64 irh;
201 234
235 /** opcode/subcode specific parameters */
236 u64 ossp[2];
237
238 /** Return Data Parameters */
239 u64 rdp;
240
241 /** Pointer where the response for a RAW mode packet will be written
242 * by Octeon.
243 */
244 u64 rptr;
245
246};
247
248union octeon_instr_64B {
249 struct octeon_instr2_64B cmd2;
250 struct octeon_instr3_64B cmd3;
202}; 251};
203 252
204#define OCT_64B_INSTR_SIZE (sizeof(struct octeon_instr_64B)) 253#define OCT_64B_INSTR_SIZE (sizeof(union octeon_instr_64B))
205 254
206/** The size of each buffer in soft command buffer pool 255/** The size of each buffer in soft command buffer pool
207 */ 256 */
208#define SOFT_COMMAND_BUFFER_SIZE 1024 257#define SOFT_COMMAND_BUFFER_SIZE 1536
209 258
210struct octeon_soft_command { 259struct octeon_soft_command {
211 /** Soft command buffer info. */ 260 /** Soft command buffer info. */
@@ -214,7 +263,8 @@ struct octeon_soft_command {
214 u32 size; 263 u32 size;
215 264
216 /** Command and return status */ 265 /** Command and return status */
217 struct octeon_instr_64B cmd; 266 union octeon_instr_64B cmd;
267
218#define COMPLETION_WORD_INIT 0xffffffffffffffffULL 268#define COMPLETION_WORD_INIT 0xffffffffffffffffULL
219 u64 *status_word; 269 u64 *status_word;
220 270
@@ -242,7 +292,7 @@ struct octeon_soft_command {
242 292
243/** Maximum number of buffers to allocate into soft command buffer pool 293/** Maximum number of buffers to allocate into soft command buffer pool
244 */ 294 */
245#define MAX_SOFT_COMMAND_BUFFERS 16 295#define MAX_SOFT_COMMAND_BUFFERS 256
246 296
247/** Head of a soft command buffer pool. 297/** Head of a soft command buffer pool.
248 */ 298 */
@@ -268,14 +318,15 @@ void octeon_free_soft_command(struct octeon_device *oct,
268/** 318/**
269 * octeon_init_instr_queue() 319 * octeon_init_instr_queue()
270 * @param octeon_dev - pointer to the octeon device structure. 320 * @param octeon_dev - pointer to the octeon device structure.
271 * @param iq_no - queue to be initialized (0 <= q_no <= 3). 321 * @param txpciq - queue to be initialized (0 <= q_no <= 3).
272 * 322 *
273 * Called at driver init time for each input queue. iq_conf has the 323 * Called at driver init time for each input queue. iq_conf has the
274 * configuration parameters for the queue. 324 * configuration parameters for the queue.
275 * 325 *
276 * @return Success: 0 Failure: 1 326 * @return Success: 0 Failure: 1
277 */ 327 */
278int octeon_init_instr_queue(struct octeon_device *octeon_dev, u32 iq_no, 328int octeon_init_instr_queue(struct octeon_device *octeon_dev,
329 union oct_txpciq txpciq,
279 u32 num_descs); 330 u32 num_descs);
280 331
281/** 332/**
@@ -298,7 +349,7 @@ octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
298 349
299int 350int
300lio_process_iq_request_list(struct octeon_device *oct, 351lio_process_iq_request_list(struct octeon_device *oct,
301 struct octeon_instr_queue *iq); 352 struct octeon_instr_queue *iq, u32 napi_budget);
302 353
303int octeon_send_command(struct octeon_device *oct, u32 iq_no, 354int octeon_send_command(struct octeon_device *oct, u32 iq_no,
304 u32 force_db, void *cmd, void *buf, 355 u32 force_db, void *cmd, void *buf,
@@ -313,7 +364,10 @@ void octeon_prepare_soft_command(struct octeon_device *oct,
313int octeon_send_soft_command(struct octeon_device *oct, 364int octeon_send_soft_command(struct octeon_device *oct,
314 struct octeon_soft_command *sc); 365 struct octeon_soft_command *sc);
315 366
316int octeon_setup_iq(struct octeon_device *oct, u32 iq_no, 367int octeon_setup_iq(struct octeon_device *oct, int ifidx,
317 u32 num_descs, void *app_ctx); 368 int q_index, union oct_txpciq iq_no, u32 num_descs,
318 369 void *app_ctx);
370int
371octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
372 u32 pending_thresh, u32 napi_budget);
319#endif /* __OCTEON_IQ_H__ */ 373#endif /* __OCTEON_IQ_H__ */
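
octeon_flush_iq() now takes a napi_budget so tx-completion work can be bounded per poll. A sketch of the call from a poll handler (pending_thresh of 1 assumed, meaning flush whenever anything is queued; the return value is taken to be the number of completed requests processed):

	static int poll_tx_completions(struct octeon_device *oct,
				       struct octeon_instr_queue *iq, int budget)
	{
		return octeon_flush_iq(oct, iq, 1 /* pending_thresh */, budget);
	}
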
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index cbd081981180..0ff3efc67b84 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -126,22 +126,27 @@ static inline int octeon_map_pci_barx(struct octeon_device *oct,
126} 126}
127 127
128static inline void * 128static inline void *
129cnnic_alloc_aligned_dma(struct pci_dev *pci_dev, 129cnnic_numa_alloc_aligned_dma(u32 size,
130 u32 size, 130 u32 *alloc_size,
131 u32 *alloc_size, 131 size_t *orig_ptr,
132 size_t *orig_ptr, 132 int numa_node)
133 size_t *dma_addr __attribute__((unused)))
134{ 133{
135 int retries = 0; 134 int retries = 0;
136 void *ptr = NULL; 135 void *ptr = NULL;
137 136
138#define OCTEON_MAX_ALLOC_RETRIES 1 137#define OCTEON_MAX_ALLOC_RETRIES 1
139 do { 138 do {
140 ptr = 139 struct page *page = NULL;
141 (void *)__get_free_pages(GFP_KERNEL, 140
142 get_order(size)); 141 page = alloc_pages_node(numa_node,
142 GFP_KERNEL,
143 get_order(size));
144 if (!page)
145 page = alloc_pages(GFP_KERNEL,
146 get_order(size));
147 ptr = (void *)page_address(page);
143 if ((unsigned long)ptr & 0x07) { 148 if ((unsigned long)ptr & 0x07) {
144 free_pages((unsigned long)ptr, get_order(size)); 149 __free_pages(page, get_order(size));
145 ptr = NULL; 150 ptr = NULL;
146 /* Increment the size required if the first 151 /* Increment the size required if the first
147 * attempt failed. 152 * attempt failed.
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index b3abe5818fd3..b481edc56c6e 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -30,6 +30,17 @@
30#include <linux/dma-mapping.h> 30#include <linux/dma-mapping.h>
31#include <linux/ptp_clock_kernel.h> 31#include <linux/ptp_clock_kernel.h>
32 32
33struct oct_nic_stats_resp {
34 u64 rh;
35 struct oct_link_stats stats;
36 u64 status;
37};
38
39struct oct_nic_stats_ctrl {
40 struct completion complete;
41 struct net_device *netdev;
42};
43
33/** LiquidIO per-interface network private data */ 44/** LiquidIO per-interface network private data */
34struct lio { 45struct lio {
35 /** State of the interface. Rx/Tx happens only in the RUNNING state. */ 46 /** State of the interface. Rx/Tx happens only in the RUNNING state. */
@@ -48,11 +59,11 @@ struct lio {
48 */ 59 */
49 int rxq; 60 int rxq;
50 61
51 /** Guards the glist */ 62 /** Guards each glist */
52 spinlock_t lock; 63 spinlock_t *glist_lock;
53 64
54 /** Linked list of gather components */ 65 /** Array of gather component linked lists */
55 struct list_head glist; 66 struct list_head *glist;
56 67
57 /** Pointer to the NIC properties for the Octeon device this network 68 /** Pointer to the NIC properties for the Octeon device this network
58 * interface is associated with. 69 * interface is associated with.
@@ -67,6 +78,9 @@ struct lio {
67 /** Link information sent by the core application for this interface. */ 78 /** Link information sent by the core application for this interface. */
68 struct oct_link_info linfo; 79 struct oct_link_info linfo;
69 80
81 /** counter of link changes */
82 u64 link_changes;
83
70 /** Size of Tx queue for this octeon device. */ 84 /** Size of Tx queue for this octeon device. */
71 u32 tx_qsize; 85 u32 tx_qsize;
72 86
@@ -111,8 +125,9 @@ struct lio {
111 * \brief Enable or disable feature 125 * \brief Enable or disable feature
112 * @param netdev pointer to network device 126 * @param netdev pointer to network device
113 * @param cmd Command that just requires acknowledgment 127 * @param cmd Command that just requires acknowledgment
128 * @param param1 Parameter to command
114 */ 129 */
115int liquidio_set_feature(struct net_device *netdev, int cmd); 130int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);
116 131
117/** 132/**
118 * \brief Link control command completion callback 133 * \brief Link control command completion callback
@@ -131,14 +146,30 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);
131 */ 146 */
132void liquidio_set_ethtool_ops(struct net_device *netdev); 147void liquidio_set_ethtool_ops(struct net_device *netdev);
133 148
134static inline void
135*recv_buffer_alloc(struct octeon_device *oct __attribute__((unused)),
136 u32 q_no __attribute__((unused)), u32 size)
137{
138#define SKB_ADJ_MASK 0x3F 149#define SKB_ADJ_MASK 0x3F
139#define SKB_ADJ (SKB_ADJ_MASK + 1) 150#define SKB_ADJ (SKB_ADJ_MASK + 1)
140 151
141 struct sk_buff *skb = dev_alloc_skb(size + SKB_ADJ); 152#define MIN_SKB_SIZE 256 /* 8 bytes and more - 8 bytes for PTP */
153#define LIO_RXBUFFER_SZ 2048
154
155static inline void
156*recv_buffer_alloc(struct octeon_device *oct,
157 struct octeon_skb_page_info *pg_info)
158{
159 struct page *page;
160 struct sk_buff *skb;
161 struct octeon_skb_page_info *skb_pg_info;
162
163 page = alloc_page(GFP_ATOMIC | __GFP_COLD);
164 if (unlikely(!page))
165 return NULL;
166
167 skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
168 if (unlikely(!skb)) {
169 __free_page(page);
170 pg_info->page = NULL;
171 return NULL;
172 }
142 173
143 if ((unsigned long)skb->data & SKB_ADJ_MASK) { 174 if ((unsigned long)skb->data & SKB_ADJ_MASK) {
144 u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK); 175 u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
@@ -146,11 +177,151 @@ static inline void
146 skb_reserve(skb, r); 177 skb_reserve(skb, r);
147 } 178 }
148 179
180 skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
181 /* Get DMA info */
182 pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
183 PAGE_SIZE, DMA_FROM_DEVICE);
184
185 /* Mapping failed!! */
186 if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
187 __free_page(page);
188 dev_kfree_skb_any((struct sk_buff *)skb);
189 pg_info->page = NULL;
190 return NULL;
191 }
192
193 pg_info->page = page;
194 pg_info->page_offset = 0;
195 skb_pg_info->page = page;
196 skb_pg_info->page_offset = 0;
197 skb_pg_info->dma = pg_info->dma;
198
149 return (void *)skb; 199 return (void *)skb;
150} 200}
151 201
202static inline void
203*recv_buffer_fast_alloc(u32 size)
204{
205 struct sk_buff *skb;
206 struct octeon_skb_page_info *skb_pg_info;
207
208 skb = dev_alloc_skb(size + SKB_ADJ);
209 if (unlikely(!skb))
210 return NULL;
211
212 if ((unsigned long)skb->data & SKB_ADJ_MASK) {
213 u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
214
215 skb_reserve(skb, r);
216 }
217
218 skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
219 skb_pg_info->page = NULL;
220 skb_pg_info->page_offset = 0;
221 skb_pg_info->dma = 0;
222
223 return skb;
224}
225
226static inline int
227recv_buffer_recycle(struct octeon_device *oct, void *buf)
228{
229 struct octeon_skb_page_info *pg_info = buf;
230
231 if (!pg_info->page) {
232 dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
233 __func__);
234 return -ENOMEM;
235 }
236
237 if (unlikely(page_count(pg_info->page) != 1) ||
238 unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
239 dma_unmap_page(&oct->pci_dev->dev,
240 pg_info->dma, (PAGE_SIZE << 0),
241 DMA_FROM_DEVICE);
242 pg_info->dma = 0;
243 pg_info->page = NULL;
244 pg_info->page_offset = 0;
245 return -ENOMEM;
246 }
247
248 /* Flip to other half of the buffer */
249 if (pg_info->page_offset == 0)
250 pg_info->page_offset = LIO_RXBUFFER_SZ;
251 else
252 pg_info->page_offset = 0;
253 page_ref_inc(pg_info->page);
254
255 return 0;
256}
257
258static inline void
259*recv_buffer_reuse(struct octeon_device *oct, void *buf)
260{
261 struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
262 struct sk_buff *skb;
263
264 skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
265 if (unlikely(!skb)) {
266 dma_unmap_page(&oct->pci_dev->dev,
267 pg_info->dma, (PAGE_SIZE << 0),
268 DMA_FROM_DEVICE);
269 return NULL;
270 }
271
272 if ((unsigned long)skb->data & SKB_ADJ_MASK) {
273 u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
274
275 skb_reserve(skb, r);
276 }
277
278 skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
279 skb_pg_info->page = pg_info->page;
280 skb_pg_info->page_offset = pg_info->page_offset;
281 skb_pg_info->dma = pg_info->dma;
282
283 return skb;
284}
285
286static inline void
287recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
288{
289 struct sk_buff *skb = (struct sk_buff *)buffer;
290
291 put_page(pg_info->page);
292 pg_info->dma = 0;
293 pg_info->page = NULL;
294 pg_info->page_offset = 0;
295
296 if (skb)
297 dev_kfree_skb_any(skb);
298}
299
152static inline void recv_buffer_free(void *buffer) 300static inline void recv_buffer_free(void *buffer)
153{ 301{
302 struct sk_buff *skb = (struct sk_buff *)buffer;
303 struct octeon_skb_page_info *pg_info;
304
305 pg_info = ((struct octeon_skb_page_info *)(skb->cb));
306
307 if (pg_info->page) {
308 put_page(pg_info->page);
309 pg_info->dma = 0;
310 pg_info->page = NULL;
311 pg_info->page_offset = 0;
312 }
313
314 dev_kfree_skb_any((struct sk_buff *)buffer);
315}
316
317static inline void
318recv_buffer_fast_free(void *buffer)
319{
320 dev_kfree_skb_any((struct sk_buff *)buffer);
321}
322
323static inline void tx_buffer_free(void *buffer)
324{
154 dev_kfree_skb_any((struct sk_buff *)buffer); 325 dev_kfree_skb_any((struct sk_buff *)buffer);
155} 326}
156 327
@@ -159,7 +330,17 @@ static inline void recv_buffer_free(void *buffer)
159#define lio_dma_free(oct, size, virt_addr, dma_addr) \ 330#define lio_dma_free(oct, size, virt_addr, dma_addr) \
160 dma_free_coherent(&oct->pci_dev->dev, size, virt_addr, dma_addr) 331 dma_free_coherent(&oct->pci_dev->dev, size, virt_addr, dma_addr)
161 332
162#define get_rbd(ptr) (((struct sk_buff *)(ptr))->data) 333static inline
334void *get_rbd(struct sk_buff *skb)
335{
336 struct octeon_skb_page_info *pg_info;
337 unsigned char *va;
338
339 pg_info = ((struct octeon_skb_page_info *)(skb->cb));
340 va = page_address(pg_info->page) + pg_info->page_offset;
341
342 return va;
343}
163 344
164static inline u64 345static inline u64
165lio_map_ring_info(struct octeon_droq *droq, u32 i) 346lio_map_ring_info(struct octeon_droq *droq, u32 i)
@@ -183,33 +364,44 @@ lio_unmap_ring_info(struct pci_dev *pci_dev,
183} 364}
184 365
185static inline u64 366static inline u64
186lio_map_ring(struct pci_dev *pci_dev, 367lio_map_ring(void *buf)
187 void *buf, u32 size)
188{ 368{
189 dma_addr_t dma_addr; 369 dma_addr_t dma_addr;
190 370
191 dma_addr = dma_map_single(&pci_dev->dev, get_rbd(buf), size, 371 struct sk_buff *skb = (struct sk_buff *)buf;
192 DMA_FROM_DEVICE); 372 struct octeon_skb_page_info *pg_info;
193 373
194 BUG_ON(dma_mapping_error(&pci_dev->dev, dma_addr)); 374 pg_info = ((struct octeon_skb_page_info *)(skb->cb));
375 if (!pg_info->page) {
376 pr_err("%s: pg_info->page NULL\n", __func__);
377 WARN_ON(1);
378 }
379
380 /* Get DMA info */
381 dma_addr = pg_info->dma;
382 if (!pg_info->dma) {
383 pr_err("%s: ERROR it should be already available\n",
384 __func__);
385 WARN_ON(1);
386 }
387 dma_addr += pg_info->page_offset;
195 388
196 return (u64)dma_addr; 389 return (u64)dma_addr;
197} 390}
198 391
199static inline void 392static inline void
200lio_unmap_ring(struct pci_dev *pci_dev, 393lio_unmap_ring(struct pci_dev *pci_dev,
201 u64 buf_ptr, u32 size) 394 u64 buf_ptr)
395
202{ 396{
203 dma_unmap_single(&pci_dev->dev, 397 dma_unmap_page(&pci_dev->dev,
204 buf_ptr, size, 398 buf_ptr, (PAGE_SIZE << 0),
205 DMA_FROM_DEVICE); 399 DMA_FROM_DEVICE);
206} 400}
207 401
208static inline void *octeon_fast_packet_alloc(struct octeon_device *oct, 402static inline void *octeon_fast_packet_alloc(u32 size)
209 struct octeon_droq *droq,
210 u32 q_no, u32 size)
211{ 403{
212 return recv_buffer_alloc(oct, q_no, size); 404 return recv_buffer_fast_alloc(size);
213} 405}
214 406
215static inline void octeon_fast_packet_next(struct octeon_droq *droq, 407static inline void octeon_fast_packet_next(struct octeon_droq *droq,
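
The net effect of this header rework: every receive skb carries its backing page, offset, and DMA handle in skb->cb, so lio_map_ring() is reduced to a lookup of state recorded once at allocation time. A sketch of the idea as a hypothetical accessor (field layout taken from the code above; not part of the driver):

#include <linux/skbuff.h>

/* DMA address of a posted rx buffer: the page mapping recorded by
 * dma_map_page() at allocation time, plus the buffer's offset within
 * the page. No dma_map_single() per posted buffer anymore.
 */
static inline u64 lio_rx_buffer_dma(const struct sk_buff *skb)
{
	const struct octeon_skb_page_info *pg_info =
		(const struct octeon_skb_page_info *)skb->cb;

	return (u64)pg_info->dma + pg_info->page_offset;
}
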
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
index 1a0191549cb3..36f1970a860e 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -44,11 +44,11 @@
44 44
45void * 45void *
46octeon_alloc_soft_command_resp(struct octeon_device *oct, 46octeon_alloc_soft_command_resp(struct octeon_device *oct,
47 struct octeon_instr_64B *cmd, 47 union octeon_instr_64B *cmd,
48 size_t rdatasize) 48 u32 rdatasize)
49{ 49{
50 struct octeon_soft_command *sc; 50 struct octeon_soft_command *sc;
51 struct octeon_instr_ih *ih; 51 struct octeon_instr_ih2 *ih2;
52 struct octeon_instr_irh *irh; 52 struct octeon_instr_irh *irh;
53 struct octeon_instr_rdp *rdp; 53 struct octeon_instr_rdp *rdp;
54 54
@@ -59,24 +59,25 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
59 return NULL; 59 return NULL;
60 60
61 /* Copy existing command structure into the soft command */ 61 /* Copy existing command structure into the soft command */
62 memcpy(&sc->cmd, cmd, sizeof(struct octeon_instr_64B)); 62 memcpy(&sc->cmd, cmd, sizeof(union octeon_instr_64B));
63 63
64 /* Add in the response related fields. Opcode and Param are already 64 /* Add in the response related fields. Opcode and Param are already
65 * there. 65 * there.
66 */ 66 */
67 ih = (struct octeon_instr_ih *)&sc->cmd.ih; 67 ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
68 ih->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */ 68 rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
69 irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
70 ih2->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
69 71
70 irh = (struct octeon_instr_irh *)&sc->cmd.irh;
71 irh->rflag = 1; /* a response is required */ 72 irh->rflag = 1; /* a response is required */
72 irh->len = 4; /* means four 64-bit words immediately follow irh */
73 73
74 rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
75 rdp->pcie_port = oct->pcie_port; 74 rdp->pcie_port = oct->pcie_port;
76 rdp->rlen = rdatasize; 75 rdp->rlen = rdatasize;
77 76
78 *sc->status_word = COMPLETION_WORD_INIT; 77 *sc->status_word = COMPLETION_WORD_INIT;
79 78
79 sc->cmd.cmd2.rptr = sc->dmarptr;
80
80 sc->wait_time = 1000; 81 sc->wait_time = 1000;
81 sc->timeout = jiffies + sc->wait_time; 82 sc->timeout = jiffies + sc->wait_time;
82 83
@@ -119,12 +120,11 @@ static void octnet_link_ctrl_callback(struct octeon_device *oct,
119 120
120static inline struct octeon_soft_command 121static inline struct octeon_soft_command
121*octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct, 122*octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct,
122 struct octnic_ctrl_pkt *nctrl, 123 struct octnic_ctrl_pkt *nctrl)
123 struct octnic_ctrl_params nparams)
124{ 124{
125 struct octeon_soft_command *sc = NULL; 125 struct octeon_soft_command *sc = NULL;
126 u8 *data; 126 u8 *data;
127 size_t rdatasize; 127 u32 rdatasize;
128 u32 uddsize = 0, datasize = 0; 128 u32 uddsize = 0, datasize = 0;
129 129
130 uddsize = (u32)(nctrl->ncmd.s.more * 8); 130 uddsize = (u32)(nctrl->ncmd.s.more * 8);
@@ -143,7 +143,7 @@ static inline struct octeon_soft_command
143 143
144 data = (u8 *)sc->virtdptr; 144 data = (u8 *)sc->virtdptr;
145 145
146 memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE); 146 memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE);
147 147
148 octeon_swap_8B_data((u64 *)data, (OCTNET_CMD_SIZE >> 3)); 148 octeon_swap_8B_data((u64 *)data, (OCTNET_CMD_SIZE >> 3));
149 149
@@ -152,6 +152,8 @@ static inline struct octeon_soft_command
152 memcpy(data + OCTNET_CMD_SIZE, nctrl->udd, uddsize); 152 memcpy(data + OCTNET_CMD_SIZE, nctrl->udd, uddsize);
153 } 153 }
154 154
155 sc->iq_no = (u32)nctrl->iq_no;
156
155 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD, 157 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD,
156 0, 0, 0); 158 0, 0, 0);
157 159
@@ -164,26 +166,41 @@ static inline struct octeon_soft_command
164 166
165int 167int
166octnet_send_nic_ctrl_pkt(struct octeon_device *oct, 168octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
167 struct octnic_ctrl_pkt *nctrl, 169 struct octnic_ctrl_pkt *nctrl)
168 struct octnic_ctrl_params nparams)
169{ 170{
170 int retval; 171 int retval;
171 struct octeon_soft_command *sc = NULL; 172 struct octeon_soft_command *sc = NULL;
172 173
173 sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl, nparams); 174 spin_lock_bh(&oct->cmd_resp_wqlock);
 175 /* Allow only the rx ctrl command, so traffic on the chip can
 176 * still be stopped during offline operations
177 */
178 if ((oct->cmd_resp_state == OCT_DRV_OFFLINE) &&
179 (nctrl->ncmd.s.cmd != OCTNET_CMD_RX_CTL)) {
180 spin_unlock_bh(&oct->cmd_resp_wqlock);
181 dev_err(&oct->pci_dev->dev,
182 "%s cmd:%d not processed since driver offline\n",
183 __func__, nctrl->ncmd.s.cmd);
184 return -1;
185 }
186
187 sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl);
174 if (!sc) { 188 if (!sc) {
175 dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n", 189 dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n",
176 __func__); 190 __func__);
191 spin_unlock_bh(&oct->cmd_resp_wqlock);
177 return -1; 192 return -1;
178 } 193 }
179 194
180 retval = octeon_send_soft_command(oct, sc); 195 retval = octeon_send_soft_command(oct, sc);
181 if (retval) { 196 if (retval == IQ_SEND_FAILED) {
182 octeon_free_soft_command(oct, sc); 197 octeon_free_soft_command(oct, sc);
183 dev_err(&oct->pci_dev->dev, "%s soft command send failed status: %x\n", 198 dev_err(&oct->pci_dev->dev, "%s soft command:%d send failed status: %x\n",
184 __func__, retval); 199 __func__, nctrl->ncmd.s.cmd, retval);
200 spin_unlock_bh(&oct->cmd_resp_wqlock);
185 return -1; 201 return -1;
186 } 202 }
187 203
204 spin_unlock_bh(&oct->cmd_resp_wqlock);
188 return retval; 205 return retval;
189} 206}
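
The control-path change above gates every command on driver state: once cmd_resp_state is OCT_DRV_OFFLINE, only OCTNET_CMD_RX_CTL may pass, so traffic can still be stopped during teardown. The predicate, pulled out as a standalone sketch (in the driver the lock is held across the whole allocate-and-send sequence, not just the check):

static bool octnic_ctrl_cmd_allowed(struct octeon_device *oct, u64 cmd)
{
	bool allowed;

	spin_lock_bh(&oct->cmd_resp_wqlock);
	allowed = (oct->cmd_resp_state != OCT_DRV_OFFLINE) ||
		  (cmd == OCTNET_CMD_RX_CTL);
	spin_unlock_bh(&oct->cmd_resp_wqlock);

	return allowed;
}
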
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
index 0238857c8105..b71a2bbe4bee 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -52,6 +52,9 @@ struct octnic_ctrl_pkt {
52 /** Additional data that may be needed by some commands. */ 52 /** Additional data that may be needed by some commands. */
53 u64 udd[MAX_NCTRL_UDD]; 53 u64 udd[MAX_NCTRL_UDD];
54 54
55 /** Input queue to use to send this command. */
56 u64 iq_no;
57
55 /** Time to wait for Octeon software to respond to this control command. 58 /** Time to wait for Octeon software to respond to this control command.
56 * If wait_time is 0, OSI assumes no response is expected. 59 * If wait_time is 0, OSI assumes no response is expected.
57 */ 60 */
@@ -82,7 +85,7 @@ struct octnic_data_pkt {
82 u32 datasize; 85 u32 datasize;
83 86
84 /** Command to be passed to the Octeon device software. */ 87 /** Command to be passed to the Octeon device software. */
85 struct octeon_instr_64B cmd; 88 union octeon_instr_64B cmd;
86 89
87 /** Input queue to use to send this command. */ 90 /** Input queue to use to send this command. */
88 u32 q_no; 91 u32 q_no;
@@ -94,15 +97,14 @@ struct octnic_data_pkt {
94 */ 97 */
95union octnic_cmd_setup { 98union octnic_cmd_setup {
96 struct { 99 struct {
97 u32 ifidx:8; 100 u32 iq_no:8;
98 u32 cksum_offset:7;
99 u32 gather:1; 101 u32 gather:1;
100 u32 timestamp:1; 102 u32 timestamp:1;
101 u32 ipv4opts_ipv6exthdr:2;
102 u32 ip_csum:1; 103 u32 ip_csum:1;
104 u32 transport_csum:1;
103 u32 tnl_csum:1; 105 u32 tnl_csum:1;
106 u32 rsvd:19;
104 107
105 u32 rsvd:11;
106 union { 108 union {
107 u32 datasize; 109 u32 datasize;
108 u32 gatherptrs; 110 u32 gatherptrs;
@@ -113,79 +115,146 @@ union octnic_cmd_setup {
113 115
114}; 116};
115 117
116struct octnic_ctrl_params {
117 u32 resp_order;
118};
119
120static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no) 118static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no)
121{ 119{
122 return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending) 120 return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending)
123 >= (oct->instr_queue[q_no]->max_count - 2)); 121 >= (oct->instr_queue[q_no]->max_count - 2));
124} 122}
125 123
126/** Utility function to prepare a 64B NIC instruction based on a setup command
127 * @param cmd - pointer to instruction to be filled in.
128 * @param setup - pointer to the setup structure
129 * @param q_no - which queue for back pressure
130 *
131 * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
132 */
133static inline void 124static inline void
134octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd, 125octnet_prepare_pci_cmd_o2(struct octeon_device *oct,
135 union octnic_cmd_setup *setup, u32 tag) 126 union octeon_instr_64B *cmd,
127 union octnic_cmd_setup *setup, u32 tag)
136{ 128{
137 struct octeon_instr_ih *ih; 129 struct octeon_instr_ih2 *ih2;
138 struct octeon_instr_irh *irh; 130 struct octeon_instr_irh *irh;
139 union octnic_packet_params packet_params; 131 union octnic_packet_params packet_params;
132 int port;
140 133
141 memset(cmd, 0, sizeof(struct octeon_instr_64B)); 134 memset(cmd, 0, sizeof(union octeon_instr_64B));
142 135
143 ih = (struct octeon_instr_ih *)&cmd->ih; 136 ih2 = (struct octeon_instr_ih2 *)&cmd->cmd2.ih2;
144 137
145 /* assume that rflag is cleared so therefore front data will only have 138 /* assume that rflag is cleared so therefore front data will only have
146 * irh and ossp[1] and ossp[2] for a total of 24 bytes 139 * irh and ossp[0], ossp[1] for a total of 32 bytes
147 */ 140 */
148 ih->fsz = 24; 141 ih2->fsz = 24;
142
143 ih2->tagtype = ORDERED_TAG;
144 ih2->grp = DEFAULT_POW_GRP;
149 145
150 ih->tagtype = ORDERED_TAG; 146 port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port;
151 ih->grp = DEFAULT_POW_GRP;
152 147
153 if (tag) 148 if (tag)
154 ih->tag = tag; 149 ih2->tag = tag;
155 else 150 else
156 ih->tag = LIO_DATA(setup->s.ifidx); 151 ih2->tag = LIO_DATA(port);
157 152
158 ih->raw = 1; 153 ih2->raw = 1;
159 ih->qos = (setup->s.ifidx & 3) + 4; /* map qos based on interface */ 154 ih2->qos = (port & 3) + 4; /* map qos based on interface */
160 155
161 if (!setup->s.gather) { 156 if (!setup->s.gather) {
162 ih->dlengsz = setup->s.u.datasize; 157 ih2->dlengsz = setup->s.u.datasize;
163 } else { 158 } else {
164 ih->gather = 1; 159 ih2->gather = 1;
165 ih->dlengsz = setup->s.u.gatherptrs; 160 ih2->dlengsz = setup->s.u.gatherptrs;
166 } 161 }
167 162
168 irh = (struct octeon_instr_irh *)&cmd->irh; 163 irh = (struct octeon_instr_irh *)&cmd->cmd2.irh;
169 164
170 irh->opcode = OPCODE_NIC; 165 irh->opcode = OPCODE_NIC;
171 irh->subcode = OPCODE_NIC_NW_DATA; 166 irh->subcode = OPCODE_NIC_NW_DATA;
172 167
173 packet_params.u32 = 0; 168 packet_params.u32 = 0;
174 169
175 if (setup->s.cksum_offset) { 170 packet_params.s.ip_csum = setup->s.ip_csum;
176 packet_params.s.csoffset = setup->s.cksum_offset; 171 packet_params.s.transport_csum = setup->s.transport_csum;
177 packet_params.s.ipv4opts_ipv6exthdr = 172 packet_params.s.tnl_csum = setup->s.tnl_csum;
178 setup->s.ipv4opts_ipv6exthdr; 173 packet_params.s.tsflag = setup->s.timestamp;
174
175 irh->ossp = packet_params.u32;
176}
177
178static inline void
179octnet_prepare_pci_cmd_o3(struct octeon_device *oct,
180 union octeon_instr_64B *cmd,
181 union octnic_cmd_setup *setup, u32 tag)
182{
183 struct octeon_instr_irh *irh;
184 struct octeon_instr_ih3 *ih3;
185 struct octeon_instr_pki_ih3 *pki_ih3;
186 union octnic_packet_params packet_params;
187 int port;
188
189 memset(cmd, 0, sizeof(union octeon_instr_64B));
190
191 ih3 = (struct octeon_instr_ih3 *)&cmd->cmd3.ih3;
192 pki_ih3 = (struct octeon_instr_pki_ih3 *)&cmd->cmd3.pki_ih3;
193
 194 /* assume that rflag is cleared, so the front data will only have
 195 * irh and ossp[0] and ossp[1] for 24 bytes, plus 8 for the PKI IH
196 */
197 ih3->pkind = oct->instr_queue[setup->s.iq_no]->txpciq.s.pkind;
198 /*PKI IH*/
199 ih3->fsz = 24 + 8;
200
201 if (!setup->s.gather) {
202 ih3->dlengsz = setup->s.u.datasize;
203 } else {
204 ih3->gather = 1;
205 ih3->dlengsz = setup->s.u.gatherptrs;
179 } 206 }
180 207
208 pki_ih3->w = 1;
209 pki_ih3->raw = 1;
210 pki_ih3->utag = 1;
211 pki_ih3->utt = 1;
212 pki_ih3->uqpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.use_qpg;
213
214 port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port;
215
216 if (tag)
217 pki_ih3->tag = tag;
218 else
219 pki_ih3->tag = LIO_DATA(port);
220
221 pki_ih3->tagtype = ORDERED_TAG;
222 pki_ih3->qpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.qpg;
 223 pki_ih3->pm = 0x7; /* 0x7 - parse nothing, uninterpreted */
 224 pki_ih3->sl = 8; /* sl will be sizeof(pki_ih3) */
225
226 irh = (struct octeon_instr_irh *)&cmd->cmd3.irh;
227
228 irh->opcode = OPCODE_NIC;
229 irh->subcode = OPCODE_NIC_NW_DATA;
230
231 packet_params.u32 = 0;
232
181 packet_params.s.ip_csum = setup->s.ip_csum; 233 packet_params.s.ip_csum = setup->s.ip_csum;
234 packet_params.s.transport_csum = setup->s.transport_csum;
182 packet_params.s.tnl_csum = setup->s.tnl_csum; 235 packet_params.s.tnl_csum = setup->s.tnl_csum;
183 packet_params.s.ifidx = setup->s.ifidx;
184 packet_params.s.tsflag = setup->s.timestamp; 236 packet_params.s.tsflag = setup->s.timestamp;
185 237
186 irh->ossp = packet_params.u32; 238 irh->ossp = packet_params.u32;
187} 239}
188 240
241/** Utility function to prepare a 64B NIC instruction based on a setup command
242 * @param cmd - pointer to instruction to be filled in.
243 * @param setup - pointer to the setup structure
 244 * @param tag - tag for the instruction; 0 selects the port-based default
245 *
246 * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
247 */
248static inline void
249octnet_prepare_pci_cmd(struct octeon_device *oct, union octeon_instr_64B *cmd,
250 union octnic_cmd_setup *setup, u32 tag)
251{
252 if (OCTEON_CN6XXX(oct))
253 octnet_prepare_pci_cmd_o2(oct, cmd, setup, tag);
254 else
255 octnet_prepare_pci_cmd_o3(oct, cmd, setup, tag);
256}
257
 189/** Allocate a soft command with space for a response immediately following 258
 190 * the command. 259
191 * @param oct - octeon device pointer 260 * @param oct - octeon device pointer
@@ -198,8 +267,8 @@ octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd,
198 */ 267 */
199void * 268void *
200octeon_alloc_soft_command_resp(struct octeon_device *oct, 269octeon_alloc_soft_command_resp(struct octeon_device *oct,
201 struct octeon_instr_64B *cmd, 270 union octeon_instr_64B *cmd,
202 size_t rdatasize); 271 u32 rdatasize);
203 272
204/** Send a NIC data packet to the device 273/** Send a NIC data packet to the device
205 * @param oct - octeon device pointer 274 * @param oct - octeon device pointer
@@ -214,14 +283,11 @@ int octnet_send_nic_data_pkt(struct octeon_device *oct,
214/** Send a NIC control packet to the device 283/** Send a NIC control packet to the device
215 * @param oct - octeon device pointer 284 * @param oct - octeon device pointer
 216 * @param nctrl - control structure with command, timeout, and callback info 285
217 * @param nparams - response control structure
218 *
 219 * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if the 286
220 * queue should be stopped, and IQ_SEND_OK if it sent okay. 287 * queue should be stopped, and IQ_SEND_OK if it sent okay.
221 */ 288 */
222int 289int
223octnet_send_nic_ctrl_pkt(struct octeon_device *oct, 290octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
224 struct octnic_ctrl_pkt *nctrl, 291 struct octnic_ctrl_pkt *nctrl);
225 struct octnic_ctrl_params nparams);
226 292
227#endif 293#endif
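
A hypothetical caller, to show the intended flow of the split helpers above: fill octnic_cmd_setup once and let octnet_prepare_pci_cmd() choose the CN6XXX (cmd2/ih2) or newer (cmd3/ih3 plus PKI IH) encoding from the device type. Field values are illustrative:

/* Illustrative linear (non-gather) TX preparation; errors omitted. */
static void example_prepare_tx_cmd(struct octeon_device *oct,
				   struct sk_buff *skb, u32 iq_no,
				   union octeon_instr_64B *cmd)
{
	union octnic_cmd_setup setup;

	memset(&setup, 0, sizeof(setup));
	setup.s.iq_no = iq_no;		/* queue used for back pressure */
	setup.s.ip_csum = 1;		/* offload IP header checksum */
	setup.s.transport_csum = 1;	/* offload TCP/UDP checksum */
	setup.s.u.datasize = skb->len;	/* single buffer, no gather list */

	/* Dispatches to the _o2 or _o3 variant based on the chip. */
	octnet_prepare_pci_cmd(oct, cmd, &setup, 0 /* default tag */);
}
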
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index a2a24652c8f3..7eafa75ac095 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -51,7 +51,7 @@ struct iq_post_status {
51}; 51};
52 52
53static void check_db_timeout(struct work_struct *work); 53static void check_db_timeout(struct work_struct *work);
54static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no); 54static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);
55 55
56static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *); 56static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);
57 57
@@ -69,12 +69,16 @@ static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
69 69
70/* Return 0 on success, 1 on failure */ 70/* Return 0 on success, 1 on failure */
71int octeon_init_instr_queue(struct octeon_device *oct, 71int octeon_init_instr_queue(struct octeon_device *oct,
72 u32 iq_no, u32 num_descs) 72 union oct_txpciq txpciq,
73 u32 num_descs)
73{ 74{
74 struct octeon_instr_queue *iq; 75 struct octeon_instr_queue *iq;
75 struct octeon_iq_config *conf = NULL; 76 struct octeon_iq_config *conf = NULL;
77 u32 iq_no = (u32)txpciq.s.q_no;
76 u32 q_size; 78 u32 q_size;
77 struct cavium_wq *db_wq; 79 struct cavium_wq *db_wq;
80 int orig_node = dev_to_node(&oct->pci_dev->dev);
81 int numa_node = cpu_to_node(iq_no % num_online_cpus());
78 82
79 if (OCTEON_CN6XXX(oct)) 83 if (OCTEON_CN6XXX(oct))
80 conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf))); 84 conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
@@ -95,9 +99,15 @@ int octeon_init_instr_queue(struct octeon_device *oct,
95 q_size = (u32)conf->instr_type * num_descs; 99 q_size = (u32)conf->instr_type * num_descs;
96 100
97 iq = oct->instr_queue[iq_no]; 101 iq = oct->instr_queue[iq_no];
102 iq->oct_dev = oct;
98 103
104 set_dev_node(&oct->pci_dev->dev, numa_node);
99 iq->base_addr = lio_dma_alloc(oct, q_size, 105 iq->base_addr = lio_dma_alloc(oct, q_size,
100 (dma_addr_t *)&iq->base_addr_dma); 106 (dma_addr_t *)&iq->base_addr_dma);
107 set_dev_node(&oct->pci_dev->dev, orig_node);
108 if (!iq->base_addr)
109 iq->base_addr = lio_dma_alloc(oct, q_size,
110 (dma_addr_t *)&iq->base_addr_dma);
101 if (!iq->base_addr) { 111 if (!iq->base_addr) {
102 dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n", 112 dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
103 iq_no); 113 iq_no);
@@ -109,7 +119,11 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 109 /* Initialize a list to hold requests that have been posted to Octeon 119 /* Initialize a list to hold requests that have been posted to Octeon
 110 * but have yet to be fetched by Octeon 120 * but have yet to be fetched by Octeon
111 */ 121 */
112 iq->request_list = vmalloc(sizeof(*iq->request_list) * num_descs); 122 iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
123 numa_node);
124 if (!iq->request_list)
125 iq->request_list = vmalloc(sizeof(*iq->request_list) *
126 num_descs);
113 if (!iq->request_list) { 127 if (!iq->request_list) {
114 lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma); 128 lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
115 dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n", 129 dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
@@ -122,7 +136,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
122 dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n", 136 dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
123 iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count); 137 iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);
124 138
125 iq->iq_no = iq_no; 139 iq->txpciq.u64 = txpciq.u64;
126 iq->fill_threshold = (u32)conf->db_min; 140 iq->fill_threshold = (u32)conf->db_min;
127 iq->fill_cnt = 0; 141 iq->fill_cnt = 0;
128 iq->host_write_index = 0; 142 iq->host_write_index = 0;
@@ -135,8 +149,11 @@ int octeon_init_instr_queue(struct octeon_device *oct,
135 149
136 /* Initialize the spinlock for this instruction queue */ 150 /* Initialize the spinlock for this instruction queue */
137 spin_lock_init(&iq->lock); 151 spin_lock_init(&iq->lock);
152 spin_lock_init(&iq->post_lock);
138 153
139 oct->io_qmask.iq |= (1 << iq_no); 154 spin_lock_init(&iq->iq_flush_running_lock);
155
156 oct->io_qmask.iq |= (1ULL << iq_no);
140 157
141 /* Set the 32B/64B mode for each input queue */ 158 /* Set the 32B/64B mode for each input queue */
142 oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no); 159 oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
@@ -144,7 +161,9 @@ int octeon_init_instr_queue(struct octeon_device *oct,
144 161
145 oct->fn_list.setup_iq_regs(oct, iq_no); 162 oct->fn_list.setup_iq_regs(oct, iq_no);
146 163
147 oct->check_db_wq[iq_no].wq = create_workqueue("check_iq_db"); 164 oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
165 WQ_MEM_RECLAIM,
166 0);
148 if (!oct->check_db_wq[iq_no].wq) { 167 if (!oct->check_db_wq[iq_no].wq) {
149 lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma); 168 lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
150 dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n", 169 dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
@@ -168,7 +187,6 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
168 struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; 187 struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
169 188
170 cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work); 189 cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
171 flush_workqueue(oct->check_db_wq[iq_no].wq);
172 destroy_workqueue(oct->check_db_wq[iq_no].wq); 190 destroy_workqueue(oct->check_db_wq[iq_no].wq);
173 191
174 if (OCTEON_CN6XXX(oct)) 192 if (OCTEON_CN6XXX(oct))
@@ -188,26 +206,38 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
188 206
189/* Return 0 on success, 1 on failure */ 207/* Return 0 on success, 1 on failure */
190int octeon_setup_iq(struct octeon_device *oct, 208int octeon_setup_iq(struct octeon_device *oct,
191 u32 iq_no, 209 int ifidx,
210 int q_index,
211 union oct_txpciq txpciq,
192 u32 num_descs, 212 u32 num_descs,
193 void *app_ctx) 213 void *app_ctx)
194{ 214{
215 u32 iq_no = (u32)txpciq.s.q_no;
216 int numa_node = cpu_to_node(iq_no % num_online_cpus());
217
195 if (oct->instr_queue[iq_no]) { 218 if (oct->instr_queue[iq_no]) {
196 dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n", 219 dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
197 iq_no); 220 iq_no);
221 oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
198 oct->instr_queue[iq_no]->app_ctx = app_ctx; 222 oct->instr_queue[iq_no]->app_ctx = app_ctx;
199 return 0; 223 return 0;
200 } 224 }
201 oct->instr_queue[iq_no] = 225 oct->instr_queue[iq_no] =
202 vmalloc(sizeof(struct octeon_instr_queue)); 226 vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
227 if (!oct->instr_queue[iq_no])
228 oct->instr_queue[iq_no] =
229 vmalloc(sizeof(struct octeon_instr_queue));
203 if (!oct->instr_queue[iq_no]) 230 if (!oct->instr_queue[iq_no])
204 return 1; 231 return 1;
205 232
206 memset(oct->instr_queue[iq_no], 0, 233 memset(oct->instr_queue[iq_no], 0,
207 sizeof(struct octeon_instr_queue)); 234 sizeof(struct octeon_instr_queue));
208 235
236 oct->instr_queue[iq_no]->q_index = q_index;
209 oct->instr_queue[iq_no]->app_ctx = app_ctx; 237 oct->instr_queue[iq_no]->app_ctx = app_ctx;
210 if (octeon_init_instr_queue(oct, iq_no, num_descs)) { 238 oct->instr_queue[iq_no]->ifidx = ifidx;
239
240 if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
211 vfree(oct->instr_queue[iq_no]); 241 vfree(oct->instr_queue[iq_no]);
212 oct->instr_queue[iq_no] = NULL; 242 oct->instr_queue[iq_no] = NULL;
213 return 1; 243 return 1;
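
Both allocation sites above use the same idiom: ask for memory on the NUMA node that will service the queue, and fall back to any node rather than failing queue setup. Extracted into a hypothetical helper:

#include <linux/vmalloc.h>

/* Prefer node-local memory for per-queue structures; remote memory is
 * still better than no queue at all.
 */
static void *vmalloc_node_with_fallback(unsigned long size, int node)
{
	void *buf = vmalloc_node(size, node);

	return buf ? buf : vmalloc(size);
}
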
@@ -226,8 +256,8 @@ int lio_wait_for_instr_fetch(struct octeon_device *oct)
226 instr_cnt = 0; 256 instr_cnt = 0;
227 257
228 /*for (i = 0; i < oct->num_iqs; i++) {*/ 258 /*for (i = 0; i < oct->num_iqs; i++) {*/
229 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { 259 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
230 if (!(oct->io_qmask.iq & (1UL << i))) 260 if (!(oct->io_qmask.iq & (1ULL << i)))
231 continue; 261 continue;
232 pending = 262 pending =
233 atomic_read(&oct-> 263 atomic_read(&oct->
@@ -364,13 +394,13 @@ __add_to_request_list(struct octeon_instr_queue *iq,
364 394
365int 395int
366lio_process_iq_request_list(struct octeon_device *oct, 396lio_process_iq_request_list(struct octeon_device *oct,
367 struct octeon_instr_queue *iq) 397 struct octeon_instr_queue *iq, u32 napi_budget)
368{ 398{
369 int reqtype; 399 int reqtype;
370 void *buf; 400 void *buf;
371 u32 old = iq->flush_index; 401 u32 old = iq->flush_index;
372 u32 inst_count = 0; 402 u32 inst_count = 0;
373 unsigned pkts_compl = 0, bytes_compl = 0; 403 unsigned int pkts_compl = 0, bytes_compl = 0;
374 struct octeon_soft_command *sc; 404 struct octeon_soft_command *sc;
375 struct octeon_instr_irh *irh; 405 struct octeon_instr_irh *irh;
376 406
@@ -394,7 +424,7 @@ lio_process_iq_request_list(struct octeon_device *oct,
394 case REQTYPE_SOFT_COMMAND: 424 case REQTYPE_SOFT_COMMAND:
395 sc = buf; 425 sc = buf;
396 426
397 irh = (struct octeon_instr_irh *)&sc->cmd.irh; 427 irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
398 if (irh->rflag) { 428 if (irh->rflag) {
399 /* We're expecting a response from Octeon. 429 /* We're expecting a response from Octeon.
400 * It's up to lio_process_ordered_list() to 430 * It's up to lio_process_ordered_list() to
@@ -430,6 +460,9 @@ lio_process_iq_request_list(struct octeon_device *oct,
430 skip_this: 460 skip_this:
431 inst_count++; 461 inst_count++;
432 INCR_INDEX_BY1(old, iq->max_count); 462 INCR_INDEX_BY1(old, iq->max_count);
463
464 if ((napi_budget) && (inst_count >= napi_budget))
465 break;
433 } 466 }
434 if (bytes_compl) 467 if (bytes_compl)
435 octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl, 468 octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
@@ -439,38 +472,63 @@ lio_process_iq_request_list(struct octeon_device *oct,
439 return inst_count; 472 return inst_count;
440} 473}
441 474
442static inline void 475/* Can only be called from process context */
443update_iq_indices(struct octeon_device *oct, struct octeon_instr_queue *iq) 476int
477octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
478 u32 pending_thresh, u32 napi_budget)
444{ 479{
445 u32 inst_processed = 0; 480 u32 inst_processed = 0;
481 u32 tot_inst_processed = 0;
482 int tx_done = 1;
446 483
447 /* Calculate how many commands Octeon has read and move the read index 484 if (!spin_trylock(&iq->iq_flush_running_lock))
448 * accordingly. 485 return tx_done;
449 */
450 iq->octeon_read_index = oct->fn_list.update_iq_read_idx(oct, iq);
451 486
452 /* Move the NORESPONSE requests to the per-device completion list. */ 487 spin_lock_bh(&iq->lock);
453 if (iq->flush_index != iq->octeon_read_index)
454 inst_processed = lio_process_iq_request_list(oct, iq);
455 488
456 if (inst_processed) { 489 iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);
457 atomic_sub(inst_processed, &iq->instr_pending);
458 iq->stats.instr_processed += inst_processed;
459 }
460}
461 490
462static void
463octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
464 u32 pending_thresh)
465{
466 if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) { 491 if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
467 spin_lock_bh(&iq->lock); 492 do {
468 update_iq_indices(oct, iq); 493 /* Process any outstanding IQ packets. */
469 spin_unlock_bh(&iq->lock); 494 if (iq->flush_index == iq->octeon_read_index)
495 break;
496
497 if (napi_budget)
498 inst_processed = lio_process_iq_request_list
499 (oct, iq,
500 napi_budget - tot_inst_processed);
501 else
502 inst_processed =
503 lio_process_iq_request_list(oct, iq, 0);
504
505 if (inst_processed) {
506 atomic_sub(inst_processed, &iq->instr_pending);
507 iq->stats.instr_processed += inst_processed;
508 }
509
510 tot_inst_processed += inst_processed;
511 inst_processed = 0;
512
513 } while (tot_inst_processed < napi_budget);
514
515 if (napi_budget && (tot_inst_processed >= napi_budget))
516 tx_done = 0;
470 } 517 }
518
519 iq->last_db_time = jiffies;
520
521 spin_unlock_bh(&iq->lock);
522
523 spin_unlock(&iq->iq_flush_running_lock);
524
525 return tx_done;
471} 526}
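
octeon_flush_iq() now reports whether TX work completed within the caller's budget: a return of 0 means the budget ran out mid-flush and polling must continue. A hypothetical NAPI consumer (the context structure is a placeholder; the driver's real poll routine also accounts for RX work):

struct example_ioq_ctx {
	struct napi_struct napi;
	struct octeon_device *oct;
	struct octeon_instr_queue *iq;
};

static int example_ioq_poll(struct napi_struct *napi, int budget)
{
	struct example_ioq_ctx *ctx =
		container_of(napi, struct example_ioq_ctx, napi);

	/* tx_done == 0: budget exhausted, ask to be polled again. */
	if (!octeon_flush_iq(ctx->oct, ctx->iq, 1, budget))
		return budget;

	napi_complete(napi);
	return 0;
}
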
472 527
473static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no) 528/* Process instruction queue after timeout.
529 * This routine gets called from a workqueue or when removing the module.
530 */
531static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
474{ 532{
475 struct octeon_instr_queue *iq; 533 struct octeon_instr_queue *iq;
476 u64 next_time; 534 u64 next_time;
@@ -481,24 +539,17 @@ static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no)
481 if (!iq) 539 if (!iq)
482 return; 540 return;
483 541
 542 /* return immediately if no work is pending */
543 if (!atomic_read(&iq->instr_pending))
544 return;
484 /* If jiffies - last_db_time < db_timeout do nothing */ 545 /* If jiffies - last_db_time < db_timeout do nothing */
485 next_time = iq->last_db_time + iq->db_timeout; 546 next_time = iq->last_db_time + iq->db_timeout;
486 if (!time_after(jiffies, (unsigned long)next_time)) 547 if (!time_after(jiffies, (unsigned long)next_time))
487 return; 548 return;
488 iq->last_db_time = jiffies; 549 iq->last_db_time = jiffies;
489 550
490 /* Get the lock and prevent tasklets. This routine gets called from
491 * the poll thread. Instructions can now be posted in tasklet context
492 */
493 spin_lock_bh(&iq->lock);
494 if (iq->fill_cnt != 0)
495 ring_doorbell(oct, iq);
496
497 spin_unlock_bh(&iq->lock);
498
499 /* Flush the instruction queue */ 551 /* Flush the instruction queue */
500 if (iq->do_auto_flush) 552 octeon_flush_iq(oct, iq, 1, 0);
501 octeon_flush_iq(oct, iq, 1);
502} 553}
503 554
504/* Called by the Poll thread at regular intervals to check the instruction 555/* Called by the Poll thread at regular intervals to check the instruction
@@ -523,7 +574,10 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
523 struct iq_post_status st; 574 struct iq_post_status st;
524 struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; 575 struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
525 576
 526 spin_lock_bh(&iq->lock); 577 /* Get the lock and prevent other tasks and the tx interrupt
 578 * handler from running.
579 */
580 spin_lock_bh(&iq->post_lock);
527 581
528 st = __post_command2(oct, iq, force_db, cmd); 582 st = __post_command2(oct, iq, force_db, cmd);
529 583
@@ -539,10 +593,13 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
539 INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1); 593 INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
540 } 594 }
541 595
542 spin_unlock_bh(&iq->lock); 596 spin_unlock_bh(&iq->post_lock);
543 597
544 if (iq->do_auto_flush) 598 /* This is only done here to expedite packets being flushed
545 octeon_flush_iq(oct, iq, 2); 599 * for cases where there are no IQ completion interrupts.
600 */
601 /*if (iq->do_auto_flush)*/
602 /* octeon_flush_iq(oct, iq, 2, 0);*/
546 603
547 return st.status; 604 return st.status;
548} 605}
@@ -557,7 +614,7 @@ octeon_prepare_soft_command(struct octeon_device *oct,
557 u64 ossp1) 614 u64 ossp1)
558{ 615{
559 struct octeon_config *oct_cfg; 616 struct octeon_config *oct_cfg;
560 struct octeon_instr_ih *ih; 617 struct octeon_instr_ih2 *ih2;
561 struct octeon_instr_irh *irh; 618 struct octeon_instr_irh *irh;
562 struct octeon_instr_rdp *rdp; 619 struct octeon_instr_rdp *rdp;
563 620
@@ -566,73 +623,69 @@ octeon_prepare_soft_command(struct octeon_device *oct,
566 623
567 oct_cfg = octeon_get_conf(oct); 624 oct_cfg = octeon_get_conf(oct);
568 625
569 ih = (struct octeon_instr_ih *)&sc->cmd.ih; 626 ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
570 ih->tagtype = ATOMIC_TAG; 627 ih2->tagtype = ATOMIC_TAG;
571 ih->tag = LIO_CONTROL; 628 ih2->tag = LIO_CONTROL;
572 ih->raw = 1; 629 ih2->raw = 1;
573 ih->grp = CFG_GET_CTRL_Q_GRP(oct_cfg); 630 ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);
574 631
575 if (sc->datasize) { 632 if (sc->datasize) {
576 ih->dlengsz = sc->datasize; 633 ih2->dlengsz = sc->datasize;
577 ih->rs = 1; 634 ih2->rs = 1;
578 } 635 }
579 636
580 irh = (struct octeon_instr_irh *)&sc->cmd.irh; 637 irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
581 irh->opcode = opcode; 638 irh->opcode = opcode;
582 irh->subcode = subcode; 639 irh->subcode = subcode;
583 640
584 /* opcode/subcode specific parameters (ossp) */ 641 /* opcode/subcode specific parameters (ossp) */
585 irh->ossp = irh_ossp; 642 irh->ossp = irh_ossp;
586 sc->cmd.ossp[0] = ossp0; 643 sc->cmd.cmd2.ossp[0] = ossp0;
587 sc->cmd.ossp[1] = ossp1; 644 sc->cmd.cmd2.ossp[1] = ossp1;
588 645
589 if (sc->rdatasize) { 646 if (sc->rdatasize) {
590 rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp; 647 rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
591 rdp->pcie_port = oct->pcie_port; 648 rdp->pcie_port = oct->pcie_port;
592 rdp->rlen = sc->rdatasize; 649 rdp->rlen = sc->rdatasize;
593 650
594 irh->rflag = 1; 651 irh->rflag = 1;
595 irh->len = 4; 652 ih2->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
596 ih->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
597 } else { 653 } else {
598 irh->rflag = 0; 654 irh->rflag = 0;
599 irh->len = 2; 655 ih2->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
600 ih->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
601 } 656 }
602
603 while (!(oct->io_qmask.iq & (1 << sc->iq_no)))
604 sc->iq_no++;
605} 657}
606 658
607int octeon_send_soft_command(struct octeon_device *oct, 659int octeon_send_soft_command(struct octeon_device *oct,
608 struct octeon_soft_command *sc) 660 struct octeon_soft_command *sc)
609{ 661{
610 struct octeon_instr_ih *ih; 662 struct octeon_instr_ih2 *ih2;
611 struct octeon_instr_irh *irh; 663 struct octeon_instr_irh *irh;
612 struct octeon_instr_rdp *rdp; 664 struct octeon_instr_rdp *rdp;
665 u32 len;
613 666
614 ih = (struct octeon_instr_ih *)&sc->cmd.ih; 667 ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
615 if (ih->dlengsz) { 668 if (ih2->dlengsz) {
616 BUG_ON(!sc->dmadptr); 669 WARN_ON(!sc->dmadptr);
617 sc->cmd.dptr = sc->dmadptr; 670 sc->cmd.cmd2.dptr = sc->dmadptr;
618 } 671 }
619 672 irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
620 irh = (struct octeon_instr_irh *)&sc->cmd.irh;
621 if (irh->rflag) { 673 if (irh->rflag) {
622 BUG_ON(!sc->dmarptr); 674 BUG_ON(!sc->dmarptr);
623 BUG_ON(!sc->status_word); 675 BUG_ON(!sc->status_word);
624 *sc->status_word = COMPLETION_WORD_INIT; 676 *sc->status_word = COMPLETION_WORD_INIT;
625 677
626 rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp; 678 rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
627 679
628 sc->cmd.rptr = sc->dmarptr; 680 sc->cmd.cmd2.rptr = sc->dmarptr;
629 } 681 }
682 len = (u32)ih2->dlengsz;
630 683
631 if (sc->wait_time) 684 if (sc->wait_time)
632 sc->timeout = jiffies + sc->wait_time; 685 sc->timeout = jiffies + sc->wait_time;
633 686
634 return octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc, 687 return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
635 (u32)ih->dlengsz, REQTYPE_SOFT_COMMAND); 688 len, REQTYPE_SOFT_COMMAND));
636} 689}
637 690
638int octeon_setup_sc_buffer_pool(struct octeon_device *oct) 691int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
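
The fsz values that replace irh->len above all follow one rule, spelled out in the comments: front data is the irh plus two ossp words (24 bytes), and grows by rdp and rptr (16 bytes) only when a response is expected. As a hypothetical helper:

/* Front-data size of a 64B soft command, per the comments above. */
static inline u32 octeon_front_size(bool expects_response)
{
	u32 fsz = 24;		/* irh (8) + ossp[0] (8) + ossp[1] (8) */

	if (expects_response)
		fsz += 16;	/* + rdp (8) + rptr (8) = 40 bytes total */

	return fsz;
}
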
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index 091f537a946e..c93210f99dda 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -54,8 +54,9 @@ int octeon_setup_response_list(struct octeon_device *oct)
54 spin_lock_init(&oct->response_list[i].lock); 54 spin_lock_init(&oct->response_list[i].lock);
55 atomic_set(&oct->response_list[i].pending_req_count, 0); 55 atomic_set(&oct->response_list[i].pending_req_count, 0);
56 } 56 }
57 spin_lock_init(&oct->cmd_resp_wqlock);
57 58
58 oct->dma_comp_wq.wq = create_workqueue("dma-comp"); 59 oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
59 if (!oct->dma_comp_wq.wq) { 60 if (!oct->dma_comp_wq.wq) {
60 dev_err(&oct->pci_dev->dev, "failed to create wq thread\n"); 61 dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
61 return -ENOMEM; 62 return -ENOMEM;
@@ -64,6 +65,7 @@ int octeon_setup_response_list(struct octeon_device *oct)
64 cwq = &oct->dma_comp_wq; 65 cwq = &oct->dma_comp_wq;
65 INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion); 66 INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
66 cwq->wk.ctxptr = oct; 67 cwq->wk.ctxptr = oct;
68 oct->cmd_resp_state = OCT_DRV_ONLINE;
67 queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100)); 69 queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
68 70
69 return ret; 71 return ret;
@@ -72,7 +74,6 @@ int octeon_setup_response_list(struct octeon_device *oct)
72void octeon_delete_response_list(struct octeon_device *oct) 74void octeon_delete_response_list(struct octeon_device *oct)
73{ 75{
74 cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work); 76 cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work);
75 flush_workqueue(oct->dma_comp_wq.wq);
76 destroy_workqueue(oct->dma_comp_wq.wq); 77 destroy_workqueue(oct->dma_comp_wq.wq);
77} 78}
78 79
@@ -86,6 +87,7 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
86 u32 status; 87 u32 status;
87 u64 status64; 88 u64 status64;
88 struct octeon_instr_rdp *rdp; 89 struct octeon_instr_rdp *rdp;
90 u64 rptr;
89 91
90 ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST]; 92 ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];
91 93
@@ -103,7 +105,8 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
103 105
104 sc = (struct octeon_soft_command *)ordered_sc_list-> 106 sc = (struct octeon_soft_command *)ordered_sc_list->
105 head.next; 107 head.next;
106 rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp; 108 rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
109 rptr = sc->cmd.cmd2.rptr;
107 110
108 status = OCTEON_REQUEST_PENDING; 111 status = OCTEON_REQUEST_PENDING;
109 112
@@ -111,7 +114,7 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
111 * to where rptr is pointing to 114 * to where rptr is pointing to
112 */ 115 */
113 dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev, 116 dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev,
114 sc->cmd.rptr, rdp->rlen, 117 rptr, rdp->rlen,
115 DMA_FROM_DEVICE); 118 DMA_FROM_DEVICE);
116 status64 = *sc->status_word; 119 status64 = *sc->status_word;
117 120
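
The ordered-list walk above also shows the required pattern when the host polls a completion word the device writes by DMA: sync the response region for CPU access before reading it. Condensed into a hypothetical helper:

#include <linux/dma-mapping.h>

/* Make the device's DMA write visible to the CPU, then read it. */
static u64 read_completion_word(struct device *dev, dma_addr_t rptr,
				size_t rlen, const u64 *status_word)
{
	dma_sync_single_for_cpu(dev, rptr, rlen, DMA_FROM_DEVICE);

	return READ_ONCE(*status_word);
}
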
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 477db477b133..c45de49dc963 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -64,6 +64,7 @@
64#include <net/bonding.h> 64#include <net/bonding.h>
65#include <net/addrconf.h> 65#include <net/addrconf.h>
66#include <asm/uaccess.h> 66#include <asm/uaccess.h>
67#include <linux/crash_dump.h>
67 68
68#include "cxgb4.h" 69#include "cxgb4.h"
69#include "t4_regs.h" 70#include "t4_regs.h"
@@ -206,7 +207,7 @@ static int rx_dma_offset = 2;
206static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV]; 207static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
207 208
208module_param_array(num_vf, uint, NULL, 0644); 209module_param_array(num_vf, uint, NULL, 0644);
209MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3"); 210MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3, deprecated parameter - please use the pci sysfs interface.");
210#endif 211#endif
211 212
212/* TX Queue select used to determine what algorithm to use for selecting TX 213/* TX Queue select used to determine what algorithm to use for selecting TX
@@ -460,11 +461,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
460 struct port_info *pi = netdev_priv(dev); 461 struct port_info *pi = netdev_priv(dev);
461 struct adapter *adapter = pi->adapter; 462 struct adapter *adapter = pi->adapter;
462 463
463 if (!(dev->flags & IFF_PROMISC)) { 464 __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
464 __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync); 465 __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
465 if (!(dev->flags & IFF_ALLMULTI))
466 __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
467 }
468 466
469 return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, 467 return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
470 (dev->flags & IFF_PROMISC) ? 1 : 0, 468 (dev->flags & IFF_PROMISC) ? 1 : 0,
@@ -3735,7 +3733,8 @@ static int adap_init0(struct adapter *adap)
3735 return ret; 3733 return ret;
3736 3734
3737 /* Contact FW, advertising Master capability */ 3735 /* Contact FW, advertising Master capability */
3738 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); 3736 ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
3737 is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
3739 if (ret < 0) { 3738 if (ret < 0) {
3740 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", 3739 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
3741 ret); 3740 ret);
@@ -4366,6 +4365,11 @@ static void cfg_queues(struct adapter *adap)
4366 if (q10g > netif_get_num_default_rss_queues()) 4365 if (q10g > netif_get_num_default_rss_queues())
4367 q10g = netif_get_num_default_rss_queues(); 4366 q10g = netif_get_num_default_rss_queues();
4368 4367
4368 /* Reduce memory usage in kdump environment, disable all offload.
4369 */
4370 if (is_kdump_kernel())
4371 adap->params.offload = 0;
4372
4369 for_each_port(adap, i) { 4373 for_each_port(adap, i) {
4370 struct port_info *pi = adap2pinfo(adap, i); 4374 struct port_info *pi = adap2pinfo(adap, i);
4371 4375
@@ -4829,6 +4833,60 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
4829 return -EINVAL; 4833 return -EINVAL;
4830} 4834}
4831 4835
4836#ifdef CONFIG_PCI_IOV
4837static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
4838{
4839 int err = 0;
4840 int current_vfs = pci_num_vf(pdev);
4841 u32 pcie_fw;
4842 void __iomem *regs;
4843
4844 regs = pci_ioremap_bar(pdev, 0);
4845 if (!regs) {
4846 dev_err(&pdev->dev, "cannot map device registers\n");
4847 return -ENOMEM;
4848 }
4849
4850 pcie_fw = readl(regs + PCIE_FW_A);
4851 iounmap(regs);
4852 /* Check if cxgb4 is the MASTER and fw is initialized */
4853 if (!(pcie_fw & PCIE_FW_INIT_F) ||
4854 !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
4855 PCIE_FW_MASTER_G(pcie_fw) != 4) {
4856 dev_warn(&pdev->dev,
4857 "cxgb4 driver needs to be MASTER to support SRIOV\n");
4858 return -EOPNOTSUPP;
4859 }
4860
 4861 /* If any of the VFs is already assigned to a guest OS, then
 4862 * SRIOV for this device cannot be modified
4863 */
4864 if (current_vfs && pci_vfs_assigned(pdev)) {
4865 dev_err(&pdev->dev,
4866 "Cannot modify SR-IOV while VFs are assigned\n");
4867 num_vfs = current_vfs;
4868 return num_vfs;
4869 }
4870
4871 /* Disable SRIOV when zero is passed.
4872 * One needs to disable SRIOV before modifying it, else
 4873 * the stack throws the below warning:
4874 * " 'n' VFs already enabled. Disable before enabling 'm' VFs."
4875 */
4876 if (!num_vfs) {
4877 pci_disable_sriov(pdev);
4878 return num_vfs;
4879 }
4880
4881 if (num_vfs != current_vfs) {
4882 err = pci_enable_sriov(pdev, num_vfs);
4883 if (err)
4884 return err;
4885 }
4886 return num_vfs;
4887}
4888#endif
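
With .sriov_configure wired up, VF provisioning moves to the standard PCI sysfs interface: writing a count to the device's sriov_numvfs attribute (for example, echo 8 > /sys/bus/pci/devices/0000:01:00.4/sriov_numvfs, with a placeholder bus address) invokes cxgb4_iov_configure(), and writing 0 tears the VFs down again. This is the interface the deprecation warning added below for the num_vf module parameter points users to.
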
4889
4832static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 4890static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4833{ 4891{
4834 int func, i, err, s_qpp, qpp, num_seg; 4892 int func, i, err, s_qpp, qpp, num_seg;
@@ -5162,11 +5220,16 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5162 5220
5163sriov: 5221sriov:
5164#ifdef CONFIG_PCI_IOV 5222#ifdef CONFIG_PCI_IOV
5165 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) 5223 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) {
5224 dev_warn(&pdev->dev,
5225 "Enabling SR-IOV VFs using the num_vf module "
5226 "parameter is deprecated - please use the pci sysfs "
5227 "interface instead.\n");
5166 if (pci_enable_sriov(pdev, num_vf[func]) == 0) 5228 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
5167 dev_info(&pdev->dev, 5229 dev_info(&pdev->dev,
5168 "instantiated %u virtual functions\n", 5230 "instantiated %u virtual functions\n",
5169 num_vf[func]); 5231 num_vf[func]);
5232 }
5170#endif 5233#endif
5171 return 0; 5234 return 0;
5172 5235
@@ -5259,6 +5322,9 @@ static struct pci_driver cxgb4_driver = {
5259 .probe = init_one, 5322 .probe = init_one,
5260 .remove = remove_one, 5323 .remove = remove_one,
5261 .shutdown = remove_one, 5324 .shutdown = remove_one,
5325#ifdef CONFIG_PCI_IOV
5326 .sriov_configure = cxgb4_iov_configure,
5327#endif
5262 .err_handler = &cxgb4_eeh, 5328 .err_handler = &cxgb4_eeh,
5263}; 5329};
5264 5330
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 734dd776c22f..109bc630408b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -353,6 +353,10 @@ struct hash_mac_addr {
353 u8 addr[ETH_ALEN]; 353 u8 addr[ETH_ALEN];
354}; 354};
355 355
356struct mbox_list {
357 struct list_head list;
358};
359
356/* 360/*
357 * Per-"adapter" (Virtual Function) information. 361 * Per-"adapter" (Virtual Function) information.
358 */ 362 */
@@ -387,6 +391,10 @@ struct adapter {
387 /* various locks */ 391 /* various locks */
388 spinlock_t stats_lock; 392 spinlock_t stats_lock;
389 393
394 /* lock for mailbox cmd list */
395 spinlock_t mbox_lock;
396 struct mbox_list mlist;
397
390 /* support for mailbox command/reply logging */ 398 /* support for mailbox command/reply logging */
391#define T4VF_OS_LOG_MBOX_CMDS 256 399#define T4VF_OS_LOG_MBOX_CMDS 256
392 struct mbox_cmd_log *mbox_log; 400 struct mbox_cmd_log *mbox_log;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 04fc6f6d1e25..9f5526478d2f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -937,12 +937,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
937{ 937{
938 struct port_info *pi = netdev_priv(dev); 938 struct port_info *pi = netdev_priv(dev);
939 939
940 if (!(dev->flags & IFF_PROMISC)) { 940 __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
941 __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync); 941 __dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
942 if (!(dev->flags & IFF_ALLMULTI))
943 __dev_mc_sync(dev, cxgb4vf_mac_sync,
944 cxgb4vf_mac_unsync);
945 }
946 return t4vf_set_rxmode(pi->adapter, pi->viid, -1, 942 return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
947 (dev->flags & IFF_PROMISC) != 0, 943 (dev->flags & IFF_PROMISC) != 0,
948 (dev->flags & IFF_ALLMULTI) != 0, 944 (dev->flags & IFF_ALLMULTI) != 0,
@@ -2778,6 +2774,8 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
2778 * Initialize SMP data synchronization resources. 2774 * Initialize SMP data synchronization resources.
2779 */ 2775 */
2780 spin_lock_init(&adapter->stats_lock); 2776 spin_lock_init(&adapter->stats_lock);
2777 spin_lock_init(&adapter->mbox_lock);
2778 INIT_LIST_HEAD(&adapter->mlist.list);
2781 2779
2782 /* 2780 /*
2783 * Map our I/O registers in BAR0. 2781 * Map our I/O registers in BAR0.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 955ff7c61f1b..61bfe86da86d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -139,6 +139,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
139 u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL; 139 u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
140 u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi)); 140 u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi));
141 __be64 cmd_rpl[MBOX_LEN / 8]; 141 __be64 cmd_rpl[MBOX_LEN / 8];
142 struct mbox_list entry;
142 143
143 /* In T6, mailbox size is changed to 128 bytes to avoid 144 /* In T6, mailbox size is changed to 128 bytes to avoid
144 * invalidating the entire prefetch buffer. 145 * invalidating the entire prefetch buffer.
@@ -156,6 +157,51 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
156 size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4) 157 size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
157 return -EINVAL; 158 return -EINVAL;
158 159
160 /* Queue ourselves onto the mailbox access list. When our entry is at
161 * the front of the list, we have rights to access the mailbox. So we
162 * wait [for a while] till we're at the front [or bail out with an
163 * EBUSY] ...
164 */
165 spin_lock(&adapter->mbox_lock);
166 list_add_tail(&entry.list, &adapter->mlist.list);
167 spin_unlock(&adapter->mbox_lock);
168
169 delay_idx = 0;
170 ms = delay[0];
171
172 for (i = 0; ; i += ms) {
173 /* If we've waited too long, return a busy indication. This
174 * really ought to be based on our initial position in the
 175 * mailbox access list but this is a start. We very rarely
176 * contend on access to the mailbox ...
177 */
178 if (i > FW_CMD_MAX_TIMEOUT) {
179 spin_lock(&adapter->mbox_lock);
180 list_del(&entry.list);
181 spin_unlock(&adapter->mbox_lock);
182 ret = -EBUSY;
183 t4vf_record_mbox(adapter, cmd, size, access, ret);
184 return ret;
185 }
186
187 /* If we're at the head, break out and start the mailbox
188 * protocol.
189 */
190 if (list_first_entry(&adapter->mlist.list, struct mbox_list,
191 list) == &entry)
192 break;
193
194 /* Delay for a bit before checking again ... */
195 if (sleep_ok) {
196 ms = delay[delay_idx]; /* last element may repeat */
197 if (delay_idx < ARRAY_SIZE(delay) - 1)
198 delay_idx++;
199 msleep(ms);
200 } else {
201 mdelay(ms);
202 }
203 }
204
159 /* 205 /*
160 * Loop trying to get ownership of the mailbox. Return an error 206 * Loop trying to get ownership of the mailbox. Return an error
161 * if we can't gain ownership. 207 * if we can't gain ownership.
@@ -164,6 +210,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
164 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) 210 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
165 v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl)); 211 v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
166 if (v != MBOX_OWNER_DRV) { 212 if (v != MBOX_OWNER_DRV) {
213 spin_lock(&adapter->mbox_lock);
214 list_del(&entry.list);
215 spin_unlock(&adapter->mbox_lock);
167 ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT; 216 ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
168 t4vf_record_mbox(adapter, cmd, size, access, ret); 217 t4vf_record_mbox(adapter, cmd, size, access, ret);
169 return ret; 218 return ret;
@@ -248,6 +297,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
248 if (cmd_op != FW_VI_STATS_CMD) 297 if (cmd_op != FW_VI_STATS_CMD)
249 t4vf_record_mbox(adapter, cmd_rpl, size, access, 298 t4vf_record_mbox(adapter, cmd_rpl, size, access,
250 execute); 299 execute);
300 spin_lock(&adapter->mbox_lock);
301 list_del(&entry.list);
302 spin_unlock(&adapter->mbox_lock);
251 return -FW_CMD_RETVAL_G(v); 303 return -FW_CMD_RETVAL_G(v);
252 } 304 }
253 } 305 }
@@ -255,6 +307,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
255 /* We timed out. Return the error ... */ 307 /* We timed out. Return the error ... */
256 ret = -ETIMEDOUT; 308 ret = -ETIMEDOUT;
257 t4vf_record_mbox(adapter, cmd, size, access, ret); 309 t4vf_record_mbox(adapter, cmd, size, access, ret);
310 spin_lock(&adapter->mbox_lock);
311 list_del(&entry.list);
312 spin_unlock(&adapter->mbox_lock);
258 return ret; 313 return ret;
259} 314}
260 315
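
The list added above gives mailbox access FIFO fairness: queue an entry, wait until it reaches the head, run the ownership handshake, and remove the entry on every exit path. A skeleton of the wait phase with a simplified fixed delay (the driver uses a backoff table; names otherwise follow the code above):

#include <linux/delay.h>
#include <linux/list.h>

/* Illustrative wait-for-turn. On success the entry is still queued and
 * the caller must list_del() it (under mbox_lock) when finished.
 */
static int mbox_wait_for_turn(struct adapter *adapter,
			      struct mbox_list *entry, bool sleep_ok)
{
	int waited_ms = 0;

	spin_lock(&adapter->mbox_lock);
	list_add_tail(&entry->list, &adapter->mlist.list);
	spin_unlock(&adapter->mbox_lock);

	while (list_first_entry(&adapter->mlist.list, struct mbox_list,
				list) != entry) {
		if (waited_ms > FW_CMD_MAX_TIMEOUT) {
			spin_lock(&adapter->mbox_lock);
			list_del(&entry->list);
			spin_unlock(&adapter->mbox_lock);
			return -EBUSY;
		}
		if (sleep_ok)
			msleep(10);
		else
			mdelay(10);
		waited_ms += 10;
	}

	return 0;
}
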
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 60383040d6c6..c363b58552e9 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -53,6 +53,8 @@
53#include <linux/errno.h> 53#include <linux/errno.h>
54#include <linux/netdevice.h> 54#include <linux/netdevice.h>
55#include <linux/etherdevice.h> 55#include <linux/etherdevice.h>
56#include <linux/of.h>
57#include <linux/of_device.h>
56#include <linux/platform_device.h> 58#include <linux/platform_device.h>
57#include <linux/kernel.h> 59#include <linux/kernel.h>
58#include <linux/types.h> 60#include <linux/types.h>
@@ -1895,9 +1897,17 @@ static int cs89x0_platform_remove(struct platform_device *pdev)
1895 return 0; 1897 return 0;
1896} 1898}
1897 1899
1900static const struct __maybe_unused of_device_id cs89x0_match[] = {
1901 { .compatible = "cirrus,cs8900", },
1902 { .compatible = "cirrus,cs8920", },
1903 { },
1904};
1905MODULE_DEVICE_TABLE(of, cs89x0_match);
1906
1898static struct platform_driver cs89x0_driver = { 1907static struct platform_driver cs89x0_driver = {
1899 .driver = { 1908 .driver = {
1900 .name = DRV_NAME, 1909 .name = DRV_NAME,
1910 .of_match_table = of_match_ptr(cs89x0_match),
1901 }, 1911 },
1902 .remove = cs89x0_platform_remove, 1912 .remove = cs89x0_platform_remove,
1903}; 1913};
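
The __maybe_unused annotation on the match table pairs with of_match_ptr(): when CONFIG_OF is disabled the pointer compiles away to NULL, and without the annotation the now-unreferenced table would trigger a defined-but-unused warning. The macro, simplified from <linux/of.h>:

#ifdef CONFIG_OF
#define of_match_ptr(ptr)	(ptr)
#else
#define of_match_ptr(ptr)	NULL
#endif
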
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index f44a39c40642..fd3980cc1e34 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -103,25 +103,29 @@ static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
103 } 103 }
104} 104}
105 105
106static int enic_get_settings(struct net_device *netdev, 106static int enic_get_ksettings(struct net_device *netdev,
107 struct ethtool_cmd *ecmd) 107 struct ethtool_link_ksettings *ecmd)
108{ 108{
109 struct enic *enic = netdev_priv(netdev); 109 struct enic *enic = netdev_priv(netdev);
110 struct ethtool_link_settings *base = &ecmd->base;
110 111
111 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 112 ethtool_link_ksettings_add_link_mode(ecmd, supported,
112 ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); 113 10000baseT_Full);
113 ecmd->port = PORT_FIBRE; 114 ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
114 ecmd->transceiver = XCVR_EXTERNAL; 115 ethtool_link_ksettings_add_link_mode(ecmd, advertising,
116 10000baseT_Full);
117 ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
118 base->port = PORT_FIBRE;
115 119
116 if (netif_carrier_ok(netdev)) { 120 if (netif_carrier_ok(netdev)) {
117 ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev)); 121 base->speed = vnic_dev_port_speed(enic->vdev);
118 ecmd->duplex = DUPLEX_FULL; 122 base->duplex = DUPLEX_FULL;
119 } else { 123 } else {
120 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); 124 base->speed = SPEED_UNKNOWN;
121 ecmd->duplex = DUPLEX_UNKNOWN; 125 base->duplex = DUPLEX_UNKNOWN;
122 } 126 }
123 127
124 ecmd->autoneg = AUTONEG_DISABLE; 128 base->autoneg = AUTONEG_DISABLE;
125 129
126 return 0; 130 return 0;
127} 131}
@@ -500,7 +504,6 @@ static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
500} 504}
501 505
502static const struct ethtool_ops enic_ethtool_ops = { 506static const struct ethtool_ops enic_ethtool_ops = {
503 .get_settings = enic_get_settings,
504 .get_drvinfo = enic_get_drvinfo, 507 .get_drvinfo = enic_get_drvinfo,
505 .get_msglevel = enic_get_msglevel, 508 .get_msglevel = enic_get_msglevel,
506 .set_msglevel = enic_set_msglevel, 509 .set_msglevel = enic_set_msglevel,
@@ -516,6 +519,7 @@ static const struct ethtool_ops enic_ethtool_ops = {
516 .get_rxfh_key_size = enic_get_rxfh_key_size, 519 .get_rxfh_key_size = enic_get_rxfh_key_size,
517 .get_rxfh = enic_get_rxfh, 520 .get_rxfh = enic_get_rxfh,
518 .set_rxfh = enic_set_rxfh, 521 .set_rxfh = enic_set_rxfh,
522 .get_link_ksettings = enic_get_ksettings,
519}; 523};
520 524
521void enic_set_ethtool_ops(struct net_device *netdev) 525void enic_set_ethtool_ops(struct net_device *netdev)
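
The enic change is the stock conversion from the legacy get_settings callback to get_link_ksettings: the old u32 supported/advertising masks become link-mode bitmaps, and speed/duplex/autoneg move into the base struct. A generic sketch of the new-style callback (demo values, not enic's):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int demo_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *ecmd)
{
	ethtool_link_ksettings_zero_link_mode(ecmd, supported);
	ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
	ethtool_link_ksettings_add_link_mode(ecmd, supported,
					     10000baseT_Full);

	ecmd->base.speed = SPEED_10000;
	ecmd->base.duplex = DUPLEX_FULL;
	ecmd->base.autoneg = AUTONEG_DISABLE;
	return 0;
}
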
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index b69a9eacc531..c3b64cdd0dec 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -173,7 +173,7 @@ static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
173static void dnet_handle_link_change(struct net_device *dev) 173static void dnet_handle_link_change(struct net_device *dev)
174{ 174{
175 struct dnet *bp = netdev_priv(dev); 175 struct dnet *bp = netdev_priv(dev);
176 struct phy_device *phydev = bp->phy_dev; 176 struct phy_device *phydev = dev->phydev;
177 unsigned long flags; 177 unsigned long flags;
178 u32 mode_reg, ctl_reg; 178 u32 mode_reg, ctl_reg;
179 179
@@ -295,7 +295,6 @@ static int dnet_mii_probe(struct net_device *dev)
295 bp->link = 0; 295 bp->link = 0;
296 bp->speed = 0; 296 bp->speed = 0;
297 bp->duplex = -1; 297 bp->duplex = -1;
298 bp->phy_dev = phydev;
299 298
300 return 0; 299 return 0;
301} 300}
@@ -629,16 +628,16 @@ static int dnet_open(struct net_device *dev)
629 struct dnet *bp = netdev_priv(dev); 628 struct dnet *bp = netdev_priv(dev);
630 629
 631 /* if the phy is not yet registered, retry later */ 630 /* if the phy is not yet registered, retry later */
632 if (!bp->phy_dev) 631 if (!dev->phydev)
633 return -EAGAIN; 632 return -EAGAIN;
634 633
635 napi_enable(&bp->napi); 634 napi_enable(&bp->napi);
636 dnet_init_hw(bp); 635 dnet_init_hw(bp);
637 636
638 phy_start_aneg(bp->phy_dev); 637 phy_start_aneg(dev->phydev);
639 638
640 /* schedule a link state check */ 639 /* schedule a link state check */
641 phy_start(bp->phy_dev); 640 phy_start(dev->phydev);
642 641
643 netif_start_queue(dev); 642 netif_start_queue(dev);
644 643
@@ -652,8 +651,8 @@ static int dnet_close(struct net_device *dev)
652 netif_stop_queue(dev); 651 netif_stop_queue(dev);
653 napi_disable(&bp->napi); 652 napi_disable(&bp->napi);
654 653
655 if (bp->phy_dev) 654 if (dev->phydev)
656 phy_stop(bp->phy_dev); 655 phy_stop(dev->phydev);
657 656
658 dnet_reset_hw(bp); 657 dnet_reset_hw(bp);
659 netif_carrier_off(dev); 658 netif_carrier_off(dev);
@@ -731,32 +730,9 @@ static struct net_device_stats *dnet_get_stats(struct net_device *dev)
731 return nstat; 730 return nstat;
732} 731}
733 732
734static int dnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
735{
736 struct dnet *bp = netdev_priv(dev);
737 struct phy_device *phydev = bp->phy_dev;
738
739 if (!phydev)
740 return -ENODEV;
741
742 return phy_ethtool_gset(phydev, cmd);
743}
744
745static int dnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
746{
747 struct dnet *bp = netdev_priv(dev);
748 struct phy_device *phydev = bp->phy_dev;
749
750 if (!phydev)
751 return -ENODEV;
752
753 return phy_ethtool_sset(phydev, cmd);
754}
755
756static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 733static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
757{ 734{
758 struct dnet *bp = netdev_priv(dev); 735 struct phy_device *phydev = dev->phydev;
759 struct phy_device *phydev = bp->phy_dev;
760 736
761 if (!netif_running(dev)) 737 if (!netif_running(dev))
762 return -EINVAL; 738 return -EINVAL;
@@ -776,11 +752,11 @@ static void dnet_get_drvinfo(struct net_device *dev,
776} 752}
777 753
778static const struct ethtool_ops dnet_ethtool_ops = { 754static const struct ethtool_ops dnet_ethtool_ops = {
779 .get_settings = dnet_get_settings,
780 .set_settings = dnet_set_settings,
781 .get_drvinfo = dnet_get_drvinfo, 755 .get_drvinfo = dnet_get_drvinfo,
782 .get_link = ethtool_op_get_link, 756 .get_link = ethtool_op_get_link,
783 .get_ts_info = ethtool_op_get_ts_info, 757 .get_ts_info = ethtool_op_get_ts_info,
758 .get_link_ksettings = phy_ethtool_get_link_ksettings,
759 .set_link_ksettings = phy_ethtool_set_link_ksettings,
784}; 760};
785 761
786static const struct net_device_ops dnet_netdev_ops = { 762static const struct net_device_ops dnet_netdev_ops = {
@@ -875,7 +851,7 @@ static int dnet_probe(struct platform_device *pdev)
875 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ", 851 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
876 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ", 852 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
877 (bp->capabilities & DNET_HAS_DMA) ? "" : "no "); 853 (bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
878 phydev = bp->phy_dev; 854 phydev = dev->phydev;
879 phy_attached_info(phydev); 855 phy_attached_info(phydev);
880 856
881 return 0; 857 return 0;
@@ -899,8 +875,8 @@ static int dnet_remove(struct platform_device *pdev)
899 875
900 if (dev) { 876 if (dev) {
901 bp = netdev_priv(dev); 877 bp = netdev_priv(dev);
902 if (bp->phy_dev) 878 if (dev->phydev)
903 phy_disconnect(bp->phy_dev); 879 phy_disconnect(dev->phydev);
904 mdiobus_unregister(bp->mii_bus); 880 mdiobus_unregister(bp->mii_bus);
905 mdiobus_free(bp->mii_bus); 881 mdiobus_free(bp->mii_bus);
906 unregister_netdev(dev); 882 unregister_netdev(dev);
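
dnet's private phy_dev pointer was redundant: phy_connect() already stores the attached PHY in net_device->phydev, and the generic phy_ethtool_{get,set}_link_ksettings helpers operate on that field, which is what lets the driver delete its own get/set_settings wrappers. The resulting shape, sketched with a hypothetical demo_open():

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static int demo_open(struct net_device *dev)
{
	if (!dev->phydev)		/* PHY not attached yet */
		return -EAGAIN;

	phy_start_aneg(dev->phydev);
	phy_start(dev->phydev);		/* schedules a link-state check */
	netif_start_queue(dev);
	return 0;
}

static const struct ethtool_ops demo_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
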
diff --git a/drivers/net/ethernet/dnet.h b/drivers/net/ethernet/dnet.h
index 37f5b30fa78b..d985080bbd5d 100644
--- a/drivers/net/ethernet/dnet.h
+++ b/drivers/net/ethernet/dnet.h
@@ -216,7 +216,6 @@ struct dnet {
216 216
217 /* PHY stuff */ 217 /* PHY stuff */
218 struct mii_bus *mii_bus; 218 struct mii_bus *mii_bus;
219 struct phy_device *phy_dev;
220 unsigned int link; 219 unsigned int link;
221 unsigned int speed; 220 unsigned int speed;
222 unsigned int duplex; 221 unsigned int duplex;
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
index 7108563260ae..b4853ec9de8d 100644
--- a/drivers/net/ethernet/emulex/benet/Kconfig
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -13,11 +13,3 @@ config BE2NET_HWMON
13 ---help--- 13 ---help---
14 Say Y here if you want to expose thermal sensor data on 14 Say Y here if you want to expose thermal sensor data on
15 be2net network adapter. 15 be2net network adapter.
16
17config BE2NET_VXLAN
18 bool "VXLAN offload support on be2net driver"
19 default y
20 depends on BE2NET && VXLAN && !(BE2NET=y && VXLAN=m)
21 ---help---
22 Say Y here if you want to enable VXLAN offload support on
23 be2net driver.
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index fe3763df3f13..4555e041ef69 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2015 Emulex 2 * Copyright (C) 2005 - 2016 Broadcom
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -97,7 +97,8 @@
97 * SURF/DPDK 97 * SURF/DPDK
98 */ 98 */
99 99
100#define MAX_RSS_IFACES 15 100#define MAX_PORT_RSS_TABLES 15
101#define MAX_NIC_FUNCS 16
101#define MAX_RX_QS 32 102#define MAX_RX_QS 32
102#define MAX_EVT_QS 32 103#define MAX_EVT_QS 32
103#define MAX_TX_QS 32 104#define MAX_TX_QS 32
@@ -442,8 +443,20 @@ struct be_resources {
442 u16 max_iface_count; 443 u16 max_iface_count;
443 u16 max_mcc_count; 444 u16 max_mcc_count;
444 u16 max_evt_qs; 445 u16 max_evt_qs;
446 u16 max_nic_evt_qs; /* NIC's share of evt qs */
445 u32 if_cap_flags; 447 u32 if_cap_flags;
446 u32 vf_if_cap_flags; /* VF if capability flags */ 448 u32 vf_if_cap_flags; /* VF if capability flags */
449 u32 flags;
450 /* Calculated PF Pool's share of RSS Tables. This is not enforced by
451 * the FW, but is a self-imposed driver limitation.
452 */
453 u16 max_rss_tables;
454};
455
456/* These are port-wide values */
457struct be_port_resources {
458 u16 max_vfs;
459 u16 nic_pfs;
447}; 460};
448 461
449#define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC) 462#define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC)
@@ -513,7 +526,8 @@ struct be_adapter {
513 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ 526 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
514 spinlock_t mcc_cq_lock; 527 spinlock_t mcc_cq_lock;
515 528
516 u16 cfg_num_qs; /* configured via set-channels */ 529 u16 cfg_num_rx_irqs; /* configured via set-channels */
530 u16 cfg_num_tx_irqs; /* configured via set-channels */
517 u16 num_evt_qs; 531 u16 num_evt_qs;
518 u16 num_msix_vec; 532 u16 num_msix_vec;
519 struct be_eq_obj eq_obj[MAX_EVT_QS]; 533 struct be_eq_obj eq_obj[MAX_EVT_QS];
@@ -632,16 +646,42 @@ struct be_adapter {
632#define be_max_txqs(adapter) (adapter->res.max_tx_qs) 646#define be_max_txqs(adapter) (adapter->res.max_tx_qs)
633#define be_max_prio_txqs(adapter) (adapter->res.max_prio_tx_qs) 647#define be_max_prio_txqs(adapter) (adapter->res.max_prio_tx_qs)
634#define be_max_rxqs(adapter) (adapter->res.max_rx_qs) 648#define be_max_rxqs(adapter) (adapter->res.max_rx_qs)
635#define be_max_eqs(adapter) (adapter->res.max_evt_qs) 649/* Max number of EQs available for the function (NIC + RoCE (if enabled)) */
650#define be_max_func_eqs(adapter) (adapter->res.max_evt_qs)
 651/* Max number of EQs available only for NIC */
652#define be_max_nic_eqs(adapter) (adapter->res.max_nic_evt_qs)
636#define be_if_cap_flags(adapter) (adapter->res.if_cap_flags) 653#define be_if_cap_flags(adapter) (adapter->res.if_cap_flags)
637 654#define be_max_pf_pool_rss_tables(adapter) \
638static inline u16 be_max_qs(struct be_adapter *adapter) 655 (adapter->pool_res.max_rss_tables)
 656/* Max irqs available for NIC */
657#define be_max_irqs(adapter) \
658 (min_t(u16, be_max_nic_eqs(adapter), num_online_cpus()))
659
660/* Max irqs *needed* for RX queues */
661static inline u16 be_max_rx_irqs(struct be_adapter *adapter)
639{ 662{
 640 /* If no RSS, need at least the one def RXQ */ 663 /* If no RSS, need at least one irq for def-RXQ */
641 u16 num = max_t(u16, be_max_rss(adapter), 1); 664 u16 num = max_t(u16, be_max_rss(adapter), 1);
642 665
643 num = min(num, be_max_eqs(adapter)); 666 return min_t(u16, num, be_max_irqs(adapter));
644 return min_t(u16, num, num_online_cpus()); 667}
668
669/* Max irqs *needed* for TX queues */
670static inline u16 be_max_tx_irqs(struct be_adapter *adapter)
671{
672 return min_t(u16, be_max_txqs(adapter), be_max_irqs(adapter));
673}
674
675/* Max irqs *needed* for combined queues */
676static inline u16 be_max_qp_irqs(struct be_adapter *adapter)
677{
678 return min(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter));
679}
680
681/* Max irqs *needed* for RX and TX queues together */
682static inline u16 be_max_any_irqs(struct be_adapter *adapter)
683{
684 return max(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter));
645} 685}
646 686
647/* Is BE in pvid_tagging mode */ 687/* Is BE in pvid_tagging mode */
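
The new helpers in be.h cap every IRQ count at be_max_irqs(), i.e. min(NIC EQs, online CPUs). A standalone worked example of the helper math, with hypothetical limits (6 NIC EQs, 2 RSS queues, 10 TX queues, 4 CPUs):

#include <stdio.h>

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }
static unsigned max_u(unsigned a, unsigned b) { return a > b ? a : b; }

int main(void)
{
	unsigned nic_eqs = 6, rss_qs = 2, tx_qs = 10, cpus = 4;

	unsigned max_irqs = min_u(nic_eqs, cpus);		/* 4 */
	unsigned rx_irqs  = min_u(max_u(rss_qs, 1), max_irqs);	/* 2 */
	unsigned tx_irqs  = min_u(tx_qs, max_irqs);		/* 4 */

	printf("qp=%u any=%u\n",
	       min_u(tx_irqs, rx_irqs),		/* combined channels: 2 */
	       max_u(tx_irqs, rx_irqs));	/* vectors needed:    4 */
	return 0;
}
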
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 22402db275f2..2cc11756859f 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2015 Emulex 2 * Copyright (C) 2005 - 2016 Broadcom
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -87,6 +87,11 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
87 CMD_SUBSYSTEM_LOWLEVEL, 87 CMD_SUBSYSTEM_LOWLEVEL,
88 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC 88 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
89 }, 89 },
90 {
91 OPCODE_COMMON_SET_HSW_CONFIG,
92 CMD_SUBSYSTEM_COMMON,
93 BE_PRIV_DEVCFG | BE_PRIV_VHADM
94 },
90}; 95};
91 96
92static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem) 97static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
@@ -3850,6 +3855,10 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
3850 void *ctxt; 3855 void *ctxt;
3851 int status; 3856 int status;
3852 3857
3858 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_HSW_CONFIG,
3859 CMD_SUBSYSTEM_COMMON))
3860 return -EPERM;
3861
3853 spin_lock_bh(&adapter->mcc_lock); 3862 spin_lock_bh(&adapter->mcc_lock);
3854 3863
3855 wrb = wrb_from_mccq(adapter); 3864 wrb = wrb_from_mccq(adapter);
@@ -3871,7 +3880,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
3871 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); 3880 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
3872 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); 3881 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
3873 } 3882 }
3874 if (!BEx_chip(adapter) && hsw_mode) { 3883 if (hsw_mode) {
3875 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, 3884 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
3876 ctxt, adapter->hba_port_num); 3885 ctxt, adapter->hba_port_num);
3877 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1); 3886 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
@@ -4023,7 +4032,10 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
4023 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va; 4032 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
4024 4033
4025 adapter->wol_cap = resp->wol_settings; 4034 adapter->wol_cap = resp->wol_settings;
4026 if (adapter->wol_cap & BE_WOL_CAP) 4035
4036 /* Non-zero macaddr indicates WOL is enabled */
4037 if (adapter->wol_cap & BE_WOL_CAP &&
4038 !is_zero_ether_addr(resp->magic_mac))
4027 adapter->wol_en = true; 4039 adapter->wol_en = true;
4028 } 4040 }
4029err: 4041err:
@@ -4360,9 +4372,35 @@ err:
4360 return status; 4372 return status;
4361} 4373}
4362 4374
4375/* This routine returns a list of all the NIC PF_nums in the adapter */
4376u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums)
4377{
4378 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
4379 struct be_pcie_res_desc *pcie = NULL;
4380 int i;
4381 u16 nic_pf_count = 0;
4382
4383 for (i = 0; i < desc_count; i++) {
4384 if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
4385 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) {
4386 pcie = (struct be_pcie_res_desc *)hdr;
4387 if (pcie->pf_state && (pcie->pf_type == MISSION_NIC ||
4388 pcie->pf_type == MISSION_RDMA)) {
4389 nic_pf_nums[nic_pf_count++] = pcie->pf_num;
4390 }
4391 }
4392
4393 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
4394 hdr = (void *)hdr + hdr->desc_len;
4395 }
4396 return nic_pf_count;
4397}
4398
4363/* Will use MBOX only if MCCQ has not been created */ 4399/* Will use MBOX only if MCCQ has not been created */
4364int be_cmd_get_profile_config(struct be_adapter *adapter, 4400int be_cmd_get_profile_config(struct be_adapter *adapter,
4365 struct be_resources *res, u8 query, u8 domain) 4401 struct be_resources *res,
4402 struct be_port_resources *port_res,
4403 u8 profile_type, u8 query, u8 domain)
4366{ 4404{
4367 struct be_cmd_resp_get_profile_config *resp; 4405 struct be_cmd_resp_get_profile_config *resp;
4368 struct be_cmd_req_get_profile_config *req; 4406 struct be_cmd_req_get_profile_config *req;
@@ -4389,7 +4427,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
4389 4427
4390 if (!lancer_chip(adapter)) 4428 if (!lancer_chip(adapter))
4391 req->hdr.version = 1; 4429 req->hdr.version = 1;
4392 req->type = ACTIVE_PROFILE_TYPE; 4430 req->type = profile_type;
4393 req->hdr.domain = domain; 4431 req->hdr.domain = domain;
4394 4432
4395 /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the 4433 /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
@@ -4406,6 +4444,28 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
4406 resp = cmd.va; 4444 resp = cmd.va;
4407 desc_count = le16_to_cpu(resp->desc_count); 4445 desc_count = le16_to_cpu(resp->desc_count);
4408 4446
4447 if (port_res) {
4448 u16 nic_pf_cnt = 0, i;
4449 u16 nic_pf_num_list[MAX_NIC_FUNCS];
4450
4451 nic_pf_cnt = be_get_nic_pf_num_list(resp->func_param,
4452 desc_count,
4453 nic_pf_num_list);
4454
4455 for (i = 0; i < nic_pf_cnt; i++) {
4456 nic = be_get_func_nic_desc(resp->func_param, desc_count,
4457 nic_pf_num_list[i]);
4458 if (nic->link_param == adapter->port_num) {
4459 port_res->nic_pfs++;
4460 pcie = be_get_pcie_desc(resp->func_param,
4461 desc_count,
4462 nic_pf_num_list[i]);
4463 port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
4464 }
4465 }
4466 return status;
4467 }
4468
4409 pcie = be_get_pcie_desc(resp->func_param, desc_count, 4469 pcie = be_get_pcie_desc(resp->func_param, desc_count,
4410 adapter->pf_num); 4470 adapter->pf_num);
4411 if (pcie) 4471 if (pcie)
@@ -4465,7 +4525,7 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
4465} 4525}
4466 4526
4467/* Mark all fields invalid */ 4527/* Mark all fields invalid */
4468static void be_reset_nic_desc(struct be_nic_res_desc *nic) 4528void be_reset_nic_desc(struct be_nic_res_desc *nic)
4469{ 4529{
4470 memset(nic, 0, sizeof(*nic)); 4530 memset(nic, 0, sizeof(*nic));
4471 nic->unicast_mac_count = 0xFFFF; 4531 nic->unicast_mac_count = 0xFFFF;
@@ -4534,73 +4594,9 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
4534 1, version, domain); 4594 1, version, domain);
4535} 4595}
4536 4596
4537static void be_fill_vf_res_template(struct be_adapter *adapter,
4538 struct be_resources pool_res,
4539 u16 num_vfs, u16 num_vf_qs,
4540 struct be_nic_res_desc *nic_vft)
4541{
4542 u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
4543 struct be_resources res_mod = {0};
4544
4545 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
4546 * which are modifiable using SET_PROFILE_CONFIG cmd.
4547 */
4548 be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);
4549
4550 /* If RSS IFACE capability flags are modifiable for a VF, set the
4551 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
4552 * more than 1 RSSQ is available for a VF.
4553 * Otherwise, provision only 1 queue pair for VF.
4554 */
4555 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
4556 nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4557 if (num_vf_qs > 1) {
4558 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
4559 if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
4560 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
4561 } else {
4562 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
4563 BE_IF_FLAGS_DEFQ_RSS);
4564 }
4565 } else {
4566 num_vf_qs = 1;
4567 }
4568
4569 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
4570 nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4571 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4572 }
4573
4574 nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
4575 nic_vft->rq_count = cpu_to_le16(num_vf_qs);
4576 nic_vft->txq_count = cpu_to_le16(num_vf_qs);
4577 nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
4578 nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
4579 (num_vfs + 1));
4580
4581 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
4582 * among the PF and it's VFs, if the fields are changeable
4583 */
4584 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
4585 nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
4586 (num_vfs + 1));
4587
4588 if (res_mod.max_vlans == FIELD_MODIFIABLE)
4589 nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
4590 (num_vfs + 1));
4591
4592 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
4593 nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
4594 (num_vfs + 1));
4595
4596 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
4597 nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
4598 (num_vfs + 1));
4599}
4600
4601int be_cmd_set_sriov_config(struct be_adapter *adapter, 4597int be_cmd_set_sriov_config(struct be_adapter *adapter,
4602 struct be_resources pool_res, u16 num_vfs, 4598 struct be_resources pool_res, u16 num_vfs,
4603 u16 num_vf_qs) 4599 struct be_resources *vft_res)
4604{ 4600{
4605 struct { 4601 struct {
4606 struct be_pcie_res_desc pcie; 4602 struct be_pcie_res_desc pcie;
@@ -4620,12 +4616,26 @@ int be_cmd_set_sriov_config(struct be_adapter *adapter,
4620 be_reset_nic_desc(&desc.nic_vft); 4616 be_reset_nic_desc(&desc.nic_vft);
4621 desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1; 4617 desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
4622 desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 4618 desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
4623 desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT); 4619 desc.nic_vft.flags = vft_res->flags | BIT(VFT_SHIFT) |
4620 BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
4624 desc.nic_vft.pf_num = adapter->pdev->devfn; 4621 desc.nic_vft.pf_num = adapter->pdev->devfn;
4625 desc.nic_vft.vf_num = 0; 4622 desc.nic_vft.vf_num = 0;
4626 4623 desc.nic_vft.cap_flags = cpu_to_le32(vft_res->vf_if_cap_flags);
4627 be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs, 4624 desc.nic_vft.rq_count = cpu_to_le16(vft_res->max_rx_qs);
4628 &desc.nic_vft); 4625 desc.nic_vft.txq_count = cpu_to_le16(vft_res->max_tx_qs);
4626 desc.nic_vft.rssq_count = cpu_to_le16(vft_res->max_rss_qs);
4627 desc.nic_vft.cq_count = cpu_to_le16(vft_res->max_cq_count);
4628
4629 if (vft_res->max_uc_mac)
4630 desc.nic_vft.unicast_mac_count =
4631 cpu_to_le16(vft_res->max_uc_mac);
4632 if (vft_res->max_vlans)
4633 desc.nic_vft.vlan_count = cpu_to_le16(vft_res->max_vlans);
4634 if (vft_res->max_iface_count)
4635 desc.nic_vft.iface_count =
4636 cpu_to_le16(vft_res->max_iface_count);
4637 if (vft_res->max_mcc_count)
4638 desc.nic_vft.mcc_count = cpu_to_le16(vft_res->max_mcc_count);
4629 4639
4630 return be_cmd_set_profile_config(adapter, &desc, 4640 return be_cmd_set_profile_config(adapter, &desc,
4631 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0); 4641 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
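
be_get_nic_pf_num_list() walks the GET_PROFILE_CONFIG response as a packed run of variable-length descriptors, each led by a type/len header. A standalone sketch of that walk; the header layout and the fallback size are illustrative, not the firmware's exact definitions:

#include <stdint.h>
#include <stdio.h>

#define DESC_SIZE_V0	72	/* fallback size; illustrative value */

struct desc_hdr {
	uint8_t desc_type;
	uint8_t desc_len;
	/* payload follows; desc_len covers the whole descriptor */
};

static void walk_descs(uint8_t *buf, unsigned desc_count)
{
	struct desc_hdr *hdr = (struct desc_hdr *)buf;
	unsigned i;

	for (i = 0; i < desc_count; i++) {
		printf("desc %u: type %u len %u\n",
		       i, hdr->desc_type, hdr->desc_len);
		/* a zero desc_len falls back to the v0 size so the
		 * walk always advances */
		hdr = (struct desc_hdr *)((uint8_t *)hdr +
			(hdr->desc_len ? hdr->desc_len : DESC_SIZE_V0));
	}
}
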
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d8540ae95e5a..0d6be224a787 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2015 Emulex 2 * Copyright (C) 2005 - 2016 Broadcom
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -1556,7 +1556,9 @@ struct be_cmd_resp_acpi_wol_magic_config_v1 {
1556 u8 rsvd0[2]; 1556 u8 rsvd0[2];
1557 u8 wol_settings; 1557 u8 wol_settings;
1558 u8 rsvd1[5]; 1558 u8 rsvd1[5];
1559 u32 rsvd2[295]; 1559 u32 rsvd2[288];
1560 u8 magic_mac[6];
1561 u8 rsvd3[22];
1560} __packed; 1562} __packed;
1561 1563
1562#define BE_GET_WOL_CAP 2 1564#define BE_GET_WOL_CAP 2
@@ -2128,6 +2130,9 @@ struct be_cmd_req_set_ext_fat_caps {
2128#define IMM_SHIFT 6 /* Immediate */ 2130#define IMM_SHIFT 6 /* Immediate */
2129#define NOSV_SHIFT 7 /* No save */ 2131#define NOSV_SHIFT 7 /* No save */
2130 2132
2133#define MISSION_NIC 1
2134#define MISSION_RDMA 8
2135
2131struct be_res_desc_hdr { 2136struct be_res_desc_hdr {
2132 u8 desc_type; 2137 u8 desc_type;
2133 u8 desc_len; 2138 u8 desc_len;
@@ -2244,6 +2249,7 @@ struct be_cmd_req_get_profile_config {
2244 struct be_cmd_req_hdr hdr; 2249 struct be_cmd_req_hdr hdr;
2245 u8 rsvd; 2250 u8 rsvd;
2246#define ACTIVE_PROFILE_TYPE 0x2 2251#define ACTIVE_PROFILE_TYPE 0x2
2252#define SAVED_PROFILE_TYPE 0x0
2247#define QUERY_MODIFIABLE_FIELDS_TYPE BIT(3) 2253#define QUERY_MODIFIABLE_FIELDS_TYPE BIT(3)
2248 u8 type; 2254 u8 type;
2249 u16 rsvd1; 2255 u16 rsvd1;
@@ -2449,7 +2455,9 @@ int be_cmd_query_port_name(struct be_adapter *adapter);
2449int be_cmd_get_func_config(struct be_adapter *adapter, 2455int be_cmd_get_func_config(struct be_adapter *adapter,
2450 struct be_resources *res); 2456 struct be_resources *res);
2451int be_cmd_get_profile_config(struct be_adapter *adapter, 2457int be_cmd_get_profile_config(struct be_adapter *adapter,
2452 struct be_resources *res, u8 query, u8 domain); 2458 struct be_resources *res,
2459 struct be_port_resources *port_res,
2460 u8 profile_type, u8 query, u8 domain);
2453int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile); 2461int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
2454int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, 2462int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
2455 int vf_num); 2463 int vf_num);
@@ -2461,4 +2469,4 @@ int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port);
2461int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op); 2469int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op);
2462int be_cmd_set_sriov_config(struct be_adapter *adapter, 2470int be_cmd_set_sriov_config(struct be_adapter *adapter,
2463 struct be_resources res, u16 num_vfs, 2471 struct be_resources res, u16 num_vfs,
2464 u16 num_vf_qs); 2472 struct be_resources *vft_res);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 2ff691636dac..50e7be5da50c 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2015 Emulex 2 * Copyright (C) 2005 - 2016 Broadcom
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -793,6 +793,11 @@ static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
793static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 793static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
794{ 794{
795 struct be_adapter *adapter = netdev_priv(netdev); 795 struct be_adapter *adapter = netdev_priv(netdev);
796 struct device *dev = &adapter->pdev->dev;
797 struct be_dma_mem cmd;
798 u8 mac[ETH_ALEN];
799 bool enable;
800 int status;
796 801
797 if (wol->wolopts & ~WAKE_MAGIC) 802 if (wol->wolopts & ~WAKE_MAGIC)
798 return -EOPNOTSUPP; 803 return -EOPNOTSUPP;
@@ -802,12 +807,32 @@ static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
802 return -EOPNOTSUPP; 807 return -EOPNOTSUPP;
803 } 808 }
804 809
805 if (wol->wolopts & WAKE_MAGIC) 810 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
806 adapter->wol_en = true; 811 cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
807 else 812 if (!cmd.va)
808 adapter->wol_en = false; 813 return -ENOMEM;
809 814
810 return 0; 815 eth_zero_addr(mac);
816
817 enable = wol->wolopts & WAKE_MAGIC;
818 if (enable)
819 ether_addr_copy(mac, adapter->netdev->dev_addr);
820
821 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
822 if (status) {
823 dev_err(dev, "Could not set Wake-on-lan mac address\n");
824 status = be_cmd_status(status);
825 goto err;
826 }
827
828 pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
829 pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
830
831 adapter->wol_en = enable ? true : false;
832
833err:
834 dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
835 return status;
811} 836}
812 837
813static int be_test_ddr_dma(struct be_adapter *adapter) 838static int be_test_ddr_dma(struct be_adapter *adapter)
@@ -1171,9 +1196,17 @@ static void be_get_channels(struct net_device *netdev,
1171 struct ethtool_channels *ch) 1196 struct ethtool_channels *ch)
1172{ 1197{
1173 struct be_adapter *adapter = netdev_priv(netdev); 1198 struct be_adapter *adapter = netdev_priv(netdev);
1199 u16 num_rx_irqs = max_t(u16, adapter->num_rss_qs, 1);
1174 1200
1175 ch->combined_count = adapter->num_evt_qs; 1201 /* num_tx_qs is always same as the number of irqs used for TX */
1176 ch->max_combined = be_max_qs(adapter); 1202 ch->combined_count = min(adapter->num_tx_qs, num_rx_irqs);
1203 ch->rx_count = num_rx_irqs - ch->combined_count;
1204 ch->tx_count = adapter->num_tx_qs - ch->combined_count;
1205
1206 ch->max_combined = be_max_qp_irqs(adapter);
 1207 /* The user must create at least one combined channel */
1208 ch->max_rx = be_max_rx_irqs(adapter) - 1;
1209 ch->max_tx = be_max_tx_irqs(adapter) - 1;
1177} 1210}
1178 1211
1179static int be_set_channels(struct net_device *netdev, 1212static int be_set_channels(struct net_device *netdev,
@@ -1182,11 +1215,22 @@ static int be_set_channels(struct net_device *netdev,
1182 struct be_adapter *adapter = netdev_priv(netdev); 1215 struct be_adapter *adapter = netdev_priv(netdev);
1183 int status; 1216 int status;
1184 1217
1185 if (ch->rx_count || ch->tx_count || ch->other_count || 1218 /* we support either only combined channels or a combination of
1186 !ch->combined_count || ch->combined_count > be_max_qs(adapter)) 1219 * combined and either RX-only or TX-only channels.
1220 */
1221 if (ch->other_count || !ch->combined_count ||
1222 (ch->rx_count && ch->tx_count))
1223 return -EINVAL;
1224
1225 if (ch->combined_count > be_max_qp_irqs(adapter) ||
1226 (ch->rx_count &&
1227 (ch->rx_count + ch->combined_count) > be_max_rx_irqs(adapter)) ||
1228 (ch->tx_count &&
1229 (ch->tx_count + ch->combined_count) > be_max_tx_irqs(adapter)))
1187 return -EINVAL; 1230 return -EINVAL;
1188 1231
1189 adapter->cfg_num_qs = ch->combined_count; 1232 adapter->cfg_num_rx_irqs = ch->combined_count + ch->rx_count;
1233 adapter->cfg_num_tx_irqs = ch->combined_count + ch->tx_count;
1190 1234
1191 status = be_update_queues(adapter); 1235 status = be_update_queues(adapter);
1192 return be_cmd_status(status); 1236 return be_cmd_status(status);
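
be_set_channels() now accepts combined channels plus extras on at most one side (RX-only or TX-only), with each sum checked against its own IRQ budget. The rule, restated as a standalone predicate with hypothetical limits:

#include <stdbool.h>
#include <stdio.h>

static bool channels_valid(unsigned combined, unsigned rx, unsigned tx,
			   unsigned max_qp, unsigned max_rx, unsigned max_tx)
{
	if (!combined || (rx && tx))	/* extras on one side only */
		return false;
	if (combined > max_qp ||
	    (rx && rx + combined > max_rx) ||
	    (tx && tx + combined > max_tx))
		return false;
	return true;
}

int main(void)
{
	/* 4 combined + 2 RX-only -> 6 RX irqs, 4 TX irqs */
	printf("%d\n", channels_valid(4, 2, 0, 4, 8, 8));	/* 1 */
	printf("%d\n", channels_valid(4, 2, 1, 4, 8, 8));	/* 0 */
	return 0;
}
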
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index ed98ef1ecac3..1873c74638cd 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2015 Emulex 2 * Copyright (C) 2005 - 2016 Broadcom
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -2620,8 +2620,10 @@ static int be_evt_queues_create(struct be_adapter *adapter)
2620 struct be_aic_obj *aic; 2620 struct be_aic_obj *aic;
2621 int i, rc; 2621 int i, rc;
2622 2622
2623 /* need enough EQs to service both RX and TX queues */
2623 adapter->num_evt_qs = min_t(u16, num_irqs(adapter), 2624 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2624 adapter->cfg_num_qs); 2625 max(adapter->cfg_num_rx_irqs,
2626 adapter->cfg_num_tx_irqs));
2625 2627
2626 for_all_evt_queues(adapter, eqo, i) { 2628 for_all_evt_queues(adapter, eqo, i) {
2627 int numa_node = dev_to_node(&adapter->pdev->dev); 2629 int numa_node = dev_to_node(&adapter->pdev->dev);
@@ -2726,7 +2728,7 @@ static int be_tx_qs_create(struct be_adapter *adapter)
2726 struct be_eq_obj *eqo; 2728 struct be_eq_obj *eqo;
2727 int status, i; 2729 int status, i;
2728 2730
2729 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter)); 2731 adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
2730 2732
2731 for_all_tx_queues(adapter, txo, i) { 2733 for_all_tx_queues(adapter, txo, i) {
2732 cq = &txo->cq; 2734 cq = &txo->cq;
@@ -2784,11 +2786,11 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
2784 struct be_rx_obj *rxo; 2786 struct be_rx_obj *rxo;
2785 int rc, i; 2787 int rc, i;
2786 2788
2787 /* We can create as many RSS rings as there are EQs. */ 2789 adapter->num_rss_qs =
2788 adapter->num_rss_qs = adapter->num_evt_qs; 2790 min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
2789 2791
 2790 /* We'll use RSS only if at least 2 RSS rings are supported. */ 2792 /* We'll use RSS only if at least 2 RSS rings are supported. */
2791 if (adapter->num_rss_qs <= 1) 2793 if (adapter->num_rss_qs < 2)
2792 adapter->num_rss_qs = 0; 2794 adapter->num_rss_qs = 0;
2793 2795
2794 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq; 2796 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
@@ -3249,18 +3251,22 @@ static void be_msix_disable(struct be_adapter *adapter)
3249 3251
3250static int be_msix_enable(struct be_adapter *adapter) 3252static int be_msix_enable(struct be_adapter *adapter)
3251{ 3253{
3252 int i, num_vec; 3254 unsigned int i, num_vec, max_roce_eqs;
3253 struct device *dev = &adapter->pdev->dev; 3255 struct device *dev = &adapter->pdev->dev;
3254 3256
3255 /* If RoCE is supported, program the max number of NIC vectors that 3257 /* If RoCE is supported, program the max number of vectors that
3256 * may be configured via set-channels, along with vectors needed for 3258 * could be used for NIC and RoCE, else, just program the number
3257 * RoCe. Else, just program the number we'll use initially. 3259 * we'll use initially.
3258 */ 3260 */
3259 if (be_roce_supported(adapter)) 3261 if (be_roce_supported(adapter)) {
3260 num_vec = min_t(int, 2 * be_max_eqs(adapter), 3262 max_roce_eqs =
3261 2 * num_online_cpus()); 3263 be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3262 else 3264 max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3263 num_vec = adapter->cfg_num_qs; 3265 num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3266 } else {
3267 num_vec = max(adapter->cfg_num_rx_irqs,
3268 adapter->cfg_num_tx_irqs);
3269 }
3264 3270
3265 for (i = 0; i < num_vec; i++) 3271 for (i = 0; i < num_vec; i++)
3266 adapter->msix_entries[i].entry = i; 3272 adapter->msix_entries[i].entry = i;
@@ -3625,10 +3631,8 @@ static int be_open(struct net_device *netdev)
3625 be_link_status_update(adapter, link_status); 3631 be_link_status_update(adapter, link_status);
3626 3632
3627 netif_tx_start_all_queues(netdev); 3633 netif_tx_start_all_queues(netdev);
3628#ifdef CONFIG_BE2NET_VXLAN
3629 if (skyhawk_chip(adapter)) 3634 if (skyhawk_chip(adapter))
3630 vxlan_get_rx_port(netdev); 3635 udp_tunnel_get_rx_info(netdev);
3631#endif
3632 3636
3633 return 0; 3637 return 0;
3634err: 3638err:
@@ -3636,40 +3640,6 @@ err:
3636 return -EIO; 3640 return -EIO;
3637} 3641}
3638 3642
3639static int be_setup_wol(struct be_adapter *adapter, bool enable)
3640{
3641 struct device *dev = &adapter->pdev->dev;
3642 struct be_dma_mem cmd;
3643 u8 mac[ETH_ALEN];
3644 int status;
3645
3646 eth_zero_addr(mac);
3647
3648 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
3649 cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
3650 if (!cmd.va)
3651 return -ENOMEM;
3652
3653 if (enable) {
3654 status = pci_write_config_dword(adapter->pdev,
3655 PCICFG_PM_CONTROL_OFFSET,
3656 PCICFG_PM_CONTROL_MASK);
3657 if (status) {
3658 dev_err(dev, "Could not enable Wake-on-lan\n");
3659 goto err;
3660 }
3661 } else {
3662 ether_addr_copy(mac, adapter->netdev->dev_addr);
3663 }
3664
3665 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3666 pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
3667 pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
3668err:
3669 dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
3670 return status;
3671}
3672
3673static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac) 3643static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3674{ 3644{
3675 u32 addr; 3645 u32 addr;
@@ -3759,6 +3729,11 @@ static void be_vf_clear(struct be_adapter *adapter)
3759 3729
3760 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1); 3730 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3761 } 3731 }
3732
3733 if (BE3_chip(adapter))
3734 be_cmd_set_hsw_config(adapter, 0, 0,
3735 adapter->if_handle,
3736 PORT_FWD_TYPE_PASSTHRU, 0);
3762done: 3737done:
3763 kfree(adapter->vf_cfg); 3738 kfree(adapter->vf_cfg);
3764 adapter->num_vfs = 0; 3739 adapter->num_vfs = 0;
@@ -3789,7 +3764,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
3789 } 3764 }
3790} 3765}
3791 3766
3792#ifdef CONFIG_BE2NET_VXLAN
3793static void be_disable_vxlan_offloads(struct be_adapter *adapter) 3767static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3794{ 3768{
3795 struct net_device *netdev = adapter->netdev; 3769 struct net_device *netdev = adapter->netdev;
@@ -3808,37 +3782,87 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3808 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL); 3782 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
3809 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL); 3783 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
3810} 3784}
3811#endif
3812 3785
3813static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs) 3786static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
3787 struct be_resources *vft_res)
3814{ 3788{
3815 struct be_resources res = adapter->pool_res; 3789 struct be_resources res = adapter->pool_res;
3790 u32 vf_if_cap_flags = res.vf_if_cap_flags;
3791 struct be_resources res_mod = {0};
3816 u16 num_vf_qs = 1; 3792 u16 num_vf_qs = 1;
3817 3793
 3818 /* Distribute the queue resources among the PF and its VFs 3794 /* Distribute the queue resources among the PF and its VFs */
3819 * Do not distribute queue resources in multi-channel configuration. 3795 if (num_vfs) {
3820 */ 3796 /* Divide the rx queues evenly among the VFs and the PF, capped
3821 if (num_vfs && !be_is_mc(adapter)) { 3797 * at VF-EQ-count. Any remainder queues belong to the PF.
3822 /* Divide the qpairs evenly among the VFs and the PF, capped 3798 */
3823 * at VF-EQ-count. Any remainder qpairs belong to the PF.
3824 */
3825 num_vf_qs = min(SH_VF_MAX_NIC_EQS, 3799 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
3826 res.max_rss_qs / (num_vfs + 1)); 3800 res.max_rss_qs / (num_vfs + 1));
3827 3801
3828 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable 3802 /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
3829 * interfaces per port. Provide RSS on VFs, only if number 3803 * RSS Tables per port. Provide RSS on VFs, only if number of
3830 * of VFs requested is less than MAX_RSS_IFACES limit. 3804 * VFs requested is less than it's PF Pool's RSS Tables limit.
3831 */ 3805 */
3832 if (num_vfs >= MAX_RSS_IFACES) 3806 if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
3833 num_vf_qs = 1; 3807 num_vf_qs = 1;
3834 } 3808 }
3835 return num_vf_qs; 3809
3810 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
3811 * which are modifiable using SET_PROFILE_CONFIG cmd.
3812 */
3813 be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
3814 RESOURCE_MODIFIABLE, 0);
3815
3816 /* If RSS IFACE capability flags are modifiable for a VF, set the
3817 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
3818 * more than 1 RSSQ is available for a VF.
3819 * Otherwise, provision only 1 queue pair for VF.
3820 */
3821 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
3822 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
3823 if (num_vf_qs > 1) {
3824 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
3825 if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
3826 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
3827 } else {
3828 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
3829 BE_IF_FLAGS_DEFQ_RSS);
3830 }
3831 } else {
3832 num_vf_qs = 1;
3833 }
3834
3835 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
3836 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
3837 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3838 }
3839
3840 vft_res->vf_if_cap_flags = vf_if_cap_flags;
3841 vft_res->max_rx_qs = num_vf_qs;
3842 vft_res->max_rss_qs = num_vf_qs;
3843 vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
3844 vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
3845
3846 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
 3847 * among the PF and its VFs, if the fields are changeable
3848 */
3849 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
3850 vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
3851
3852 if (res_mod.max_vlans == FIELD_MODIFIABLE)
3853 vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
3854
3855 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
3856 vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
3857
3858 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
3859 vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
3836} 3860}
3837 3861
3838static int be_clear(struct be_adapter *adapter) 3862static int be_clear(struct be_adapter *adapter)
3839{ 3863{
3840 struct pci_dev *pdev = adapter->pdev; 3864 struct pci_dev *pdev = adapter->pdev;
3841 u16 num_vf_qs; 3865 struct be_resources vft_res = {0};
3842 3866
3843 be_cancel_worker(adapter); 3867 be_cancel_worker(adapter);
3844 3868
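
be_calculate_vf_res() splits the PF pool evenly across the PF and its VFs, capping each VF at the per-VF EQ limit. A worked example with hypothetical pool numbers (32 RSS queues, 7 VFs, per-VF cap of 16):

#include <stdio.h>

int main(void)
{
	unsigned vf_eq_cap = 16;	/* per-VF cap; illustrative */
	unsigned max_rss_qs = 32, num_vfs = 7;

	unsigned share = max_rss_qs / (num_vfs + 1);	/* 32/8 = 4 */
	unsigned num_vf_qs = share < vf_eq_cap ? share : vf_eq_cap;

	printf("num_vf_qs=%u\n", num_vf_qs);	/* 4 rx/rss queues per VF */
	return 0;
}
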
@@ -3850,16 +3874,15 @@ static int be_clear(struct be_adapter *adapter)
3850 */ 3874 */
3851 if (skyhawk_chip(adapter) && be_physfn(adapter) && 3875 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3852 !pci_vfs_assigned(pdev)) { 3876 !pci_vfs_assigned(pdev)) {
3853 num_vf_qs = be_calculate_vf_qs(adapter, 3877 be_calculate_vf_res(adapter,
3854 pci_sriov_get_totalvfs(pdev)); 3878 pci_sriov_get_totalvfs(pdev),
3879 &vft_res);
3855 be_cmd_set_sriov_config(adapter, adapter->pool_res, 3880 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3856 pci_sriov_get_totalvfs(pdev), 3881 pci_sriov_get_totalvfs(pdev),
3857 num_vf_qs); 3882 &vft_res);
3858 } 3883 }
3859 3884
3860#ifdef CONFIG_BE2NET_VXLAN
3861 be_disable_vxlan_offloads(adapter); 3885 be_disable_vxlan_offloads(adapter);
3862#endif
3863 kfree(adapter->pmac_id); 3886 kfree(adapter->pmac_id);
3864 adapter->pmac_id = NULL; 3887 adapter->pmac_id = NULL;
3865 3888
@@ -3884,7 +3907,8 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3884 3907
3885 for_all_vfs(adapter, vf_cfg, vf) { 3908 for_all_vfs(adapter, vf_cfg, vf) {
3886 if (!BE3_chip(adapter)) { 3909 if (!BE3_chip(adapter)) {
3887 status = be_cmd_get_profile_config(adapter, &res, 3910 status = be_cmd_get_profile_config(adapter, &res, NULL,
3911 ACTIVE_PROFILE_TYPE,
3888 RESOURCE_LIMITS, 3912 RESOURCE_LIMITS,
3889 vf + 1); 3913 vf + 1);
3890 if (!status) { 3914 if (!status) {
@@ -4000,6 +4024,15 @@ static int be_vf_setup(struct be_adapter *adapter)
4000 } 4024 }
4001 } 4025 }
4002 4026
4027 if (BE3_chip(adapter)) {
4028 /* On BE3, enable VEB only when SRIOV is enabled */
4029 status = be_cmd_set_hsw_config(adapter, 0, 0,
4030 adapter->if_handle,
4031 PORT_FWD_TYPE_VEB, 0);
4032 if (status)
4033 goto err;
4034 }
4035
4003 adapter->flags |= BE_FLAGS_SRIOV_ENABLED; 4036 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
4004 return 0; 4037 return 0;
4005err: 4038err:
@@ -4069,8 +4102,9 @@ static void BEx_get_resources(struct be_adapter *adapter,
4069 /* On a SuperNIC profile, the driver needs to use the 4102 /* On a SuperNIC profile, the driver needs to use the
4070 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits 4103 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4071 */ 4104 */
4072 be_cmd_get_profile_config(adapter, &super_nic_res, 4105 be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
4073 RESOURCE_LIMITS, 0); 4106 ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
4107 0);
4074 /* Some old versions of BE3 FW don't report max_tx_qs value */ 4108 /* Some old versions of BE3 FW don't report max_tx_qs value */
4075 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS; 4109 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4076 } else { 4110 } else {
@@ -4109,12 +4143,38 @@ static void be_setup_init(struct be_adapter *adapter)
4109 adapter->cmd_privileges = MIN_PRIVILEGES; 4143 adapter->cmd_privileges = MIN_PRIVILEGES;
4110} 4144}
4111 4145
4146/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
4147 * However, this HW limitation is not exposed to the host via any SLI cmd.
4148 * As a result, in the case of SRIOV and in particular multi-partition configs
 4149 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
4150 * for distribution between the VFs. This self-imposed limit will determine the
 4151 * number of VFs for which RSS can be enabled.
4152 */
4153void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
4154{
4155 struct be_port_resources port_res = {0};
4156 u8 rss_tables_on_port;
4157 u16 max_vfs = be_max_vfs(adapter);
4158
4159 be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
4160 RESOURCE_LIMITS, 0);
4161
4162 rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
4163
4164 /* Each PF Pool's RSS Tables limit =
4165 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
4166 */
4167 adapter->pool_res.max_rss_tables =
4168 max_vfs * rss_tables_on_port / port_res.max_vfs;
4169}
4170
4112static int be_get_sriov_config(struct be_adapter *adapter) 4171static int be_get_sriov_config(struct be_adapter *adapter)
4113{ 4172{
4114 struct be_resources res = {0}; 4173 struct be_resources res = {0};
4115 int max_vfs, old_vfs; 4174 int max_vfs, old_vfs;
4116 4175
4117 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0); 4176 be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
4177 RESOURCE_LIMITS, 0);
4118 4178
4119 /* Some old versions of BE3 FW don't report max_vfs value */ 4179 /* Some old versions of BE3 FW don't report max_vfs value */
4120 if (BE3_chip(adapter) && !res.max_vfs) { 4180 if (BE3_chip(adapter) && !res.max_vfs) {
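
be_calculate_pf_pool_rss_tables() prorates the port's RSS tables by this PF's share of the port's VFs. With hypothetical numbers (15 tables on the port, 2 NIC PFs, this PF owning 32 of the port's 64 VFs):

#include <stdio.h>

int main(void)
{
	unsigned max_port_rss_tables = 15, nic_pfs = 2;
	unsigned pf_max_vfs = 32, port_max_vfs = 64;

	unsigned on_port = max_port_rss_tables - nic_pfs;	/* 13 */
	unsigned pool_share = pf_max_vfs * on_port / port_max_vfs;

	printf("max_rss_tables=%u\n", pool_share);	/* 32*13/64 = 6 */
	return 0;
}
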
@@ -4138,13 +4198,19 @@ static int be_get_sriov_config(struct be_adapter *adapter)
4138 adapter->num_vfs = old_vfs; 4198 adapter->num_vfs = old_vfs;
4139 } 4199 }
4140 4200
4201 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4202 be_calculate_pf_pool_rss_tables(adapter);
4203 dev_info(&adapter->pdev->dev,
4204 "RSS can be enabled for all VFs if num_vfs <= %d\n",
4205 be_max_pf_pool_rss_tables(adapter));
4206 }
4141 return 0; 4207 return 0;
4142} 4208}
4143 4209
4144static void be_alloc_sriov_res(struct be_adapter *adapter) 4210static void be_alloc_sriov_res(struct be_adapter *adapter)
4145{ 4211{
4146 int old_vfs = pci_num_vf(adapter->pdev); 4212 int old_vfs = pci_num_vf(adapter->pdev);
4147 u16 num_vf_qs; 4213 struct be_resources vft_res = {0};
4148 int status; 4214 int status;
4149 4215
4150 be_get_sriov_config(adapter); 4216 be_get_sriov_config(adapter);
@@ -4158,9 +4224,9 @@ static void be_alloc_sriov_res(struct be_adapter *adapter)
4158 * Also, this is done by FW in Lancer chip. 4224 * Also, this is done by FW in Lancer chip.
4159 */ 4225 */
4160 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) { 4226 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4161 num_vf_qs = be_calculate_vf_qs(adapter, 0); 4227 be_calculate_vf_res(adapter, 0, &vft_res);
4162 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0, 4228 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
4163 num_vf_qs); 4229 &vft_res);
4164 if (status) 4230 if (status)
4165 dev_err(&adapter->pdev->dev, 4231 dev_err(&adapter->pdev->dev,
4166 "Failed to optimize SRIOV resources\n"); 4232 "Failed to optimize SRIOV resources\n");
@@ -4173,16 +4239,13 @@ static int be_get_resources(struct be_adapter *adapter)
4173 struct be_resources res = {0}; 4239 struct be_resources res = {0};
4174 int status; 4240 int status;
4175 4241
4176 if (BEx_chip(adapter)) {
4177 BEx_get_resources(adapter, &res);
4178 adapter->res = res;
4179 }
4180
4181 /* For Lancer, SH etc read per-function resource limits from FW. 4242 /* For Lancer, SH etc read per-function resource limits from FW.
4182 * GET_FUNC_CONFIG returns per function guaranteed limits. 4243 * GET_FUNC_CONFIG returns per function guaranteed limits.
4183 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits 4244 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
4184 */ 4245 */
4185 if (!BEx_chip(adapter)) { 4246 if (BEx_chip(adapter)) {
4247 BEx_get_resources(adapter, &res);
4248 } else {
4186 status = be_cmd_get_func_config(adapter, &res); 4249 status = be_cmd_get_func_config(adapter, &res);
4187 if (status) 4250 if (status)
4188 return status; 4251 return status;
@@ -4191,13 +4254,13 @@ static int be_get_resources(struct be_adapter *adapter)
4191 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs && 4254 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4192 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)) 4255 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4193 res.max_rss_qs -= 1; 4256 res.max_rss_qs -= 1;
4194
4195 /* If RoCE may be enabled stash away half the EQs for RoCE */
4196 if (be_roce_supported(adapter))
4197 res.max_evt_qs /= 2;
4198 adapter->res = res;
4199 } 4257 }
4200 4258
4259 /* If RoCE is supported stash away half the EQs for RoCE */
4260 res.max_nic_evt_qs = be_roce_supported(adapter) ?
4261 res.max_evt_qs / 2 : res.max_evt_qs;
4262 adapter->res = res;
4263
4201 /* If FW supports RSS default queue, then skip creating non-RSS 4264 /* If FW supports RSS default queue, then skip creating non-RSS
4202 * queue for non-IP traffic. 4265 * queue for non-IP traffic.
4203 */ 4266 */
@@ -4206,15 +4269,17 @@ static int be_get_resources(struct be_adapter *adapter)
4206 4269
4207 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n", 4270 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4208 be_max_txqs(adapter), be_max_rxqs(adapter), 4271 be_max_txqs(adapter), be_max_rxqs(adapter),
4209 be_max_rss(adapter), be_max_eqs(adapter), 4272 be_max_rss(adapter), be_max_nic_eqs(adapter),
4210 be_max_vfs(adapter)); 4273 be_max_vfs(adapter));
4211 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n", 4274 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4212 be_max_uc(adapter), be_max_mc(adapter), 4275 be_max_uc(adapter), be_max_mc(adapter),
4213 be_max_vlans(adapter)); 4276 be_max_vlans(adapter));
4214 4277
4215 /* Sanitize cfg_num_qs based on HW and platform limits */ 4278 /* Ensure RX and TX queues are created in pairs at init time */
4216 adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(), 4279 adapter->cfg_num_rx_irqs =
4217 be_max_qs(adapter)); 4280 min_t(u16, netif_get_num_default_rss_queues(),
4281 be_max_qp_irqs(adapter));
4282 adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
4218 return 0; 4283 return 0;
4219} 4284}
4220 4285
@@ -4241,6 +4306,8 @@ static int be_get_config(struct be_adapter *adapter)
4241 } 4306 }
4242 4307
4243 be_cmd_get_acpi_wol_cap(adapter); 4308 be_cmd_get_acpi_wol_cap(adapter);
4309 pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
4310 pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);
4244 4311
4245 be_cmd_query_port_name(adapter); 4312 be_cmd_query_port_name(adapter);
4246 4313
@@ -4251,15 +4318,6 @@ static int be_get_config(struct be_adapter *adapter)
4251 "Using profile 0x%x\n", profile_id); 4318 "Using profile 0x%x\n", profile_id);
4252 } 4319 }
4253 4320
4254 status = be_get_resources(adapter);
4255 if (status)
4256 return status;
4257
4258 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4259 sizeof(*adapter->pmac_id), GFP_KERNEL);
4260 if (!adapter->pmac_id)
4261 return -ENOMEM;
4262
4263 return 0; 4321 return 0;
4264} 4322}
4265 4323
@@ -4334,7 +4392,7 @@ static int be_if_create(struct be_adapter *adapter)
4334 u32 cap_flags = be_if_cap_flags(adapter); 4392 u32 cap_flags = be_if_cap_flags(adapter);
4335 int status; 4393 int status;
4336 4394
4337 if (adapter->cfg_num_qs == 1) 4395 if (adapter->cfg_num_rx_irqs == 1)
4338 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS); 4396 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4339 4397
4340 en_flags &= cap_flags; 4398 en_flags &= cap_flags;
@@ -4460,13 +4518,22 @@ static int be_setup(struct be_adapter *adapter)
4460 return status; 4518 return status;
4461 } 4519 }
4462 4520
4521 status = be_get_config(adapter);
4522 if (status)
4523 goto err;
4524
4463 if (!BE2_chip(adapter) && be_physfn(adapter)) 4525 if (!BE2_chip(adapter) && be_physfn(adapter))
4464 be_alloc_sriov_res(adapter); 4526 be_alloc_sriov_res(adapter);
4465 4527
4466 status = be_get_config(adapter); 4528 status = be_get_resources(adapter);
4467 if (status) 4529 if (status)
4468 goto err; 4530 goto err;
4469 4531
4532 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4533 sizeof(*adapter->pmac_id), GFP_KERNEL);
4534 if (!adapter->pmac_id)
4535 return -ENOMEM;
4536
4470 status = be_msix_enable(adapter); 4537 status = be_msix_enable(adapter);
4471 if (status) 4538 if (status)
4472 goto err; 4539 goto err;
@@ -4511,6 +4578,15 @@ static int be_setup(struct be_adapter *adapter)
4511 be_cmd_set_logical_link_config(adapter, 4578 be_cmd_set_logical_link_config(adapter,
4512 IFLA_VF_LINK_STATE_AUTO, 0); 4579 IFLA_VF_LINK_STATE_AUTO, 0);
4513 4580
4581 /* BE3 EVB echoes broadcast/multicast packets back to PF's vport
 4582 * confusing a Linux bridge or OVS that it might be connected to.
4583 * Set the EVB to PASSTHRU mode which effectively disables the EVB
4584 * when SRIOV is not enabled.
4585 */
4586 if (BE3_chip(adapter))
4587 be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
4588 PORT_FWD_TYPE_PASSTHRU, 0);
4589
4514 if (adapter->num_vfs) 4590 if (adapter->num_vfs)
4515 be_vf_setup(adapter); 4591 be_vf_setup(adapter);
4516 4592
@@ -4651,7 +4727,6 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4651 0, 0, nlflags, filter_mask, NULL); 4727 0, 0, nlflags, filter_mask, NULL);
4652} 4728}
4653 4729
4654#ifdef CONFIG_BE2NET_VXLAN
4655/* VxLAN offload Notes: 4730/* VxLAN offload Notes:
4656 * 4731 *
4657 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't 4732 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
@@ -4666,13 +4741,17 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4666 * adds more than one port, disable offloads and don't re-enable them again 4741 * adds more than one port, disable offloads and don't re-enable them again
4667 * until after all the tunnels are removed. 4742 * until after all the tunnels are removed.
4668 */ 4743 */
4669static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, 4744static void be_add_vxlan_port(struct net_device *netdev,
4670 __be16 port) 4745 struct udp_tunnel_info *ti)
4671{ 4746{
4672 struct be_adapter *adapter = netdev_priv(netdev); 4747 struct be_adapter *adapter = netdev_priv(netdev);
4673 struct device *dev = &adapter->pdev->dev; 4748 struct device *dev = &adapter->pdev->dev;
4749 __be16 port = ti->port;
4674 int status; 4750 int status;
4675 4751
4752 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
4753 return;
4754
4676 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) 4755 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
4677 return; 4756 return;
4678 4757
@@ -4720,10 +4799,14 @@ err:
4720 be_disable_vxlan_offloads(adapter); 4799 be_disable_vxlan_offloads(adapter);
4721} 4800}
4722 4801
4723static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, 4802static void be_del_vxlan_port(struct net_device *netdev,
4724 __be16 port) 4803 struct udp_tunnel_info *ti)
4725{ 4804{
4726 struct be_adapter *adapter = netdev_priv(netdev); 4805 struct be_adapter *adapter = netdev_priv(netdev);
4806 __be16 port = ti->port;
4807
4808 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
4809 return;
4727 4810
4728 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) 4811 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
4729 return; 4812 return;
@@ -4785,7 +4868,6 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
4785 4868
4786 return features; 4869 return features;
4787} 4870}
4788#endif
4789 4871
4790static int be_get_phys_port_id(struct net_device *dev, 4872static int be_get_phys_port_id(struct net_device *dev,
4791 struct netdev_phys_item_id *ppid) 4873 struct netdev_phys_item_id *ppid)
@@ -4833,11 +4915,9 @@ static const struct net_device_ops be_netdev_ops = {
4833#ifdef CONFIG_NET_RX_BUSY_POLL 4915#ifdef CONFIG_NET_RX_BUSY_POLL
4834 .ndo_busy_poll = be_busy_poll, 4916 .ndo_busy_poll = be_busy_poll,
4835#endif 4917#endif
4836#ifdef CONFIG_BE2NET_VXLAN 4918 .ndo_udp_tunnel_add = be_add_vxlan_port,
4837 .ndo_add_vxlan_port = be_add_vxlan_port, 4919 .ndo_udp_tunnel_del = be_del_vxlan_port,
4838 .ndo_del_vxlan_port = be_del_vxlan_port,
4839 .ndo_features_check = be_features_check, 4920 .ndo_features_check = be_features_check,
4840#endif
4841 .ndo_get_phys_port_id = be_get_phys_port_id, 4921 .ndo_get_phys_port_id = be_get_phys_port_id,
4842}; 4922};
4843 4923
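
The hunks above are the 4.8-era move from the VXLAN-only ndo_add_vxlan_port/ndo_del_vxlan_port hooks to the generic UDP-tunnel callbacks, which is also why the CONFIG_BE2NET_VXLAN guards disappear: the generic path is always built. A minimal sketch of the new callback shape, with hypothetical foo_* names standing in for the driver's own:

#include <linux/netdevice.h>
#include <net/udp_tunnel.h>

static void foo_udp_tunnel_add(struct net_device *netdev,
			       struct udp_tunnel_info *ti)
{
	/* one callback now covers every UDP tunnel type, so filter */
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;
	/* program ti->port (network byte order) into the HW parser */
}

static void foo_udp_tunnel_del(struct net_device *netdev,
			       struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;
	/* tear the offload for ti->port back down */
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_udp_tunnel_add	= foo_udp_tunnel_add,
	.ndo_udp_tunnel_del	= foo_udp_tunnel_del,
};
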
@@ -5410,9 +5490,6 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
5410{ 5490{
5411 struct be_adapter *adapter = pci_get_drvdata(pdev); 5491 struct be_adapter *adapter = pci_get_drvdata(pdev);
5412 5492
5413 if (adapter->wol_en)
5414 be_setup_wol(adapter, true);
5415
5416 be_intr_set(adapter, false); 5493 be_intr_set(adapter, false);
5417 be_cancel_err_detection(adapter); 5494 be_cancel_err_detection(adapter);
5418 5495
@@ -5441,9 +5518,6 @@ static int be_pci_resume(struct pci_dev *pdev)
5441 5518
5442 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY); 5519 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
5443 5520
5444 if (adapter->wol_en)
5445 be_setup_wol(adapter, false);
5446
5447 return 0; 5521 return 0;
5448} 5522}
5449 5523
@@ -5552,7 +5626,7 @@ err:
5552static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 5626static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
5553{ 5627{
5554 struct be_adapter *adapter = pci_get_drvdata(pdev); 5628 struct be_adapter *adapter = pci_get_drvdata(pdev);
5555 u16 num_vf_qs; 5629 struct be_resources vft_res = {0};
5556 int status; 5630 int status;
5557 5631
5558 if (!num_vfs) 5632 if (!num_vfs)
@@ -5575,9 +5649,10 @@ static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
5575 * Also, this is done by FW in Lancer chip. 5649 * Also, this is done by FW in Lancer chip.
5576 */ 5650 */
5577 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) { 5651 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
5578 num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs); 5652 be_calculate_vf_res(adapter, adapter->num_vfs,
5653 &vft_res);
5579 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 5654 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
5580 adapter->num_vfs, num_vf_qs); 5655 adapter->num_vfs, &vft_res);
5581 if (status) 5656 if (status)
5582 dev_err(&pdev->dev, 5657 dev_err(&pdev->dev,
5583 "Failed to optimize SR-IOV resources\n"); 5658 "Failed to optimize SR-IOV resources\n");
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 4089156a7f5e..2b62841c4c63 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2015 Emulex 2 * Copyright (C) 2005 - 2016 Broadcom
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index fde609789483..e51719a7307f 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2015 Emulex 2 * Copyright (C) 2005 - 2016 Broadcom
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index f58f9ea51639..92fd5c0bf4df 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -442,6 +442,8 @@ struct bufdesc_ex {
442#define FEC_QUIRK_SINGLE_MDIO (1 << 11) 442#define FEC_QUIRK_SINGLE_MDIO (1 << 11)
443/* Controller supports RACC register */ 443/* Controller supports RACC register */
444#define FEC_QUIRK_HAS_RACC (1 << 12) 444#define FEC_QUIRK_HAS_RACC (1 << 12)
445/* Controller supports interrupt coalescing */
446#define FEC_QUIRK_HAS_COALESCE (1 << 13)
445 447
446struct bufdesc_prop { 448struct bufdesc_prop {
447 int qid; 449 int qid;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index fea0f330ddbd..4040003a74f9 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -111,7 +111,13 @@ static struct platform_device_id fec_devtype[] = {
111 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 111 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
112 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | 112 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
113 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | 113 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
114 FEC_QUIRK_HAS_RACC, 114 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
115 }, {
116 .name = "imx6ul-fec",
117 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
118 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
119 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE |
120 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
115 }, { 121 }, {
116 /* sentinel */ 122 /* sentinel */
117 } 123 }
@@ -125,6 +131,7 @@ enum imx_fec_type {
125 IMX6Q_FEC, 131 IMX6Q_FEC,
126 MVF600_FEC, 132 MVF600_FEC,
127 IMX6SX_FEC, 133 IMX6SX_FEC,
134 IMX6UL_FEC,
128}; 135};
129 136
130static const struct of_device_id fec_dt_ids[] = { 137static const struct of_device_id fec_dt_ids[] = {
@@ -134,6 +141,7 @@ static const struct of_device_id fec_dt_ids[] = {
134 { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, 141 { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
135 { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, 142 { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
136 { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, 143 { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
144 { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
137 { /* sentinel */ } 145 { /* sentinel */ }
138}; 146};
139MODULE_DEVICE_TABLE(of, fec_dt_ids); 147MODULE_DEVICE_TABLE(of, fec_dt_ids);
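
The i.MX6UL support above is pure table work: a platform_device_id entry carrying the SoC's quirk mask, plus an of_device_id whose .data points back at it. The same pattern in isolation, with hypothetical foo names:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

#define FOO_QUIRK_HAS_COALESCE	(1 << 0)	/* per-SoC capability bit */

static const struct platform_device_id foo_devtype[] = {
	{ .name = "imx6ul-foo", .driver_data = FOO_QUIRK_HAS_COALESCE },
	{ /* sentinel */ }
};

static const struct of_device_id foo_dt_ids[] = {
	{ .compatible = "fsl,imx6ul-foo", .data = &foo_devtype[0] },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_dt_ids);
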
@@ -2358,9 +2366,6 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
2358 struct fec_enet_private *fep = netdev_priv(ndev); 2366 struct fec_enet_private *fep = netdev_priv(ndev);
2359 int rx_itr, tx_itr; 2367 int rx_itr, tx_itr;
2360 2368
2361 if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
2362 return;
2363
2364 /* Must be greater than zero to avoid unpredictable behavior */ 2369 /* Must be greater than zero to avoid unpredictable behavior */
2365 if (!fep->rx_time_itr || !fep->rx_pkts_itr || 2370 if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
2366 !fep->tx_time_itr || !fep->tx_pkts_itr) 2371 !fep->tx_time_itr || !fep->tx_pkts_itr)
@@ -2383,10 +2388,12 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
2383 2388
2384 writel(tx_itr, fep->hwp + FEC_TXIC0); 2389 writel(tx_itr, fep->hwp + FEC_TXIC0);
2385 writel(rx_itr, fep->hwp + FEC_RXIC0); 2390 writel(rx_itr, fep->hwp + FEC_RXIC0);
2386 writel(tx_itr, fep->hwp + FEC_TXIC1); 2391 if (fep->quirks & FEC_QUIRK_HAS_AVB) {
2387 writel(rx_itr, fep->hwp + FEC_RXIC1); 2392 writel(tx_itr, fep->hwp + FEC_TXIC1);
2388 writel(tx_itr, fep->hwp + FEC_TXIC2); 2393 writel(rx_itr, fep->hwp + FEC_RXIC1);
2389 writel(rx_itr, fep->hwp + FEC_RXIC2); 2394 writel(tx_itr, fep->hwp + FEC_TXIC2);
2395 writel(rx_itr, fep->hwp + FEC_RXIC2);
2396 }
2390} 2397}
2391 2398
2392static int 2399static int
@@ -2394,7 +2401,7 @@ fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
2394{ 2401{
2395 struct fec_enet_private *fep = netdev_priv(ndev); 2402 struct fec_enet_private *fep = netdev_priv(ndev);
2396 2403
2397 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) 2404 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
2398 return -EOPNOTSUPP; 2405 return -EOPNOTSUPP;
2399 2406
2400 ec->rx_coalesce_usecs = fep->rx_time_itr; 2407 ec->rx_coalesce_usecs = fep->rx_time_itr;
@@ -2412,7 +2419,7 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
2412 struct fec_enet_private *fep = netdev_priv(ndev); 2419 struct fec_enet_private *fep = netdev_priv(ndev);
2413 unsigned int cycle; 2420 unsigned int cycle;
2414 2421
2415 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) 2422 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
2416 return -EOPNOTSUPP; 2423 return -EOPNOTSUPP;
2417 2424
2418 if (ec->rx_max_coalesced_frames > 255) { 2425 if (ec->rx_max_coalesced_frames > 255) {
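
With the quirk split, the ethtool coalesce hooks key off FEC_QUIRK_HAS_COALESCE instead of FEC_QUIRK_HAS_AVB, so i.MX6UL (which coalesces but has only ring 0, hence no AVB) still gets the feature. A sketch of the gating idiom, names hypothetical:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

#define FOO_QUIRK_HAS_COALESCE	(1 << 0)

struct foo_priv {
	u32 quirks;
	u32 rx_time_itr;	/* cached RX coalesce delay, usecs */
};

static int foo_get_coalesce(struct net_device *ndev,
			    struct ethtool_coalesce *ec)
{
	struct foo_priv *priv = netdev_priv(ndev);

	if (!(priv->quirks & FOO_QUIRK_HAS_COALESCE))
		return -EOPNOTSUPP;	/* silicon cannot coalesce */

	ec->rx_coalesce_usecs = priv->rx_time_itr;
	return 0;
}
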
@@ -3191,7 +3198,12 @@ static void fec_reset_phy(struct platform_device *pdev)
3191 dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); 3198 dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
3192 return; 3199 return;
3193 } 3200 }
3194 msleep(msec); 3201
3202 if (msec > 20)
3203 msleep(msec);
3204 else
3205 usleep_range(msec * 1000, msec * 1000 + 1000);
3206
3195 gpio_set_value_cansleep(phy_reset, !active_high); 3207 gpio_set_value_cansleep(phy_reset, !active_high);
3196} 3208}
3197#else /* CONFIG_OF */ 3209#else /* CONFIG_OF */
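
The reset-pulse change follows Documentation/timers/timers-howto.txt: msleep() may overshoot badly for waits under about 20 ms, so short PHY reset delays move to usleep_range(). The rule in isolation:

#include <linux/delay.h>

static void foo_sleep_ms(unsigned int msec)
{
	if (msec > 20)
		msleep(msec);	/* long wait: jiffies resolution is fine */
	else
		usleep_range(msec * 1000,	/* short wait: hrtimer-backed */
			     msec * 1000 + 1000);
}
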
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 2e6785b6e8be..d20935dc8399 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2275,7 +2275,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
2275 fcb->flags = flags; 2275 fcb->flags = flags;
2276} 2276}
2277 2277
2278void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) 2278static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2279{ 2279{
2280 fcb->flags |= TXFCB_VLN; 2280 fcb->flags |= TXFCB_VLN;
2281 fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb)); 2281 fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 4ccc032633c4..2e2566230e27 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -5,7 +5,7 @@
5config NET_VENDOR_HISILICON 5config NET_VENDOR_HISILICON
6 bool "Hisilicon devices" 6 bool "Hisilicon devices"
7 default y 7 default y
8 depends on OF && HAS_DMA 8 depends on (OF || ACPI) && HAS_DMA
9 depends on ARM || ARM64 || COMPILE_TEST 9 depends on ARM || ARM64 || COMPILE_TEST
10 ---help--- 10 ---help---
11 If you have a network (Ethernet) card belonging to this class, say Y. 11 If you have a network (Ethernet) card belonging to this class, say Y.
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index b9f2ea59308a..275618bb4646 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -218,7 +218,6 @@ struct hix5hd2_priv {
218 struct device *dev; 218 struct device *dev;
219 struct net_device *netdev; 219 struct net_device *netdev;
220 220
221 struct phy_device *phy;
222 struct device_node *phy_node; 221 struct device_node *phy_node;
223 phy_interface_t phy_mode; 222 phy_interface_t phy_mode;
224 223
@@ -402,7 +401,7 @@ static int hix5hd2_net_set_mac_address(struct net_device *dev, void *p)
402static void hix5hd2_adjust_link(struct net_device *dev) 401static void hix5hd2_adjust_link(struct net_device *dev)
403{ 402{
404 struct hix5hd2_priv *priv = netdev_priv(dev); 403 struct hix5hd2_priv *priv = netdev_priv(dev);
405 struct phy_device *phy = priv->phy; 404 struct phy_device *phy = dev->phydev;
406 405
407 if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) { 406 if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
408 hix5hd2_config_port(dev, phy->speed, phy->duplex); 407 hix5hd2_config_port(dev, phy->speed, phy->duplex);
@@ -679,6 +678,7 @@ static void hix5hd2_free_dma_desc_rings(struct hix5hd2_priv *priv)
679static int hix5hd2_net_open(struct net_device *dev) 678static int hix5hd2_net_open(struct net_device *dev)
680{ 679{
681 struct hix5hd2_priv *priv = netdev_priv(dev); 680 struct hix5hd2_priv *priv = netdev_priv(dev);
681 struct phy_device *phy;
682 int ret; 682 int ret;
683 683
684 ret = clk_prepare_enable(priv->clk); 684 ret = clk_prepare_enable(priv->clk);
@@ -687,12 +687,12 @@ static int hix5hd2_net_open(struct net_device *dev)
687 return ret; 687 return ret;
688 } 688 }
689 689
690 priv->phy = of_phy_connect(dev, priv->phy_node, 690 phy = of_phy_connect(dev, priv->phy_node,
691 &hix5hd2_adjust_link, 0, priv->phy_mode); 691 &hix5hd2_adjust_link, 0, priv->phy_mode);
692 if (!priv->phy) 692 if (!phy)
693 return -ENODEV; 693 return -ENODEV;
694 694
695 phy_start(priv->phy); 695 phy_start(phy);
696 hix5hd2_hw_init(priv); 696 hix5hd2_hw_init(priv);
697 hix5hd2_rx_refill(priv); 697 hix5hd2_rx_refill(priv);
698 698
@@ -716,9 +716,9 @@ static int hix5hd2_net_close(struct net_device *dev)
716 netif_stop_queue(dev); 716 netif_stop_queue(dev);
717 hix5hd2_free_dma_desc_rings(priv); 717 hix5hd2_free_dma_desc_rings(priv);
718 718
719 if (priv->phy) { 719 if (dev->phydev) {
720 phy_stop(priv->phy); 720 phy_stop(dev->phydev);
721 phy_disconnect(priv->phy); 721 phy_disconnect(dev->phydev);
722 } 722 }
723 723
724 clk_disable_unprepare(priv->clk); 724 clk_disable_unprepare(priv->clk);
@@ -750,32 +750,10 @@ static const struct net_device_ops hix5hd2_netdev_ops = {
750 .ndo_set_mac_address = hix5hd2_net_set_mac_address, 750 .ndo_set_mac_address = hix5hd2_net_set_mac_address,
751}; 751};
752 752
753static int hix5hd2_get_settings(struct net_device *net_dev,
754 struct ethtool_cmd *cmd)
755{
756 struct hix5hd2_priv *priv = netdev_priv(net_dev);
757
758 if (!priv->phy)
759 return -ENODEV;
760
761 return phy_ethtool_gset(priv->phy, cmd);
762}
763
764static int hix5hd2_set_settings(struct net_device *net_dev,
765 struct ethtool_cmd *cmd)
766{
767 struct hix5hd2_priv *priv = netdev_priv(net_dev);
768
769 if (!priv->phy)
770 return -ENODEV;
771
772 return phy_ethtool_sset(priv->phy, cmd);
773}
774
775static struct ethtool_ops hix5hd2_ethtools_ops = { 753static struct ethtool_ops hix5hd2_ethtools_ops = {
776 .get_link = ethtool_op_get_link, 754 .get_link = ethtool_op_get_link,
777 .get_settings = hix5hd2_get_settings, 755 .get_link_ksettings = phy_ethtool_get_link_ksettings,
778 .set_settings = hix5hd2_set_settings, 756 .set_link_ksettings = phy_ethtool_set_link_ksettings,
779}; 757};
780 758
781static int hix5hd2_mdio_wait_ready(struct mii_bus *bus) 759static int hix5hd2_mdio_wait_ready(struct mii_bus *bus)
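
The hix5hd2 conversion is the standard two-step phylib cleanup: drop the private phy pointer in favour of net_device->phydev, after which the hand-rolled get_settings/set_settings wrappers collapse into the generic ksettings helpers, which operate on dev->phydev directly. The resulting ops table, sketched:

#include <linux/ethtool.h>
#include <linux/phy.h>

static const struct ethtool_ops foo_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
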
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 3bfe36f9405b..c54c6fac0d1d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -96,16 +96,22 @@ static int __ae_match(struct device *dev, const void *data)
96{ 96{
97 struct hnae_ae_dev *hdev = cls_to_ae_dev(dev); 97 struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
98 98
99 return hdev->dev->of_node == data; 99 if (dev_of_node(hdev->dev))
100 return (data == &hdev->dev->of_node->fwnode);
101 else if (is_acpi_node(hdev->dev->fwnode))
102 return (data == hdev->dev->fwnode);
103
104 dev_err(dev, "__ae_match cannot read cfg data from OF or acpi\n");
105 return 0;
100} 106}
101 107
102static struct hnae_ae_dev *find_ae(const struct device_node *ae_node) 108static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
103{ 109{
104 struct device *dev; 110 struct device *dev;
105 111
106 WARN_ON(!ae_node); 112 WARN_ON(!fwnode);
107 113
108 dev = class_find_device(hnae_class, NULL, ae_node, __ae_match); 114 dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);
109 115
110 return dev ? cls_to_ae_dev(dev) : NULL; 116 return dev ? cls_to_ae_dev(dev) : NULL;
111} 117}
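
__ae_match now compares fwnode_handle pointers, giving one lookup key that works for both DT (&of_node->fwnode) and ACPI devices. A sketch of the match helper, assuming the 4.8-era class_find_device() match signature:

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/of.h>

static int foo_fwnode_match(struct device *dev, const void *data)
{
	if (dev_of_node(dev))
		return data == &dev->of_node->fwnode;
	if (is_acpi_node(dev->fwnode))
		return data == dev->fwnode;
	return 0;	/* neither DT nor ACPI: no match */
}
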
@@ -312,7 +318,7 @@ EXPORT_SYMBOL(hnae_reinit_handle);
312 * return handle ptr or ERR_PTR 318 * return handle ptr or ERR_PTR
313 */ 319 */
314struct hnae_handle *hnae_get_handle(struct device *owner_dev, 320struct hnae_handle *hnae_get_handle(struct device *owner_dev,
315 const struct device_node *ae_node, 321 const struct fwnode_handle *fwnode,
316 u32 port_id, 322 u32 port_id,
317 struct hnae_buf_ops *bops) 323 struct hnae_buf_ops *bops)
318{ 324{
@@ -321,7 +327,7 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev,
321 int i, j; 327 int i, j;
322 int ret; 328 int ret;
323 329
324 dev = find_ae(ae_node); 330 dev = find_ae(fwnode);
325 if (!dev) 331 if (!dev)
326 return ERR_PTR(-ENODEV); 332 return ERR_PTR(-ENODEV);
327 333
@@ -394,7 +400,6 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
394 400
395 if (!hdev->ops || !hdev->ops->get_handle || 401 if (!hdev->ops || !hdev->ops->get_handle ||
396 !hdev->ops->toggle_ring_irq || 402 !hdev->ops->toggle_ring_irq ||
397 !hdev->ops->toggle_queue_status ||
398 !hdev->ops->get_status || !hdev->ops->adjust_link) 403 !hdev->ops->get_status || !hdev->ops->adjust_link)
399 return -EINVAL; 404 return -EINVAL;
400 405
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index e8d36aaea223..3869322690ac 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -27,6 +27,7 @@
27 * "cb" means control block 27 * "cb" means control block
28 */ 28 */
29 29
30#include <linux/acpi.h>
30#include <linux/delay.h> 31#include <linux/delay.h>
31#include <linux/device.h> 32#include <linux/device.h>
32#include <linux/module.h> 33#include <linux/module.h>
@@ -453,7 +454,6 @@ struct hnae_ae_ops {
453 int (*get_info)(struct hnae_handle *handle, 454 int (*get_info)(struct hnae_handle *handle,
454 u8 *auto_neg, u16 *speed, u8 *duplex); 455 u8 *auto_neg, u16 *speed, u8 *duplex);
455 void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val); 456 void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
456 void (*toggle_queue_status)(struct hnae_queue *queue, u32 val);
457 void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex); 457 void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
458 int (*set_loopback)(struct hnae_handle *handle, 458 int (*set_loopback)(struct hnae_handle *handle,
459 enum hnae_loop loop_mode, int en); 459 enum hnae_loop loop_mode, int en);
@@ -472,6 +472,11 @@ struct hnae_ae_ops {
472 int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout); 472 int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
473 int (*set_coalesce_frames)(struct hnae_handle *handle, 473 int (*set_coalesce_frames)(struct hnae_handle *handle,
474 u32 coalesce_frames); 474 u32 coalesce_frames);
475 void (*get_coalesce_range)(struct hnae_handle *handle,
476 u32 *tx_frames_low, u32 *rx_frames_low,
477 u32 *tx_frames_high, u32 *rx_frames_high,
478 u32 *tx_usecs_low, u32 *rx_usecs_low,
479 u32 *tx_usecs_high, u32 *rx_usecs_high);
475 void (*set_promisc_mode)(struct hnae_handle *handle, u32 en); 480 void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
476 int (*get_mac_addr)(struct hnae_handle *handle, void **p); 481 int (*get_mac_addr)(struct hnae_handle *handle, void **p);
477 int (*set_mac_addr)(struct hnae_handle *handle, void *p); 482 int (*set_mac_addr)(struct hnae_handle *handle, void *p);
@@ -512,7 +517,7 @@ struct hnae_ae_dev {
512struct hnae_handle { 517struct hnae_handle {
513 struct device *owner_dev; /* the device which make use of this handle */ 518 struct device *owner_dev; /* the device which make use of this handle */
514 struct hnae_ae_dev *dev; /* the device who provides this handle */ 519 struct hnae_ae_dev *dev; /* the device who provides this handle */
515 struct device_node *phy_node; 520 struct phy_device *phy_dev;
516 phy_interface_t phy_if; 521 phy_interface_t phy_if;
517 u32 if_support; 522 u32 if_support;
518 int q_num; 523 int q_num;
@@ -528,7 +533,7 @@ struct hnae_handle {
528#define ring_to_dev(ring) ((ring)->q->dev->dev) 533#define ring_to_dev(ring) ((ring)->q->dev->dev)
529 534
530struct hnae_handle *hnae_get_handle(struct device *owner_dev, 535struct hnae_handle *hnae_get_handle(struct device *owner_dev,
531 const struct device_node *ae_node, 536 const struct fwnode_handle *fwnode,
532 u32 port_id, 537 u32 port_id,
533 struct hnae_buf_ops *bops); 538 struct hnae_buf_ops *bops);
534 539
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index 7a757e88c89a..835521bf1bbc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -131,7 +131,7 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
131 vf_cb->mac_cb = dsaf_dev->mac_cb[port_id]; 131 vf_cb->mac_cb = dsaf_dev->mac_cb[port_id];
132 132
133 ae_handle->phy_if = vf_cb->mac_cb->phy_if; 133 ae_handle->phy_if = vf_cb->mac_cb->phy_if;
134 ae_handle->phy_node = vf_cb->mac_cb->phy_node; 134 ae_handle->phy_dev = vf_cb->mac_cb->phy_dev;
135 ae_handle->if_support = vf_cb->mac_cb->if_support; 135 ae_handle->if_support = vf_cb->mac_cb->if_support;
136 ae_handle->port_type = vf_cb->mac_cb->mac_type; 136 ae_handle->port_type = vf_cb->mac_cb->mac_type;
137 ae_handle->dport_id = port_id; 137 ae_handle->dport_id = port_id;
@@ -247,12 +247,21 @@ static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable)
247static int hns_ae_start(struct hnae_handle *handle) 247static int hns_ae_start(struct hnae_handle *handle)
248{ 248{
249 int ret; 249 int ret;
250 int k;
250 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); 251 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
251 252
252 ret = hns_mac_vm_config_bc_en(mac_cb, 0, true); 253 ret = hns_mac_vm_config_bc_en(mac_cb, 0, true);
253 if (ret) 254 if (ret)
254 return ret; 255 return ret;
255 256
257 for (k = 0; k < handle->q_num; k++) {
258 if (AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver))
259 hns_rcb_int_clr_hw(handle->qs[k],
260 RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
261 else
262 hns_rcbv2_int_clr_hw(handle->qs[k],
263 RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
264 }
256 hns_ae_ring_enable_all(handle, 1); 265 hns_ae_ring_enable_all(handle, 1);
257 msleep(100); 266 msleep(100);
258 267
@@ -313,18 +322,6 @@ static void hns_aev2_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
313 hns_rcbv2_int_ctrl_hw(ring->q, flag, mask); 322 hns_rcbv2_int_ctrl_hw(ring->q, flag, mask);
314} 323}
315 324
316static void hns_ae_toggle_queue_status(struct hnae_queue *queue, u32 val)
317{
318 struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(queue->dev);
319
320 if (AE_IS_VER1(dsaf_dev->dsaf_ver))
321 hns_rcb_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
322 else
323 hns_rcbv2_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
324
325 hns_rcb_start(queue, val);
326}
327
328static int hns_ae_get_link_status(struct hnae_handle *handle) 325static int hns_ae_get_link_status(struct hnae_handle *handle)
329{ 326{
330 u32 link_status; 327 u32 link_status;
@@ -465,6 +462,30 @@ static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
465 ring_pair->port_id_in_comm, coalesce_frames); 462 ring_pair->port_id_in_comm, coalesce_frames);
466} 463}
467 464
465static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
466 u32 *tx_frames_low, u32 *rx_frames_low,
467 u32 *tx_frames_high, u32 *rx_frames_high,
468 u32 *tx_usecs_low, u32 *rx_usecs_low,
469 u32 *tx_usecs_high, u32 *rx_usecs_high)
470{
471 struct dsaf_device *dsaf_dev;
472
473 dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
474
475 *tx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES;
476 *rx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES;
477 *tx_frames_high =
478 (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ?
479 HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1;
480 *rx_frames_high =
481 (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ?
482 HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1;
483 *tx_usecs_low = 0;
484 *rx_usecs_low = 0;
485 *tx_usecs_high = HNS_RCB_MAX_COALESCED_USECS;
486 *rx_usecs_high = HNS_RCB_MAX_COALESCED_USECS;
487}
488
468void hns_ae_update_stats(struct hnae_handle *handle, 489void hns_ae_update_stats(struct hnae_handle *handle,
469 struct net_device_stats *net_stats) 490 struct net_device_stats *net_stats)
470{ 491{
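
The new get_coalesce_range op reports ethtool bounds, and its frame maxima are clamped to the ring size: min(desc_num - 1, HNS_RCB_MAX_COALESCED_FRAMES). The open-coded ternaries above are equivalent to a min_t() clamp:

#include <linux/kernel.h>

/* hypothetical stand-in for HNS_RCB_MAX_COALESCED_FRAMES */
#define FOO_MAX_COALESCED_FRAMES	1023

static u32 foo_frames_high(u32 desc_num)
{
	/* never allow more outstanding frames than ring descriptors */
	return min_t(u32, desc_num - 1, FOO_MAX_COALESCED_FRAMES);
}
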
@@ -587,6 +608,7 @@ void hns_ae_get_strings(struct hnae_handle *handle,
587 int idx; 608 int idx;
588 struct hns_mac_cb *mac_cb; 609 struct hns_mac_cb *mac_cb;
589 struct hns_ppe_cb *ppe_cb; 610 struct hns_ppe_cb *ppe_cb;
611 struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
590 u8 *p = data; 612 u8 *p = data;
591 struct hnae_vf_cb *vf_cb; 613 struct hnae_vf_cb *vf_cb;
592 614
@@ -609,13 +631,14 @@ void hns_ae_get_strings(struct hnae_handle *handle,
609 p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset); 631 p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset);
610 632
611 if (mac_cb->mac_type == HNAE_PORT_SERVICE) 633 if (mac_cb->mac_type == HNAE_PORT_SERVICE)
612 hns_dsaf_get_strings(stringset, p, port); 634 hns_dsaf_get_strings(stringset, p, port, dsaf_dev);
613} 635}
614 636
615int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset) 637int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
616{ 638{
617 u32 sset_count = 0; 639 u32 sset_count = 0;
618 struct hns_mac_cb *mac_cb; 640 struct hns_mac_cb *mac_cb;
641 struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
619 642
620 assert(handle); 643 assert(handle);
621 644
@@ -626,7 +649,7 @@ int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
626 sset_count += hns_mac_get_sset_count(mac_cb, stringset); 649 sset_count += hns_mac_get_sset_count(mac_cb, stringset);
627 650
628 if (mac_cb->mac_type == HNAE_PORT_SERVICE) 651 if (mac_cb->mac_type == HNAE_PORT_SERVICE)
629 sset_count += hns_dsaf_get_sset_count(stringset); 652 sset_count += hns_dsaf_get_sset_count(dsaf_dev, stringset);
630 653
631 return sset_count; 654 return sset_count;
632} 655}
@@ -637,13 +660,15 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
637 int ret; 660 int ret;
638 struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); 661 struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
639 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); 662 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
663 struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
640 664
641 switch (loop) { 665 switch (loop) {
642 case MAC_INTERNALLOOP_PHY: 666 case MAC_INTERNALLOOP_PHY:
643 ret = 0; 667 ret = 0;
644 break; 668 break;
645 case MAC_INTERNALLOOP_SERDES: 669 case MAC_INTERNALLOOP_SERDES:
646 ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en); 670 ret = dsaf_dev->misc_op->cfg_serdes_loopback(vf_cb->mac_cb,
671 !!en);
647 break; 672 break;
648 case MAC_INTERNALLOOP_MAC: 673 case MAC_INTERNALLOOP_MAC:
649 ret = hns_mac_config_mac_loopback(vf_cb->mac_cb, loop, en); 674 ret = hns_mac_config_mac_loopback(vf_cb->mac_cb, loop, en);
@@ -780,7 +805,6 @@ static struct hnae_ae_ops hns_dsaf_ops = {
780 .stop = hns_ae_stop, 805 .stop = hns_ae_stop,
781 .reset = hns_ae_reset, 806 .reset = hns_ae_reset,
782 .toggle_ring_irq = hns_ae_toggle_ring_irq, 807 .toggle_ring_irq = hns_ae_toggle_ring_irq,
783 .toggle_queue_status = hns_ae_toggle_queue_status,
784 .get_status = hns_ae_get_link_status, 808 .get_status = hns_ae_get_link_status,
785 .get_info = hns_ae_get_mac_info, 809 .get_info = hns_ae_get_mac_info,
786 .adjust_link = hns_ae_adjust_link, 810 .adjust_link = hns_ae_adjust_link,
@@ -794,6 +818,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
794 .get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames, 818 .get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames,
795 .set_coalesce_usecs = hns_ae_set_coalesce_usecs, 819 .set_coalesce_usecs = hns_ae_set_coalesce_usecs,
796 .set_coalesce_frames = hns_ae_set_coalesce_frames, 820 .set_coalesce_frames = hns_ae_set_coalesce_frames,
821 .get_coalesce_range = hns_ae_get_coalesce_range,
797 .set_promisc_mode = hns_ae_set_promisc_mode, 822 .set_promisc_mode = hns_ae_set_promisc_mode,
798 .set_mac_addr = hns_ae_set_mac_address, 823 .set_mac_addr = hns_ae_set_mac_address,
799 .set_mc_addr = hns_ae_set_multicast_one, 824 .set_mc_addr = hns_ae_set_multicast_one,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 44abb08de155..1235c7f2564b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -110,7 +110,7 @@ static void hns_gmac_free(void *mac_drv)
110 110
111 u32 mac_id = drv->mac_id; 111 u32 mac_id = drv->mac_id;
112 112
113 hns_dsaf_ge_srst_by_port(dsaf_dev, mac_id, 0); 113 dsaf_dev->misc_op->ge_srst(dsaf_dev, mac_id, 0);
114} 114}
115 115
116static void hns_gmac_set_tx_auto_pause_frames(void *mac_drv, u16 newval) 116static void hns_gmac_set_tx_auto_pause_frames(void *mac_drv, u16 newval)
@@ -317,9 +317,9 @@ static void hns_gmac_init(void *mac_drv)
317 317
318 port = drv->mac_id; 318 port = drv->mac_id;
319 319
320 hns_dsaf_ge_srst_by_port(dsaf_dev, port, 0); 320 dsaf_dev->misc_op->ge_srst(dsaf_dev, port, 0);
321 mdelay(10); 321 mdelay(10);
322 hns_dsaf_ge_srst_by_port(dsaf_dev, port, 1); 322 dsaf_dev->misc_op->ge_srst(dsaf_dev, port, 1);
323 mdelay(10); 323 mdelay(10);
324 hns_gmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX); 324 hns_gmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX);
325 hns_gmac_tx_loop_pkt_dis(mac_drv); 325 hns_gmac_tx_loop_pkt_dis(mac_drv);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 611581fccf2a..c526558e6367 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -7,6 +7,7 @@
7 * (at your option) any later version. 7 * (at your option) any later version.
8 */ 8 */
9 9
10#include <linux/acpi.h>
10#include <linux/init.h> 11#include <linux/init.h>
11#include <linux/interrupt.h> 12#include <linux/interrupt.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
@@ -15,7 +16,8 @@
15#include <linux/netdevice.h> 16#include <linux/netdevice.h>
16#include <linux/of.h> 17#include <linux/of.h>
17#include <linux/of_address.h> 18#include <linux/of_address.h>
18#include <linux/phy_fixed.h> 19#include <linux/of_mdio.h>
20#include <linux/phy.h>
19#include <linux/platform_device.h> 21#include <linux/platform_device.h>
20 22
21#include "hns_dsaf_main.h" 23#include "hns_dsaf_main.h"
@@ -94,7 +96,7 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
94 else 96 else
95 *link_status = 0; 97 *link_status = 0;
96 98
97 ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt); 99 ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb, &sfp_prsnt);
98 if (!ret) 100 if (!ret)
99 *link_status = *link_status && sfp_prsnt; 101 *link_status = *link_status && sfp_prsnt;
100 102
@@ -511,7 +513,7 @@ void hns_mac_stop(struct hns_mac_cb *mac_cb)
511 513
512 mac_ctrl_drv->mac_en_flg = 0; 514 mac_ctrl_drv->mac_en_flg = 0;
513 mac_cb->link = 0; 515 mac_cb->link = 0;
514 cpld_led_reset(mac_cb); 516 mac_cb->dsaf_dev->misc_op->cpld_reset_led(mac_cb);
515} 517}
516 518
517/** 519/**
@@ -637,6 +639,115 @@ free_mac_drv:
637 return ret; 639 return ret;
638} 640}
639 641
642static int
643hns_mac_phy_parse_addr(struct device *dev, struct fwnode_handle *fwnode)
644{
645 u32 addr;
646 int ret;
647
648 ret = fwnode_property_read_u32(fwnode, "phy-addr", &addr);
649 if (ret) {
650 dev_err(dev, "has invalid PHY address ret:%d\n", ret);
651 return ret;
652 }
653
654 if (addr >= PHY_MAX_ADDR) {
655 dev_err(dev, "PHY address %i is too large\n", addr);
656 return -EINVAL;
657 }
658
659 return addr;
660}
661
662static int hns_mac_phydev_match(struct device *dev, void *fwnode)
663{
664 return dev->fwnode == fwnode;
665}
666
667static struct
668platform_device *hns_mac_find_platform_device(struct fwnode_handle *fwnode)
669{
670 struct device *dev;
671
672 dev = bus_find_device(&platform_bus_type, NULL,
673 fwnode, hns_mac_phydev_match);
674 return dev ? to_platform_device(dev) : NULL;
675}
676
677static int
678hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
679 u32 addr)
680{
681 struct phy_device *phy;
682 const char *phy_type;
683 bool is_c45;
684 int rc;
685
686 rc = fwnode_property_read_string(mac_cb->fw_port,
687 "phy-mode", &phy_type);
688 if (rc < 0)
689 return rc;
690
691 if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_XGMII)))
692 is_c45 = 1;
693 else if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_SGMII)))
694 is_c45 = 0;
695 else
696 return -ENODATA;
697
698 phy = get_phy_device(mdio, addr, is_c45);
699 if (!phy || IS_ERR(phy))
700 return -EIO;
701
702 if (mdio->irq)
703 phy->irq = mdio->irq[addr];
704
705 /* All data is now stored in the phy struct;
706 * register it
707 */
708 rc = phy_device_register(phy);
709 if (rc) {
710 phy_device_free(phy);
711 return -ENODEV;
712 }
713
714 mac_cb->phy_dev = phy;
715
716 dev_dbg(&mdio->dev, "registered phy at address %i\n", addr);
717
718 return 0;
719}
720
721static void hns_mac_register_phy(struct hns_mac_cb *mac_cb)
722{
723 struct acpi_reference_args args;
724 struct platform_device *pdev;
725 struct mii_bus *mii_bus;
726 int rc;
727 int addr;
728
729 /* Loop over the child nodes and register a phy_device for each one */
730 if (!to_acpi_device_node(mac_cb->fw_port))
731 return;
732
733 rc = acpi_node_get_property_reference(
734 mac_cb->fw_port, "mdio-node", 0, &args);
735 if (rc)
736 return;
737
738 addr = hns_mac_phy_parse_addr(mac_cb->dev, mac_cb->fw_port);
739 if (addr < 0)
740 return;
741
742 /* dev address in adev */
743 pdev = hns_mac_find_platform_device(acpi_fwnode_handle(args.adev));
744 mii_bus = platform_get_drvdata(pdev);
745 rc = hns_mac_register_phydev(mii_bus, mac_cb, addr);
746 if (!rc)
747 dev_dbg(mac_cb->dev, "mac%d register phy addr:%d\n",
748 mac_cb->mac_id, addr);
749}
750
640/** 751/**
641 *hns_mac_get_info - get mac information from device node 752 *hns_mac_get_info - get mac information from device node
642 *@mac_cb: mac device 753 *@mac_cb: mac device
@@ -645,7 +756,7 @@ free_mac_drv:
645 */ 756 */
646static int hns_mac_get_info(struct hns_mac_cb *mac_cb) 757static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
647{ 758{
648 struct device_node *np = mac_cb->dev->of_node; 759 struct device_node *np;
649 struct regmap *syscon; 760 struct regmap *syscon;
650 struct of_phandle_args cpld_args; 761 struct of_phandle_args cpld_args;
651 u32 ret; 762 u32 ret;
@@ -672,63 +783,85 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
672 * from dsaf node 783 * from dsaf node
673 */ 784 */
674 if (!mac_cb->fw_port) { 785 if (!mac_cb->fw_port) {
675 mac_cb->phy_node = of_parse_phandle(np, "phy-handle", 786 np = of_parse_phandle(mac_cb->dev->of_node, "phy-handle",
676 mac_cb->mac_id); 787 mac_cb->mac_id);
677 if (mac_cb->phy_node) 788 mac_cb->phy_dev = of_phy_find_device(np);
789 if (mac_cb->phy_dev) {
790 /* refcount is held by of_phy_find_device()
791 * if the phy_dev is found
792 */
793 put_device(&mac_cb->phy_dev->mdio.dev);
794
678 dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", 795 dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
679 mac_cb->mac_id, mac_cb->phy_node->name); 796 mac_cb->mac_id, np->name);
680 return 0; 797 }
681 }
682 if (!is_of_node(mac_cb->fw_port))
683 return -EINVAL;
684 /* parse property from port subnode in dsaf */
685 mac_cb->phy_node = of_parse_phandle(to_of_node(mac_cb->fw_port),
686 "phy-handle", 0);
687 if (mac_cb->phy_node)
688 dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
689 mac_cb->mac_id, mac_cb->phy_node->name);
690 syscon = syscon_node_to_regmap(
691 of_parse_phandle(to_of_node(mac_cb->fw_port),
692 "serdes-syscon", 0));
693 if (IS_ERR_OR_NULL(syscon)) {
694 dev_err(mac_cb->dev, "serdes-syscon is needed!\n");
695 return -EINVAL;
696 }
697 mac_cb->serdes_ctrl = syscon;
698 798
699 ret = fwnode_property_read_u32(mac_cb->fw_port, 799 return 0;
700 "port-rst-offset",
701 &mac_cb->port_rst_off);
702 if (ret) {
703 dev_dbg(mac_cb->dev,
704 "mac%d port-rst-offset not found, use default value.\n",
705 mac_cb->mac_id);
706 } 800 }
707 801
708 ret = fwnode_property_read_u32(mac_cb->fw_port, 802 if (is_of_node(mac_cb->fw_port)) {
709 "port-mode-offset", 803 /* parse property from port subnode in dsaf */
710 &mac_cb->port_mode_off); 804 np = of_parse_phandle(to_of_node(mac_cb->fw_port),
711 if (ret) { 805 "phy-handle", 0);
712 dev_dbg(mac_cb->dev, 806 mac_cb->phy_dev = of_phy_find_device(np);
713 "mac%d port-mode-offset not found, use default value.\n", 807 if (mac_cb->phy_dev) {
714 mac_cb->mac_id); 808 /* refcount is held by of_phy_find_device()
715 } 809 * if the phy_dev is found
810 */
811 put_device(&mac_cb->phy_dev->mdio.dev);
812 dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
813 mac_cb->mac_id, np->name);
814 }
716 815
717 ret = of_parse_phandle_with_fixed_args(to_of_node(mac_cb->fw_port), 816 syscon = syscon_node_to_regmap(
718 "cpld-syscon", 1, 0, &cpld_args); 817 of_parse_phandle(to_of_node(mac_cb->fw_port),
719 if (ret) { 818 "serdes-syscon", 0));
720 dev_dbg(mac_cb->dev, "mac%d no cpld-syscon found.\n",
721 mac_cb->mac_id);
722 mac_cb->cpld_ctrl = NULL;
723 } else {
724 syscon = syscon_node_to_regmap(cpld_args.np);
725 if (IS_ERR_OR_NULL(syscon)) { 819 if (IS_ERR_OR_NULL(syscon)) {
726 dev_dbg(mac_cb->dev, "no cpld-syscon found!\n"); 820 dev_err(mac_cb->dev, "serdes-syscon is needed!\n");
821 return -EINVAL;
822 }
823 mac_cb->serdes_ctrl = syscon;
824
825 ret = fwnode_property_read_u32(mac_cb->fw_port,
826 "port-rst-offset",
827 &mac_cb->port_rst_off);
828 if (ret) {
829 dev_dbg(mac_cb->dev,
830 "mac%d port-rst-offset not found, use default value.\n",
831 mac_cb->mac_id);
832 }
833
834 ret = fwnode_property_read_u32(mac_cb->fw_port,
835 "port-mode-offset",
836 &mac_cb->port_mode_off);
837 if (ret) {
838 dev_dbg(mac_cb->dev,
839 "mac%d port-mode-offset not found, use default value.\n",
840 mac_cb->mac_id);
841 }
842
843 ret = of_parse_phandle_with_fixed_args(
844 to_of_node(mac_cb->fw_port), "cpld-syscon", 1, 0,
845 &cpld_args);
846 if (ret) {
847 dev_dbg(mac_cb->dev, "mac%d no cpld-syscon found.\n",
848 mac_cb->mac_id);
727 mac_cb->cpld_ctrl = NULL; 849 mac_cb->cpld_ctrl = NULL;
728 } else { 850 } else {
729 mac_cb->cpld_ctrl = syscon; 851 syscon = syscon_node_to_regmap(cpld_args.np);
730 mac_cb->cpld_ctrl_reg = cpld_args.args[0]; 852 if (IS_ERR_OR_NULL(syscon)) {
853 dev_dbg(mac_cb->dev, "no cpld-syscon found!\n");
854 mac_cb->cpld_ctrl = NULL;
855 } else {
856 mac_cb->cpld_ctrl = syscon;
857 mac_cb->cpld_ctrl_reg = cpld_args.args[0];
858 }
731 } 859 }
860 } else if (is_acpi_node(mac_cb->fw_port)) {
861 hns_mac_register_phy(mac_cb);
862 } else {
863 dev_err(mac_cb->dev, "mac%d cannot find phy node\n",
864 mac_cb->mac_id);
732 } 865 }
733 866
734 return 0; 867 return 0;
@@ -790,7 +923,7 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
790 else 923 else
791 mac_cb->mac_type = HNAE_PORT_DEBUG; 924 mac_cb->mac_type = HNAE_PORT_DEBUG;
792 925
793 mac_cb->phy_if = hns_mac_get_phy_if(mac_cb); 926 mac_cb->phy_if = dsaf_dev->misc_op->get_phy_if(mac_cb);
794 927
795 ret = hns_mac_get_mode(mac_cb->phy_if); 928 ret = hns_mac_get_mode(mac_cb->phy_if);
796 if (ret < 0) { 929 if (ret < 0) {
@@ -805,7 +938,7 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
805 if (ret) 938 if (ret)
806 return ret; 939 return ret;
807 940
808 cpld_led_reset(mac_cb); 941 mac_cb->dsaf_dev->misc_op->cpld_reset_led(mac_cb);
809 mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx); 942 mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx);
810 943
811 return 0; 944 return 0;
@@ -892,7 +1025,7 @@ void hns_mac_uninit(struct dsaf_device *dsaf_dev)
892 int max_port_num = hns_mac_get_max_port_num(dsaf_dev); 1025 int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
893 1026
894 for (i = 0; i < max_port_num; i++) { 1027 for (i = 0; i < max_port_num; i++) {
895 cpld_led_reset(dsaf_dev->mac_cb[i]); 1028 dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]);
896 dsaf_dev->mac_cb[i] = NULL; 1029 dsaf_dev->mac_cb[i] = NULL;
897 } 1030 }
898} 1031}
@@ -975,7 +1108,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb)
975 nic_data = 0; 1108 nic_data = 0;
976 mac_cb->txpkt_for_led = mac_cb->hw_stats.tx_good_pkts; 1109 mac_cb->txpkt_for_led = mac_cb->hw_stats.tx_good_pkts;
977 mac_cb->rxpkt_for_led = mac_cb->hw_stats.rx_good_pkts; 1110 mac_cb->rxpkt_for_led = mac_cb->hw_stats.rx_good_pkts;
978 hns_cpld_set_led(mac_cb, (int)mac_cb->link, 1111 mac_cb->dsaf_dev->misc_op->cpld_set_led(mac_cb, (int)mac_cb->link,
979 mac_cb->speed, nic_data); 1112 mac_cb->speed, nic_data);
980} 1113}
981 1114
@@ -985,5 +1118,5 @@ int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
985 if (!mac_cb || !mac_cb->cpld_ctrl) 1118 if (!mac_cb || !mac_cb->cpld_ctrl)
986 return 0; 1119 return 0;
987 1120
988 return cpld_set_led_id(mac_cb, status); 1121 return mac_cb->dsaf_dev->misc_op->cpld_set_led_id(mac_cb, status);
989} 1122}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 97ce9a750aaf..05a6e8f7a419 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -338,7 +338,7 @@ struct hns_mac_cb {
338 phy_interface_t phy_if; 338 phy_interface_t phy_if;
339 enum hnae_loop loop_mode; 339 enum hnae_loop loop_mode;
340 340
341 struct device_node *phy_node; 341 struct phy_device *phy_dev;
342 342
343 struct mac_hw_stats hw_stats; 343 struct mac_hw_stats hw_stats;
344}; 344};
@@ -448,8 +448,6 @@ int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en);
448int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu); 448int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu);
449int hns_mac_get_port_info(struct hns_mac_cb *mac_cb, 449int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
450 u8 *auto_neg, u16 *speed, u8 *duplex); 450 u8 *auto_neg, u16 *speed, u8 *duplex);
451phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb);
452int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en);
453int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb, 451int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
454 enum hnae_loop loop, int en); 452 enum hnae_loop loop, int en);
455void hns_mac_update_stats(struct hns_mac_cb *mac_cb); 453void hns_mac_update_stats(struct hns_mac_cb *mac_cb);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 1c2ddb25e776..67e8e1323205 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -7,6 +7,7 @@
7 * (at your option) any later version. 7 * (at your option) any later version.
8 */ 8 */
9 9
10#include <linux/acpi.h>
10#include <linux/device.h> 11#include <linux/device.h>
11#include <linux/init.h> 12#include <linux/init.h>
12#include <linux/interrupt.h> 13#include <linux/interrupt.h>
@@ -24,6 +25,7 @@
24#include "hns_dsaf_main.h" 25#include "hns_dsaf_main.h"
25#include "hns_dsaf_ppe.h" 26#include "hns_dsaf_ppe.h"
26#include "hns_dsaf_rcb.h" 27#include "hns_dsaf_rcb.h"
28#include "hns_dsaf_misc.h"
27 29
28const char *g_dsaf_mode_match[DSAF_MODE_MAX] = { 30const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
29 [DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf", 31 [DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf",
@@ -32,6 +34,13 @@ const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
32 [DSAF_MODE_DISABLE_SP] = "single-port", 34 [DSAF_MODE_DISABLE_SP] = "single-port",
33}; 35};
34 36
37static const struct acpi_device_id hns_dsaf_acpi_match[] = {
38 { "HISI00B1", 0 },
39 { "HISI00B2", 0 },
40 { },
41};
42MODULE_DEVICE_TABLE(acpi, hns_dsaf_acpi_match);
43
35int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) 44int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
36{ 45{
37 int ret, i; 46 int ret, i;
@@ -45,12 +54,24 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
45 struct device_node *np = dsaf_dev->dev->of_node; 54 struct device_node *np = dsaf_dev->dev->of_node;
46 struct platform_device *pdev = to_platform_device(dsaf_dev->dev); 55 struct platform_device *pdev = to_platform_device(dsaf_dev->dev);
47 56
48 if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1")) 57 if (dev_of_node(dsaf_dev->dev)) {
49 dsaf_dev->dsaf_ver = AE_VERSION_1; 58 if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1"))
50 else 59 dsaf_dev->dsaf_ver = AE_VERSION_1;
51 dsaf_dev->dsaf_ver = AE_VERSION_2; 60 else
61 dsaf_dev->dsaf_ver = AE_VERSION_2;
62 } else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
63 if (acpi_dev_found(hns_dsaf_acpi_match[0].id))
64 dsaf_dev->dsaf_ver = AE_VERSION_1;
65 else if (acpi_dev_found(hns_dsaf_acpi_match[1].id))
66 dsaf_dev->dsaf_ver = AE_VERSION_2;
67 else
68 return -ENXIO;
69 } else {
70 dev_err(dsaf_dev->dev, "cannot get cfg data from of or acpi\n");
71 return -ENXIO;
72 }
52 73
53 ret = of_property_read_string(np, "mode", &mode_str); 74 ret = device_property_read_string(dsaf_dev->dev, "mode", &mode_str);
54 if (ret) { 75 if (ret) {
55 dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret); 76 dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret);
56 return ret; 77 return ret;
@@ -80,32 +101,40 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
80 else 101 else
81 dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE; 102 dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE;
82 103
83 syscon = syscon_node_to_regmap( 104 if (dev_of_node(dsaf_dev->dev)) {
84 of_parse_phandle(np, "subctrl-syscon", 0)); 105 syscon = syscon_node_to_regmap(
85 if (IS_ERR_OR_NULL(syscon)) { 106 of_parse_phandle(np, "subctrl-syscon", 0));
86 res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++); 107 if (IS_ERR_OR_NULL(syscon)) {
87 if (!res) { 108 res = platform_get_resource(pdev, IORESOURCE_MEM,
88 dev_err(dsaf_dev->dev, "subctrl info is needed!\n"); 109 res_idx++);
89 return -ENOMEM; 110 if (!res) {
90 } 111 dev_err(dsaf_dev->dev, "subctrl info is needed!\n");
91 dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev, res); 112 return -ENOMEM;
92 if (!dsaf_dev->sc_base) { 113 }
93 dev_err(dsaf_dev->dev, "subctrl can not map!\n");
94 return -ENOMEM;
95 }
96 114
97 res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++); 115 dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev,
98 if (!res) { 116 res);
99 dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n"); 117 if (!dsaf_dev->sc_base) {
100 return -ENOMEM; 118 dev_err(dsaf_dev->dev, "subctrl can not map!\n");
101 } 119 return -ENOMEM;
102 dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev, res); 120 }
103 if (!dsaf_dev->sds_base) { 121
104 dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n"); 122 res = platform_get_resource(pdev, IORESOURCE_MEM,
105 return -ENOMEM; 123 res_idx++);
124 if (!res) {
125 dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n");
126 return -ENOMEM;
127 }
128
129 dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev,
130 res);
131 if (!dsaf_dev->sds_base) {
132 dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n");
133 return -ENOMEM;
134 }
135 } else {
136 dsaf_dev->sub_ctrl = syscon;
106 } 137 }
107 } else {
108 dsaf_dev->sub_ctrl = syscon;
109 } 138 }
110 139
111 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppe-base"); 140 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppe-base");
@@ -142,27 +171,28 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
142 } 171 }
143 } 172 }
144 173
145 ret = of_property_read_u32(np, "desc-num", &desc_num); 174 ret = device_property_read_u32(dsaf_dev->dev, "desc-num", &desc_num);
146 if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT || 175 if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT ||
147 desc_num > HNS_DSAF_MAX_DESC_CNT) { 176 desc_num > HNS_DSAF_MAX_DESC_CNT) {
148 dev_err(dsaf_dev->dev, "get desc-num(%d) fail, ret=%d!\n", 177 dev_err(dsaf_dev->dev, "get desc-num(%d) fail, ret=%d!\n",
149 desc_num, ret); 178 desc_num, ret);
150 goto unmap_base_addr; 179 return -EINVAL;
151 } 180 }
152 dsaf_dev->desc_num = desc_num; 181 dsaf_dev->desc_num = desc_num;
153 182
154 ret = of_property_read_u32(np, "reset-field-offset", &reset_offset); 183 ret = device_property_read_u32(dsaf_dev->dev, "reset-field-offset",
184 &reset_offset);
155 if (ret < 0) { 185 if (ret < 0) {
156 dev_dbg(dsaf_dev->dev, 186 dev_dbg(dsaf_dev->dev,
157 "get reset-field-offset fail, ret=%d!\r\n", ret); 187 "get reset-field-offset fail, ret=%d!\r\n", ret);
158 } 188 }
159 dsaf_dev->reset_offset = reset_offset; 189 dsaf_dev->reset_offset = reset_offset;
160 190
161 ret = of_property_read_u32(np, "buf-size", &buf_size); 191 ret = device_property_read_u32(dsaf_dev->dev, "buf-size", &buf_size);
162 if (ret < 0) { 192 if (ret < 0) {
163 dev_err(dsaf_dev->dev, 193 dev_err(dsaf_dev->dev,
164 "get buf-size fail, ret=%d!\r\n", ret); 194 "get buf-size fail, ret=%d!\r\n", ret);
165 goto unmap_base_addr; 195 return ret;
166 } 196 }
167 dsaf_dev->buf_size = buf_size; 197 dsaf_dev->buf_size = buf_size;
168 198
@@ -170,41 +200,19 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
170 if (dsaf_dev->buf_size_type < 0) { 200 if (dsaf_dev->buf_size_type < 0) {
171 dev_err(dsaf_dev->dev, 201 dev_err(dsaf_dev->dev,
172 "buf_size(%d) is wrong!\n", buf_size); 202 "buf_size(%d) is wrong!\n", buf_size);
173 goto unmap_base_addr; 203 return -EINVAL;
174 } 204 }
175 205
206 dsaf_dev->misc_op = hns_misc_op_get(dsaf_dev);
207 if (!dsaf_dev->misc_op)
208 return -ENOMEM;
209
176 if (!dma_set_mask_and_coherent(dsaf_dev->dev, DMA_BIT_MASK(64ULL))) 210 if (!dma_set_mask_and_coherent(dsaf_dev->dev, DMA_BIT_MASK(64ULL)))
177 dev_dbg(dsaf_dev->dev, "set mask to 64bit\n"); 211 dev_dbg(dsaf_dev->dev, "set mask to 64bit\n");
178 else 212 else
179 dev_err(dsaf_dev->dev, "set mask to 64bit fail!\n"); 213 dev_err(dsaf_dev->dev, "set mask to 64bit fail!\n");
180 214
181 return 0; 215 return 0;
182
183unmap_base_addr:
184 if (dsaf_dev->io_base)
185 iounmap(dsaf_dev->io_base);
186 if (dsaf_dev->ppe_base)
187 iounmap(dsaf_dev->ppe_base);
188 if (dsaf_dev->sds_base)
189 iounmap(dsaf_dev->sds_base);
190 if (dsaf_dev->sc_base)
191 iounmap(dsaf_dev->sc_base);
192 return ret;
193}
194
195static void hns_dsaf_free_cfg(struct dsaf_device *dsaf_dev)
196{
197 if (dsaf_dev->io_base)
198 iounmap(dsaf_dev->io_base);
199
200 if (dsaf_dev->ppe_base)
201 iounmap(dsaf_dev->ppe_base);
202
203 if (dsaf_dev->sds_base)
204 iounmap(dsaf_dev->sds_base);
205
206 if (dsaf_dev->sc_base)
207 iounmap(dsaf_dev->sc_base);
208} 216}
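
hns_dsaf_get_cfg moves from of_property_read_*() to device_property_read_*(), which resolves the same keys from either a DT node or ACPI _DSD; only the genuinely bus-specific parts (syscon phandles, version probing) keep explicit OF/ACPI branches. The unified accessor in isolation:

#include <linux/device.h>
#include <linux/property.h>

static int foo_read_desc_num(struct device *dev, u32 *desc_num)
{
	/* works unchanged whether dev was described by DT or ACPI */
	return device_property_read_u32(dev, "desc-num", desc_num);
}
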
209 217
210/** 218/**
@@ -508,10 +516,10 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
508 o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); 516 o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
509 dsaf_set_field(o_sbm_bp_cfg, 517 dsaf_set_field(o_sbm_bp_cfg,
510 DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M, 518 DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
511 DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110); 519 DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 48);
512 dsaf_set_field(o_sbm_bp_cfg, 520 dsaf_set_field(o_sbm_bp_cfg,
513 DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M, 521 DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
514 DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160); 522 DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 80);
515 dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); 523 dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
516 524
517 /* for no enable pfc mode */ 525 /* for no enable pfc mode */
@@ -519,29 +527,39 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
519 o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); 527 o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
520 dsaf_set_field(o_sbm_bp_cfg, 528 dsaf_set_field(o_sbm_bp_cfg,
521 DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M, 529 DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M,
522 DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 128); 530 DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 192);
523 dsaf_set_field(o_sbm_bp_cfg, 531 dsaf_set_field(o_sbm_bp_cfg,
524 DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M, 532 DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M,
525 DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 192); 533 DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 240);
526 dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); 534 dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
527 } 535 }
528 536
529 /* PPE */ 537 /* PPE */
530 reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i; 538 for (i = 0; i < DSAFV2_SBM_PPE_CHN; i++) {
531 o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); 539 reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i;
532 dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M, 540 o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
533 DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 10); 541 dsaf_set_field(o_sbm_bp_cfg,
534 dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M, 542 DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_M,
535 DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 12); 543 DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_S, 2);
536 dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); 544 dsaf_set_field(o_sbm_bp_cfg,
545 DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_M,
546 DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_S, 3);
547 dsaf_set_field(o_sbm_bp_cfg,
548 DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_M,
549 DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_S, 52);
550 dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
551 }
552
537 /* RoCEE */ 553 /* RoCEE */
538 for (i = 0; i < DASFV2_ROCEE_CRD_NUM; i++) { 554 for (i = 0; i < DASFV2_ROCEE_CRD_NUM; i++) {
539 reg = DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i; 555 reg = DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i;
540 o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); 556 o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
541 dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M, 557 dsaf_set_field(o_sbm_bp_cfg,
542 DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 2); 558 DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M,
543 dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M, 559 DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S, 2);
544 DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 4); 560 dsaf_set_field(o_sbm_bp_cfg,
561 DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_M,
562 DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S, 4);
545 dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); 563 dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
546 } 564 }
547} 565}
@@ -852,6 +870,8 @@ static void hns_dsaf_single_line_tbl_cfg(
852 struct dsaf_device *dsaf_dev, 870 struct dsaf_device *dsaf_dev,
853 u32 address, struct dsaf_tbl_line_cfg *ptbl_line) 871 u32 address, struct dsaf_tbl_line_cfg *ptbl_line)
854{ 872{
873 spin_lock_bh(&dsaf_dev->tcam_lock);
874
855 /*Write Addr*/ 875 /*Write Addr*/
856 hns_dsaf_tbl_line_addr_cfg(dsaf_dev, address); 876 hns_dsaf_tbl_line_addr_cfg(dsaf_dev, address);
857 877
@@ -860,6 +880,8 @@ static void hns_dsaf_single_line_tbl_cfg(
860 880
861 /*Write Plus*/ 881 /*Write Plus*/
862 hns_dsaf_tbl_line_pul(dsaf_dev); 882 hns_dsaf_tbl_line_pul(dsaf_dev);
883
884 spin_unlock_bh(&dsaf_dev->tcam_lock);
863} 885}
864 886
865/** 887/**
@@ -873,6 +895,8 @@ static void hns_dsaf_tcam_uc_cfg(
873 struct dsaf_tbl_tcam_data *ptbl_tcam_data, 895 struct dsaf_tbl_tcam_data *ptbl_tcam_data,
874 struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast) 896 struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast)
875{ 897{
898 spin_lock_bh(&dsaf_dev->tcam_lock);
899
876 /*Write Addr*/ 900 /*Write Addr*/
877 hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address); 901 hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
878 /*Write Tcam Data*/ 902 /*Write Tcam Data*/
@@ -881,6 +905,8 @@ static void hns_dsaf_tcam_uc_cfg(
881 hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, ptbl_tcam_ucast); 905 hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, ptbl_tcam_ucast);
882 /*Write Plus*/ 906 /*Write Plus*/
883 hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev); 907 hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
908
909 spin_unlock_bh(&dsaf_dev->tcam_lock);
884} 910}
885 911
886/** 912/**
@@ -895,6 +921,8 @@ static void hns_dsaf_tcam_mc_cfg(
895 struct dsaf_tbl_tcam_data *ptbl_tcam_data, 921 struct dsaf_tbl_tcam_data *ptbl_tcam_data,
896 struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast) 922 struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast)
897{ 923{
924 spin_lock_bh(&dsaf_dev->tcam_lock);
925
898 /*Write Addr*/ 926 /*Write Addr*/
899 hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address); 927 hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
900 /*Write Tcam Data*/ 928 /*Write Tcam Data*/
@@ -903,6 +931,8 @@ static void hns_dsaf_tcam_mc_cfg(
903 hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, ptbl_tcam_mcast); 931 hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, ptbl_tcam_mcast);
904 /*Write Plus*/ 932 /*Write Plus*/
905 hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev); 933 hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
934
935 spin_unlock_bh(&dsaf_dev->tcam_lock);
906} 936}
907 937
908/** 938/**
@@ -912,6 +942,8 @@ static void hns_dsaf_tcam_mc_cfg(
912 */ 942 */
913static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address) 943static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
914{ 944{
945 spin_lock_bh(&dsaf_dev->tcam_lock);
946
915 /*Write Addr*/ 947 /*Write Addr*/
916 hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address); 948 hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
917 949
@@ -924,6 +956,8 @@ static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
924 956
925 /*Write Plus*/ 957 /*Write Plus*/
926 hns_dsaf_tbl_tcam_mcast_pul(dsaf_dev); 958 hns_dsaf_tbl_tcam_mcast_pul(dsaf_dev);
959
960 spin_unlock_bh(&dsaf_dev->tcam_lock);
927} 961}
928 962
929/** 963/**
@@ -941,6 +975,8 @@ static void hns_dsaf_tcam_uc_get(
941 u32 tcam_read_data0; 975 u32 tcam_read_data0;
942 u32 tcam_read_data4; 976 u32 tcam_read_data4;
943 977
978 spin_lock_bh(&dsaf_dev->tcam_lock);
979
944 /*Write Addr*/ 980 /*Write Addr*/
945 hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address); 981 hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
946 982
@@ -949,9 +985,9 @@ static void hns_dsaf_tcam_uc_get(
949 985
950 /*read tcam data*/ 986 /*read tcam data*/
951 ptbl_tcam_data->tbl_tcam_data_high 987 ptbl_tcam_data->tbl_tcam_data_high
952 = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
953 ptbl_tcam_data->tbl_tcam_data_low
954 = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG); 988 = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
989 ptbl_tcam_data->tbl_tcam_data_low
990 = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
955 991
956 /*read tcam mcast*/ 992 /*read tcam mcast*/
957 tcam_read_data0 = dsaf_read_dev(dsaf_dev, 993 tcam_read_data0 = dsaf_read_dev(dsaf_dev,
@@ -973,6 +1009,8 @@ static void hns_dsaf_tcam_uc_get(
973 DSAF_TBL_UCAST_CFG1_OUT_PORT_S); 1009 DSAF_TBL_UCAST_CFG1_OUT_PORT_S);
974 ptbl_tcam_ucast->tbl_ucast_dvc 1010 ptbl_tcam_ucast->tbl_ucast_dvc
975 = dsaf_get_bit(tcam_read_data0, DSAF_TBL_UCAST_CFG1_DVC_S); 1011 = dsaf_get_bit(tcam_read_data0, DSAF_TBL_UCAST_CFG1_DVC_S);
1012
1013 spin_unlock_bh(&dsaf_dev->tcam_lock);
976} 1014}
977 1015
978/** 1016/**
@@ -989,6 +1027,8 @@ static void hns_dsaf_tcam_mc_get(
989{ 1027{
990 u32 data_tmp; 1028 u32 data_tmp;
991 1029
1030 spin_lock_bh(&dsaf_dev->tcam_lock);
1031
992 /*Write Addr*/ 1032 /*Write Addr*/
993 hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address); 1033 hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
994 1034
@@ -997,9 +1037,9 @@ static void hns_dsaf_tcam_mc_get(
997 1037
998 /*read tcam data*/ 1038 /*read tcam data*/
999 ptbl_tcam_data->tbl_tcam_data_high = 1039 ptbl_tcam_data->tbl_tcam_data_high =
1000 dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
1001 ptbl_tcam_data->tbl_tcam_data_low =
1002 dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG); 1040 dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
1041 ptbl_tcam_data->tbl_tcam_data_low =
1042 dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
1003 1043
1004 /*read tcam mcast*/ 1044 /*read tcam mcast*/
1005 ptbl_tcam_mcast->tbl_mcast_port_msk[0] = 1045 ptbl_tcam_mcast->tbl_mcast_port_msk[0] =
@@ -1019,6 +1059,8 @@ static void hns_dsaf_tcam_mc_get(
1019 ptbl_tcam_mcast->tbl_mcast_port_msk[4] = 1059 ptbl_tcam_mcast->tbl_mcast_port_msk[4] =
1020 dsaf_get_field(data_tmp, DSAF_TBL_MCAST_CFG4_VM128_112_M, 1060 dsaf_get_field(data_tmp, DSAF_TBL_MCAST_CFG4_VM128_112_M,
1021 DSAF_TBL_MCAST_CFG4_VM128_112_S); 1061 DSAF_TBL_MCAST_CFG4_VM128_112_S);
1062
1063 spin_unlock_bh(&dsaf_dev->tcam_lock);
1022} 1064}
1023 1065
1024/** 1066/**
@@ -1080,10 +1122,10 @@ int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
1080 u32 en) 1122 u32 en)
1081{ 1123{
1082 if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { 1124 if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
1083 if (!en) 1125 if (!en) {
1084 dev_err(dsaf_dev->dev, "dsafv1 can't close rx_pause!\n"); 1126 dev_err(dsaf_dev->dev, "dsafv1 can't close rx_pause!\n");
1085 1127 return -EINVAL;
1086 return -EINVAL; 1128 }
1087 } 1129 }
1088 1130
1089 dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4, 1131 dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
@@ -1295,9 +1337,9 @@ static int hns_dsaf_init_hw(struct dsaf_device *dsaf_dev)
1295 dev_dbg(dsaf_dev->dev, 1337 dev_dbg(dsaf_dev->dev,
1296 "hns_dsaf_init_hw begin %s !\n", dsaf_dev->ae_dev.name); 1338 "hns_dsaf_init_hw begin %s !\n", dsaf_dev->ae_dev.name);
1297 1339
1298 hns_dsaf_rst(dsaf_dev, 0); 1340 dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0);
1299 mdelay(10); 1341 mdelay(10);
1300 hns_dsaf_rst(dsaf_dev, 1); 1342 dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 1);
1301 1343
1302 hns_dsaf_comm_init(dsaf_dev); 1344 hns_dsaf_comm_init(dsaf_dev);
1303 1345
@@ -1325,7 +1367,7 @@ static int hns_dsaf_init_hw(struct dsaf_device *dsaf_dev)
1325static void hns_dsaf_remove_hw(struct dsaf_device *dsaf_dev) 1367static void hns_dsaf_remove_hw(struct dsaf_device *dsaf_dev)
1326{ 1368{
1327 /*reset*/ 1369 /*reset*/
1328 hns_dsaf_rst(dsaf_dev, 0); 1370 dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0);
1329} 1371}
1330 1372
1331/** 1373/**
@@ -1343,6 +1385,7 @@ static int hns_dsaf_init(struct dsaf_device *dsaf_dev)
1343 if (HNS_DSAF_IS_DEBUG(dsaf_dev)) 1385 if (HNS_DSAF_IS_DEBUG(dsaf_dev))
1344 return 0; 1386 return 0;
1345 1387
1388 spin_lock_init(&dsaf_dev->tcam_lock);
1346 ret = hns_dsaf_init_hw(dsaf_dev); 1389 ret = hns_dsaf_init_hw(dsaf_dev);
1347 if (ret) 1390 if (ret)
1348 return ret; 1391 return ret;
@@ -2088,11 +2131,24 @@ void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb)
2088 hns_dsaf_port_work_rate_cfg(dsaf_dev, mac_id, mode); 2131 hns_dsaf_port_work_rate_cfg(dsaf_dev, mac_id, mode);
2089} 2132}
2090 2133
2134static u32 hns_dsaf_get_inode_prio_reg(int index)
2135{
2136 int base_index, offset;
2137 u32 base_addr = DSAF_INODE_IN_PRIO_PAUSE_BASE_REG;
2138
2139 base_index = (index + 1) / DSAF_REG_PER_ZONE;
2140 offset = (index + 1) % DSAF_REG_PER_ZONE;
2141
2142 return base_addr + DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET * base_index +
2143 DSAF_INODE_IN_PRIO_PAUSE_OFFSET * offset;
2144}
2145
2091void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num) 2146void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
2092{ 2147{
2093 struct dsaf_hw_stats *hw_stats 2148 struct dsaf_hw_stats *hw_stats
2094 = &dsaf_dev->hw_stats[node_num]; 2149 = &dsaf_dev->hw_stats[node_num];
2095 bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver); 2150 bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
2151 int i;
2096 u32 reg_tmp; 2152 u32 reg_tmp;
2097 2153
2098 hw_stats->pad_drop += dsaf_read_dev(dsaf_dev, 2154 hw_stats->pad_drop += dsaf_read_dev(dsaf_dev,
@@ -2127,6 +2183,18 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
2127 hw_stats->stp_drop += dsaf_read_dev(dsaf_dev, 2183 hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
2128 DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num); 2184 DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num);
2129 2185
2186 /* pfc pause frame statistics stored in dsaf inode*/
2187 if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) {
2188 for (i = 0; i < DSAF_PRIO_NR; i++) {
2189 reg_tmp = hns_dsaf_get_inode_prio_reg(i);
2190 hw_stats->rx_pfc[i] += dsaf_read_dev(dsaf_dev,
2191 reg_tmp + 0x4 * (u64)node_num);
2192 hw_stats->tx_pfc[i] += dsaf_read_dev(dsaf_dev,
2193 DSAF_XOD_XGE_PFC_PRIO_CNT_BASE_REG +
2194 DSAF_XOD_XGE_PFC_PRIO_CNT_OFFSET * i +
2195 0xF0 * (u64)node_num);
2196 }
2197 }
2130 hw_stats->tx_pkts += dsaf_read_dev(dsaf_dev, 2198 hw_stats->tx_pkts += dsaf_read_dev(dsaf_dev,
2131 DSAF_XOD_RCVPKT_CNT_0_REG + 0x90 * (u64)node_num); 2199 DSAF_XOD_RCVPKT_CNT_0_REG + 0x90 * (u64)node_num);
2132} 2200}
@@ -2464,9 +2532,12 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
2464 p[i] = 0xdddddddd; 2532 p[i] = 0xdddddddd;
2465} 2533}
2466 2534
2467static char *hns_dsaf_get_node_stats_strings(char *data, int node) 2535static char *hns_dsaf_get_node_stats_strings(char *data, int node,
2536 struct dsaf_device *dsaf_dev)
2468{ 2537{
2469 char *buff = data; 2538 char *buff = data;
2539 int i;
2540 bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
2470 2541
2471 snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node); 2542 snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node);
2472 buff = buff + ETH_GSTRING_LEN; 2543 buff = buff + ETH_GSTRING_LEN;
@@ -2494,6 +2565,18 @@ static char *hns_dsaf_get_node_stats_strings(char *data, int node)
2494 buff = buff + ETH_GSTRING_LEN; 2565 buff = buff + ETH_GSTRING_LEN;
2495 snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node); 2566 snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node);
2496 buff = buff + ETH_GSTRING_LEN; 2567 buff = buff + ETH_GSTRING_LEN;
2568 if ((node < DSAF_SERVICE_NW_NUM) && (!is_ver1)) {
2569 for (i = 0; i < DSAF_PRIO_NR; i++) {
2570 snprintf(buff, ETH_GSTRING_LEN,
2571 "inod%d_pfc_prio%d_pkts", node, i);
2572 buff = buff + ETH_GSTRING_LEN;
2573 }
2574 for (i = 0; i < DSAF_PRIO_NR; i++) {
2575 snprintf(buff, ETH_GSTRING_LEN,
2576 "onod%d_pfc_prio%d_pkts", node, i);
2577 buff = buff + ETH_GSTRING_LEN;
2578 }
2579 }
2497 snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node); 2580 snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node);
2498 buff = buff + ETH_GSTRING_LEN; 2581 buff = buff + ETH_GSTRING_LEN;
2499 2582
@@ -2504,7 +2587,9 @@ static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data,
2504 int node_num) 2587 int node_num)
2505{ 2588{
2506 u64 *p = data; 2589 u64 *p = data;
2590 int i;
2507 struct dsaf_hw_stats *hw_stats = &ddev->hw_stats[node_num]; 2591 struct dsaf_hw_stats *hw_stats = &ddev->hw_stats[node_num];
2592 bool is_ver1 = AE_IS_VER1(ddev->dsaf_ver);
2508 2593
2509 p[0] = hw_stats->pad_drop; 2594 p[0] = hw_stats->pad_drop;
2510 p[1] = hw_stats->man_pkts; 2595 p[1] = hw_stats->man_pkts;
@@ -2519,8 +2604,16 @@ static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data,
2519 p[10] = hw_stats->local_addr_false; 2604 p[10] = hw_stats->local_addr_false;
2520 p[11] = hw_stats->vlan_drop; 2605 p[11] = hw_stats->vlan_drop;
2521 p[12] = hw_stats->stp_drop; 2606 p[12] = hw_stats->stp_drop;
2522 p[13] = hw_stats->tx_pkts; 2607 if ((node_num < DSAF_SERVICE_NW_NUM) && (!is_ver1)) {
2608 for (i = 0; i < DSAF_PRIO_NR; i++) {
2609 p[13 + i] = hw_stats->rx_pfc[i];
2610 p[13 + i + DSAF_PRIO_NR] = hw_stats->tx_pfc[i];
2611 }
2612 p[29] = hw_stats->tx_pkts;
2613 return &p[30];
2614 }
2523 2615
2616 p[13] = hw_stats->tx_pkts;
2524 return &p[14]; 2617 return &p[14];
2525} 2618}
2526 2619
@@ -2548,11 +2641,16 @@ void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port)
2548 *@stringset: type of values in data 2641 *@stringset: type of values in data
2549 *return dsaf string name count 2642 *return dsaf string name count
2550 */ 2643 */
2551int hns_dsaf_get_sset_count(int stringset) 2644int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset)
2552{ 2645{
2553 if (stringset == ETH_SS_STATS) 2646 bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
2554 return DSAF_STATIC_NUM;
2555 2647
2648 if (stringset == ETH_SS_STATS) {
2649 if (is_ver1)
2650 return DSAF_STATIC_NUM;
2651 else
2652 return DSAF_V2_STATIC_NUM;
2653 }
2556 return 0; 2654 return 0;
2557} 2655}
2558 2656
@@ -2562,7 +2660,8 @@ int hns_dsaf_get_sset_count(int stringset)
2562 *@data:strings name value 2660 *@data:strings name value
2563 *@port:port index 2661 *@port:port index
2564 */ 2662 */
2565void hns_dsaf_get_strings(int stringset, u8 *data, int port) 2663void hns_dsaf_get_strings(int stringset, u8 *data, int port,
2664 struct dsaf_device *dsaf_dev)
2566{ 2665{
2567 char *buff = (char *)data; 2666 char *buff = (char *)data;
2568 int node = port; 2667 int node = port;
@@ -2571,11 +2670,11 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port)
2571 return; 2670 return;
2572 2671
2573 /* for ge/xge node info */ 2672 /* for ge/xge node info */
2574 buff = hns_dsaf_get_node_stats_strings(buff, node); 2673 buff = hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev);
2575 2674
2576 /* for ppe node info */ 2675 /* for ppe node info */
2577 node = port + DSAF_PPE_INODE_BASE; 2676 node = port + DSAF_PPE_INODE_BASE;
2578 (void)hns_dsaf_get_node_stats_strings(buff, node); 2677 (void)hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev);
2579} 2678}
2580 2679
2581/** 2680/**
@@ -2611,7 +2710,7 @@ static int hns_dsaf_probe(struct platform_device *pdev)
2611 2710
2612 ret = hns_dsaf_init(dsaf_dev); 2711 ret = hns_dsaf_init(dsaf_dev);
2613 if (ret) 2712 if (ret)
2614 goto free_cfg; 2713 goto free_dev;
2615 2714
2616 ret = hns_mac_init(dsaf_dev); 2715 ret = hns_mac_init(dsaf_dev);
2617 if (ret) 2716 if (ret)
@@ -2636,9 +2735,6 @@ uninit_mac:
2636uninit_dsaf: 2735uninit_dsaf:
2637 hns_dsaf_free(dsaf_dev); 2736 hns_dsaf_free(dsaf_dev);
2638 2737
2639free_cfg:
2640 hns_dsaf_free_cfg(dsaf_dev);
2641
2642free_dev: 2738free_dev:
2643 hns_dsaf_free_dev(dsaf_dev); 2739 hns_dsaf_free_dev(dsaf_dev);
2644 2740
@@ -2661,8 +2757,6 @@ static int hns_dsaf_remove(struct platform_device *pdev)
2661 2757
2662 hns_dsaf_free(dsaf_dev); 2758 hns_dsaf_free(dsaf_dev);
2663 2759
2664 hns_dsaf_free_cfg(dsaf_dev);
2665
2666 hns_dsaf_free_dev(dsaf_dev); 2760 hns_dsaf_free_dev(dsaf_dev);
2667 2761
2668 return 0; 2762 return 0;
@@ -2680,6 +2774,7 @@ static struct platform_driver g_dsaf_driver = {
2680 .driver = { 2774 .driver = {
2681 .name = DSAF_DRV_NAME, 2775 .name = DSAF_DRV_NAME,
2682 .of_match_table = g_dsaf_match, 2776 .of_match_table = g_dsaf_match,
2777 .acpi_match_table = hns_dsaf_acpi_match,
2683 }, 2778 },
2684}; 2779};
2685 2780
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index f0502ba0a677..1daf018d9071 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -39,6 +39,9 @@ struct hns_mac_cb;
39 39
40#define DSAF_DUMP_REGS_NUM 504 40#define DSAF_DUMP_REGS_NUM 504
41#define DSAF_STATIC_NUM 28 41#define DSAF_STATIC_NUM 28
42#define DSAF_V2_STATIC_NUM 44
43#define DSAF_PRIO_NR 8
44#define DSAF_REG_PER_ZONE 3
42 45
43#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) 46#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
44#define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP) 47#define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP)
@@ -176,6 +179,8 @@ struct dsaf_hw_stats {
176 u64 local_addr_false; 179 u64 local_addr_false;
177 u64 vlan_drop; 180 u64 vlan_drop;
178 u64 stp_drop; 181 u64 stp_drop;
182 u64 rx_pfc[DSAF_PRIO_NR];
183 u64 tx_pfc[DSAF_PRIO_NR];
179 u64 tx_pkts; 184 u64 tx_pkts;
180}; 185};
181 186
@@ -268,6 +273,27 @@ struct dsaf_int_stat {
268 273
269}; 274};
270 275
276struct dsaf_misc_op {
277 void (*cpld_set_led)(struct hns_mac_cb *mac_cb, int link_status,
278 u16 speed, int data);
279 void (*cpld_reset_led)(struct hns_mac_cb *mac_cb);
280 int (*cpld_set_led_id)(struct hns_mac_cb *mac_cb,
281 enum hnae_led_state status);
 282 /* reset series functions; a block is held in reset while dereset is 0 */
283 void (*dsaf_reset)(struct dsaf_device *dsaf_dev, bool dereset);
284 void (*xge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
285 void (*xge_core_srst)(struct dsaf_device *dsaf_dev, u32 port,
286 bool dereset);
287 void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
288 void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
289 void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset);
290
291 phy_interface_t (*get_phy_if)(struct hns_mac_cb *mac_cb);
292 int (*get_sfp_prsnt)(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
293
294 int (*cfg_serdes_loopback)(struct hns_mac_cb *mac_cb, bool en);
295};
296
271/* Dsaf device struct define ,and mac -> dsaf */ 297/* Dsaf device struct define ,and mac -> dsaf */
272struct dsaf_device { 298struct dsaf_device {
273 struct device *dev; 299 struct device *dev;
@@ -292,9 +318,12 @@ struct dsaf_device {
292 struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM]; 318 struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM];
293 struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM]; 319 struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM];
294 struct hns_mac_cb *mac_cb[DSAF_MAX_PORT_NUM]; 320 struct hns_mac_cb *mac_cb[DSAF_MAX_PORT_NUM];
321 struct dsaf_misc_op *misc_op;
295 322
296 struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM]; 323 struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM];
297 struct dsaf_int_stat int_stat; 324 struct dsaf_int_stat int_stat;
 325 /* spinlock protecting tcam table config */
326 spinlock_t tcam_lock;
298}; 327};
299 328
300static inline void *hns_dsaf_dev_priv(const struct dsaf_device *dsaf_dev) 329static inline void *hns_dsaf_dev_priv(const struct dsaf_device *dsaf_dev)
@@ -388,27 +417,17 @@ int hns_dsaf_get_mac_entry_by_index(
388 u16 entry_index, 417 u16 entry_index,
389 struct dsaf_drv_mac_multi_dest_entry *mac_entry); 418 struct dsaf_drv_mac_multi_dest_entry *mac_entry);
390 419
391void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val);
392
393void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
394
395void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val);
396
397void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb); 420void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb);
398 421
399int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev); 422int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev);
400void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev); 423void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev);
401 424
402void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
403void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
404void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
405 u32 port, u32 val);
406
407void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 inode_num); 425void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 inode_num);
408 426
409int hns_dsaf_get_sset_count(int stringset); 427int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset);
410void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port); 428void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port);
411void hns_dsaf_get_strings(int stringset, u8 *data, int port); 429void hns_dsaf_get_strings(int stringset, u8 *data, int port,
430 struct dsaf_device *dsaf_dev);
412 431
413void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data); 432void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
414int hns_dsaf_get_regs_count(void); 433int hns_dsaf_get_regs_count(void);
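
For context, a minimal sketch of how callers are expected to use the new dsaf_misc_op table in place of the removed hns_dsaf_rst()/hns_*_srst_by_port() exports; it mirrors the hns_dsaf_init_hw() hunk above (assert reset, settle, de-reset), with error handling elided.

#include <linux/delay.h>
#include "hns_dsaf_main.h"

/* illustrative only: callers no longer care whether the backing
 * implementation is the DT/syscon one or the ACPI _DSM one
 */
static void example_dsaf_reset_cycle(struct dsaf_device *dsaf_dev)
{
	dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0); /* assert reset */
	mdelay(10);
	dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 1); /* de-reset */
}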
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index a837bb9e3839..8473287d4c8b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -12,6 +12,27 @@
12#include "hns_dsaf_ppe.h" 12#include "hns_dsaf_ppe.h"
13#include "hns_dsaf_reg.h" 13#include "hns_dsaf_reg.h"
14 14
15enum _dsm_op_index {
16 HNS_OP_RESET_FUNC = 0x1,
17 HNS_OP_SERDES_LP_FUNC = 0x2,
18 HNS_OP_LED_SET_FUNC = 0x3,
19 HNS_OP_GET_PORT_TYPE_FUNC = 0x4,
20 HNS_OP_GET_SFP_STAT_FUNC = 0x5,
21};
22
23enum _dsm_rst_type {
24 HNS_DSAF_RESET_FUNC = 0x1,
25 HNS_PPE_RESET_FUNC = 0x2,
26 HNS_XGE_CORE_RESET_FUNC = 0x3,
27 HNS_XGE_RESET_FUNC = 0x4,
28 HNS_GE_RESET_FUNC = 0x5,
29};
30
31const u8 hns_dsaf_acpi_dsm_uuid[] = {
32 0x1A, 0xAA, 0x85, 0x1A, 0x93, 0xE2, 0x5E, 0x41,
33 0x8E, 0x28, 0x8D, 0x69, 0x0A, 0x0F, 0x82, 0x0A
34};
35
15static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val) 36static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val)
16{ 37{
17 if (dsaf_dev->sub_ctrl) 38 if (dsaf_dev->sub_ctrl)
@@ -32,8 +53,8 @@ static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
32 return ret; 53 return ret;
33} 54}
34 55
35void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, 56static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
36 u16 speed, int data) 57 u16 speed, int data)
37{ 58{
38 int speed_reg = 0; 59 int speed_reg = 0;
39 u8 value; 60 u8 value;
@@ -65,13 +86,14 @@ void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
65 mac_cb->cpld_led_value = value; 86 mac_cb->cpld_led_value = value;
66 } 87 }
67 } else { 88 } else {
68 dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg, 89 value = (mac_cb->cpld_led_value) & (0x1 << DSAF_LED_ANCHOR_B);
69 CPLD_LED_DEFAULT_VALUE); 90 dsaf_write_syscon(mac_cb->cpld_ctrl,
70 mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE; 91 mac_cb->cpld_ctrl_reg, value);
92 mac_cb->cpld_led_value = value;
71 } 93 }
72} 94}
73 95
74void cpld_led_reset(struct hns_mac_cb *mac_cb) 96static void cpld_led_reset(struct hns_mac_cb *mac_cb)
75{ 97{
76 if (!mac_cb || !mac_cb->cpld_ctrl) 98 if (!mac_cb || !mac_cb->cpld_ctrl)
77 return; 99 return;
@@ -81,8 +103,8 @@ void cpld_led_reset(struct hns_mac_cb *mac_cb)
81 mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE; 103 mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
82} 104}
83 105
84int cpld_set_led_id(struct hns_mac_cb *mac_cb, 106static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
85 enum hnae_led_state status) 107 enum hnae_led_state status)
86{ 108{
87 switch (status) { 109 switch (status) {
88 case HNAE_LED_ACTIVE: 110 case HNAE_LED_ACTIVE:
@@ -93,7 +115,7 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb,
93 CPLD_LED_ON_VALUE); 115 CPLD_LED_ON_VALUE);
94 dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg, 116 dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
95 mac_cb->cpld_led_value); 117 mac_cb->cpld_led_value);
96 return 2; 118 break;
97 case HNAE_LED_INACTIVE: 119 case HNAE_LED_INACTIVE:
98 dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B, 120 dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
99 CPLD_LED_DEFAULT_VALUE); 121 CPLD_LED_DEFAULT_VALUE);
@@ -101,7 +123,8 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb,
101 mac_cb->cpld_led_value); 123 mac_cb->cpld_led_value);
102 break; 124 break;
103 default: 125 default:
104 break; 126 dev_err(mac_cb->dev, "invalid led state: %d!", status);
127 return -EINVAL;
105 } 128 }
106 129
107 return 0; 130 return 0;
@@ -109,12 +132,40 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb,
109 132
110#define RESET_REQ_OR_DREQ 1 133#define RESET_REQ_OR_DREQ 1
111 134
112void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val) 135static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type,
136 u32 port_type, u32 port, u32 val)
137{
138 union acpi_object *obj;
139 union acpi_object obj_args[3], argv4;
140
141 obj_args[0].integer.type = ACPI_TYPE_INTEGER;
142 obj_args[0].integer.value = port_type;
143 obj_args[1].integer.type = ACPI_TYPE_INTEGER;
144 obj_args[1].integer.value = port;
145 obj_args[2].integer.type = ACPI_TYPE_INTEGER;
146 obj_args[2].integer.value = val;
147
148 argv4.type = ACPI_TYPE_PACKAGE;
149 argv4.package.count = 3;
150 argv4.package.elements = obj_args;
151
152 obj = acpi_evaluate_dsm(ACPI_HANDLE(dsaf_dev->dev),
153 hns_dsaf_acpi_dsm_uuid, 0, op_type, &argv4);
154 if (!obj) {
155 dev_warn(dsaf_dev->dev, "reset port_type%d port%d fail!",
156 port_type, port);
157 return;
158 }
159
160 ACPI_FREE(obj);
161}
162
163static void hns_dsaf_rst(struct dsaf_device *dsaf_dev, bool dereset)
113{ 164{
114 u32 xbar_reg_addr; 165 u32 xbar_reg_addr;
115 u32 nt_reg_addr; 166 u32 nt_reg_addr;
116 167
117 if (!val) { 168 if (!dereset) {
118 xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_REQ_REG; 169 xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_REQ_REG;
119 nt_reg_addr = DSAF_SUB_SC_NT_RESET_REQ_REG; 170 nt_reg_addr = DSAF_SUB_SC_NT_RESET_REQ_REG;
120 } else { 171 } else {
@@ -126,7 +177,15 @@ void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val)
126 dsaf_write_sub(dsaf_dev, nt_reg_addr, RESET_REQ_OR_DREQ); 177 dsaf_write_sub(dsaf_dev, nt_reg_addr, RESET_REQ_OR_DREQ);
127} 178}
128 179
129void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) 180static void hns_dsaf_rst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
181{
182 hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
183 HNS_DSAF_RESET_FUNC,
184 0, dereset);
185}
186
187static void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
188 bool dereset)
130{ 189{
131 u32 reg_val = 0; 190 u32 reg_val = 0;
132 u32 reg_addr; 191 u32 reg_addr;
@@ -137,7 +196,7 @@ void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
137 reg_val |= RESET_REQ_OR_DREQ; 196 reg_val |= RESET_REQ_OR_DREQ;
138 reg_val |= 0x2082082 << dsaf_dev->mac_cb[port]->port_rst_off; 197 reg_val |= 0x2082082 << dsaf_dev->mac_cb[port]->port_rst_off;
139 198
140 if (val == 0) 199 if (!dereset)
141 reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG; 200 reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
142 else 201 else
143 reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG; 202 reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
@@ -145,8 +204,15 @@ void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
145 dsaf_write_sub(dsaf_dev, reg_addr, reg_val); 204 dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
146} 205}
147 206
148void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev, 207static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
149 u32 port, u32 val) 208 u32 port, bool dereset)
209{
210 hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
211 HNS_XGE_RESET_FUNC, port, dereset);
212}
213
214static void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
215 u32 port, bool dereset)
150{ 216{
151 u32 reg_val = 0; 217 u32 reg_val = 0;
152 u32 reg_addr; 218 u32 reg_addr;
@@ -157,7 +223,7 @@ void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
157 reg_val |= XGMAC_TRX_CORE_SRST_M 223 reg_val |= XGMAC_TRX_CORE_SRST_M
158 << dsaf_dev->mac_cb[port]->port_rst_off; 224 << dsaf_dev->mac_cb[port]->port_rst_off;
159 225
160 if (val == 0) 226 if (!dereset)
161 reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG; 227 reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
162 else 228 else
163 reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG; 229 reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
@@ -165,7 +231,16 @@ void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
165 dsaf_write_sub(dsaf_dev, reg_addr, reg_val); 231 dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
166} 232}
167 233
168void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) 234static void
235hns_dsaf_xge_core_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
236 u32 port, bool dereset)
237{
238 hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
239 HNS_XGE_CORE_RESET_FUNC, port, dereset);
240}
241
242static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
243 bool dereset)
169{ 244{
170 u32 reg_val_1; 245 u32 reg_val_1;
171 u32 reg_val_2; 246 u32 reg_val_2;
@@ -183,7 +258,7 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
183 else 258 else
184 reg_val_2 = 0x2082082 << port_rst_off; 259 reg_val_2 = 0x2082082 << port_rst_off;
185 260
186 if (val == 0) { 261 if (!dereset) {
187 dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG, 262 dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
188 reg_val_1); 263 reg_val_1);
189 264
@@ -198,9 +273,13 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
198 } 273 }
199 } else { 274 } else {
200 reg_val_1 = 0x15540 << dsaf_dev->reset_offset; 275 reg_val_1 = 0x15540 << dsaf_dev->reset_offset;
201 reg_val_2 = 0x100 << dsaf_dev->reset_offset;
202 276
203 if (val == 0) { 277 if (AE_IS_VER1(dsaf_dev->dsaf_ver))
278 reg_val_2 = 0x100 << dsaf_dev->reset_offset;
279 else
280 reg_val_2 = 0x40 << dsaf_dev->reset_offset;
281
282 if (!dereset) {
204 dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG, 283 dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
205 reg_val_1); 284 reg_val_1);
206 285
@@ -216,14 +295,22 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
216 } 295 }
217} 296}
218 297
219void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) 298static void hns_dsaf_ge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
299 u32 port, bool dereset)
300{
301 hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
302 HNS_GE_RESET_FUNC, port, dereset);
303}
304
305static void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
306 bool dereset)
220{ 307{
221 u32 reg_val = 0; 308 u32 reg_val = 0;
222 u32 reg_addr; 309 u32 reg_addr;
223 310
224 reg_val |= RESET_REQ_OR_DREQ << dsaf_dev->mac_cb[port]->port_rst_off; 311 reg_val |= RESET_REQ_OR_DREQ << dsaf_dev->mac_cb[port]->port_rst_off;
225 312
226 if (val == 0) 313 if (!dereset)
227 reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG; 314 reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
228 else 315 else
229 reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG; 316 reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
@@ -231,15 +318,24 @@ void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
231 dsaf_write_sub(dsaf_dev, reg_addr, reg_val); 318 dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
232} 319}
233 320
234void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val) 321static void
322hns_ppe_srst_by_port_acpi(struct dsaf_device *dsaf_dev, u32 port, bool dereset)
323{
324 hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
325 HNS_PPE_RESET_FUNC, port, dereset);
326}
327
328static void hns_ppe_com_srst(struct dsaf_device *dsaf_dev, bool dereset)
235{ 329{
236 struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev;
237 u32 reg_val; 330 u32 reg_val;
238 u32 reg_addr; 331 u32 reg_addr;
239 332
333 if (!(dev_of_node(dsaf_dev->dev)))
334 return;
335
240 if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { 336 if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
241 reg_val = RESET_REQ_OR_DREQ; 337 reg_val = RESET_REQ_OR_DREQ;
242 if (val == 0) 338 if (!dereset)
243 reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG; 339 reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG;
244 else 340 else
245 reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG; 341 reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG;
@@ -247,7 +343,7 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
247 } else { 343 } else {
248 reg_val = 0x100 << dsaf_dev->reset_offset; 344 reg_val = 0x100 << dsaf_dev->reset_offset;
249 345
250 if (val == 0) 346 if (!dereset)
251 reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG; 347 reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
252 else 348 else
253 reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG; 349 reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
@@ -261,7 +357,7 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
261 * @mac_cb: mac control block 357 * @mac_cb: mac control block
 262 * return phy interface 358 * return phy interface
263 */ 359 */
264phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) 360static phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
265{ 361{
266 u32 mode; 362 u32 mode;
267 u32 reg; 363 u32 reg;
@@ -293,6 +389,36 @@ phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
293 return phy_if; 389 return phy_if;
294} 390}
295 391
392static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
393{
394 phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
395 union acpi_object *obj;
396 union acpi_object obj_args, argv4;
397
398 obj_args.integer.type = ACPI_TYPE_INTEGER;
399 obj_args.integer.value = mac_cb->mac_id;
400
 401 argv4.type = ACPI_TYPE_PACKAGE;
 402 argv4.package.count = 1;
 403 argv4.package.elements = &obj_args;
404
405 obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
406 hns_dsaf_acpi_dsm_uuid, 0,
407 HNS_OP_GET_PORT_TYPE_FUNC, &argv4);
408
409 if (!obj || obj->type != ACPI_TYPE_INTEGER)
410 return phy_if;
411
412 phy_if = obj->integer.value ?
413 PHY_INTERFACE_MODE_XGMII : PHY_INTERFACE_MODE_SGMII;
414
415 dev_dbg(mac_cb->dev, "mac_id=%d, phy_if=%d\n", mac_cb->mac_id, phy_if);
416
417 ACPI_FREE(obj);
418
419 return phy_if;
420}
421
296int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt) 422int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
297{ 423{
298 if (!mac_cb->cpld_ctrl) 424 if (!mac_cb->cpld_ctrl)
@@ -309,13 +435,8 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
309 * @mac_cb: mac control block 435 * @mac_cb: mac control block
 310 * return 0 == success 436 * return 0 == success
311 */ 437 */
312int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en) 438static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
313{ 439{
314 /* port 0-3 hilink4 base is serdes_vaddr + 0x00280000
315 * port 4-7 hilink3 base is serdes_vaddr + 0x00200000
316 */
317 u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
318 (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
319 const u8 lane_id[] = { 440 const u8 lane_id[] = {
320 0, /* mac 0 -> lane 0 */ 441 0, /* mac 0 -> lane 0 */
321 1, /* mac 1 -> lane 1 */ 442 1, /* mac 1 -> lane 1 */
@@ -332,7 +453,7 @@ int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en)
332 int sfp_prsnt; 453 int sfp_prsnt;
333 int ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt); 454 int ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
334 455
335 if (!mac_cb->phy_node) { 456 if (!mac_cb->phy_dev) {
336 if (ret) 457 if (ret)
337 pr_info("please confirm sfp is present or not\n"); 458 pr_info("please confirm sfp is present or not\n");
338 else 459 else
@@ -341,13 +462,110 @@ int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en)
341 } 462 }
342 463
343 if (mac_cb->serdes_ctrl) { 464 if (mac_cb->serdes_ctrl) {
344 u32 origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset); 465 u32 origin;
466
467 if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) {
468#define HILINK_ACCESS_SEL_CFG 0x40008
469 /* hilink4 & hilink3 use the same xge training and
470 * xge u adaptor. There is a hilink access sel cfg
471 * register to select which one to be configed
472 */
473 if ((!HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev)) &&
474 (mac_cb->mac_id <= 3))
475 dsaf_write_syscon(mac_cb->serdes_ctrl,
476 HILINK_ACCESS_SEL_CFG, 0);
477 else
478 dsaf_write_syscon(mac_cb->serdes_ctrl,
479 HILINK_ACCESS_SEL_CFG, 3);
480 }
345 481
346 dsaf_set_field(origin, 1ull << 10, 10, !!en); 482 origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset);
483
484 dsaf_set_field(origin, 1ull << 10, 10, en);
347 dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin); 485 dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
348 } else { 486 } else {
349 dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en); 487 u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
488 (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
489 dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
490 }
491
492 return 0;
493}
494
495static int
496hns_mac_config_sds_loopback_acpi(struct hns_mac_cb *mac_cb, bool en)
497{
498 union acpi_object *obj;
499 union acpi_object obj_args[3], argv4;
500
501 obj_args[0].integer.type = ACPI_TYPE_INTEGER;
502 obj_args[0].integer.value = mac_cb->mac_id;
503 obj_args[1].integer.type = ACPI_TYPE_INTEGER;
504 obj_args[1].integer.value = !!en;
505
506 argv4.type = ACPI_TYPE_PACKAGE;
507 argv4.package.count = 2;
508 argv4.package.elements = obj_args;
509
510 obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dsaf_dev->dev),
511 hns_dsaf_acpi_dsm_uuid, 0,
512 HNS_OP_SERDES_LP_FUNC, &argv4);
513 if (!obj) {
514 dev_warn(mac_cb->dsaf_dev->dev, "set port%d serdes lp fail!",
515 mac_cb->mac_id);
516
517 return -ENOTSUPP;
350 } 518 }
351 519
520 ACPI_FREE(obj);
521
352 return 0; 522 return 0;
353} 523}
524
525struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
526{
527 struct dsaf_misc_op *misc_op;
528
529 misc_op = devm_kzalloc(dsaf_dev->dev, sizeof(*misc_op), GFP_KERNEL);
530 if (!misc_op)
531 return NULL;
532
533 if (dev_of_node(dsaf_dev->dev)) {
534 misc_op->cpld_set_led = hns_cpld_set_led;
535 misc_op->cpld_reset_led = cpld_led_reset;
536 misc_op->cpld_set_led_id = cpld_set_led_id;
537
538 misc_op->dsaf_reset = hns_dsaf_rst;
539 misc_op->xge_srst = hns_dsaf_xge_srst_by_port;
540 misc_op->xge_core_srst = hns_dsaf_xge_core_srst_by_port;
541 misc_op->ge_srst = hns_dsaf_ge_srst_by_port;
542 misc_op->ppe_srst = hns_ppe_srst_by_port;
543 misc_op->ppe_comm_srst = hns_ppe_com_srst;
544
545 misc_op->get_phy_if = hns_mac_get_phy_if;
546 misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
547
548 misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback;
549 } else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
550 misc_op->cpld_set_led = hns_cpld_set_led;
551 misc_op->cpld_reset_led = cpld_led_reset;
552 misc_op->cpld_set_led_id = cpld_set_led_id;
553
554 misc_op->dsaf_reset = hns_dsaf_rst_acpi;
555 misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi;
556 misc_op->xge_core_srst = hns_dsaf_xge_core_srst_by_port_acpi;
557 misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi;
558 misc_op->ppe_srst = hns_ppe_srst_by_port_acpi;
559 misc_op->ppe_comm_srst = hns_ppe_com_srst;
560
561 misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
562 misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
563
564 misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi;
565 } else {
566 devm_kfree(dsaf_dev->dev, (void *)misc_op);
567 misc_op = NULL;
568 }
569
 570 return misc_op;
571}
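
The call site of hns_misc_op_get() is not shown in this diff; as a hypothetical sketch (the -ENXIO choice and probe placement are assumptions, not taken from the commit), a probe path would pick the DT or ACPI ops table once and fail cleanly on unsupported firmware:

#include <linux/errno.h>
#include "hns_dsaf_misc.h"

/* hypothetical consumer of hns_misc_op_get(); not part of this diff */
static int example_probe_misc(struct dsaf_device *dsaf_dev)
{
	dsaf_dev->misc_op = hns_misc_op_get(dsaf_dev);
	if (!dsaf_dev->misc_op) /* neither OF nor ACPI fwnode */
		return -ENXIO;

	return 0;
}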
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
index 419f07aa9734..f06bb03d47a6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
@@ -33,11 +33,6 @@
33#define DSAF_LED_DATA_B 4 33#define DSAF_LED_DATA_B 4
34#define DSAF_LED_ANCHOR_B 5 34#define DSAF_LED_ANCHOR_B 5
35 35
36void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, 36struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev);
37 u16 speed, int data);
38void cpld_led_reset(struct hns_mac_cb *mac_cb);
39int cpld_set_led_id(struct hns_mac_cb *mac_cb,
40 enum hnae_led_state status);
41int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
42 37
43#endif 38#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 8cd151a5245e..ff8b6a468b24 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -112,7 +112,6 @@ void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
112static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common, 112static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
113 int ppe_idx) 113 int ppe_idx)
114{ 114{
115
116 return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET; 115 return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
117} 116}
118 117
@@ -200,11 +199,12 @@ static void hns_ppe_set_port_mode(struct hns_ppe_cb *ppe_cb,
200static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common) 199static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common)
201{ 200{
202 enum ppe_qid_mode qid_mode; 201 enum ppe_qid_mode qid_mode;
203 enum dsaf_mode dsaf_mode = ppe_common->dsaf_dev->dsaf_mode; 202 struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev;
203 enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
204 204
205 hns_ppe_com_srst(ppe_common, 0); 205 dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 0);
206 mdelay(100); 206 mdelay(100);
207 hns_ppe_com_srst(ppe_common, 1); 207 dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 1);
208 mdelay(100); 208 mdelay(100);
209 209
210 if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) { 210 if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
@@ -288,9 +288,9 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
288 /* get default RSS key */ 288 /* get default RSS key */
289 netdev_rss_key_fill(ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE); 289 netdev_rss_key_fill(ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE);
290 290
291 hns_ppe_srst_by_port(dsaf_dev, port, 0); 291 dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0);
292 mdelay(10); 292 mdelay(10);
293 hns_ppe_srst_by_port(dsaf_dev, port, 1); 293 dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 1);
294 294
295 /* clr and msk except irq*/ 295 /* clr and msk except irq*/
296 hns_ppe_exc_irq_en(ppe_cb, 0); 296 hns_ppe_exc_irq_en(ppe_cb, 0);
@@ -328,10 +328,11 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
328static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb) 328static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
329{ 329{
330 u32 port; 330 u32 port;
331 struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev;
331 332
332 if (ppe_cb->ppe_common_cb) { 333 if (ppe_cb->ppe_common_cb) {
333 port = ppe_cb->index; 334 port = ppe_cb->index;
334 hns_ppe_srst_by_port(ppe_cb->ppe_common_cb->dsaf_dev, port, 0); 335 dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0);
335 } 336 }
336} 337}
337 338
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 4ef6d23d998e..ef1107777c08 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -458,7 +458,6 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
458 u32 i; 458 u32 i;
459 u32 ring_num = rcb_common->ring_num; 459 u32 ring_num = rcb_common->ring_num;
460 int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common); 460 int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
461 struct device_node *np = rcb_common->dsaf_dev->dev->of_node;
462 struct platform_device *pdev = 461 struct platform_device *pdev =
463 to_platform_device(rcb_common->dsaf_dev->dev); 462 to_platform_device(rcb_common->dsaf_dev->dev);
464 bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver); 463 bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);
@@ -473,10 +472,10 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
473 ring_pair_cb->port_id_in_comm = 472 ring_pair_cb->port_id_in_comm =
474 hns_rcb_get_port_in_comm(rcb_common, i); 473 hns_rcb_get_port_in_comm(rcb_common, i);
475 ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] = 474 ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
476 is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) : 475 is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2) :
477 platform_get_irq(pdev, base_irq_idx + i * 3 + 1); 476 platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
478 ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] = 477 ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
479 is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1) : 478 is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2 + 1) :
480 platform_get_irq(pdev, base_irq_idx + i * 3); 479 platform_get_irq(pdev, base_irq_idx + i * 3);
481 ring_pair_cb->q.phy_base = 480 ring_pair_cb->q.phy_base =
482 RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i); 481 RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
@@ -541,7 +540,7 @@ int hns_rcb_set_coalesce_usecs(
541 } 540 }
542 if (timeout > HNS_RCB_MAX_COALESCED_USECS) { 541 if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
543 dev_err(rcb_common->dsaf_dev->dev, 542 dev_err(rcb_common->dsaf_dev->dev,
544 "error: not support coalesce %dus!\n", timeout); 543 "error: coalesce_usecs setting supports 0~1023us\n");
545 return -EINVAL; 544 return -EINVAL;
546 } 545 }
547 hns_rcb_set_port_timeout(rcb_common, port_idx, timeout); 546 hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index bd54dac82ee0..99b4e1ba0a94 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -40,7 +40,7 @@ struct rcb_common_cb;
40#define HNS_RCB_DEF_COALESCED_FRAMES 50 40#define HNS_RCB_DEF_COALESCED_FRAMES 50
41#define HNS_RCB_CLK_FREQ_MHZ 350 41#define HNS_RCB_CLK_FREQ_MHZ 350
42#define HNS_RCB_MAX_COALESCED_USECS 0x3ff 42#define HNS_RCB_MAX_COALESCED_USECS 0x3ff
43#define HNS_RCB_DEF_COALESCED_USECS 3 43#define HNS_RCB_DEF_COALESCED_USECS 50
44 44
45#define HNS_RCB_COMMON_ENDIAN 1 45#define HNS_RCB_COMMON_ENDIAN 1
46 46
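
The reworded error message above corresponds to HNS_RCB_MAX_COALESCED_USECS being 0x3ff, i.e. 1023us, while the default moves from 3us to 50us (presumably trading a little latency for a lower interrupt rate). A standalone sketch (not driver code) of the implied bounds check:

#include <stdbool.h>

#define HNS_RCB_MAX_COALESCED_USECS 0x3ff /* 1023us */

/* valid coalesce timeouts are 0..1023us inclusive */
static bool coalesce_usecs_valid(unsigned int timeout)
{
	return timeout <= HNS_RCB_MAX_COALESCED_USECS;
}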
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 7c3b5103d151..235f74444b1d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -32,7 +32,7 @@
32#define DSAFV2_SBM_NUM 8 32#define DSAFV2_SBM_NUM 8
33#define DSAFV2_SBM_XGE_CHN 6 33#define DSAFV2_SBM_XGE_CHN 6
34#define DSAFV2_SBM_PPE_CHN 1 34#define DSAFV2_SBM_PPE_CHN 1
35#define DASFV2_ROCEE_CRD_NUM 8 35#define DASFV2_ROCEE_CRD_NUM 1
36 36
37#define DSAF_VOQ_NUM DSAF_NODE_NUM 37#define DSAF_VOQ_NUM DSAF_NODE_NUM
38#define DSAF_INODE_NUM DSAF_NODE_NUM 38#define DSAF_INODE_NUM DSAF_NODE_NUM
@@ -166,6 +166,9 @@
166#define DSAF_INODE_GE_FC_EN_0_REG 0x1B00 166#define DSAF_INODE_GE_FC_EN_0_REG 0x1B00
167#define DSAF_INODE_VC0_IN_PKT_NUM_0_REG 0x1B50 167#define DSAF_INODE_VC0_IN_PKT_NUM_0_REG 0x1B50
168#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x1C00 168#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x1C00
169#define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG 0x1C00
170#define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET 0x100
171#define DSAF_INODE_IN_PRIO_PAUSE_OFFSET 0x50
169 172
170#define DSAF_SBM_CFG_REG_0_REG 0x2000 173#define DSAF_SBM_CFG_REG_0_REG 0x2000
171#define DSAF_SBM_BP_CFG_0_XGE_REG_0_REG 0x2004 174#define DSAF_SBM_BP_CFG_0_XGE_REG_0_REG 0x2004
@@ -175,7 +178,7 @@
175#define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG 0x200C 178#define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG 0x200C
176#define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG 0x230C 179#define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG 0x230C
177#define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x260C 180#define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x260C
178#define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C 181#define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C
179#define DSAF_SBM_FREE_CNT_0_0_REG 0x2010 182#define DSAF_SBM_FREE_CNT_0_0_REG 0x2010
180#define DSAF_SBM_FREE_CNT_1_0_REG 0x2014 183#define DSAF_SBM_FREE_CNT_1_0_REG 0x2014
181#define DSAF_SBM_BP_CNT_0_0_REG 0x2018 184#define DSAF_SBM_BP_CNT_0_0_REG 0x2018
@@ -232,6 +235,8 @@
232#define DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG 0x3074 235#define DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG 0x3074
233#define DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG 0x3078 236#define DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG 0x3078
234#define DSAF_XOD_FIFO_STATUS_0_REG 0x307C 237#define DSAF_XOD_FIFO_STATUS_0_REG 0x307C
238#define DSAF_XOD_XGE_PFC_PRIO_CNT_BASE_REG 0x3A00
239#define DSAF_XOD_XGE_PFC_PRIO_CNT_OFFSET 0x4
235 240
236#define DSAF_VOQ_ECC_INVERT_EN_0_REG 0x4004 241#define DSAF_VOQ_ECC_INVERT_EN_0_REG 0x4004
237#define DSAF_VOQ_SRAM_PKT_NUM_0_REG 0x4008 242#define DSAF_VOQ_SRAM_PKT_NUM_0_REG 0x4008
@@ -791,6 +796,18 @@
791#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S 9 796#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S 9
792#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9) 797#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9)
793 798
799#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S 0
800#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M (((1ULL << 8) - 1) << 0)
801#define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S 8
802#define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_M (((1ULL << 8) - 1) << 8)
803
804#define DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_S (0)
805#define DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_M (((1ULL << 6) - 1) << 0)
806#define DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_S (6)
807#define DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_M (((1ULL << 6) - 1) << 6)
808#define DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_S (12)
809#define DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_M (((1ULL << 6) - 1) << 12)
810
794#define DSAF_TBL_TCAM_ADDR_S 0 811#define DSAF_TBL_TCAM_ADDR_S 0
795#define DSAF_TBL_TCAM_ADDR_M ((1ULL << 9) - 1) 812#define DSAF_TBL_TCAM_ADDR_M ((1ULL << 9) - 1)
796 813
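
All the new *_S/*_M pairs above follow one convention: an N-bit field at shift S carries the mask (((1ULL << N) - 1) << S). A standalone sketch (not driver code; set_field() only mimics how dsaf_set_field() is used in the SBM hunk earlier) packing the three PPE cfg2 fields with the same values the driver writes (2, 3, 52):

#include <stdint.h>
#include <stdio.h>

#define PPE_SET_BUF_NUM_S    0
#define PPE_SET_BUF_NUM_M    (((1ULL << 6) - 1) << 0)
#define PPE_RESET_BUF_NUM_S  6
#define PPE_RESET_BUF_NUM_M  (((1ULL << 6) - 1) << 6)
#define PPE_CFG_USEFUL_NUM_S 12
#define PPE_CFG_USEFUL_NUM_M (((1ULL << 6) - 1) << 12)

/* mimics dsaf_set_field(origin, mask, shift, val): read-modify-write */
static uint32_t set_field(uint32_t origin, uint64_t mask, int shift,
			  uint32_t val)
{
	return (uint32_t)((origin & ~mask) | ((uint64_t)val << shift));
}

int main(void)
{
	uint32_t cfg = 0;

	cfg = set_field(cfg, PPE_SET_BUF_NUM_M, PPE_SET_BUF_NUM_S, 2);
	cfg = set_field(cfg, PPE_RESET_BUF_NUM_M, PPE_RESET_BUF_NUM_S, 3);
	cfg = set_field(cfg, PPE_CFG_USEFUL_NUM_M, PPE_CFG_USEFUL_NUM_S, 52);
	printf("cfg2 = 0x%x\n", cfg); /* 0x340c2 */
	return 0;
}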
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index fd90f3737963..8f4f0e8da984 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -119,7 +119,7 @@ static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode)
119 = (struct dsaf_device *)dev_get_drvdata(drv->dev); 119 = (struct dsaf_device *)dev_get_drvdata(drv->dev);
120 u32 port = drv->mac_id; 120 u32 port = drv->mac_id;
121 121
122 hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 1); 122 dsaf_dev->misc_op->xge_core_srst(dsaf_dev, port, 1);
123 mdelay(10); 123 mdelay(10);
124 124
125 /*enable XGE rX/tX */ 125 /*enable XGE rX/tX */
@@ -157,7 +157,7 @@ static void hns_xgmac_disable(void *mac_drv, enum mac_commom_mode mode)
157 } 157 }
158 158
159 mdelay(10); 159 mdelay(10);
160 hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 0); 160 dsaf_dev->misc_op->xge_core_srst(dsaf_dev, port, 0);
161} 161}
162 162
163/** 163/**
@@ -198,9 +198,9 @@ static void hns_xgmac_init(void *mac_drv)
198 = (struct dsaf_device *)dev_get_drvdata(drv->dev); 198 = (struct dsaf_device *)dev_get_drvdata(drv->dev);
199 u32 port = drv->mac_id; 199 u32 port = drv->mac_id;
200 200
201 hns_dsaf_xge_srst_by_port(dsaf_dev, port, 0); 201 dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 0);
202 mdelay(100); 202 mdelay(100);
203 hns_dsaf_xge_srst_by_port(dsaf_dev, port, 1); 203 dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 1);
204 204
205 mdelay(100); 205 mdelay(100);
206 hns_xgmac_exc_irq_en(drv, 0); 206 hns_xgmac_exc_irq_en(drv, 0);
@@ -425,7 +425,7 @@ static void hns_xgmac_free(void *mac_drv)
425 425
426 u32 mac_id = drv->mac_id; 426 u32 mac_id = drv->mac_id;
427 427
428 hns_dsaf_xge_srst_by_port(dsaf_dev, mac_id, 0); 428 dsaf_dev->misc_op->xge_srst(dsaf_dev, mac_id, 0);
429} 429}
430 430
431/** 431/**
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index e621636e69b9..d5297ecfe4a5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -132,6 +132,13 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
132 ring_ptr_move_fw(ring, next_to_use); 132 ring_ptr_move_fw(ring, next_to_use);
133} 133}
134 134
135static const struct acpi_device_id hns_enet_acpi_match[] = {
136 { "HISI00C1", 0 },
137 { "HISI00C2", 0 },
138 { },
139};
140MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);
141
135static void fill_desc(struct hnae_ring *ring, void *priv, 142static void fill_desc(struct hnae_ring *ring, void *priv,
136 int size, dma_addr_t dma, int frag_end, 143 int size, dma_addr_t dma, int frag_end,
137 int buf_num, enum hns_desc_type type, int mtu) 144 int buf_num, enum hns_desc_type type, int mtu)
@@ -593,6 +600,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
593 ring->stats.sw_err_cnt++; 600 ring->stats.sw_err_cnt++;
594 return -ENOMEM; 601 return -ENOMEM;
595 } 602 }
603 skb_reset_mac_header(skb);
596 604
597 prefetchw(skb->data); 605 prefetchw(skb->data);
598 length = le16_to_cpu(desc->rx.pkt_len); 606 length = le16_to_cpu(desc->rx.pkt_len);
@@ -760,10 +768,10 @@ recv:
760 clean_count = 0; 768 clean_count = 0;
761 } 769 }
762 770
763 /* poll one pkg*/ 771 /* poll one pkt*/
764 err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum); 772 err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
765 if (unlikely(!skb)) /* this fault cannot be repaired */ 773 if (unlikely(!skb)) /* this fault cannot be repaired */
766 break; 774 goto out;
767 775
768 recv_bds += bnum; 776 recv_bds += bnum;
769 clean_count += bnum; 777 clean_count += bnum;
@@ -789,6 +797,7 @@ recv:
789 } 797 }
790 } 798 }
791 799
800out:
 792 /* make sure all data has been written before submit */ 801 /* make sure all data has been written before submit */
793 if (clean_count > 0) 802 if (clean_count > 0)
794 hns_nic_alloc_rx_buffers(ring_data, clean_count); 803 hns_nic_alloc_rx_buffers(ring_data, clean_count);
@@ -983,8 +992,26 @@ static void hns_nic_adjust_link(struct net_device *ndev)
983{ 992{
984 struct hns_nic_priv *priv = netdev_priv(ndev); 993 struct hns_nic_priv *priv = netdev_priv(ndev);
985 struct hnae_handle *h = priv->ae_handle; 994 struct hnae_handle *h = priv->ae_handle;
995 int state = 1;
996
997 if (priv->phy) {
998 h->dev->ops->adjust_link(h, ndev->phydev->speed,
999 ndev->phydev->duplex);
1000 state = priv->phy->link;
1001 }
1002 state = state && h->dev->ops->get_status(h);
986 1003
987 h->dev->ops->adjust_link(h, ndev->phydev->speed, ndev->phydev->duplex); 1004 if (state != priv->link) {
1005 if (state) {
1006 netif_carrier_on(ndev);
1007 netif_tx_wake_all_queues(ndev);
1008 netdev_info(ndev, "link up\n");
1009 } else {
1010 netif_carrier_off(ndev);
1011 netdev_info(ndev, "link down\n");
1012 }
1013 priv->link = state;
1014 }
988} 1015}
989 1016
990/** 1017/**
@@ -996,19 +1023,22 @@ static void hns_nic_adjust_link(struct net_device *ndev)
996int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) 1023int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
997{ 1024{
998 struct hns_nic_priv *priv = netdev_priv(ndev); 1025 struct hns_nic_priv *priv = netdev_priv(ndev);
999 struct phy_device *phy_dev = NULL; 1026 struct phy_device *phy_dev = h->phy_dev;
1027 int ret;
1000 1028
1001 if (!h->phy_node) 1029 if (!h->phy_dev)
1002 return 0; 1030 return 0;
1003 1031
1004 if (h->phy_if != PHY_INTERFACE_MODE_XGMII) 1032 if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
1005 phy_dev = of_phy_connect(ndev, h->phy_node, 1033 phy_dev->dev_flags = 0;
1006 hns_nic_adjust_link, 0, h->phy_if);
1007 else
1008 phy_dev = of_phy_attach(ndev, h->phy_node, 0, h->phy_if);
1009 1034
1010 if (unlikely(!phy_dev) || IS_ERR(phy_dev)) 1035 ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
1011 return !phy_dev ? -ENODEV : PTR_ERR(phy_dev); 1036 h->phy_if);
1037 } else {
1038 ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
1039 }
1040 if (unlikely(ret))
1041 return -ENODEV;
1012 1042
1013 phy_dev->supported &= h->if_support; 1043 phy_dev->supported &= h->if_support;
1014 phy_dev->advertising = phy_dev->supported; 1044 phy_dev->advertising = phy_dev->supported;
@@ -1067,13 +1097,8 @@ void hns_nic_update_stats(struct net_device *netdev)
1067static void hns_init_mac_addr(struct net_device *ndev) 1097static void hns_init_mac_addr(struct net_device *ndev)
1068{ 1098{
1069 struct hns_nic_priv *priv = netdev_priv(ndev); 1099 struct hns_nic_priv *priv = netdev_priv(ndev);
1070 struct device_node *node = priv->dev->of_node;
1071 const void *mac_addr_temp;
1072 1100
1073 mac_addr_temp = of_get_mac_address(node); 1101 if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
1074 if (mac_addr_temp && is_valid_ether_addr(mac_addr_temp)) {
1075 memcpy(ndev->dev_addr, mac_addr_temp, ndev->addr_len);
1076 } else {
1077 eth_hw_addr_random(ndev); 1102 eth_hw_addr_random(ndev);
1078 dev_warn(priv->dev, "No valid mac, use random mac %pM", 1103 dev_warn(priv->dev, "No valid mac, use random mac %pM",
1079 ndev->dev_addr); 1104 ndev->dev_addr);
@@ -1176,7 +1201,7 @@ static int hns_nic_net_up(struct net_device *ndev)
1176{ 1201{
1177 struct hns_nic_priv *priv = netdev_priv(ndev); 1202 struct hns_nic_priv *priv = netdev_priv(ndev);
1178 struct hnae_handle *h = priv->ae_handle; 1203 struct hnae_handle *h = priv->ae_handle;
1179 int i, j, k; 1204 int i, j;
1180 int ret; 1205 int ret;
1181 1206
1182 ret = hns_nic_init_irq(priv); 1207 ret = hns_nic_init_irq(priv);
@@ -1191,9 +1216,6 @@ static int hns_nic_net_up(struct net_device *ndev)
1191 goto out_has_some_queues; 1216 goto out_has_some_queues;
1192 } 1217 }
1193 1218
1194 for (k = 0; k < h->q_num; k++)
1195 h->dev->ops->toggle_queue_status(h->qs[k], 1);
1196
1197 ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr); 1219 ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
1198 if (ret) 1220 if (ret)
1199 goto out_set_mac_addr_err; 1221 goto out_set_mac_addr_err;
@@ -1213,8 +1235,6 @@ static int hns_nic_net_up(struct net_device *ndev)
1213out_start_err: 1235out_start_err:
1214 netif_stop_queue(ndev); 1236 netif_stop_queue(ndev);
1215out_set_mac_addr_err: 1237out_set_mac_addr_err:
1216 for (k = 0; k < h->q_num; k++)
1217 h->dev->ops->toggle_queue_status(h->qs[k], 0);
1218out_has_some_queues: 1238out_has_some_queues:
1219 for (j = i - 1; j >= 0; j--) 1239 for (j = i - 1; j >= 0; j--)
1220 hns_nic_ring_close(ndev, j); 1240 hns_nic_ring_close(ndev, j);
@@ -1421,7 +1441,6 @@ static int hns_nic_set_features(struct net_device *netdev,
1421 netdev_features_t features) 1441 netdev_features_t features)
1422{ 1442{
1423 struct hns_nic_priv *priv = netdev_priv(netdev); 1443 struct hns_nic_priv *priv = netdev_priv(netdev);
1424 struct hnae_handle *h = priv->ae_handle;
1425 1444
1426 switch (priv->enet_ver) { 1445 switch (priv->enet_ver) {
1427 case AE_VERSION_1: 1446 case AE_VERSION_1:
@@ -1434,11 +1453,9 @@ static int hns_nic_set_features(struct net_device *netdev,
1434 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; 1453 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
1435 /* The chip only supports 7*4096 */ 1454
1436 netif_set_gso_max_size(netdev, 7 * 4096); 1455 netif_set_gso_max_size(netdev, 7 * 4096);
1437 h->dev->ops->set_tso_stats(h, 1);
1438 } else { 1456 } else {
1439 priv->ops.fill_desc = fill_v2_desc; 1457 priv->ops.fill_desc = fill_v2_desc;
1440 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; 1458 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
1441 h->dev->ops->set_tso_stats(h, 0);
1442 } 1459 }
1443 break; 1460 break;
1444 } 1461 }
@@ -1571,27 +1588,14 @@ static void hns_nic_update_link_status(struct net_device *netdev)
1571 struct hns_nic_priv *priv = netdev_priv(netdev); 1588 struct hns_nic_priv *priv = netdev_priv(netdev);
1572 1589
1573 struct hnae_handle *h = priv->ae_handle; 1590 struct hnae_handle *h = priv->ae_handle;
1574 int state = 1;
1575 1591
1576 if (priv->phy) { 1592 if (h->phy_dev) {
1577 if (!genphy_update_link(priv->phy)) 1593 if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
1578 state = priv->phy->link; 1594 return;
1579 else
1580 state = 0;
1581 }
1582 state = state && h->dev->ops->get_status(h);
1583 1595
1584 if (state != priv->link) { 1596 (void)genphy_read_status(h->phy_dev);
1585 if (state) {
1586 netif_carrier_on(netdev);
1587 netif_tx_wake_all_queues(netdev);
1588 netdev_info(netdev, "link up\n");
1589 } else {
1590 netif_carrier_off(netdev);
1591 netdev_info(netdev, "link down\n");
1592 }
1593 priv->link = state;
1594 } 1597 }
1598 hns_nic_adjust_link(netdev);
1595} 1599}
1596 1600
1597/* for dumping key regs*/ 1601/* for dumping key regs*/
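A sketch of the polling split introduced above: a PHY connected through phylib already gets adjust_link callbacks from the phy state machine, so the periodic updater returns early for it; an XGMII PHY is only attached, so its status registers are refreshed by hand before the shared handler runs. The "connected" flag and example_adjust_link are illustrative assumptions:

#include <linux/phy.h>

static void example_update_link(struct net_device *ndev,
				struct phy_device *phy, bool connected)
{
	if (phy) {
		if (connected)
			return;			/* phylib drives adjust_link */
		(void)genphy_read_status(phy);	/* manual refresh */
	}
	example_adjust_link(ndev);		/* hypothetical handler */
}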
@@ -1627,7 +1631,7 @@ static void hns_nic_dump(struct hns_nic_priv *priv)
1627 } 1631 }
1628} 1632}
1629 1633
1630/* for resetting suntask*/ 1634/* for resetting subtask */
1631static void hns_nic_reset_subtask(struct hns_nic_priv *priv) 1635static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
1632{ 1636{
1633 enum hnae_port_type type = priv->ae_handle->port_type; 1637 enum hnae_port_type type = priv->ae_handle->port_type;
@@ -1797,11 +1801,14 @@ static void hns_nic_set_priv_ops(struct net_device *netdev)
1797 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; 1801 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
1798 /* This chip only supports 7*4096 */ 1802
1799 netif_set_gso_max_size(netdev, 7 * 4096); 1803 netif_set_gso_max_size(netdev, 7 * 4096);
1800 h->dev->ops->set_tso_stats(h, 1);
1801 } else { 1804 } else {
1802 priv->ops.fill_desc = fill_v2_desc; 1805 priv->ops.fill_desc = fill_v2_desc;
1803 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; 1806 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
1804 } 1807 }
1808 /* enable TSO at init;
1809 * TSO on/off is then controlled per descriptor via the TSE bit in the BD
1810 */
1811 h->dev->ops->set_tso_stats(h, 1);
1805 } 1812 }
1806} 1813}
1807 1814
@@ -1812,7 +1819,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev)
1812 int ret; 1819 int ret;
1813 1820
1814 h = hnae_get_handle(&priv->netdev->dev, 1821 h = hnae_get_handle(&priv->netdev->dev,
1815 priv->ae_node, priv->port_id, NULL); 1822 priv->fwnode, priv->port_id, NULL);
1816 if (IS_ERR_OR_NULL(h)) { 1823 if (IS_ERR_OR_NULL(h)) {
1817 ret = -ENODEV; 1824 ret = -ENODEV;
1818 dev_dbg(priv->dev, "has not handle, register notifier!\n"); 1825 dev_dbg(priv->dev, "has not handle, register notifier!\n");
@@ -1872,7 +1879,6 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
1872 struct device *dev = &pdev->dev; 1879 struct device *dev = &pdev->dev;
1873 struct net_device *ndev; 1880 struct net_device *ndev;
1874 struct hns_nic_priv *priv; 1881 struct hns_nic_priv *priv;
1875 struct device_node *node = dev->of_node;
1876 u32 port_id; 1882 u32 port_id;
1877 int ret; 1883 int ret;
1878 1884
@@ -1886,22 +1892,49 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
1886 priv->dev = dev; 1892 priv->dev = dev;
1887 priv->netdev = ndev; 1893 priv->netdev = ndev;
1888 1894
1889 if (of_device_is_compatible(node, "hisilicon,hns-nic-v1")) 1895 if (dev_of_node(dev)) {
1890 priv->enet_ver = AE_VERSION_1; 1896 struct device_node *ae_node;
1891 else
1892 priv->enet_ver = AE_VERSION_2;
1893 1897
1894 priv->ae_node = (void *)of_parse_phandle(node, "ae-handle", 0); 1898 if (of_device_is_compatible(dev->of_node,
1895 if (IS_ERR_OR_NULL(priv->ae_node)) { 1899 "hisilicon,hns-nic-v1"))
1896 ret = PTR_ERR(priv->ae_node); 1900 priv->enet_ver = AE_VERSION_1;
1897 dev_err(dev, "not find ae-handle\n"); 1901 else
1898 goto out_read_prop_fail; 1902 priv->enet_ver = AE_VERSION_2;
1903
1904 ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
1905 if (IS_ERR_OR_NULL(ae_node)) {
1906 ret = PTR_ERR(ae_node);
1907 dev_err(dev, "not find ae-handle\n");
1908 goto out_read_prop_fail;
1909 }
1910 priv->fwnode = &ae_node->fwnode;
1911 } else if (is_acpi_node(dev->fwnode)) {
1912 struct acpi_reference_args args;
1913
1914 if (acpi_dev_found(hns_enet_acpi_match[0].id))
1915 priv->enet_ver = AE_VERSION_1;
1916 else if (acpi_dev_found(hns_enet_acpi_match[1].id))
1917 priv->enet_ver = AE_VERSION_2;
1918 else
1919 return -ENXIO;
1920
1921 /* try to find port-idx-in-ae first */
1922 ret = acpi_node_get_property_reference(dev->fwnode,
1923 "ae-handle", 0, &args);
1924 if (ret) {
1925 dev_err(dev, "not find ae-handle\n");
1926 goto out_read_prop_fail;
1927 }
1928 priv->fwnode = acpi_fwnode_handle(args.adev);
1929 } else {
1930 dev_err(dev, "cannot read cfg data from OF or acpi\n");
1931 return -ENXIO;
1899 } 1932 }
1900 /* try to find port-idx-in-ae first */ 1933
1901 ret = of_property_read_u32(node, "port-idx-in-ae", &port_id); 1934 ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
1902 if (ret) { 1935 if (ret) {
1903 /* only for old code compatibility */ 1936
1904 ret = of_property_read_u32(node, "port-id", &port_id); 1937 ret = device_property_read_u32(dev, "port-id", &port_id);
1905 if (ret) 1938 if (ret)
1906 goto out_read_prop_fail; 1939 goto out_read_prop_fail;
1907 /* for old dts, we need to calculate the port offset */ 1940
@@ -1940,7 +1973,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
1940 if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) 1973 if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
1941 dev_dbg(dev, "set mask to 64bit\n"); 1974 dev_dbg(dev, "set mask to 64bit\n");
1942 else 1975 else
1943 dev_err(dev, "set mask to 32bit fail!\n"); 1976 dev_err(dev, "set mask to 64bit fail!\n");
1944 1977
1945 /* carrier off reporting is important to ethtool even BEFORE open */ 1978 /* carrier off reporting is important to ethtool even BEFORE open */
1946 netif_carrier_off(ndev); 1979 netif_carrier_off(ndev);
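The probe rework keys everything off fwnode handles: device_property_read_u32() works for both firmware types, and only the "ae-handle" reference lookup differs. A sketch of that split, using the same calls as the hunk above:

#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/property.h>

static int example_get_ae_fwnode(struct device *dev,
				 struct fwnode_handle **fwnode)
{
	if (dev_of_node(dev)) {
		struct device_node *np = of_parse_phandle(dev->of_node,
							  "ae-handle", 0);
		if (!np)
			return -ENODEV;
		*fwnode = &np->fwnode;
	} else if (is_acpi_node(dev->fwnode)) {
		struct acpi_reference_args args;
		int ret;

		ret = acpi_node_get_property_reference(dev->fwnode,
						       "ae-handle", 0, &args);
		if (ret)
			return ret;
		*fwnode = acpi_fwnode_handle(args.adev);
	} else {
		return -ENXIO;	/* neither DT nor ACPI described us */
	}
	return 0;
}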
@@ -2014,6 +2047,7 @@ static struct platform_driver hns_nic_dev_driver = {
2014 .driver = { 2047 .driver = {
2015 .name = "hns-nic", 2048 .name = "hns-nic",
2016 .of_match_table = hns_enet_of_match, 2049 .of_match_table = hns_enet_of_match,
2050 .acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
2017 }, 2051 },
2018 .probe = hns_nic_dev_probe, 2052 .probe = hns_nic_dev_probe,
2019 .remove = hns_nic_dev_remove, 2053 .remove = hns_nic_dev_remove,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index 337efa582bac..44bb3015eed3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -54,7 +54,7 @@ struct hns_nic_ops {
54}; 54};
55 55
56struct hns_nic_priv { 56struct hns_nic_priv {
57 const struct device_node *ae_node; 57 const struct fwnode_handle *fwnode;
58 u32 enet_ver; 58 u32 enet_ver;
59 u32 port_id; 59 u32 port_id;
60 int phy_mode; 60 int phy_mode;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 67a648c7d3a9..a395ca1405c3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -49,7 +49,7 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
49 h = priv->ae_handle; 49 h = priv->ae_handle;
50 50
51 if (priv->phy) { 51 if (priv->phy) {
52 if (!genphy_update_link(priv->phy)) 52 if (!genphy_read_status(priv->phy))
53 link_stat = priv->phy->link; 53 link_stat = priv->phy->link;
54 else 54 else
55 link_stat = 0; 55 link_stat = 0;
@@ -242,6 +242,7 @@ static const char hns_nic_test_strs[][ETH_GSTRING_LEN] = {
242static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en) 242static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
243{ 243{
244#define COPPER_CONTROL_REG 0 244#define COPPER_CONTROL_REG 0
245#define PHY_POWER_DOWN BIT(11)
245#define PHY_LOOP_BACK BIT(14) 246#define PHY_LOOP_BACK BIT(14)
246 u16 val = 0; 247 u16 val = 0;
247 248
@@ -252,33 +253,40 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
252 /* speed : 1000M */ 253 /* speed : 1000M */
253 phy_write(phy_dev, HNS_PHY_PAGE_REG, 2); 254 phy_write(phy_dev, HNS_PHY_PAGE_REG, 2);
254 phy_write(phy_dev, 21, 0x1046); 255 phy_write(phy_dev, 21, 0x1046);
256
257 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
255 /* Force Master */ 258 /* Force Master */
256 phy_write(phy_dev, 9, 0x1F00); 259 phy_write(phy_dev, 9, 0x1F00);
260
257 /* Soft-reset */ 261 /* Soft-reset */
258 phy_write(phy_dev, 0, 0x9140); 262 phy_write(phy_dev, 0, 0x9140);
259 /* If autoneg disabled, two soft-reset operations */ 263
260 phy_write(phy_dev, 0, 0x9140); 264 phy_write(phy_dev, 0, 0x9140);
261 phy_write(phy_dev, 22, 0xFA); 265
266 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);
262 267
263 /* Default is 0x0400 */ 268 /* Default is 0x0400 */
264 phy_write(phy_dev, 1, 0x418); 269 phy_write(phy_dev, 1, 0x418);
265 270
266 /* Force 1000M Link, Default is 0x0200 */ 271 /* Force 1000M Link, Default is 0x0200 */
267 phy_write(phy_dev, 7, 0x20C); 272 phy_write(phy_dev, 7, 0x20C);
268 phy_write(phy_dev, 22, 0); 273 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
269 274
270 /* Enable MAC loop-back */ 275 /* Enable PHY loop-back */
271 val = phy_read(phy_dev, COPPER_CONTROL_REG); 276 val = phy_read(phy_dev, COPPER_CONTROL_REG);
272 val |= PHY_LOOP_BACK; 277 val |= PHY_LOOP_BACK;
278 val &= ~PHY_POWER_DOWN;
273 phy_write(phy_dev, COPPER_CONTROL_REG, val); 279 phy_write(phy_dev, COPPER_CONTROL_REG, val);
274 } else { 280 } else {
275 phy_write(phy_dev, 22, 0xFA); 281 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);
276 phy_write(phy_dev, 1, 0x400); 282 phy_write(phy_dev, 1, 0x400);
277 phy_write(phy_dev, 7, 0x200); 283 phy_write(phy_dev, 7, 0x200);
278 phy_write(phy_dev, 22, 0); 284 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
285 phy_write(phy_dev, 9, 0xF00);
279 286
280 val = phy_read(phy_dev, COPPER_CONTROL_REG); 287 val = phy_read(phy_dev, COPPER_CONTROL_REG);
281 val &= ~PHY_LOOP_BACK; 288 val &= ~PHY_LOOP_BACK;
289 val |= PHY_POWER_DOWN;
282 phy_write(phy_dev, COPPER_CONTROL_REG, val); 290 phy_write(phy_dev, COPPER_CONTROL_REG, val);
283 } 291 }
284 return 0; 292 return 0;
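The loopback fix routes every page switch through the page-select register and pairs loopback with the power-down bit. A sketch of the paged-access pattern, with the EX_* macros standing in for the driver's HNS_PHY_PAGE_REG/COPPER_CONTROL_REG/PHY_LOOP_BACK/PHY_POWER_DOWN definitions:

#include <linux/bitops.h>
#include <linux/phy.h>

#define EX_PAGE_REG	22		/* page-select register */
#define EX_COPPER_CTRL	0		/* copper control register */
#define EX_LOOP_BACK	BIT(14)
#define EX_POWER_DOWN	BIT(11)

static void example_phy_loopback(struct phy_device *phy, bool en)
{
	int val;

	phy_write(phy, EX_PAGE_REG, 0);	/* always land back on page 0 */
	val = phy_read(phy, EX_COPPER_CTRL);
	if (en) {
		val |= EX_LOOP_BACK;
		val &= ~EX_POWER_DOWN;	/* loopback needs the PHY powered */
	} else {
		val &= ~EX_LOOP_BACK;
		val |= EX_POWER_DOWN;
	}
	phy_write(phy, EX_COPPER_CTRL, val);
}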
@@ -339,28 +347,16 @@ static int __lb_up(struct net_device *ndev,
339 347
340 hns_nic_net_reset(ndev); 348 hns_nic_net_reset(ndev);
341 349
342 if (priv->phy) {
343 phy_disconnect(priv->phy);
344 msleep(100);
345
346 ret = hns_nic_init_phy(ndev, h);
347 if (ret)
348 return ret;
349 }
350
351 ret = __lb_setup(ndev, loop_mode); 350 ret = __lb_setup(ndev, loop_mode);
352 if (ret) 351 if (ret)
353 return ret; 352 return ret;
354 353
355 msleep(100); 354 msleep(200);
356 355
357 ret = h->dev->ops->start ? h->dev->ops->start(h) : 0; 356 ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
358 if (ret) 357 if (ret)
359 return ret; 358 return ret;
360 359
361 if (priv->phy)
362 phy_start(priv->phy);
363
364 /* link adjust duplex */ 360 /* link adjust duplex */
365 if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII) 361 if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
366 speed = 1000; 362 speed = 1000;
@@ -561,9 +557,6 @@ static int __lb_down(struct net_device *ndev)
561 __func__, 557 __func__,
562 ret); 558 ret);
563 559
564 if (priv->phy)
565 phy_stop(priv->phy);
566
567 if (h->dev->ops->stop) 560 if (h->dev->ops->stop)
568 h->dev->ops->stop(h); 561 h->dev->ops->stop(h);
569 562
@@ -596,7 +589,7 @@ static void hns_nic_self_test(struct net_device *ndev,
596 st_param[1][0] = MAC_INTERNALLOOP_SERDES; 589 st_param[1][0] = MAC_INTERNALLOOP_SERDES;
597 st_param[1][1] = 1; /*serdes must exist*/ 590 st_param[1][1] = 1; /*serdes must exist*/
598 st_param[2][0] = MAC_INTERNALLOOP_PHY; /* only supported with a phy node */ 591
599 st_param[2][1] = ((!!(priv->ae_handle->phy_node)) && 592 st_param[2][1] = ((!!(priv->ae_handle->phy_dev)) &&
600 (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)); 593 (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII));
601 594
602 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 595 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
@@ -758,6 +751,16 @@ static int hns_get_coalesce(struct net_device *net_dev,
758 &ec->tx_max_coalesced_frames, 751 &ec->tx_max_coalesced_frames,
759 &ec->rx_max_coalesced_frames); 752 &ec->rx_max_coalesced_frames);
760 753
754 ops->get_coalesce_range(priv->ae_handle,
755 &ec->tx_max_coalesced_frames_low,
756 &ec->rx_max_coalesced_frames_low,
757 &ec->tx_max_coalesced_frames_high,
758 &ec->rx_max_coalesced_frames_high,
759 &ec->tx_coalesce_usecs_low,
760 &ec->rx_coalesce_usecs_low,
761 &ec->tx_coalesce_usecs_high,
762 &ec->rx_coalesce_usecs_high);
763
761 return 0; 764 return 0;
762} 765}
763 766
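The new get_coalesce_range callback lets the AE layer report its supported bounds; ethtool exposes them through the *_low/*_high members of struct ethtool_coalesce (visible with "ethtool -c <dev>"). A sketch with illustrative numbers, not hardware limits:

#include <linux/ethtool.h>

static void example_fill_coalesce_range(struct ethtool_coalesce *ec)
{
	ec->rx_coalesce_usecs_low = 0;
	ec->rx_coalesce_usecs_high = 1000;
	ec->rx_max_coalesced_frames_low = 1;
	ec->rx_max_coalesced_frames_high = 256;
	ec->tx_coalesce_usecs_low = 0;
	ec->tx_coalesce_usecs_high = 1000;
	ec->tx_max_coalesced_frames_low = 1;
	ec->tx_max_coalesced_frames_high = 256;
}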
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 765ddb3dcd1a..761a32fceceb 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -7,6 +7,7 @@
7 * (at your option) any later version. 7 * (at your option) any later version.
8 */ 8 */
9 9
10#include <linux/acpi.h>
10#include <linux/errno.h> 11#include <linux/errno.h>
11#include <linux/etherdevice.h> 12#include <linux/etherdevice.h>
12#include <linux/init.h> 13#include <linux/init.h>
@@ -354,67 +355,64 @@ static int hns_mdio_reset(struct mii_bus *bus)
354 struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv; 355 struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
355 int ret; 356 int ret;
356 357
357 if (!mdio_dev->subctrl_vbase) { 358 if (dev_of_node(bus->parent)) {
358 dev_err(&bus->dev, "mdio sys ctl reg has not maped\n"); 359 if (!mdio_dev->subctrl_vbase) {
359 return -ENODEV; 360 dev_err(&bus->dev, "mdio sys ctl reg has not maped\n");
360 } 361 return -ENODEV;
361 362 }
362 /*1. reset req, and read reset st check*/
363 ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_REQ, 0x1,
364 MDIO_SC_RESET_ST, 0x1,
365 MDIO_CHECK_SET_ST);
366 if (ret) {
367 dev_err(&bus->dev, "MDIO reset fail\n");
368 return ret;
369 }
370 363
371 /*2. dis clk, and read clk st check*/ 364 /* 1. reset req, and read reset st check */
372 ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_DIS, 365 ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_REQ, 0x1,
373 0x1, MDIO_SC_CLK_ST, 0x1, 366 MDIO_SC_RESET_ST, 0x1,
374 MDIO_CHECK_CLR_ST); 367 MDIO_CHECK_SET_ST);
375 if (ret) { 368 if (ret) {
376 dev_err(&bus->dev, "MDIO dis clk fail\n"); 369 dev_err(&bus->dev, "MDIO reset fail\n");
377 return ret; 370 return ret;
378 } 371 }
379 372
380 /*3. reset dreq, and read reset st check*/ 373 /* 2. dis clk, and read clk st check */
381 ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_DREQ, 0x1, 374 ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_DIS,
382 MDIO_SC_RESET_ST, 0x1, 375 0x1, MDIO_SC_CLK_ST, 0x1,
383 MDIO_CHECK_CLR_ST); 376 MDIO_CHECK_CLR_ST);
384 if (ret) { 377 if (ret) {
385 dev_err(&bus->dev, "MDIO dis clk fail\n"); 378 dev_err(&bus->dev, "MDIO dis clk fail\n");
386 return ret; 379 return ret;
387 } 380 }
388 381
389 /*4. en clk, and read clk st check*/ 382 /* 3. reset dreq, and read reset st check */
390 ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_EN, 383 ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_DREQ, 0x1,
391 0x1, MDIO_SC_CLK_ST, 0x1, 384 MDIO_SC_RESET_ST, 0x1,
392 MDIO_CHECK_SET_ST); 385 MDIO_CHECK_CLR_ST);
393 if (ret) 386 if (ret) {
394 dev_err(&bus->dev, "MDIO en clk fail\n"); 387 dev_err(&bus->dev, "MDIO dis clk fail\n");
388 return ret;
389 }
395 390
391 /* 4. en clk, and read clk st check */
392 ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_EN,
393 0x1, MDIO_SC_CLK_ST, 0x1,
394 MDIO_CHECK_SET_ST);
395 if (ret)
396 dev_err(&bus->dev, "MDIO en clk fail\n");
397 } else if (is_acpi_node(bus->parent->fwnode)) {
398 acpi_status s;
399
400 s = acpi_evaluate_object(ACPI_HANDLE(bus->parent),
401 "_RST", NULL, NULL);
402 if (ACPI_FAILURE(s)) {
403 dev_err(&bus->dev, "Reset failed, return:%#x\n", s);
404 ret = -EBUSY;
405 } else {
406 ret = 0;
407 }
408 } else {
409 dev_err(&bus->dev, "Can not get cfg data from DT or ACPI\n");
410 ret = -ENXIO;
411 }
396 return ret; 412 return ret;
397} 413}
398 414
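Under ACPI there is no syscon regmap to drive the reset sequence, so the hunk above delegates to the device's _RST method. A minimal sketch of that call:

#include <linux/acpi.h>

static int example_acpi_mdio_reset(struct device *dev)
{
	acpi_status s;

	/* fire the firmware-provided reset method */
	s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL);
	return ACPI_FAILURE(s) ? -EBUSY : 0;
}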
399/** 415/**
400 * hns_mdio_bus_name - get mdio bus name
401 * @name: mdio bus name
402 * @np: mdio device node pointer
403 */
404static void hns_mdio_bus_name(char *name, struct device_node *np)
405{
406 const u32 *addr;
407 u64 taddr = OF_BAD_ADDR;
408
409 addr = of_get_address(np, 0, NULL, NULL);
410 if (addr)
411 taddr = of_translate_address(np, addr);
412
413 snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
414 (unsigned long long)taddr);
415}
416
417/**
418 * hns_mdio_probe - probe mdio device 416 * hns_mdio_probe - probe mdio device
419 * @pdev: mdio platform device 417 * @pdev: mdio platform device
420 * 418 *
@@ -422,17 +420,16 @@ static void hns_mdio_bus_name(char *name, struct device_node *np)
422 */ 420 */
423static int hns_mdio_probe(struct platform_device *pdev) 421static int hns_mdio_probe(struct platform_device *pdev)
424{ 422{
425 struct device_node *np;
426 struct hns_mdio_device *mdio_dev; 423 struct hns_mdio_device *mdio_dev;
427 struct mii_bus *new_bus; 424 struct mii_bus *new_bus;
428 struct resource *res; 425 struct resource *res;
429 int ret; 426 int ret = -ENODEV;
430 427
431 if (!pdev) { 428 if (!pdev) {
432 dev_err(NULL, "pdev is NULL!\r\n"); 429 dev_err(NULL, "pdev is NULL!\r\n");
433 return -ENODEV; 430 return -ENODEV;
434 } 431 }
435 np = pdev->dev.of_node; 432
436 mdio_dev = devm_kzalloc(&pdev->dev, sizeof(*mdio_dev), GFP_KERNEL); 433 mdio_dev = devm_kzalloc(&pdev->dev, sizeof(*mdio_dev), GFP_KERNEL);
437 if (!mdio_dev) 434 if (!mdio_dev)
438 return -ENOMEM; 435 return -ENOMEM;
@@ -448,7 +445,7 @@ static int hns_mdio_probe(struct platform_device *pdev)
448 new_bus->write = hns_mdio_write; 445 new_bus->write = hns_mdio_write;
449 new_bus->reset = hns_mdio_reset; 446 new_bus->reset = hns_mdio_reset;
450 new_bus->priv = mdio_dev; 447 new_bus->priv = mdio_dev;
451 hns_mdio_bus_name(new_bus->id, np); 448 new_bus->parent = &pdev->dev;
452 449
453 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 450 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
454 mdio_dev->vbase = devm_ioremap_resource(&pdev->dev, res); 451 mdio_dev->vbase = devm_ioremap_resource(&pdev->dev, res);
@@ -457,16 +454,32 @@ static int hns_mdio_probe(struct platform_device *pdev)
457 return ret; 454 return ret;
458 } 455 }
459 456
460 mdio_dev->subctrl_vbase =
461 syscon_node_to_regmap(of_parse_phandle(np, "subctrl-vbase", 0));
462 if (IS_ERR(mdio_dev->subctrl_vbase)) {
463 dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n");
464 mdio_dev->subctrl_vbase = NULL;
465 }
466 new_bus->parent = &pdev->dev;
467 platform_set_drvdata(pdev, new_bus); 457 platform_set_drvdata(pdev, new_bus);
458 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%s", "Mii",
459 dev_name(&pdev->dev));
460 if (dev_of_node(&pdev->dev)) {
461 mdio_dev->subctrl_vbase = syscon_node_to_regmap(
462 of_parse_phandle(pdev->dev.of_node,
463 "subctrl-vbase", 0));
464 if (IS_ERR(mdio_dev->subctrl_vbase)) {
465 dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n");
466 mdio_dev->subctrl_vbase = NULL;
467 }
468 ret = of_mdiobus_register(new_bus, pdev->dev.of_node);
469 } else if (is_acpi_node(pdev->dev.fwnode)) {
470 /* Clear all the IRQ properties */
471 memset(new_bus->irq, PHY_POLL, 4 * PHY_MAX_ADDR);
472
473 /* Mask out all PHYs from auto probing. */
474 new_bus->phy_mask = ~0;
475
476 /* Register the MDIO bus */
477 ret = mdiobus_register(new_bus);
478 } else {
479 dev_err(&pdev->dev, "Can not get cfg data from DT or ACPI\n");
480 ret = -ENXIO;
481 }
468 482
469 ret = of_mdiobus_register(new_bus, np);
470 if (ret) { 483 if (ret) {
471 dev_err(&pdev->dev, "Cannot register as MDIO bus!\n"); 484 dev_err(&pdev->dev, "Cannot register as MDIO bus!\n");
472 platform_set_drvdata(pdev, NULL); 485 platform_set_drvdata(pdev, NULL);
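A sketch of the registration split above: a DT bus enumerates its PHYs from child nodes via of_mdiobus_register(), while an ACPI bus is registered bare, with auto-probing masked off and every address set to polling (PHY_POLL memsets cleanly because it is -1):

#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/string.h>

static int example_register_mdio(struct mii_bus *bus, struct device *dev)
{
	if (dev_of_node(dev))
		return of_mdiobus_register(bus, dev->of_node);

	memset(bus->irq, PHY_POLL, sizeof(int) * PHY_MAX_ADDR);
	bus->phy_mask = ~0;	/* suppress the automatic PHY scan */
	return mdiobus_register(bus);
}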
@@ -499,12 +512,19 @@ static const struct of_device_id hns_mdio_match[] = {
499 {} 512 {}
500}; 513};
501 514
515static const struct acpi_device_id hns_mdio_acpi_match[] = {
516 { "HISI0141", 0 },
517 { },
518};
519MODULE_DEVICE_TABLE(acpi, hns_mdio_acpi_match);
520
502static struct platform_driver hns_mdio_driver = { 521static struct platform_driver hns_mdio_driver = {
503 .probe = hns_mdio_probe, 522 .probe = hns_mdio_probe,
504 .remove = hns_mdio_remove, 523 .remove = hns_mdio_remove,
505 .driver = { 524 .driver = {
506 .name = MDIO_DRV_NAME, 525 .name = MDIO_DRV_NAME,
507 .of_match_table = hns_mdio_match, 526 .of_match_table = hns_mdio_match,
527 .acpi_match_table = ACPI_PTR(hns_mdio_acpi_match),
508 }, 528 },
509}; 529};
510 530
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 714bd1014ddb..c0e17433f623 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -167,17 +167,6 @@ config IXGBE
167 To compile this driver as a module, choose M here. The module 167 To compile this driver as a module, choose M here. The module
168 will be called ixgbe. 168 will be called ixgbe.
169 169
170config IXGBE_VXLAN
171 bool "Virtual eXtensible Local Area Network Support"
172 default n
173 depends on IXGBE && VXLAN && !(IXGBE=y && VXLAN=m)
174 ---help---
175 This allows one to create VXLAN virtual interfaces that provide
176 Layer 2 Networks over Layer 3 Networks. VXLAN is often used
177 to tunnel virtual network infrastructure in virtualized environments.
178 Say Y here if you want to use Virtual eXtensible Local Area Network
179 (VXLAN) in the driver.
180
181config IXGBE_HWMON 170config IXGBE_HWMON
182 bool "Intel(R) 10GbE PCI Express adapters HWMON support" 171 bool "Intel(R) 10GbE PCI Express adapters HWMON support"
183 default y 172 default y
@@ -236,27 +225,6 @@ config I40E
236 To compile this driver as a module, choose M here. The module 225 To compile this driver as a module, choose M here. The module
237 will be called i40e. 226 will be called i40e.
238 227
239config I40E_VXLAN
240 bool "Virtual eXtensible Local Area Network Support"
241 default n
242 depends on I40E && VXLAN && !(I40E=y && VXLAN=m)
243 ---help---
244 This allows one to create VXLAN virtual interfaces that provide
245 Layer 2 Networks over Layer 3 Networks. VXLAN is often used
246 to tunnel virtual network infrastructure in virtualized environments.
247 Say Y here if you want to use Virtual eXtensible Local Area Network
248 (VXLAN) in the driver.
249
250config I40E_GENEVE
251 bool "Generic Network Virtualization Encapsulation (GENEVE) Support"
252 depends on I40E && GENEVE && !(I40E=y && GENEVE=m)
253 default n
254 ---help---
255 This allows one to create GENEVE virtual interfaces that provide
256 Layer 2 Networks over Layer 3 Networks. GENEVE is often used
257 to tunnel virtual network infrastructure in virtualized environments.
258 Say Y here if you want to use GENEVE in the driver.
259
260config I40E_DCB 228config I40E_DCB
261 bool "Data Center Bridging (DCB) Support" 229 bool "Data Center Bridging (DCB) Support"
262 default n 230 default n
@@ -307,15 +275,4 @@ config FM10K
307 To compile this driver as a module, choose M here. The module 275 To compile this driver as a module, choose M here. The module
308 will be called fm10k. MSI-X interrupt support is required 276 will be called fm10k. MSI-X interrupt support is required
309 277
310config FM10K_VXLAN
311 bool "Virtual eXtensible Local Area Network Support"
312 default n
313 depends on FM10K && VXLAN && !(FM10K=y && VXLAN=m)
314 ---help---
315 This allows one to create VXLAN virtual interfaces that provide
316 Layer 2 Networks over Layer 3 Networks. VXLAN is often used
317 to tunnel virtual network infrastructure in virtualized environments.
318 Say Y here if you want to use Virtual eXtensible Local Area Network
319 (VXLAN) in the driver.
320
321endif # NET_VENDOR_INTEL 278endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 2a08d3f5b6df..d00cb193da9a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -20,9 +20,7 @@
20 20
21#include "fm10k.h" 21#include "fm10k.h"
22#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
23#ifdef CONFIG_FM10K_VXLAN 23#include <net/udp_tunnel.h>
24#include <net/vxlan.h>
25#endif /* CONFIG_FM10K_VXLAN */
26 24
27/** 25/**
28 * fm10k_setup_tx_resources - allocate Tx resources (Descriptors) 26 * fm10k_setup_tx_resources - allocate Tx resources (Descriptors)
@@ -436,6 +434,7 @@ static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface)
436 * @netdev: network interface device structure 434 * @netdev: network interface device structure
437 * @sa_family: Address family of new port 435 * @sa_family: Address family of new port
438 * @port: port number used for VXLAN 436 * @port: port number used for VXLAN
437 * @type: Enumerated value specifying udp encapsulation type
439 * 438 *
440 * This function is called when a new VXLAN interface has added a new port 439 * This function is called when a new VXLAN interface has added a new port
441 * number to the range that is currently in use for VXLAN. The new port 440 * number to the range that is currently in use for VXLAN. The new port
@@ -444,18 +443,21 @@ static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface)
444 * is always used as the VXLAN port number for offloads. 443 * is always used as the VXLAN port number for offloads.
445 **/ 444 **/
446static void fm10k_add_vxlan_port(struct net_device *dev, 445static void fm10k_add_vxlan_port(struct net_device *dev,
447 sa_family_t sa_family, __be16 port) { 446 struct udp_tunnel_info *ti)
447{
448 struct fm10k_intfc *interface = netdev_priv(dev); 448 struct fm10k_intfc *interface = netdev_priv(dev);
449 struct fm10k_vxlan_port *vxlan_port; 449 struct fm10k_vxlan_port *vxlan_port;
450 450
451 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
452 return;
451 /* only the PF supports configuring tunnels */ 453 /* only the PF supports configuring tunnels */
452 if (interface->hw.mac.type != fm10k_mac_pf) 454 if (interface->hw.mac.type != fm10k_mac_pf)
453 return; 455 return;
454 456
455 /* existing ports are pulled out so our new entry is always last */ 457 /* existing ports are pulled out so our new entry is always last */
456 fm10k_vxlan_port_for_each(vxlan_port, interface) { 458 fm10k_vxlan_port_for_each(vxlan_port, interface) {
457 if ((vxlan_port->port == port) && 459 if ((vxlan_port->port == ti->port) &&
458 (vxlan_port->sa_family == sa_family)) { 460 (vxlan_port->sa_family == ti->sa_family)) {
459 list_del(&vxlan_port->list); 461 list_del(&vxlan_port->list);
460 goto insert_tail; 462 goto insert_tail;
461 } 463 }
@@ -465,8 +467,8 @@ static void fm10k_add_vxlan_port(struct net_device *dev,
465 vxlan_port = kmalloc(sizeof(*vxlan_port), GFP_ATOMIC); 467 vxlan_port = kmalloc(sizeof(*vxlan_port), GFP_ATOMIC);
466 if (!vxlan_port) 468 if (!vxlan_port)
467 return; 469 return;
468 vxlan_port->port = port; 470 vxlan_port->port = ti->port;
469 vxlan_port->sa_family = sa_family; 471 vxlan_port->sa_family = ti->sa_family;
470 472
471insert_tail: 473insert_tail:
472 /* add new port value to list */ 474 /* add new port value to list */
@@ -480,6 +482,7 @@ insert_tail:
480 * @netdev: network interface device structure 482 * @netdev: network interface device structure
481 * @sa_family: Address family of freed port 483 * @sa_family: Address family of freed port
482 * @port: port number used for VXLAN 484 * @port: port number used for VXLAN
485 * @type: Enumerated value specifying udp encapsulation type
483 * 486 *
484 * This function is called when a new VXLAN interface has freed a port 487 * This function is called when a new VXLAN interface has freed a port
485 * number from the range that is currently in use for VXLAN. The freed 488 * number from the range that is currently in use for VXLAN. The freed
@@ -487,17 +490,20 @@ insert_tail:
487 * the port number for offloads. 490 * the port number for offloads.
488 **/ 491 **/
489static void fm10k_del_vxlan_port(struct net_device *dev, 492static void fm10k_del_vxlan_port(struct net_device *dev,
490 sa_family_t sa_family, __be16 port) { 493 struct udp_tunnel_info *ti)
494{
491 struct fm10k_intfc *interface = netdev_priv(dev); 495 struct fm10k_intfc *interface = netdev_priv(dev);
492 struct fm10k_vxlan_port *vxlan_port; 496 struct fm10k_vxlan_port *vxlan_port;
493 497
498 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
499 return;
494 if (interface->hw.mac.type != fm10k_mac_pf) 500 if (interface->hw.mac.type != fm10k_mac_pf)
495 return; 501 return;
496 502
497 /* find the port in the list and free it */ 503 /* find the port in the list and free it */
498 fm10k_vxlan_port_for_each(vxlan_port, interface) { 504 fm10k_vxlan_port_for_each(vxlan_port, interface) {
499 if ((vxlan_port->port == port) && 505 if ((vxlan_port->port == ti->port) &&
500 (vxlan_port->sa_family == sa_family)) { 506 (vxlan_port->sa_family == ti->sa_family)) {
501 list_del(&vxlan_port->list); 507 list_del(&vxlan_port->list);
502 kfree(vxlan_port); 508 kfree(vxlan_port);
503 break; 509 break;
@@ -553,10 +559,8 @@ int fm10k_open(struct net_device *netdev)
553 if (err) 559 if (err)
554 goto err_set_queues; 560 goto err_set_queues;
555 561
556#ifdef CONFIG_FM10K_VXLAN
557 /* update VXLAN port configuration */ 562 /* update VXLAN port configuration */
558 vxlan_get_rx_port(netdev); 563 udp_tunnel_get_rx_info(netdev);
559#endif
560 564
561 fm10k_up(interface); 565 fm10k_up(interface);
562 566
@@ -1375,8 +1379,8 @@ static const struct net_device_ops fm10k_netdev_ops = {
1375 .ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan, 1379 .ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan,
1376 .ndo_set_vf_rate = fm10k_ndo_set_vf_bw, 1380 .ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
1377 .ndo_get_vf_config = fm10k_ndo_get_vf_config, 1381 .ndo_get_vf_config = fm10k_ndo_get_vf_config,
1378 .ndo_add_vxlan_port = fm10k_add_vxlan_port, 1382 .ndo_udp_tunnel_add = fm10k_add_vxlan_port,
1379 .ndo_del_vxlan_port = fm10k_del_vxlan_port, 1383 .ndo_udp_tunnel_del = fm10k_del_vxlan_port,
1380 .ndo_dfwd_add_station = fm10k_dfwd_add_station, 1384 .ndo_dfwd_add_station = fm10k_dfwd_add_station,
1381 .ndo_dfwd_del_station = fm10k_dfwd_del_station, 1385 .ndo_dfwd_del_station = fm10k_dfwd_del_station,
1382#ifdef CONFIG_NET_POLL_CONTROLLER 1386#ifdef CONFIG_NET_POLL_CONTROLLER
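The ndo_udp_tunnel_add/del pair replaces the per-protocol vxlan/geneve hooks; the tunnel type, address family and port all arrive in struct udp_tunnel_info, so a VXLAN-only device just filters on ti->type. A sketch, where example_program_port() is a hypothetical hardware helper:

#include <linux/netdevice.h>
#include <net/udp_tunnel.h>

static void example_program_port(struct net_device *dev,
				 sa_family_t family, __be16 port);

static void example_udp_tunnel_add(struct net_device *dev,
				   struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;		/* this hardware offloads VXLAN only */
	example_program_port(dev, ti->sa_family, ti->port);
}

At open time, udp_tunnel_get_rx_info(netdev) replays every live tunnel socket through this callback, which is what replaces the old vxlan_get_rx_port() call in fm10k_open() above.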
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 9c44739da5e2..e83fc8afb30f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -283,6 +283,7 @@ struct i40e_pf {
283#endif /* I40E_FCOE */ 283#endif /* I40E_FCOE */
284 u16 num_lan_qps; /* num lan queues this PF has set up */ 284 u16 num_lan_qps; /* num lan queues this PF has set up */
285 u16 num_lan_msix; /* num queue vectors for the base PF vsi */ 285 u16 num_lan_msix; /* num queue vectors for the base PF vsi */
286 u16 num_fdsb_msix; /* num queue vectors for sideband Fdir */
286 u16 num_iwarp_msix; /* num of iwarp vectors for this PF */ 287 u16 num_iwarp_msix; /* num of iwarp vectors for this PF */
287 int iwarp_base_vector; 288 int iwarp_base_vector;
288 int queues_left; /* queues left unclaimed */ 289 int queues_left; /* queues left unclaimed */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 422b41d61c9a..e447dc435464 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1967,6 +1967,62 @@ aq_add_vsi_exit:
1967} 1967}
1968 1968
1969/** 1969/**
1970 * i40e_aq_set_default_vsi
1971 * @hw: pointer to the hw struct
1972 * @seid: vsi number
1973 * @cmd_details: pointer to command details structure or NULL
1974 **/
1975i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
1976 u16 seid,
1977 struct i40e_asq_cmd_details *cmd_details)
1978{
1979 struct i40e_aq_desc desc;
1980 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1981 (struct i40e_aqc_set_vsi_promiscuous_modes *)
1982 &desc.params.raw;
1983 i40e_status status;
1984
1985 i40e_fill_default_direct_cmd_desc(&desc,
1986 i40e_aqc_opc_set_vsi_promiscuous_modes);
1987
1988 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1989 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1990 cmd->seid = cpu_to_le16(seid);
1991
1992 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1993
1994 return status;
1995}
1996
1997/**
1998 * i40e_aq_clear_default_vsi
1999 * @hw: pointer to the hw struct
2000 * @seid: vsi number
2001 * @cmd_details: pointer to command details structure or NULL
2002 **/
2003i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
2004 u16 seid,
2005 struct i40e_asq_cmd_details *cmd_details)
2006{
2007 struct i40e_aq_desc desc;
2008 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2009 (struct i40e_aqc_set_vsi_promiscuous_modes *)
2010 &desc.params.raw;
2011 i40e_status status;
2012
2013 i40e_fill_default_direct_cmd_desc(&desc,
2014 i40e_aqc_opc_set_vsi_promiscuous_modes);
2015
2016 cmd->promiscuous_flags = cpu_to_le16(0);
2017 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
2018 cmd->seid = cpu_to_le16(seid);
2019
2020 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2021
2022 return status;
2023}
2024
2025/**
1970 * i40e_aq_set_vsi_unicast_promiscuous 2026 * i40e_aq_set_vsi_unicast_promiscuous
1971 * @hw: pointer to the hw struct 2027 * @hw: pointer to the hw struct
1972 * @seid: vsi number 2028 * @seid: vsi number
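A sketch of how the two new AQ helpers pair up (consumed in the i40e_main.c hunk further down): promiscuous mode on a non-main VSI now toggles "default VSI" steering through the promiscuous-modes AQ command instead of scheduling a full PF reset:

i40e_status example_set_vsi_promisc(struct i40e_hw *hw, u16 seid, bool on)
{
	return on ? i40e_aq_set_default_vsi(hw, seid, NULL)
		  : i40e_aq_clear_default_vsi(hw, seid, NULL);
}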
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 5e8d84ff7d5f..4962e855fbd3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -313,8 +313,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
313 *advertising |= ADVERTISED_Autoneg | 313 *advertising |= ADVERTISED_Autoneg |
314 ADVERTISED_40000baseCR4_Full; 314 ADVERTISED_40000baseCR4_Full;
315 } 315 }
316 if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) && 316 if (phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {
317 !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
318 *supported |= SUPPORTED_Autoneg | 317 *supported |= SUPPORTED_Autoneg |
319 SUPPORTED_100baseT_Full; 318 SUPPORTED_100baseT_Full;
320 *advertising |= ADVERTISED_Autoneg | 319 *advertising |= ADVERTISED_Autoneg |
@@ -663,6 +662,7 @@ static int i40e_set_settings(struct net_device *netdev,
663 if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET && 662 if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
664 hw->phy.media_type != I40E_MEDIA_TYPE_FIBER && 663 hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
665 hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE && 664 hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
665 hw->phy.media_type != I40E_MEDIA_TYPE_DA &&
666 hw->phy.link_info.link_info & I40E_AQ_LINK_UP) 666 hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
667 return -EOPNOTSUPP; 667 return -EOPNOTSUPP;
668 668
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 5ea22008d721..2b1140563a64 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -31,12 +31,7 @@
31/* Local includes */ 31/* Local includes */
32#include "i40e.h" 32#include "i40e.h"
33#include "i40e_diag.h" 33#include "i40e_diag.h"
34#if IS_ENABLED(CONFIG_VXLAN) 34#include <net/udp_tunnel.h>
35#include <net/vxlan.h>
36#endif
37#if IS_ENABLED(CONFIG_GENEVE)
38#include <net/geneve.h>
39#endif
40 35
41const char i40e_driver_name[] = "i40e"; 36const char i40e_driver_name[] = "i40e";
42static const char i40e_driver_string[] = 37static const char i40e_driver_string[] =
@@ -45,8 +40,8 @@ static const char i40e_driver_string[] =
45#define DRV_KERN "-k" 40#define DRV_KERN "-k"
46 41
47#define DRV_VERSION_MAJOR 1 42#define DRV_VERSION_MAJOR 1
48#define DRV_VERSION_MINOR 5 43#define DRV_VERSION_MINOR 6
49#define DRV_VERSION_BUILD 16 44#define DRV_VERSION_BUILD 4
50#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 45#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
51 __stringify(DRV_VERSION_MINOR) "." \ 46 __stringify(DRV_VERSION_MINOR) "." \
52 __stringify(DRV_VERSION_BUILD) DRV_KERN 47 __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -1584,14 +1579,8 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1584 vsi->tc_config.numtc = numtc; 1579 vsi->tc_config.numtc = numtc;
1585 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; 1580 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1586 /* Number of queues per enabled TC */ 1581 /* Number of queues per enabled TC */
1587 /* In MFP case we can have a much lower count of MSIx 1582 qcount = vsi->alloc_queue_pairs;
1588 * vectors available and so we need to lower the used 1583
1589 * q count.
1590 */
1591 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1592 qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
1593 else
1594 qcount = vsi->alloc_queue_pairs;
1595 num_tc_qps = qcount / numtc; 1584 num_tc_qps = qcount / numtc;
1596 num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf)); 1585 num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
1597 1586
@@ -1845,8 +1834,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1845{ 1834{
1846 struct list_head tmp_del_list, tmp_add_list; 1835 struct list_head tmp_del_list, tmp_add_list;
1847 struct i40e_mac_filter *f, *ftmp, *fclone; 1836 struct i40e_mac_filter *f, *ftmp, *fclone;
1837 struct i40e_hw *hw = &vsi->back->hw;
1848 bool promisc_forced_on = false; 1838 bool promisc_forced_on = false;
1849 bool add_happened = false; 1839 bool add_happened = false;
1840 char vsi_name[16] = "PF";
1850 int filter_list_len = 0; 1841 int filter_list_len = 0;
1851 u32 changed_flags = 0; 1842 u32 changed_flags = 0;
1852 i40e_status aq_ret = 0; 1843 i40e_status aq_ret = 0;
@@ -1874,6 +1865,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1874 INIT_LIST_HEAD(&tmp_del_list); 1865 INIT_LIST_HEAD(&tmp_del_list);
1875 INIT_LIST_HEAD(&tmp_add_list); 1866 INIT_LIST_HEAD(&tmp_add_list);
1876 1867
1868 if (vsi->type == I40E_VSI_SRIOV)
1869 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
1870 else if (vsi->type != I40E_VSI_MAIN)
1871 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
1872
1877 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { 1873 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1878 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; 1874 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1879 1875
@@ -1925,7 +1921,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1925 if (!list_empty(&tmp_del_list)) { 1921 if (!list_empty(&tmp_del_list)) {
1926 int del_list_size; 1922 int del_list_size;
1927 1923
1928 filter_list_len = pf->hw.aq.asq_buf_size / 1924 filter_list_len = hw->aq.asq_buf_size /
1929 sizeof(struct i40e_aqc_remove_macvlan_element_data); 1925 sizeof(struct i40e_aqc_remove_macvlan_element_data);
1930 del_list_size = filter_list_len * 1926 del_list_size = filter_list_len *
1931 sizeof(struct i40e_aqc_remove_macvlan_element_data); 1927 sizeof(struct i40e_aqc_remove_macvlan_element_data);
@@ -1957,21 +1953,21 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1957 1953
1958 /* flush a full buffer */ 1954 /* flush a full buffer */
1959 if (num_del == filter_list_len) { 1955 if (num_del == filter_list_len) {
1960 aq_ret = i40e_aq_remove_macvlan(&pf->hw, 1956 aq_ret =
1961 vsi->seid, 1957 i40e_aq_remove_macvlan(hw, vsi->seid,
1962 del_list, 1958 del_list,
1963 num_del, 1959 num_del, NULL);
1964 NULL); 1960 aq_err = hw->aq.asq_last_status;
1965 aq_err = pf->hw.aq.asq_last_status;
1966 num_del = 0; 1961 num_del = 0;
1967 memset(del_list, 0, del_list_size); 1962 memset(del_list, 0, del_list_size);
1968 1963
1969 if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) { 1964 if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) {
1970 retval = -EIO; 1965 retval = -EIO;
1971 dev_err(&pf->pdev->dev, 1966 dev_err(&pf->pdev->dev,
1972 "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", 1967 "ignoring delete macvlan error on %s, err %s, aq_err %s while flushing a full buffer\n",
1973 i40e_stat_str(&pf->hw, aq_ret), 1968 vsi_name,
1974 i40e_aq_str(&pf->hw, aq_err)); 1969 i40e_stat_str(hw, aq_ret),
1970 i40e_aq_str(hw, aq_err));
1975 } 1971 }
1976 } 1972 }
1977 /* Release memory for MAC filter entries which were 1973 /* Release memory for MAC filter entries which were
@@ -1982,17 +1978,17 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1982 } 1978 }
1983 1979
1984 if (num_del) { 1980 if (num_del) {
1985 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, 1981 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list,
1986 del_list, num_del, 1982 num_del, NULL);
1987 NULL); 1983 aq_err = hw->aq.asq_last_status;
1988 aq_err = pf->hw.aq.asq_last_status;
1989 num_del = 0; 1984 num_del = 0;
1990 1985
1991 if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) 1986 if (aq_ret && aq_err != I40E_AQ_RC_ENOENT)
1992 dev_info(&pf->pdev->dev, 1987 dev_info(&pf->pdev->dev,
1993 "ignoring delete macvlan error, err %s aq_err %s\n", 1988 "ignoring delete macvlan error on %s, err %s aq_err %s\n",
1994 i40e_stat_str(&pf->hw, aq_ret), 1989 vsi_name,
1995 i40e_aq_str(&pf->hw, aq_err)); 1990 i40e_stat_str(hw, aq_ret),
1991 i40e_aq_str(hw, aq_err));
1996 } 1992 }
1997 1993
1998 kfree(del_list); 1994 kfree(del_list);
@@ -2003,7 +1999,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2003 int add_list_size; 1999 int add_list_size;
2004 2000
2005 /* do all the adds now */ 2001 /* do all the adds now */
2006 filter_list_len = pf->hw.aq.asq_buf_size / 2002 filter_list_len = hw->aq.asq_buf_size /
2007 sizeof(struct i40e_aqc_add_macvlan_element_data), 2003 sizeof(struct i40e_aqc_add_macvlan_element_data),
2008 add_list_size = filter_list_len * 2004 add_list_size = filter_list_len *
2009 sizeof(struct i40e_aqc_add_macvlan_element_data); 2005 sizeof(struct i40e_aqc_add_macvlan_element_data);
@@ -2038,10 +2034,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2038 2034
2039 /* flush a full buffer */ 2035 /* flush a full buffer */
2040 if (num_add == filter_list_len) { 2036 if (num_add == filter_list_len) {
2041 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, 2037 aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
2042 add_list, num_add, 2038 add_list, num_add,
2043 NULL); 2039 NULL);
2044 aq_err = pf->hw.aq.asq_last_status; 2040 aq_err = hw->aq.asq_last_status;
2045 num_add = 0; 2041 num_add = 0;
2046 2042
2047 if (aq_ret) 2043 if (aq_ret)
@@ -2056,9 +2052,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2056 } 2052 }
2057 2053
2058 if (num_add) { 2054 if (num_add) {
2059 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, 2055 aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
2060 add_list, num_add, NULL); 2056 add_list, num_add, NULL);
2061 aq_err = pf->hw.aq.asq_last_status; 2057 aq_err = hw->aq.asq_last_status;
2062 num_add = 0; 2058 num_add = 0;
2063 } 2059 }
2064 kfree(add_list); 2060 kfree(add_list);
@@ -2067,16 +2063,18 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2067 if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) { 2063 if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) {
2068 retval = i40e_aq_rc_to_posix(aq_ret, aq_err); 2064 retval = i40e_aq_rc_to_posix(aq_ret, aq_err);
2069 dev_info(&pf->pdev->dev, 2065 dev_info(&pf->pdev->dev,
2070 "add filter failed, err %s aq_err %s\n", 2066 "add filter failed on %s, err %s aq_err %s\n",
2071 i40e_stat_str(&pf->hw, aq_ret), 2067 vsi_name,
2072 i40e_aq_str(&pf->hw, aq_err)); 2068 i40e_stat_str(hw, aq_ret),
2073 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && 2069 i40e_aq_str(hw, aq_err));
2070 if ((hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
2074 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 2071 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2075 &vsi->state)) { 2072 &vsi->state)) {
2076 promisc_forced_on = true; 2073 promisc_forced_on = true;
2077 set_bit(__I40E_FILTER_OVERFLOW_PROMISC, 2074 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2078 &vsi->state); 2075 &vsi->state);
2079 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n"); 2076 dev_info(&pf->pdev->dev, "promiscuous mode forced on %s\n",
2077 vsi_name);
2080 } 2078 }
2081 } 2079 }
2082 } 2080 }
@@ -2098,12 +2096,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2098 NULL); 2096 NULL);
2099 if (aq_ret) { 2097 if (aq_ret) {
2100 retval = i40e_aq_rc_to_posix(aq_ret, 2098 retval = i40e_aq_rc_to_posix(aq_ret,
2101 pf->hw.aq.asq_last_status); 2099 hw->aq.asq_last_status);
2102 dev_info(&pf->pdev->dev, 2100 dev_info(&pf->pdev->dev,
2103 "set multi promisc failed, err %s aq_err %s\n", 2101 "set multi promisc failed on %s, err %s aq_err %s\n",
2104 i40e_stat_str(&pf->hw, aq_ret), 2102 vsi_name,
2105 i40e_aq_str(&pf->hw, 2103 i40e_stat_str(hw, aq_ret),
2106 pf->hw.aq.asq_last_status)); 2104 i40e_aq_str(hw, hw->aq.asq_last_status));
2107 } 2105 }
2108 } 2106 }
2109 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { 2107 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
@@ -2122,33 +2120,58 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2122 */ 2120 */
2123 if (pf->cur_promisc != cur_promisc) { 2121 if (pf->cur_promisc != cur_promisc) {
2124 pf->cur_promisc = cur_promisc; 2122 pf->cur_promisc = cur_promisc;
2125 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 2123 if (cur_promisc)
2124 aq_ret =
2125 i40e_aq_set_default_vsi(hw,
2126 vsi->seid,
2127 NULL);
2128 else
2129 aq_ret =
2130 i40e_aq_clear_default_vsi(hw,
2131 vsi->seid,
2132 NULL);
2133 if (aq_ret) {
2134 retval = i40e_aq_rc_to_posix(aq_ret,
2135 hw->aq.asq_last_status);
2136 dev_info(&pf->pdev->dev,
2137 "Set default VSI failed on %s, err %s, aq_err %s\n",
2138 vsi_name,
2139 i40e_stat_str(hw, aq_ret),
2140 i40e_aq_str(hw,
2141 hw->aq.asq_last_status));
2142 }
2126 } 2143 }
2127 } else { 2144 } else {
2128 aq_ret = i40e_aq_set_vsi_unicast_promiscuous( 2145 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2129 &vsi->back->hw, 2146 hw,
2130 vsi->seid, 2147 vsi->seid,
2131 cur_promisc, NULL, 2148 cur_promisc, NULL,
2132 true); 2149 true);
2133 if (aq_ret) { 2150 if (aq_ret) {
2134 retval = 2151 retval =
2135 i40e_aq_rc_to_posix(aq_ret, 2152 i40e_aq_rc_to_posix(aq_ret,
2136 pf->hw.aq.asq_last_status); 2153 hw->aq.asq_last_status);
2137 dev_info(&pf->pdev->dev, 2154 dev_info(&pf->pdev->dev,
2138 "set unicast promisc failed, err %d, aq_err %d\n", 2155 "set unicast promisc failed on %s, err %s, aq_err %s\n",
2139 aq_ret, pf->hw.aq.asq_last_status); 2156 vsi_name,
2157 i40e_stat_str(hw, aq_ret),
2158 i40e_aq_str(hw,
2159 hw->aq.asq_last_status));
2140 } 2160 }
2141 aq_ret = i40e_aq_set_vsi_multicast_promiscuous( 2161 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2142 &vsi->back->hw, 2162 hw,
2143 vsi->seid, 2163 vsi->seid,
2144 cur_promisc, NULL); 2164 cur_promisc, NULL);
2145 if (aq_ret) { 2165 if (aq_ret) {
2146 retval = 2166 retval =
2147 i40e_aq_rc_to_posix(aq_ret, 2167 i40e_aq_rc_to_posix(aq_ret,
2148 pf->hw.aq.asq_last_status); 2168 hw->aq.asq_last_status);
2149 dev_info(&pf->pdev->dev, 2169 dev_info(&pf->pdev->dev,
2150 "set multicast promisc failed, err %d, aq_err %d\n", 2170 "set multicast promisc failed on %s, err %s, aq_err %s\n",
2151 aq_ret, pf->hw.aq.asq_last_status); 2171 vsi_name,
2172 i40e_stat_str(hw, aq_ret),
2173 i40e_aq_str(hw,
2174 hw->aq.asq_last_status));
2152 } 2175 }
2153 } 2176 }
2154 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, 2177 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
@@ -2159,9 +2182,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2159 pf->hw.aq.asq_last_status); 2182 pf->hw.aq.asq_last_status);
2160 dev_info(&pf->pdev->dev, 2183 dev_info(&pf->pdev->dev,
2161 "set brdcast promisc failed, err %s, aq_err %s\n", 2184 "set brdcast promisc failed, err %s, aq_err %s\n",
2162 i40e_stat_str(&pf->hw, aq_ret), 2185 i40e_stat_str(hw, aq_ret),
2163 i40e_aq_str(&pf->hw, 2186 i40e_aq_str(hw,
2164 pf->hw.aq.asq_last_status)); 2187 hw->aq.asq_last_status));
2165 } 2188 }
2166 } 2189 }
2167out: 2190out:
@@ -3952,6 +3975,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3952 /* clear the affinity_mask in the IRQ descriptor */ 3975 /* clear the affinity_mask in the IRQ descriptor */
3953 irq_set_affinity_hint(pf->msix_entries[vector].vector, 3976 irq_set_affinity_hint(pf->msix_entries[vector].vector,
3954 NULL); 3977 NULL);
3978 synchronize_irq(pf->msix_entries[vector].vector);
3955 free_irq(pf->msix_entries[vector].vector, 3979 free_irq(pf->msix_entries[vector].vector,
3956 vsi->q_vectors[i]); 3980 vsi->q_vectors[i]);
3957 3981
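A sketch of the vector-teardown ordering added above: clear the affinity hint, wait out any handler still running on another CPU, then release the vector:

#include <linux/interrupt.h>

static void example_free_vector(unsigned int irq, void *dev_id)
{
	irq_set_affinity_hint(irq, NULL);
	synchronize_irq(irq);	/* no handler in flight past this point */
	free_irq(irq, dev_id);
}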
@@ -4958,7 +4982,6 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4958 if (pf->vsi[v]->netdev) 4982 if (pf->vsi[v]->netdev)
4959 i40e_dcbnl_set_all(pf->vsi[v]); 4983 i40e_dcbnl_set_all(pf->vsi[v]);
4960 } 4984 }
4961 i40e_notify_client_of_l2_param_changes(pf->vsi[v]);
4962 } 4985 }
4963} 4986}
4964 4987
@@ -5183,12 +5206,6 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
5183 usleep_range(1000, 2000); 5206 usleep_range(1000, 2000);
5184 i40e_down(vsi); 5207 i40e_down(vsi);
5185 5208
5186 /* Give a VF some time to respond to the reset. The
5187 * two second wait is based upon the watchdog cycle in
5188 * the VF driver.
5189 */
5190 if (vsi->type == I40E_VSI_SRIOV)
5191 msleep(2000);
5192 i40e_up(vsi); 5209 i40e_up(vsi);
5193 clear_bit(__I40E_CONFIG_BUSY, &pf->state); 5210 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
5194} 5211}
@@ -5231,6 +5248,9 @@ void i40e_down(struct i40e_vsi *vsi)
5231 i40e_clean_tx_ring(vsi->tx_rings[i]); 5248 i40e_clean_tx_ring(vsi->tx_rings[i]);
5232 i40e_clean_rx_ring(vsi->rx_rings[i]); 5249 i40e_clean_rx_ring(vsi->rx_rings[i]);
5233 } 5250 }
5251
5252 i40e_notify_client_of_netdev_close(vsi, false);
5253
5234} 5254}
5235 5255
5236/** 5256/**
@@ -5342,14 +5362,7 @@ int i40e_open(struct net_device *netdev)
5342 TCP_FLAG_CWR) >> 16); 5362 TCP_FLAG_CWR) >> 16);
5343 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); 5363 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
5344 5364
5345#ifdef CONFIG_I40E_VXLAN 5365 udp_tunnel_get_rx_info(netdev);
5346 vxlan_get_rx_port(netdev);
5347#endif
5348#ifdef CONFIG_I40E_GENEVE
5349 if (pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)
5350 geneve_get_rx_port(netdev);
5351#endif
5352
5353 i40e_notify_client_of_netdev_open(vsi); 5366 i40e_notify_client_of_netdev_open(vsi);
5354 5367
5355 return 0; 5368 return 0;
@@ -5716,6 +5729,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
5716 i40e_service_event_schedule(pf); 5729 i40e_service_event_schedule(pf);
5717 } else { 5730 } else {
5718 i40e_pf_unquiesce_all_vsi(pf); 5731 i40e_pf_unquiesce_all_vsi(pf);
5732 /* Notify the client for the DCB changes */
5733 i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);
5719 } 5734 }
5720 5735
5721exit: 5736exit:
@@ -5940,7 +5955,6 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
5940 if (I40E_DEBUG_FD & pf->hw.debug_mask) 5955 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5941 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); 5956 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
5942 } 5957 }
5943
5944} 5958}
5945 5959
5946/** 5960/**
@@ -7057,7 +7071,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
7057 **/ 7071 **/
7058static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) 7072static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
7059{ 7073{
7060#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
7061 struct i40e_hw *hw = &pf->hw; 7074 struct i40e_hw *hw = &pf->hw;
7062 i40e_status ret; 7075 i40e_status ret;
7063 __be16 port; 7076 __be16 port;
@@ -7092,7 +7105,6 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
7092 } 7105 }
7093 } 7106 }
7094 } 7107 }
7095#endif
7096} 7108}
7097 7109
7098/** 7110/**
@@ -7174,7 +7186,7 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
7174 vsi->alloc_queue_pairs = 1; 7186 vsi->alloc_queue_pairs = 1;
7175 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT, 7187 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
7176 I40E_REQ_DESCRIPTOR_MULTIPLE); 7188 I40E_REQ_DESCRIPTOR_MULTIPLE);
7177 vsi->num_q_vectors = 1; 7189 vsi->num_q_vectors = pf->num_fdsb_msix;
7178 break; 7190 break;
7179 7191
7180 case I40E_VSI_VMDQ2: 7192 case I40E_VSI_VMDQ2:
@@ -7558,9 +7570,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
7558 /* reserve one vector for sideband flow director */ 7570 /* reserve one vector for sideband flow director */
7559 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 7571 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7560 if (vectors_left) { 7572 if (vectors_left) {
7573 pf->num_fdsb_msix = 1;
7561 v_budget++; 7574 v_budget++;
7562 vectors_left--; 7575 vectors_left--;
7563 } else { 7576 } else {
7577 pf->num_fdsb_msix = 0;
7564 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7578 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7565 } 7579 }
7566 } 7580 }
@@ -8579,7 +8593,9 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
8579 /* Enable filters and mark for reset */ 8593 /* Enable filters and mark for reset */
8580 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 8594 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
8581 need_reset = true; 8595 need_reset = true;
8582 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 8596 /* enable FD_SB only if there is MSI-X vector */
8597 if (pf->num_fdsb_msix > 0)
8598 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8583 } else { 8599 } else {
8584 /* turn off filters, mark for reset and clear SW filter list */ 8600 /* turn off filters, mark for reset and clear SW filter list */
8585 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 8601 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
@@ -8628,7 +8644,6 @@ static int i40e_set_features(struct net_device *netdev,
8628 return 0; 8644 return 0;
8629} 8645}
8630 8646
8631#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
8632/** 8647/**
8633 * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port 8648 * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
8634 * @pf: board private structure 8649 * @pf: board private structure
@@ -8648,21 +8663,18 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
8648 return i; 8663 return i;
8649} 8664}
8650 8665
8651#endif
8652
8653#if IS_ENABLED(CONFIG_VXLAN)
8654/** 8666/**
8655 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up 8667 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
8656 * @netdev: This physical port's netdev 8668 * @netdev: This physical port's netdev
8657 * @sa_family: Socket Family that VXLAN is notifying us about 8669 * @ti: Tunnel endpoint information
8658 * @port: New UDP port number that VXLAN started listening to
8659 **/ 8670 **/
8660static void i40e_add_vxlan_port(struct net_device *netdev, 8671static void i40e_udp_tunnel_add(struct net_device *netdev,
8661 sa_family_t sa_family, __be16 port) 8672 struct udp_tunnel_info *ti)
8662{ 8673{
8663 struct i40e_netdev_priv *np = netdev_priv(netdev); 8674 struct i40e_netdev_priv *np = netdev_priv(netdev);
8664 struct i40e_vsi *vsi = np->vsi; 8675 struct i40e_vsi *vsi = np->vsi;
8665 struct i40e_pf *pf = vsi->back; 8676 struct i40e_pf *pf = vsi->back;
8677 __be16 port = ti->port;
8666 u8 next_idx; 8678 u8 next_idx;
8667 u8 idx; 8679 u8 idx;
8668 8680
@@ -8670,7 +8682,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
8670 8682
8671 /* Check if port already exists */ 8683 /* Check if port already exists */
8672 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8684 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8673 netdev_info(netdev, "vxlan port %d already offloaded\n", 8685 netdev_info(netdev, "port %d already offloaded\n",
8674 ntohs(port)); 8686 ntohs(port));
8675 return; 8687 return;
8676 } 8688 }
@@ -8679,131 +8691,75 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
8679 next_idx = i40e_get_udp_port_idx(pf, 0); 8691 next_idx = i40e_get_udp_port_idx(pf, 0);
8680 8692
8681 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8693 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8682 netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n", 8694 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
8683 ntohs(port));
8684 return;
8685 }
8686
8687 /* New port: add it and mark its index in the bitmap */
8688 pf->udp_ports[next_idx].index = port;
8689 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
8690 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
8691 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8692}
8693
8694/**
8695 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
8696 * @netdev: This physical port's netdev
8697 * @sa_family: Socket Family that VXLAN is notifying us about
8698 * @port: UDP port number that VXLAN stopped listening to
8699 **/
8700static void i40e_del_vxlan_port(struct net_device *netdev,
8701 sa_family_t sa_family, __be16 port)
8702{
8703 struct i40e_netdev_priv *np = netdev_priv(netdev);
8704 struct i40e_vsi *vsi = np->vsi;
8705 struct i40e_pf *pf = vsi->back;
8706 u8 idx;
8707
8708 idx = i40e_get_udp_port_idx(pf, port);
8709
8710 /* Check if port already exists */
8711 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8712 /* if port exists, set it to 0 (mark for deletion)
8713 * and make it pending
8714 */
8715 pf->udp_ports[idx].index = 0;
8716 pf->pending_udp_bitmap |= BIT_ULL(idx);
8717 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8718 } else {
8719 netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
8720 ntohs(port));
8721 }
8722}
8723#endif
8724
8725#if IS_ENABLED(CONFIG_GENEVE)
8726/**
8727 * i40e_add_geneve_port - Get notifications about GENEVE ports that come up
8728 * @netdev: This physical port's netdev
8729 * @sa_family: Socket Family that GENEVE is notifying us about
8730 * @port: New UDP port number that GENEVE started listening to
8731 **/
8732static void i40e_add_geneve_port(struct net_device *netdev,
8733 sa_family_t sa_family, __be16 port)
8734{
8735 struct i40e_netdev_priv *np = netdev_priv(netdev);
8736 struct i40e_vsi *vsi = np->vsi;
8737 struct i40e_pf *pf = vsi->back;
8738 u8 next_idx;
8739 u8 idx;
8740
8741 if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
8742 return;
8743
8744 idx = i40e_get_udp_port_idx(pf, port);
8745
8746 /* Check if port already exists */
8747 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8748 netdev_info(netdev, "udp port %d already offloaded\n",
8749 ntohs(port)); 8695 ntohs(port));
8750 return; 8696 return;
8751 } 8697 }
8752 8698
8753 /* Now check if there is space to add the new port */ 8699 switch (ti->type) {
8754 next_idx = i40e_get_udp_port_idx(pf, 0); 8700 case UDP_TUNNEL_TYPE_VXLAN:
8755 8701 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
8756 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8702 break;
8757 netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n", 8703 case UDP_TUNNEL_TYPE_GENEVE:
8758 ntohs(port)); 8704 if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
8705 return;
8706 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
8707 break;
8708 default:
8759 return; 8709 return;
8760 } 8710 }
8761 8711
8762 /* New port: add it and mark its index in the bitmap */ 8712 /* New port: add it and mark its index in the bitmap */
8763 pf->udp_ports[next_idx].index = port; 8713 pf->udp_ports[next_idx].index = port;
8764 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
8765 pf->pending_udp_bitmap |= BIT_ULL(next_idx); 8714 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
8766 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; 8715 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8767
8768 dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port));
8769} 8716}
8770 8717
8771/** 8718/**
8772 * i40e_del_geneve_port - Get notifications about GENEVE ports that go away 8719 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
8773 * @netdev: This physical port's netdev 8720 * @netdev: This physical port's netdev
8774 * @sa_family: Socket Family that GENEVE is notifying us about 8721 * @ti: Tunnel endpoint information
8775 * @port: UDP port number that GENEVE stopped listening to
8776 **/ 8722 **/
8777static void i40e_del_geneve_port(struct net_device *netdev, 8723static void i40e_udp_tunnel_del(struct net_device *netdev,
8778 sa_family_t sa_family, __be16 port) 8724 struct udp_tunnel_info *ti)
8779{ 8725{
8780 struct i40e_netdev_priv *np = netdev_priv(netdev); 8726 struct i40e_netdev_priv *np = netdev_priv(netdev);
8781 struct i40e_vsi *vsi = np->vsi; 8727 struct i40e_vsi *vsi = np->vsi;
8782 struct i40e_pf *pf = vsi->back; 8728 struct i40e_pf *pf = vsi->back;
8729 __be16 port = ti->port;
8783 u8 idx; 8730 u8 idx;
8784 8731
8785 if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
8786 return;
8787
8788 idx = i40e_get_udp_port_idx(pf, port); 8732 idx = i40e_get_udp_port_idx(pf, port);
8789 8733
8790 /* Check if port already exists */ 8734 /* Check if port already exists */
8791 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8735 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
8792 /* if port exists, set it to 0 (mark for deletion) 8736 goto not_found;
8793 * and make it pending
8794 */
8795 pf->udp_ports[idx].index = 0;
8796 pf->pending_udp_bitmap |= BIT_ULL(idx);
8797 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8798 8737
8799 dev_info(&pf->pdev->dev, "deleting geneve port %d\n", 8738 switch (ti->type) {
8800 ntohs(port)); 8739 case UDP_TUNNEL_TYPE_VXLAN:
8801 } else { 8740 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
8802 netdev_warn(netdev, "geneve port %d was not found, not deleting\n", 8741 goto not_found;
8803 ntohs(port)); 8742 break;
8743 case UDP_TUNNEL_TYPE_GENEVE:
8744 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
8745 goto not_found;
8746 break;
8747 default:
8748 goto not_found;
8804 } 8749 }
8750
8751 /* if port exists, set it to 0 (mark for deletion)
8752 * and make it pending
8753 */
8754 pf->udp_ports[idx].index = 0;
8755 pf->pending_udp_bitmap |= BIT_ULL(idx);
8756 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8757
8758 return;
8759not_found:
8760 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
8761 ntohs(port));
8805} 8762}
8806#endif
8807 8763
8808static int i40e_get_phys_port_id(struct net_device *netdev, 8764static int i40e_get_phys_port_id(struct net_device *netdev,
8809 struct netdev_phys_item_id *ppid) 8765 struct netdev_phys_item_id *ppid)
@@ -9033,14 +8989,8 @@ static const struct net_device_ops i40e_netdev_ops = {
9033 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, 8989 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
9034 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, 8990 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
9035 .ndo_set_vf_trust = i40e_ndo_set_vf_trust, 8991 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
9036#if IS_ENABLED(CONFIG_VXLAN) 8992 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
9037 .ndo_add_vxlan_port = i40e_add_vxlan_port, 8993 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
9038 .ndo_del_vxlan_port = i40e_del_vxlan_port,
9039#endif
9040#if IS_ENABLED(CONFIG_GENEVE)
9041 .ndo_add_geneve_port = i40e_add_geneve_port,
9042 .ndo_del_geneve_port = i40e_del_geneve_port,
9043#endif
9044 .ndo_get_phys_port_id = i40e_get_phys_port_id, 8994 .ndo_get_phys_port_id = i40e_get_phys_port_id,
9045 .ndo_fdb_add = i40e_ndo_fdb_add, 8995 .ndo_fdb_add = i40e_ndo_fdb_add,
9046 .ndo_features_check = i40e_features_check, 8996 .ndo_features_check = i40e_features_check,
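[Editor's note] For reference, the shape of the new interface: a single add/del callback pair receives a struct udp_tunnel_info (type, sa_family, port) and dispatches on ti->type, replacing the four per-protocol VXLAN/GENEVE hooks. A minimal sketch for a hypothetical driver "foo":

	#include <net/udp_tunnel.h>

	static void foo_udp_tunnel_add(struct net_device *dev,
				       struct udp_tunnel_info *ti)
	{
		switch (ti->type) {
		case UDP_TUNNEL_TYPE_VXLAN:
			/* program ti->port as a VXLAN UDP destination port */
			break;
		case UDP_TUNNEL_TYPE_GENEVE:
			/* program ti->port as a GENEVE UDP destination port */
			break;
		default:
			return;
		}
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_udp_tunnel_add = foo_udp_tunnel_add,
		/* .ndo_udp_tunnel_del takes the same arguments and
		 * mirrors the add path
		 */
	};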
@@ -10133,14 +10083,14 @@ void i40e_veb_release(struct i40e_veb *veb)
10133static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) 10083static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
10134{ 10084{
10135 struct i40e_pf *pf = veb->pf; 10085 struct i40e_pf *pf = veb->pf;
10136 bool is_default = veb->pf->cur_promisc;
10137 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); 10086 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
10138 int ret; 10087 int ret;
10139 10088
10140 /* get a VEB from the hardware */
10141 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, 10089 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
10142 veb->enabled_tc, is_default, 10090 veb->enabled_tc, false,
10143 &veb->seid, enable_stats, NULL); 10091 &veb->seid, enable_stats, NULL);
10092
10093 /* get a VEB from the hardware */
10144 if (ret) { 10094 if (ret) {
10145 dev_info(&pf->pdev->dev, 10095 dev_info(&pf->pdev->dev,
10146 "couldn't add VEB, err %s aq_err %s\n", 10096 "couldn't add VEB, err %s aq_err %s\n",
@@ -10689,12 +10639,8 @@ static void i40e_print_features(struct i40e_pf *pf)
10689 } 10639 }
10690 if (pf->flags & I40E_FLAG_DCB_CAPABLE) 10640 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
10691 i += snprintf(&buf[i], REMAIN(i), " DCB"); 10641 i += snprintf(&buf[i], REMAIN(i), " DCB");
10692#if IS_ENABLED(CONFIG_VXLAN)
10693 i += snprintf(&buf[i], REMAIN(i), " VxLAN"); 10642 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
10694#endif
10695#if IS_ENABLED(CONFIG_GENEVE)
10696 i += snprintf(&buf[i], REMAIN(i), " Geneve"); 10643 i += snprintf(&buf[i], REMAIN(i), " Geneve");
10697#endif
10698 if (pf->flags & I40E_FLAG_PTP) 10644 if (pf->flags & I40E_FLAG_PTP)
10699 i += snprintf(&buf[i], REMAIN(i), " PTP"); 10645 i += snprintf(&buf[i], REMAIN(i), " PTP");
10700#ifdef I40E_FCOE 10646#ifdef I40E_FCOE
@@ -11525,6 +11471,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
11525{ 11471{
11526 struct i40e_pf *pf = pci_get_drvdata(pdev); 11472 struct i40e_pf *pf = pci_get_drvdata(pdev);
11527 struct i40e_hw *hw = &pf->hw; 11473 struct i40e_hw *hw = &pf->hw;
11474 int retval = 0;
11528 11475
11529 set_bit(__I40E_SUSPENDED, &pf->state); 11476 set_bit(__I40E_SUSPENDED, &pf->state);
11530 set_bit(__I40E_DOWN, &pf->state); 11477 set_bit(__I40E_DOWN, &pf->state);
@@ -11536,10 +11483,16 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
11536 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); 11483 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11537 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); 11484 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11538 11485
11486 i40e_stop_misc_vector(pf);
11487
11488 retval = pci_save_state(pdev);
11489 if (retval)
11490 return retval;
11491
11539 pci_wake_from_d3(pdev, pf->wol_en); 11492 pci_wake_from_d3(pdev, pf->wol_en);
11540 pci_set_power_state(pdev, PCI_D3hot); 11493 pci_set_power_state(pdev, PCI_D3hot);
11541 11494
11542 return 0; 11495 return retval;
11543} 11496}
11544 11497
11545/** 11498/**
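[Editor's note] The suspend hunk changes ordering as well as error handling: the misc vector is quiesced and PCI config space saved before the device is armed for wake and dropped to D3hot, and a pci_save_state() failure now propagates to the PM core instead of being silently ignored. A condensed sketch of the resulting flow (WoL details elided; "foo" is illustrative):

	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
	{
		int retval;

		/* ... mark device down, program WoL registers ... */

		retval = pci_save_state(pdev);	/* must precede D3hot */
		if (retval)
			return retval;

		pci_wake_from_d3(pdev, /* wol_en */ false);
		pci_set_power_state(pdev, PCI_D3hot);
		return 0;
	}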
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 80403c6ee7f0..4660c5abc855 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -98,6 +98,8 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
98 struct i40e_asq_cmd_details *cmd_details); 98 struct i40e_asq_cmd_details *cmd_details);
99i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, 99i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
100 struct i40e_asq_cmd_details *cmd_details); 100 struct i40e_asq_cmd_details *cmd_details);
101i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
102 struct i40e_asq_cmd_details *cmd_details);
101enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, 103enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
102 bool qualified_modules, bool report_init, 104 bool qualified_modules, bool report_init,
103 struct i40e_aq_get_phy_abilities_resp *abilities, 105 struct i40e_aq_get_phy_abilities_resp *abilities,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 1fcafcfa8f14..6fcbf764f32b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -665,6 +665,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
665 goto error_alloc_vsi_res; 665 goto error_alloc_vsi_res;
666 } 666 }
667 if (type == I40E_VSI_SRIOV) { 667 if (type == I40E_VSI_SRIOV) {
668 u64 hena = i40e_pf_get_default_rss_hena(pf);
669
668 vf->lan_vsi_idx = vsi->idx; 670 vf->lan_vsi_idx = vsi->idx;
669 vf->lan_vsi_id = vsi->id; 671 vf->lan_vsi_id = vsi->id;
670 /* If the port VLAN has been configured and then the 672 /* If the port VLAN has been configured and then the
@@ -687,6 +689,10 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
687 vf->default_lan_addr.addr, vf->vf_id); 689 vf->default_lan_addr.addr, vf->vf_id);
688 } 690 }
689 spin_unlock_bh(&vsi->mac_filter_list_lock); 691 spin_unlock_bh(&vsi->mac_filter_list_lock);
692 i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id),
693 (u32)hena);
694 i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
695 (u32)(hena >> 32));
690 } 696 }
691 697
692 /* program mac filter */ 698 /* program mac filter */
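[Editor's note] The SR-IOV hunk gives each VF a sane default RSS hash-enable (HENA) configuration at VSI allocation time; the 64-bit mask is written as two 32-bit halves, as the per-VF register layout requires. Consolidated from the diff for readability:

	u64 hena = i40e_pf_get_default_rss_hena(pf);

	/* low and high 32 bits go to separate per-VF registers */
	i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
	i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
			  (u32)(hena >> 32));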
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 16c552952860..eac057b88055 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -37,8 +37,8 @@ static const char i40evf_driver_string[] =
37#define DRV_KERN "-k" 37#define DRV_KERN "-k"
38 38
39#define DRV_VERSION_MAJOR 1 39#define DRV_VERSION_MAJOR 1
40#define DRV_VERSION_MINOR 5 40#define DRV_VERSION_MINOR 6
41#define DRV_VERSION_BUILD 10 41#define DRV_VERSION_BUILD 4
42#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 42#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
43 __stringify(DRV_VERSION_MINOR) "." \ 43 __stringify(DRV_VERSION_MINOR) "." \
44 __stringify(DRV_VERSION_BUILD) \ 44 __stringify(DRV_VERSION_BUILD) \
@@ -825,7 +825,7 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
825 825
826 ether_addr_copy(f->macaddr, macaddr); 826 ether_addr_copy(f->macaddr, macaddr);
827 827
828 list_add(&f->list, &adapter->mac_filter_list); 828 list_add_tail(&f->list, &adapter->mac_filter_list);
829 f->add = true; 829 f->add = true;
830 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; 830 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
831 } 831 }
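[Editor's note] A one-line change with a behavioral point: list_add() pushes at the head of the list (LIFO), while list_add_tail() queues at the tail (FIFO), so MAC filter requests are now sent to the PF in the order they were issued:

	/* before: newest filter processed first */
	list_add(&f->list, &adapter->mac_filter_list);
	/* after: filters processed in request order */
	list_add_tail(&f->list, &adapter->mac_filter_list);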
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index f13445691507..d76c221d4c8a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -434,6 +434,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
434 ether_addr_copy(veal->list[i].addr, f->macaddr); 434 ether_addr_copy(veal->list[i].addr, f->macaddr);
435 i++; 435 i++;
436 f->add = false; 436 f->add = false;
437 if (i == count)
438 break;
437 } 439 }
438 } 440 }
439 if (!more) 441 if (!more)
@@ -497,6 +499,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
497 i++; 499 i++;
498 list_del(&f->list); 500 list_del(&f->list);
499 kfree(f); 501 kfree(f);
502 if (i == count)
503 break;
500 } 504 }
501 } 505 }
502 if (!more) 506 if (!more)
@@ -560,6 +564,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
560 vvfl->vlan_id[i] = f->vlan; 564 vvfl->vlan_id[i] = f->vlan;
561 i++; 565 i++;
562 f->add = false; 566 f->add = false;
567 if (i == count)
568 break;
563 } 569 }
564 } 570 }
565 if (!more) 571 if (!more)
@@ -623,6 +629,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
623 i++; 629 i++;
624 list_del(&f->list); 630 list_del(&f->list);
625 kfree(f); 631 kfree(f);
632 if (i == count)
633 break;
626 } 634 }
627 } 635 }
628 if (!more) 636 if (!more)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 088c47cf27d9..468fa9ddfa06 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -50,7 +50,7 @@
50#include <linux/if_bridge.h> 50#include <linux/if_bridge.h>
51#include <linux/prefetch.h> 51#include <linux/prefetch.h>
52#include <scsi/fc/fc_fcoe.h> 52#include <scsi/fc/fc_fcoe.h>
53#include <net/vxlan.h> 53#include <net/udp_tunnel.h>
54#include <net/pkt_cls.h> 54#include <net/pkt_cls.h>
55#include <net/tc_act/tc_gact.h> 55#include <net/tc_act/tc_gact.h>
56#include <net/tc_act/tc_mirred.h> 56#include <net/tc_act/tc_mirred.h>
@@ -5722,9 +5722,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
5722#ifdef CONFIG_IXGBE_DCA 5722#ifdef CONFIG_IXGBE_DCA
5723 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; 5723 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
5724#endif 5724#endif
5725#ifdef CONFIG_IXGBE_VXLAN
5726 adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE; 5725 adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
5727#endif
5728 break; 5726 break;
5729 default: 5727 default:
5730 break; 5728 break;
@@ -6158,9 +6156,7 @@ int ixgbe_open(struct net_device *netdev)
6158 ixgbe_up_complete(adapter); 6156 ixgbe_up_complete(adapter);
6159 6157
6160 ixgbe_clear_vxlan_port(adapter); 6158 ixgbe_clear_vxlan_port(adapter);
6161#ifdef CONFIG_IXGBE_VXLAN 6159 udp_tunnel_get_rx_info(netdev);
6162 vxlan_get_rx_port(netdev);
6163#endif
6164 6160
6165 return 0; 6161 return 0;
6166 6162
@@ -7262,14 +7258,12 @@ static void ixgbe_service_task(struct work_struct *work)
7262 ixgbe_service_event_complete(adapter); 7258 ixgbe_service_event_complete(adapter);
7263 return; 7259 return;
7264 } 7260 }
7265#ifdef CONFIG_IXGBE_VXLAN
7266 rtnl_lock();
7267 if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) { 7261 if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
7262 rtnl_lock();
7268 adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED; 7263 adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
7269 vxlan_get_rx_port(adapter->netdev); 7264 udp_tunnel_get_rx_info(adapter->netdev);
7265 rtnl_unlock();
7270 } 7266 }
7271 rtnl_unlock();
7272#endif /* CONFIG_IXGBE_VXLAN */
7273 ixgbe_reset_subtask(adapter); 7267 ixgbe_reset_subtask(adapter);
7274 ixgbe_phy_interrupt_subtask(adapter); 7268 ixgbe_phy_interrupt_subtask(adapter);
7275 ixgbe_sfp_detection_subtask(adapter); 7269 ixgbe_sfp_detection_subtask(adapter);
@@ -7697,7 +7691,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
7697 /* snag network header to get L4 type and address */ 7691 /* snag network header to get L4 type and address */
7698 skb = first->skb; 7692 skb = first->skb;
7699 hdr.network = skb_network_header(skb); 7693 hdr.network = skb_network_header(skb);
7700#ifdef CONFIG_IXGBE_VXLAN
7701 if (skb->encapsulation && 7694 if (skb->encapsulation &&
7702 first->protocol == htons(ETH_P_IP) && 7695 first->protocol == htons(ETH_P_IP) &&
7703 hdr.ipv4->protocol != IPPROTO_UDP) { 7696 hdr.ipv4->protocol != IPPROTO_UDP) {
@@ -7708,7 +7701,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
7708 udp_hdr(skb)->dest == adapter->vxlan_port) 7701 udp_hdr(skb)->dest == adapter->vxlan_port)
7709 hdr.network = skb_inner_network_header(skb); 7702 hdr.network = skb_inner_network_header(skb);
7710 } 7703 }
7711#endif /* CONFIG_IXGBE_VXLAN */
7712 7704
7713 /* Currently only IPv4/IPv6 with TCP is supported */ 7705 /* Currently only IPv4/IPv6 with TCP is supported */
7714 switch (hdr.ipv4->version) { 7706 switch (hdr.ipv4->version) {
@@ -8770,14 +8762,12 @@ static int ixgbe_set_features(struct net_device *netdev,
8770 8762
8771 netdev->features = features; 8763 netdev->features = features;
8772 8764
8773#ifdef CONFIG_IXGBE_VXLAN
8774 if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { 8765 if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
8775 if (features & NETIF_F_RXCSUM) 8766 if (features & NETIF_F_RXCSUM)
8776 adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; 8767 adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
8777 else 8768 else
8778 ixgbe_clear_vxlan_port(adapter); 8769 ixgbe_clear_vxlan_port(adapter);
8779 } 8770 }
8780#endif /* CONFIG_IXGBE_VXLAN */
8781 8771
8782 if (need_reset) 8772 if (need_reset)
8783 ixgbe_do_reset(netdev); 8773 ixgbe_do_reset(netdev);
@@ -8788,23 +8778,27 @@ static int ixgbe_set_features(struct net_device *netdev,
8788 return 0; 8778 return 0;
8789} 8779}
8790 8780
8791#ifdef CONFIG_IXGBE_VXLAN
8792/** 8781/**
8793 * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up 8782 * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
8794 * @dev: The port's netdev 8783 * @dev: The port's netdev
 8795 * @sa_family: Socket Family that VXLAN is notifying us about 8784 * @sa_family: Socket Family that VXLAN is notifying us about
8796 * @port: New UDP port number that VXLAN started listening to 8785 * @port: New UDP port number that VXLAN started listening to
8786 * @type: Enumerated type specifying UDP tunnel type
8797 **/ 8787 **/
8798static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, 8788static void ixgbe_add_vxlan_port(struct net_device *dev,
8799 __be16 port) 8789 struct udp_tunnel_info *ti)
8800{ 8790{
8801 struct ixgbe_adapter *adapter = netdev_priv(dev); 8791 struct ixgbe_adapter *adapter = netdev_priv(dev);
8802 struct ixgbe_hw *hw = &adapter->hw; 8792 struct ixgbe_hw *hw = &adapter->hw;
8793 __be16 port = ti->port;
8803 8794
8804 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) 8795 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
8805 return; 8796 return;
8806 8797
8807 if (sa_family == AF_INET6) 8798 if (ti->sa_family != AF_INET)
8799 return;
8800
8801 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
8808 return; 8802 return;
8809 8803
8810 if (adapter->vxlan_port == port) 8804 if (adapter->vxlan_port == port)
@@ -8826,28 +8820,31 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8826 * @dev: The port's netdev 8820 * @dev: The port's netdev
8827 * @sa_family: Socket Family that VXLAN is notifying us about 8821 * @sa_family: Socket Family that VXLAN is notifying us about
8828 * @port: UDP port number that VXLAN stopped listening to 8822 * @port: UDP port number that VXLAN stopped listening to
8823 * @type: Enumerated type specifying UDP tunnel type
8829 **/ 8824 **/
8830static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, 8825static void ixgbe_del_vxlan_port(struct net_device *dev,
8831 __be16 port) 8826 struct udp_tunnel_info *ti)
8832{ 8827{
8833 struct ixgbe_adapter *adapter = netdev_priv(dev); 8828 struct ixgbe_adapter *adapter = netdev_priv(dev);
8834 8829
8835 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) 8830 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
8836 return; 8831 return;
8837 8832
8838 if (sa_family == AF_INET6) 8833 if (ti->sa_family != AF_INET)
8839 return; 8834 return;
8840 8835
8841 if (adapter->vxlan_port != port) { 8836 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
8837 return;
8838
8839 if (adapter->vxlan_port != ti->port) {
8842 netdev_info(dev, "Port %d was not found, not deleting\n", 8840 netdev_info(dev, "Port %d was not found, not deleting\n",
8843 ntohs(port)); 8841 ntohs(ti->port));
8844 return; 8842 return;
8845 } 8843 }
8846 8844
8847 ixgbe_clear_vxlan_port(adapter); 8845 ixgbe_clear_vxlan_port(adapter);
8848 adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; 8846 adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
8849} 8847}
8850#endif /* CONFIG_IXGBE_VXLAN */
8851 8848
8852static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 8849static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
8853 struct net_device *dev, 8850 struct net_device *dev,
@@ -9160,10 +9157,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
9160 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, 9157 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
9161 .ndo_dfwd_add_station = ixgbe_fwd_add, 9158 .ndo_dfwd_add_station = ixgbe_fwd_add,
9162 .ndo_dfwd_del_station = ixgbe_fwd_del, 9159 .ndo_dfwd_del_station = ixgbe_fwd_del,
9163#ifdef CONFIG_IXGBE_VXLAN 9160 .ndo_udp_tunnel_add = ixgbe_add_vxlan_port,
9164 .ndo_add_vxlan_port = ixgbe_add_vxlan_port, 9161 .ndo_udp_tunnel_del = ixgbe_del_vxlan_port,
9165 .ndo_del_vxlan_port = ixgbe_del_vxlan_port,
9166#endif /* CONFIG_IXGBE_VXLAN */
9167 .ndo_features_check = ixgbe_features_check, 9162 .ndo_features_check = ixgbe_features_check,
9168}; 9163};
9169 9164
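[Editor's note] Two details worth noting in the ixgbe conversion: udp_tunnel_get_rx_info() asks the stack to replay every known tunnel port through .ndo_udp_tunnel_add and must run under RTNL, which ndo_open already holds; and in the service task the rtnl_lock()/rtnl_unlock() pair is now scoped to the re-registration branch, so the common path takes no lock at all. Sketch of the replay-after-reset pattern from the diff:

	if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
		rtnl_lock();	/* udp_tunnel_get_rx_info() requires RTNL */
		adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
		udp_tunnel_get_rx_info(adapter->netdev);
		rtnl_unlock();
	}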
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index dc82b1b19574..0d2f8e934c59 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -411,7 +411,6 @@ static int
411ltq_etop_mdio_init(struct net_device *dev) 411ltq_etop_mdio_init(struct net_device *dev)
412{ 412{
413 struct ltq_etop_priv *priv = netdev_priv(dev); 413 struct ltq_etop_priv *priv = netdev_priv(dev);
414 int i;
415 int err; 414 int err;
416 415
417 priv->mii_bus = mdiobus_alloc(); 416 priv->mii_bus = mdiobus_alloc();
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 868a957f24bb..0b047178cda1 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -699,7 +699,6 @@ struct mvpp2_port {
699 u16 rx_ring_size; 699 u16 rx_ring_size;
700 struct mvpp2_pcpu_stats __percpu *stats; 700 struct mvpp2_pcpu_stats __percpu *stats;
701 701
702 struct phy_device *phy_dev;
703 phy_interface_t phy_interface; 702 phy_interface_t phy_interface;
704 struct device_node *phy_node; 703 struct device_node *phy_node;
705 unsigned int link; 704 unsigned int link;
@@ -4850,7 +4849,7 @@ static irqreturn_t mvpp2_isr(int irq, void *dev_id)
4850static void mvpp2_link_event(struct net_device *dev) 4849static void mvpp2_link_event(struct net_device *dev)
4851{ 4850{
4852 struct mvpp2_port *port = netdev_priv(dev); 4851 struct mvpp2_port *port = netdev_priv(dev);
4853 struct phy_device *phydev = port->phy_dev; 4852 struct phy_device *phydev = dev->phydev;
4854 int status_change = 0; 4853 int status_change = 0;
4855 u32 val; 4854 u32 val;
4856 4855
@@ -5416,6 +5415,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
5416/* Set hw internals when starting port */ 5415/* Set hw internals when starting port */
5417static void mvpp2_start_dev(struct mvpp2_port *port) 5416static void mvpp2_start_dev(struct mvpp2_port *port)
5418{ 5417{
5418 struct net_device *ndev = port->dev;
5419
5419 mvpp2_gmac_max_rx_size_set(port); 5420 mvpp2_gmac_max_rx_size_set(port);
5420 mvpp2_txp_max_tx_size_set(port); 5421 mvpp2_txp_max_tx_size_set(port);
5421 5422
@@ -5425,13 +5426,15 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
5425 mvpp2_interrupts_enable(port); 5426 mvpp2_interrupts_enable(port);
5426 5427
5427 mvpp2_port_enable(port); 5428 mvpp2_port_enable(port);
5428 phy_start(port->phy_dev); 5429 phy_start(ndev->phydev);
5429 netif_tx_start_all_queues(port->dev); 5430 netif_tx_start_all_queues(port->dev);
5430} 5431}
5431 5432
5432/* Set hw internals when stopping port */ 5433/* Set hw internals when stopping port */
5433static void mvpp2_stop_dev(struct mvpp2_port *port) 5434static void mvpp2_stop_dev(struct mvpp2_port *port)
5434{ 5435{
5436 struct net_device *ndev = port->dev;
5437
5435 /* Stop new packets from arriving to RXQs */ 5438 /* Stop new packets from arriving to RXQs */
5436 mvpp2_ingress_disable(port); 5439 mvpp2_ingress_disable(port);
5437 5440
@@ -5447,7 +5450,7 @@ static void mvpp2_stop_dev(struct mvpp2_port *port)
5447 5450
5448 mvpp2_egress_disable(port); 5451 mvpp2_egress_disable(port);
5449 mvpp2_port_disable(port); 5452 mvpp2_port_disable(port);
5450 phy_stop(port->phy_dev); 5453 phy_stop(ndev->phydev);
5451} 5454}
5452 5455
5453/* Return positive if MTU is valid */ 5456/* Return positive if MTU is valid */
@@ -5535,7 +5538,6 @@ static int mvpp2_phy_connect(struct mvpp2_port *port)
5535 phy_dev->supported &= PHY_GBIT_FEATURES; 5538 phy_dev->supported &= PHY_GBIT_FEATURES;
5536 phy_dev->advertising = phy_dev->supported; 5539 phy_dev->advertising = phy_dev->supported;
5537 5540
5538 port->phy_dev = phy_dev;
5539 port->link = 0; 5541 port->link = 0;
5540 port->duplex = 0; 5542 port->duplex = 0;
5541 port->speed = 0; 5543 port->speed = 0;
@@ -5545,8 +5547,9 @@ static int mvpp2_phy_connect(struct mvpp2_port *port)
5545 5547
5546static void mvpp2_phy_disconnect(struct mvpp2_port *port) 5548static void mvpp2_phy_disconnect(struct mvpp2_port *port)
5547{ 5549{
5548 phy_disconnect(port->phy_dev); 5550 struct net_device *ndev = port->dev;
5549 port->phy_dev = NULL; 5551
5552 phy_disconnect(ndev->phydev);
5550} 5553}
5551 5554
5552static int mvpp2_open(struct net_device *dev) 5555static int mvpp2_open(struct net_device *dev)
@@ -5796,13 +5799,12 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5796 5799
5797static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 5800static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5798{ 5801{
5799 struct mvpp2_port *port = netdev_priv(dev);
5800 int ret; 5802 int ret;
5801 5803
5802 if (!port->phy_dev) 5804 if (!dev->phydev)
5803 return -ENOTSUPP; 5805 return -ENOTSUPP;
5804 5806
5805 ret = phy_mii_ioctl(port->phy_dev, ifr, cmd); 5807 ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
5806 if (!ret) 5808 if (!ret)
5807 mvpp2_link_event(dev); 5809 mvpp2_link_event(dev);
5808 5810
@@ -5811,28 +5813,6 @@ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5811 5813
5812/* Ethtool methods */ 5814/* Ethtool methods */
5813 5815
5814/* Get settings (phy address, speed) for ethtools */
5815static int mvpp2_ethtool_get_settings(struct net_device *dev,
5816 struct ethtool_cmd *cmd)
5817{
5818 struct mvpp2_port *port = netdev_priv(dev);
5819
5820 if (!port->phy_dev)
5821 return -ENODEV;
5822 return phy_ethtool_gset(port->phy_dev, cmd);
5823}
5824
5825/* Set settings (phy address, speed) for ethtools */
5826static int mvpp2_ethtool_set_settings(struct net_device *dev,
5827 struct ethtool_cmd *cmd)
5828{
5829 struct mvpp2_port *port = netdev_priv(dev);
5830
5831 if (!port->phy_dev)
5832 return -ENODEV;
5833 return phy_ethtool_sset(port->phy_dev, cmd);
5834}
5835
5836/* Set interrupt coalescing for ethtools */ 5816/* Set interrupt coalescing for ethtools */
5837static int mvpp2_ethtool_set_coalesce(struct net_device *dev, 5817static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
5838 struct ethtool_coalesce *c) 5818 struct ethtool_coalesce *c)
@@ -5967,13 +5947,13 @@ static const struct net_device_ops mvpp2_netdev_ops = {
5967 5947
5968static const struct ethtool_ops mvpp2_eth_tool_ops = { 5948static const struct ethtool_ops mvpp2_eth_tool_ops = {
5969 .get_link = ethtool_op_get_link, 5949 .get_link = ethtool_op_get_link,
5970 .get_settings = mvpp2_ethtool_get_settings,
5971 .set_settings = mvpp2_ethtool_set_settings,
5972 .set_coalesce = mvpp2_ethtool_set_coalesce, 5950 .set_coalesce = mvpp2_ethtool_set_coalesce,
5973 .get_coalesce = mvpp2_ethtool_get_coalesce, 5951 .get_coalesce = mvpp2_ethtool_get_coalesce,
5974 .get_drvinfo = mvpp2_ethtool_get_drvinfo, 5952 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
5975 .get_ringparam = mvpp2_ethtool_get_ringparam, 5953 .get_ringparam = mvpp2_ethtool_get_ringparam,
5976 .set_ringparam = mvpp2_ethtool_set_ringparam, 5954 .set_ringparam = mvpp2_ethtool_set_ringparam,
5955 .get_link_ksettings = phy_ethtool_get_link_ksettings,
5956 .set_link_ksettings = phy_ethtool_set_link_ksettings,
5977}; 5957};
5978 5958
5979/* Driver initialization */ 5959/* Driver initialization */
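[Editor's note] With the private phy_dev pointer removed, mvpp2 relies on net_device->phydev (populated by of_phy_connect()) and can delegate ethtool link settings to the generic phylib helpers, which is what allows the driver's own get_settings/set_settings implementations to be deleted outright. The resulting ops table shape ("foo" is illustrative):

	static const struct ethtool_ops foo_ethtool_ops = {
		.get_link		= ethtool_op_get_link,
		/* generic helpers operate on dev->phydev directly */
		.get_link_ksettings	= phy_ethtool_get_link_ksettings,
		.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	};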
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 9ca3734ebb6b..5098e7f21987 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -24,13 +24,6 @@ config MLX4_EN_DCB
24 24
25 If unsure, set to Y 25 If unsure, set to Y
26 26
27config MLX4_EN_VXLAN
28 bool "VXLAN offloads Support"
29 default y
30 depends on MLX4_EN && VXLAN && !(MLX4_EN=y && VXLAN=m)
31 ---help---
32 Say Y here if you want to use VXLAN offloads in the driver.
33
34config MLX4_CORE 27config MLX4_CORE
35 tristate 28 tristate
36 depends on PCI 29 depends on PCI
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index f01918c63f28..99c6bbdff501 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -37,6 +37,11 @@
37#include "mlx4_en.h" 37#include "mlx4_en.h"
38#include "fw_qos.h" 38#include "fw_qos.h"
39 39
40enum {
41 MLX4_CEE_STATE_DOWN = 0,
42 MLX4_CEE_STATE_UP = 1,
43};
44
40/* Definitions for QCN 45/* Definitions for QCN
41 */ 46 */
42 47
@@ -80,13 +85,202 @@ struct mlx4_congestion_control_mb_prio_802_1_qau_statistics {
80 __be32 reserved3[4]; 85 __be32 reserved3[4];
81}; 86};
82 87
88static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap)
89{
90 struct mlx4_en_priv *priv = netdev_priv(dev);
91
92 switch (capid) {
93 case DCB_CAP_ATTR_PFC:
94 *cap = true;
95 break;
96 case DCB_CAP_ATTR_DCBX:
97 *cap = priv->cee_params.dcbx_cap;
98 break;
99 case DCB_CAP_ATTR_PFC_TCS:
100 *cap = 1 << mlx4_max_tc(priv->mdev->dev);
101 break;
102 default:
103 *cap = false;
104 break;
105 }
106
107 return 0;
108}
109
110static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev)
111{
112 struct mlx4_en_priv *priv = netdev_priv(netdev);
113
114 return priv->cee_params.dcb_cfg.pfc_state;
115}
116
117static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
118{
119 struct mlx4_en_priv *priv = netdev_priv(netdev);
120
121 priv->cee_params.dcb_cfg.pfc_state = state;
122}
123
124static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
125 u8 *setting)
126{
127 struct mlx4_en_priv *priv = netdev_priv(netdev);
128
129 *setting = priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc;
130}
131
132static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
133 u8 setting)
134{
135 struct mlx4_en_priv *priv = netdev_priv(netdev);
136
137 priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc = setting;
138 priv->cee_params.dcb_cfg.pfc_state = true;
139}
140
141static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
142{
143 struct mlx4_en_priv *priv = netdev_priv(netdev);
144
145 if (!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
146 return -EINVAL;
147
148 if (tcid == DCB_NUMTCS_ATTR_PFC)
149 *num = mlx4_max_tc(priv->mdev->dev);
150 else
151 *num = 0;
152
153 return 0;
154}
155
156static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
157{
158 struct mlx4_en_priv *priv = netdev_priv(netdev);
159 struct mlx4_en_dev *mdev = priv->mdev;
160 struct mlx4_en_cee_config *dcb_cfg = &priv->cee_params.dcb_cfg;
161 int err = 0;
162
163 if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
164 return -EINVAL;
165
166 if (dcb_cfg->pfc_state) {
167 int tc;
168
169 priv->prof->rx_pause = 0;
170 priv->prof->tx_pause = 0;
171 for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
172 u8 tc_mask = 1 << tc;
173
174 switch (dcb_cfg->tc_config[tc].dcb_pfc) {
175 case pfc_disabled:
176 priv->prof->tx_ppp &= ~tc_mask;
177 priv->prof->rx_ppp &= ~tc_mask;
178 break;
179 case pfc_enabled_full:
180 priv->prof->tx_ppp |= tc_mask;
181 priv->prof->rx_ppp |= tc_mask;
182 break;
183 case pfc_enabled_tx:
184 priv->prof->tx_ppp |= tc_mask;
185 priv->prof->rx_ppp &= ~tc_mask;
186 break;
187 case pfc_enabled_rx:
188 priv->prof->tx_ppp &= ~tc_mask;
189 priv->prof->rx_ppp |= tc_mask;
190 break;
191 default:
192 break;
193 }
194 }
195 en_dbg(DRV, priv, "Set pfc on\n");
196 } else {
197 priv->prof->rx_pause = 1;
198 priv->prof->tx_pause = 1;
199 en_dbg(DRV, priv, "Set pfc off\n");
200 }
201
202 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
203 priv->rx_skb_size + ETH_FCS_LEN,
204 priv->prof->tx_pause,
205 priv->prof->tx_ppp,
206 priv->prof->rx_pause,
207 priv->prof->rx_ppp);
208 if (err)
209 en_err(priv, "Failed setting pause params\n");
210 return err;
211}
212
213static u8 mlx4_en_dcbnl_get_state(struct net_device *dev)
214{
215 struct mlx4_en_priv *priv = netdev_priv(dev);
216
217 if (priv->flags & MLX4_EN_FLAG_DCB_ENABLED)
218 return MLX4_CEE_STATE_UP;
219
220 return MLX4_CEE_STATE_DOWN;
221}
222
223static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
224{
225 struct mlx4_en_priv *priv = netdev_priv(dev);
226 int num_tcs = 0;
227
228 if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
229 return 1;
230
231 if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
232 return 0;
233
234 if (state) {
235 priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
236 num_tcs = IEEE_8021QAZ_MAX_TCS;
237 } else {
238 priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
239 }
240
241 return mlx4_en_setup_tc(dev, num_tcs);
242}
243
244/* On success returns a non-zero 802.1p user priority bitmap
245 * otherwise returns 0 as the invalid user priority bitmap to
246 * indicate an error.
247 */
248static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
249{
250 struct mlx4_en_priv *priv = netdev_priv(netdev);
251 struct dcb_app app = {
252 .selector = idtype,
253 .protocol = id,
254 };
255 if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
256 return 0;
257
258 return dcb_getapp(netdev, &app);
259}
260
261static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
262 u16 id, u8 up)
263{
264 struct mlx4_en_priv *priv = netdev_priv(netdev);
265 struct dcb_app app;
266
267 if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
268 return -EINVAL;
269
270 memset(&app, 0, sizeof(struct dcb_app));
271 app.selector = idtype;
272 app.protocol = id;
273 app.priority = up;
274
275 return dcb_setapp(netdev, &app);
276}
277
83static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev, 278static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
84 struct ieee_ets *ets) 279 struct ieee_ets *ets)
85{ 280{
86 struct mlx4_en_priv *priv = netdev_priv(dev); 281 struct mlx4_en_priv *priv = netdev_priv(dev);
87 struct ieee_ets *my_ets = &priv->ets; 282 struct ieee_ets *my_ets = &priv->ets;
88 283
89 /* No IEEE PFC settings available */
90 if (!my_ets) 284 if (!my_ets)
91 return -EINVAL; 285 return -EINVAL;
92 286
@@ -237,18 +431,51 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
237 431
238static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev) 432static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
239{ 433{
240 return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; 434 struct mlx4_en_priv *priv = netdev_priv(dev);
435
436 return priv->cee_params.dcbx_cap;
241} 437}
242 438
243static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode) 439static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
244{ 440{
441 struct mlx4_en_priv *priv = netdev_priv(dev);
442 struct ieee_ets ets = {0};
443 struct ieee_pfc pfc = {0};
444
445 if (mode == priv->cee_params.dcbx_cap)
446 return 0;
447
245 if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || 448 if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
246 (mode & DCB_CAP_DCBX_VER_CEE) || 449 ((mode & DCB_CAP_DCBX_VER_IEEE) &&
247 !(mode & DCB_CAP_DCBX_VER_IEEE) || 450 (mode & DCB_CAP_DCBX_VER_CEE)) ||
248 !(mode & DCB_CAP_DCBX_HOST)) 451 !(mode & DCB_CAP_DCBX_HOST))
249 return 1; 452 goto err;
453
454 priv->cee_params.dcbx_cap = mode;
455
456 ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
457 pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS;
458
459 if (mode & DCB_CAP_DCBX_VER_IEEE) {
460 if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
461 goto err;
462 if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
463 goto err;
464 } else if (mode & DCB_CAP_DCBX_VER_CEE) {
465 if (mlx4_en_dcbnl_set_all(dev))
466 goto err;
467 } else {
468 if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
469 goto err;
470 if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
471 goto err;
472 if (mlx4_en_setup_tc(dev, 0))
473 goto err;
474 }
250 475
251 return 0; 476 return 0;
477err:
478 return 1;
252} 479}
253 480
254#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */ 481#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
@@ -463,24 +690,46 @@ static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
463} 690}
464 691
465const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = { 692const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
466 .ieee_getets = mlx4_en_dcbnl_ieee_getets, 693 .ieee_getets = mlx4_en_dcbnl_ieee_getets,
467 .ieee_setets = mlx4_en_dcbnl_ieee_setets, 694 .ieee_setets = mlx4_en_dcbnl_ieee_setets,
468 .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate, 695 .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
469 .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate, 696 .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
470 .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc, 697 .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn,
471 .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc, 698 .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn,
699 .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
700 .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
701 .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
702
703 .getstate = mlx4_en_dcbnl_get_state,
704 .setstate = mlx4_en_dcbnl_set_state,
705 .getpfccfg = mlx4_en_dcbnl_get_pfc_cfg,
706 .setpfccfg = mlx4_en_dcbnl_set_pfc_cfg,
707 .setall = mlx4_en_dcbnl_set_all,
708 .getcap = mlx4_en_dcbnl_getcap,
709 .getnumtcs = mlx4_en_dcbnl_getnumtcs,
710 .getpfcstate = mlx4_en_dcbnl_getpfcstate,
711 .setpfcstate = mlx4_en_dcbnl_setpfcstate,
712 .getapp = mlx4_en_dcbnl_getapp,
713 .setapp = mlx4_en_dcbnl_setapp,
472 714
473 .getdcbx = mlx4_en_dcbnl_getdcbx, 715 .getdcbx = mlx4_en_dcbnl_getdcbx,
474 .setdcbx = mlx4_en_dcbnl_setdcbx, 716 .setdcbx = mlx4_en_dcbnl_setdcbx,
475 .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn,
476 .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn,
477 .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
478}; 717};
479 718
480const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = { 719const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
481 .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc, 720 .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
482 .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc, 721 .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
483 722
723 .setstate = mlx4_en_dcbnl_set_state,
724 .getpfccfg = mlx4_en_dcbnl_get_pfc_cfg,
725 .setpfccfg = mlx4_en_dcbnl_set_pfc_cfg,
726 .setall = mlx4_en_dcbnl_set_all,
727 .getnumtcs = mlx4_en_dcbnl_getnumtcs,
728 .getpfcstate = mlx4_en_dcbnl_getpfcstate,
729 .setpfcstate = mlx4_en_dcbnl_setpfcstate,
730 .getapp = mlx4_en_dcbnl_getapp,
731 .setapp = mlx4_en_dcbnl_setapp,
732
484 .getdcbx = mlx4_en_dcbnl_getdcbx, 733 .getdcbx = mlx4_en_dcbnl_getdcbx,
485 .setdcbx = mlx4_en_dcbnl_setdcbx, 734 .setdcbx = mlx4_en_dcbnl_setdcbx,
486}; 735};
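[Editor's note] A pattern that recurs through the new CEE support: every CEE entry point first checks the negotiated DCBX capability, so CEE operations fail cleanly when only the IEEE mode is active (dcbnl set-style callbacks signal failure with a nonzero u8 return). A minimal sketch with a hypothetical private struct, not the mlx4 layout:

	static u8 foo_dcbnl_set_all(struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);

		if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
			return 1;	/* CEE not negotiated: reject */

		/* ... translate per-TC PFC config and push it to firmware ... */
		return 0;
	}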
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index fc95affaf76b..51a2e8252b82 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1107,7 +1107,7 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
1107{ 1107{
1108 struct mlx4_en_priv *priv = netdev_priv(dev); 1108 struct mlx4_en_priv *priv = netdev_priv(dev);
1109 1109
1110 return priv->rx_ring_num; 1110 return rounddown_pow_of_two(priv->rx_ring_num);
1111} 1111}
1112 1112
1113static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev) 1113static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev)
@@ -1141,19 +1141,17 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
1141 u8 *hfunc) 1141 u8 *hfunc)
1142{ 1142{
1143 struct mlx4_en_priv *priv = netdev_priv(dev); 1143 struct mlx4_en_priv *priv = netdev_priv(dev);
1144 struct mlx4_en_rss_map *rss_map = &priv->rss_map; 1144 u32 n = mlx4_en_get_rxfh_indir_size(dev);
1145 int rss_rings; 1145 u32 i, rss_rings;
1146 size_t n = priv->rx_ring_num;
1147 int err = 0; 1146 int err = 0;
1148 1147
1149 rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num; 1148 rss_rings = priv->prof->rss_rings ?: n;
1150 rss_rings = 1 << ilog2(rss_rings); 1149 rss_rings = rounddown_pow_of_two(rss_rings);
1151 1150
1152 while (n--) { 1151 for (i = 0; i < n; i++) {
1153 if (!ring_index) 1152 if (!ring_index)
1154 break; 1153 break;
1155 ring_index[n] = rss_map->qps[n % rss_rings].qpn - 1154 ring_index[i] = i % rss_rings;
1156 rss_map->base_qpn;
1157 } 1155 }
1158 if (key) 1156 if (key)
1159 memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE); 1157 memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
@@ -1166,6 +1164,7 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
1166 const u8 *key, const u8 hfunc) 1164 const u8 *key, const u8 hfunc)
1167{ 1165{
1168 struct mlx4_en_priv *priv = netdev_priv(dev); 1166 struct mlx4_en_priv *priv = netdev_priv(dev);
1167 u32 n = mlx4_en_get_rxfh_indir_size(dev);
1169 struct mlx4_en_dev *mdev = priv->mdev; 1168 struct mlx4_en_dev *mdev = priv->mdev;
1170 int port_up = 0; 1169 int port_up = 0;
1171 int err = 0; 1170 int err = 0;
@@ -1175,18 +1174,18 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
1175 /* Calculate RSS table size and make sure flows are spread evenly 1174 /* Calculate RSS table size and make sure flows are spread evenly
1176 * between rings 1175 * between rings
1177 */ 1176 */
1178 for (i = 0; i < priv->rx_ring_num; i++) { 1177 for (i = 0; i < n; i++) {
1179 if (!ring_index) 1178 if (!ring_index)
1180 continue; 1179 break;
1181 if (i > 0 && !ring_index[i] && !rss_rings) 1180 if (i > 0 && !ring_index[i] && !rss_rings)
1182 rss_rings = i; 1181 rss_rings = i;
1183 1182
1184 if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num))) 1183 if (ring_index[i] != (i % (rss_rings ?: n)))
1185 return -EINVAL; 1184 return -EINVAL;
1186 } 1185 }
1187 1186
1188 if (!rss_rings) 1187 if (!rss_rings)
1189 rss_rings = priv->rx_ring_num; 1188 rss_rings = n;
1190 1189
1191 /* RSS table size must be an order of 2 */ 1190 /* RSS table size must be an order of 2 */
1192 if (!is_power_of_2(rss_rings)) 1191 if (!is_power_of_2(rss_rings))
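[Editor's note] The ethtool RSS hunks make the reported indirection table self-consistent: its size is the largest power of two not exceeding the ring count, and each entry is simply i % rss_rings, so the same round-robin rule is used both when reporting the table and when validating user input. The core of the calculation (rounddown_pow_of_two() comes from linux/log2.h):

	u32 n = rounddown_pow_of_two(priv->rx_ring_num);	/* e.g. 10 -> 8 */
	u32 rings = rounddown_pow_of_two(priv->prof->rss_rings ?: n);
	u32 i;

	for (i = 0; i < n; i++)
		ring_index[i] = i % rings;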
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 0c0dfd6cdca6..6083775dae16 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -67,6 +67,17 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
67 offset += priv->num_tx_rings_p_up; 67 offset += priv->num_tx_rings_p_up;
68 } 68 }
69 69
70#ifdef CONFIG_MLX4_EN_DCB
71 if (!mlx4_is_slave(priv->mdev->dev)) {
72 if (up) {
73 priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
74 } else {
75 priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
76 priv->cee_params.dcb_cfg.pfc_state = false;
77 }
78 }
79#endif /* CONFIG_MLX4_EN_DCB */
80
70 return 0; 81 return 0;
71} 82}
72 83
@@ -1201,8 +1212,8 @@ static void mlx4_en_netpoll(struct net_device *dev)
1201 struct mlx4_en_cq *cq; 1212 struct mlx4_en_cq *cq;
1202 int i; 1213 int i;
1203 1214
1204 for (i = 0; i < priv->rx_ring_num; i++) { 1215 for (i = 0; i < priv->tx_ring_num; i++) {
1205 cq = priv->rx_cq[i]; 1216 cq = priv->tx_cq[i];
1206 napi_schedule(&cq->napi); 1217 napi_schedule(&cq->napi);
1207 } 1218 }
1208} 1219}
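[Editor's note] The netpoll fix is subtle: netconsole relies on ndo_poll_controller to reap TX completions so its skbs are freed and further output can make progress; scheduling the RX NAPI contexts, as the old code did, never serviced the TX CQs at all. The corrected loop, condensed from the diff:

	for (i = 0; i < priv->tx_ring_num; i++)
		napi_schedule(&priv->tx_cq[i]->napi);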
@@ -1696,10 +1707,9 @@ int mlx4_en_start_port(struct net_device *dev)
1696 /* Schedule multicast task to populate multicast list */ 1707 /* Schedule multicast task to populate multicast list */
1697 queue_work(mdev->workqueue, &priv->rx_mode_task); 1708 queue_work(mdev->workqueue, &priv->rx_mode_task);
1698 1709
1699#ifdef CONFIG_MLX4_EN_VXLAN
1700 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 1710 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
1701 vxlan_get_rx_port(dev); 1711 udp_tunnel_get_rx_info(dev);
1702#endif 1712
1703 priv->port_up = true; 1713 priv->port_up = true;
1704 netif_tx_start_all_queues(dev); 1714 netif_tx_start_all_queues(dev);
1705 netif_device_attach(dev); 1715 netif_device_attach(dev);
@@ -2359,7 +2369,6 @@ static int mlx4_en_get_phys_port_id(struct net_device *dev,
2359 return 0; 2369 return 0;
2360} 2370}
2361 2371
2362#ifdef CONFIG_MLX4_EN_VXLAN
2363static void mlx4_en_add_vxlan_offloads(struct work_struct *work) 2372static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
2364{ 2373{
2365 int ret; 2374 int ret;
@@ -2409,15 +2418,19 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2409} 2418}
2410 2419
2411static void mlx4_en_add_vxlan_port(struct net_device *dev, 2420static void mlx4_en_add_vxlan_port(struct net_device *dev,
2412 sa_family_t sa_family, __be16 port) 2421 struct udp_tunnel_info *ti)
2413{ 2422{
2414 struct mlx4_en_priv *priv = netdev_priv(dev); 2423 struct mlx4_en_priv *priv = netdev_priv(dev);
2424 __be16 port = ti->port;
2415 __be16 current_port; 2425 __be16 current_port;
2416 2426
2417 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 2427 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2428 return;
2429
2430 if (ti->sa_family != AF_INET)
2418 return; 2431 return;
2419 2432
2420 if (sa_family == AF_INET6) 2433 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2421 return; 2434 return;
2422 2435
2423 current_port = priv->vxlan_port; 2436 current_port = priv->vxlan_port;
@@ -2432,15 +2445,19 @@ static void mlx4_en_add_vxlan_port(struct net_device *dev,
2432} 2445}
2433 2446
2434static void mlx4_en_del_vxlan_port(struct net_device *dev, 2447static void mlx4_en_del_vxlan_port(struct net_device *dev,
2435 sa_family_t sa_family, __be16 port) 2448 struct udp_tunnel_info *ti)
2436{ 2449{
2437 struct mlx4_en_priv *priv = netdev_priv(dev); 2450 struct mlx4_en_priv *priv = netdev_priv(dev);
2451 __be16 port = ti->port;
2438 __be16 current_port; 2452 __be16 current_port;
2439 2453
2440 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 2454 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2441 return; 2455 return;
2442 2456
2443 if (sa_family == AF_INET6) 2457 if (ti->sa_family != AF_INET)
2458 return;
2459
2460 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2444 return; 2461 return;
2445 2462
2446 current_port = priv->vxlan_port; 2463 current_port = priv->vxlan_port;
@@ -2475,7 +2492,6 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
2475 2492
2476 return features; 2493 return features;
2477} 2494}
2478#endif
2479 2495
2480static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate) 2496static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
2481{ 2497{
@@ -2528,11 +2544,9 @@ static const struct net_device_ops mlx4_netdev_ops = {
2528 .ndo_rx_flow_steer = mlx4_en_filter_rfs, 2544 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2529#endif 2545#endif
2530 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, 2546 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
2531#ifdef CONFIG_MLX4_EN_VXLAN 2547 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
2532 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, 2548 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
2533 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
2534 .ndo_features_check = mlx4_en_features_check, 2549 .ndo_features_check = mlx4_en_features_check,
2535#endif
2536 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, 2550 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
2537}; 2551};
2538 2552
@@ -2566,11 +2580,9 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
2566 .ndo_rx_flow_steer = mlx4_en_filter_rfs, 2580 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2567#endif 2581#endif
2568 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, 2582 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
2569#ifdef CONFIG_MLX4_EN_VXLAN 2583 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
2570 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, 2584 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
2571 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
2572 .ndo_features_check = mlx4_en_features_check, 2585 .ndo_features_check = mlx4_en_features_check,
2573#endif
2574 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, 2586 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
2575}; 2587};
2576 2588
@@ -2836,6 +2848,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2836 struct mlx4_en_priv *priv; 2848 struct mlx4_en_priv *priv;
2837 int i; 2849 int i;
2838 int err; 2850 int err;
2851#ifdef CONFIG_MLX4_EN_DCB
2852 struct tc_configuration *tc;
2853#endif
2839 2854
2840 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), 2855 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
2841 MAX_TX_RINGS, MAX_RX_RINGS); 2856 MAX_TX_RINGS, MAX_RX_RINGS);
@@ -2861,10 +2876,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2861 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 2876 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
2862 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 2877 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
2863 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); 2878 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
2864#ifdef CONFIG_MLX4_EN_VXLAN
2865 INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); 2879 INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
2866 INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); 2880 INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
2867#endif
2868#ifdef CONFIG_RFS_ACCEL 2881#ifdef CONFIG_RFS_ACCEL
2869 INIT_LIST_HEAD(&priv->filters); 2882 INIT_LIST_HEAD(&priv->filters);
2870 spin_lock_init(&priv->filters_lock); 2883 spin_lock_init(&priv->filters_lock);
@@ -2904,6 +2917,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2904 priv->msg_enable = MLX4_EN_MSG_LEVEL; 2917 priv->msg_enable = MLX4_EN_MSG_LEVEL;
2905#ifdef CONFIG_MLX4_EN_DCB 2918#ifdef CONFIG_MLX4_EN_DCB
2906 if (!mlx4_is_slave(priv->mdev->dev)) { 2919 if (!mlx4_is_slave(priv->mdev->dev)) {
2920 priv->cee_params.dcbx_cap = DCB_CAP_DCBX_VER_CEE |
2921 DCB_CAP_DCBX_HOST |
2922 DCB_CAP_DCBX_VER_IEEE;
2923 priv->flags |= MLX4_EN_DCB_ENABLED;
2924 priv->cee_params.dcb_cfg.pfc_state = false;
2925
2926 for (i = 0; i < MLX4_EN_NUM_UP; i++) {
2927 tc = &priv->cee_params.dcb_cfg.tc_config[i];
2928 tc->dcb_pfc = pfc_disabled;
2929 }
2930
2907 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { 2931 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
2908 dev->dcbnl_ops = &mlx4_en_dcbnl_ops; 2932 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
2909 } else { 2933 } else {
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index e97094598b2d..f4497cf4d06d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1128,6 +1128,7 @@ int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_c
1128 port_cap->max_pkeys = 1 << (field & 0xf); 1128 port_cap->max_pkeys = 1 << (field & 0xf);
1129 MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); 1129 MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
1130 port_cap->max_vl = field & 0xf; 1130 port_cap->max_vl = field & 0xf;
1131 port_cap->max_tc_eth = field >> 4;
1131 MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET); 1132 MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
1132 port_cap->log_max_macs = field & 0xf; 1133 port_cap->log_max_macs = field & 0xf;
1133 port_cap->log_max_vlans = field >> 4; 1134 port_cap->log_max_vlans = field >> 4;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 7ea258af636a..cdbd76f10ced 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -53,6 +53,7 @@ struct mlx4_port_cap {
53 int ib_mtu; 53 int ib_mtu;
54 int max_port_width; 54 int max_port_width;
55 int max_vl; 55 int max_vl;
56 int max_tc_eth;
56 int max_gids; 57 int max_gids;
57 int max_pkeys; 58 int max_pkeys;
58 u64 def_mac; 59 u64 def_mac;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 546fab0ecc3b..b673a5fc6b6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -292,6 +292,7 @@ static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
292 dev->caps.pkey_table_len[port] = port_cap->max_pkeys; 292 dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
293 dev->caps.port_width_cap[port] = port_cap->max_port_width; 293 dev->caps.port_width_cap[port] = port_cap->max_port_width;
294 dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu; 294 dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu;
295 dev->caps.max_tc_eth = port_cap->max_tc_eth;
295 dev->caps.def_mac[port] = port_cap->def_mac; 296 dev->caps.def_mac[port] = port_cap->def_mac;
296 dev->caps.supported_type[port] = port_cap->supported_port_types; 297 dev->caps.supported_type[port] = port_cap->supported_port_types;
297 dev->caps.suggested_type[port] = port_cap->suggested_type; 298 dev->caps.suggested_type[port] = port_cap->suggested_type;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 467d47ed2c39..d39bf594abe4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -448,6 +448,27 @@ struct mlx4_en_frag_info {
448 448
449#define MLX4_EN_TC_ETS 7 449#define MLX4_EN_TC_ETS 7
450 450
451enum dcb_pfc_type {
452 pfc_disabled = 0,
453 pfc_enabled_full,
454 pfc_enabled_tx,
455 pfc_enabled_rx
456};
457
458struct tc_configuration {
459 enum dcb_pfc_type dcb_pfc;
460};
461
462struct mlx4_en_cee_config {
463 bool pfc_state;
464 struct tc_configuration tc_config[MLX4_EN_NUM_UP];
465};
466
467struct mlx4_en_cee_params {
468 u8 dcbx_cap;
469 struct mlx4_en_cee_config dcb_cfg;
470};
471
451#endif 472#endif
452 473
453struct ethtool_flow_id { 474struct ethtool_flow_id {
@@ -467,6 +488,9 @@ enum {
467 MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3), 488 MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3),
468 MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4), 489 MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4),
469 MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP = (1 << 5), 490 MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP = (1 << 5),
491#ifdef CONFIG_MLX4_EN_DCB
492 MLX4_EN_FLAG_DCB_ENABLED = (1 << 6),
493#endif
470}; 494};
471 495
472#define PORT_BEACON_MAX_LIMIT (65535) 496#define PORT_BEACON_MAX_LIMIT (65535)
@@ -545,10 +569,8 @@ struct mlx4_en_priv {
545 struct work_struct linkstate_task; 569 struct work_struct linkstate_task;
546 struct delayed_work stats_task; 570 struct delayed_work stats_task;
547 struct delayed_work service_task; 571 struct delayed_work service_task;
548#ifdef CONFIG_MLX4_EN_VXLAN
549 struct work_struct vxlan_add_task; 572 struct work_struct vxlan_add_task;
550 struct work_struct vxlan_del_task; 573 struct work_struct vxlan_del_task;
551#endif
552 struct mlx4_en_perf_stats pstats; 574 struct mlx4_en_perf_stats pstats;
553 struct mlx4_en_pkt_stats pkstats; 575 struct mlx4_en_pkt_stats pkstats;
554 struct mlx4_en_counter_stats pf_stats; 576 struct mlx4_en_counter_stats pf_stats;
@@ -570,9 +592,11 @@ struct mlx4_en_priv {
570 u32 counter_index; 592 u32 counter_index;
571 593
572#ifdef CONFIG_MLX4_EN_DCB 594#ifdef CONFIG_MLX4_EN_DCB
595#define MLX4_EN_DCB_ENABLED 0x3
573 struct ieee_ets ets; 596 struct ieee_ets ets;
574 u16 maxrate[IEEE_8021QAZ_MAX_TCS]; 597 u16 maxrate[IEEE_8021QAZ_MAX_TCS];
575 enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS]; 598 enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS];
599 struct mlx4_en_cee_params cee_params;
576#endif 600#endif
577#ifdef CONFIG_RFS_ACCEL 601#ifdef CONFIG_RFS_ACCEL
578 spinlock_t filters_lock; 602 spinlock_t filters_lock;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 087b23b320cb..3d2095e5c61c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -52,6 +52,7 @@
52 52
53#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2 53#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2
54#define MLX4_IGNORE_FCS_MASK 0x1 54#define MLX4_IGNORE_FCS_MASK 0x1
 55#define MLX4_TC_MAX_NUMBER 8
55 56
56void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) 57void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
57{ 58{
@@ -2015,3 +2016,14 @@ out:
2015 return ret; 2016 return ret;
2016} 2017}
2017EXPORT_SYMBOL(mlx4_get_module_info); 2018EXPORT_SYMBOL(mlx4_get_module_info);
2019
2020int mlx4_max_tc(struct mlx4_dev *dev)
2021{
2022 u8 num_tc = dev->caps.max_tc_eth;
2023
2024 if (!num_tc)
 2025 num_tc = MLX4_TC_MAX_NUMBER;
2026
2027 return num_tc;
2028}
2029EXPORT_SYMBOL(mlx4_max_tc);
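mlx4_max_tc() above falls back to the 8-TC default whenever firmware reports a zero max_tc_eth (older firmware). A standalone sketch of the fallback plus a hypothetical caller that clamps a requested TC count; clamp_tc_example() is not a driver function:

#include <stdio.h>

#define MLX4_TC_MAX_NUMBER 8  /* default when firmware reports 0, as above */

static unsigned int mlx4_max_tc_model(unsigned char max_tc_eth)
{
	return max_tc_eth ? max_tc_eth : MLX4_TC_MAX_NUMBER;
}

static unsigned int clamp_tc_example(unsigned char max_tc_eth, unsigned int want)
{
	unsigned int max = mlx4_max_tc_model(max_tc_eth);

	return want > max ? max : want;
}

int main(void)
{
	printf("%u\n", clamp_tc_example(0, 12)); /* old FW: clamped to 8 */
	printf("%u\n", clamp_tc_example(4, 12)); /* FW cap 4: clamped to 4 */
	return 0;
}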
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 9ea7b583096a..c4f450f1c658 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -1,11 +1,13 @@
1obj-$(CONFIG_MLX5_CORE) += mlx5_core.o 1obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
2 2
3mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ 3mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
4 health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ 4 health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
5 mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o fs_counters.o 5 mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
6 fs_counters.o rl.o
6 7
7mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \ 8mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
8 en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \ 9 en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \
9 en_txrx.o en_clock.o vxlan.o en_tc.o en_arfs.o 10 en_rx_am.o en_txrx.o en_clock.o vxlan.o en_tc.o \
11 en_arfs.o
10 12
11mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o 13mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index baa991a23475..b97511bf4c7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -79,6 +79,7 @@
79 79
80#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) 80#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
81#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 81#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
82#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
82#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 83#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
83#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10 84#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
84#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20 85#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
@@ -88,6 +89,7 @@
88#define MLX5E_LOG_INDIR_RQT_SIZE 0x7 89#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
89#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE) 90#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
90#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1) 91#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
92#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
91#define MLX5E_TX_CQ_POLL_BUDGET 128 93#define MLX5E_TX_CQ_POLL_BUDGET 128
92#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ 94#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
93#define MLX5E_SQ_BF_BUDGET 16 95#define MLX5E_SQ_BF_BUDGET 16
@@ -143,11 +145,32 @@ struct mlx5e_umr_wqe {
143 struct mlx5_wqe_data_seg data; 145 struct mlx5_wqe_data_seg data;
144}; 146};
145 147
148static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
149 "rx_cqe_moder",
150};
151
152enum mlx5e_priv_flag {
153 MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
154};
155
156#define MLX5E_SET_PRIV_FLAG(priv, pflag, enable) \
157 do { \
158 if (enable) \
159 priv->pflags |= pflag; \
160 else \
161 priv->pflags &= ~pflag; \
162 } while (0)
163
146#ifdef CONFIG_MLX5_CORE_EN_DCB 164#ifdef CONFIG_MLX5_CORE_EN_DCB
147#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ 165#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
148#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */ 166#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */
149#endif 167#endif
150 168
169struct mlx5e_cq_moder {
170 u16 usec;
171 u16 pkts;
172};
173
151struct mlx5e_params { 174struct mlx5e_params {
152 u8 log_sq_size; 175 u8 log_sq_size;
153 u8 rq_wq_type; 176 u8 rq_wq_type;
@@ -156,12 +179,11 @@ struct mlx5e_params {
156 u8 log_rq_size; 179 u8 log_rq_size;
157 u16 num_channels; 180 u16 num_channels;
158 u8 num_tc; 181 u8 num_tc;
182 u8 rx_cq_period_mode;
159 bool rx_cqe_compress_admin; 183 bool rx_cqe_compress_admin;
160 bool rx_cqe_compress; 184 bool rx_cqe_compress;
161 u16 rx_cq_moderation_usec; 185 struct mlx5e_cq_moder rx_cq_moderation;
162 u16 rx_cq_moderation_pkts; 186 struct mlx5e_cq_moder tx_cq_moderation;
163 u16 tx_cq_moderation_usec;
164 u16 tx_cq_moderation_pkts;
165 u16 min_rx_wqes; 187 u16 min_rx_wqes;
166 bool lro_en; 188 bool lro_en;
167 u32 lro_wqe_sz; 189 u32 lro_wqe_sz;
@@ -173,6 +195,7 @@ struct mlx5e_params {
173#ifdef CONFIG_MLX5_CORE_EN_DCB 195#ifdef CONFIG_MLX5_CORE_EN_DCB
174 struct ieee_ets ets; 196 struct ieee_ets ets;
175#endif 197#endif
198 bool rx_am_enabled;
176}; 199};
177 200
178struct mlx5e_tstamp { 201struct mlx5e_tstamp {
@@ -191,6 +214,7 @@ struct mlx5e_tstamp {
191enum { 214enum {
192 MLX5E_RQ_STATE_POST_WQES_ENABLE, 215 MLX5E_RQ_STATE_POST_WQES_ENABLE,
193 MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, 216 MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
217 MLX5E_RQ_STATE_AM,
194}; 218};
195 219
196struct mlx5e_cq { 220struct mlx5e_cq {
@@ -198,6 +222,7 @@ struct mlx5e_cq {
198 struct mlx5_cqwq wq; 222 struct mlx5_cqwq wq;
199 223
200 /* data path - accessed per napi poll */ 224 /* data path - accessed per napi poll */
225 u16 event_ctr;
201 struct napi_struct *napi; 226 struct napi_struct *napi;
202 struct mlx5_core_cq mcq; 227 struct mlx5_core_cq mcq;
203 struct mlx5e_channel *channel; 228 struct mlx5e_channel *channel;
@@ -225,6 +250,30 @@ struct mlx5e_dma_info {
225 dma_addr_t addr; 250 dma_addr_t addr;
226}; 251};
227 252
253struct mlx5e_rx_am_stats {
254 int ppms; /* packets per msec */
255 int epms; /* events per msec */
256};
257
258struct mlx5e_rx_am_sample {
259 ktime_t time;
260 unsigned int pkt_ctr;
261 u16 event_ctr;
262};
263
264struct mlx5e_rx_am { /* Adaptive Moderation */
265 u8 state;
266 struct mlx5e_rx_am_stats prev_stats;
267 struct mlx5e_rx_am_sample start_sample;
268 struct work_struct work;
269 u8 profile_ix;
270 u8 mode;
271 u8 tune_state;
272 u8 steps_right;
273 u8 steps_left;
274 u8 tired;
275};
276
228struct mlx5e_rq { 277struct mlx5e_rq {
229 /* data path */ 278 /* data path */
230 struct mlx5_wq_ll wq; 279 struct mlx5_wq_ll wq;
@@ -245,6 +294,8 @@ struct mlx5e_rq {
245 unsigned long state; 294 unsigned long state;
246 int ix; 295 int ix;
247 296
297 struct mlx5e_rx_am am; /* Adaptive Moderation */
298
248 /* control */ 299 /* control */
249 struct mlx5_wq_ctrl wq_ctrl; 300 struct mlx5_wq_ctrl wq_ctrl;
250 u8 wq_type; 301 u8 wq_type;
@@ -354,6 +405,7 @@ struct mlx5e_sq {
354 struct mlx5e_channel *channel; 405 struct mlx5e_channel *channel;
355 int tc; 406 int tc;
356 struct mlx5e_ico_wqe_info *ico_wqe_info; 407 struct mlx5e_ico_wqe_info *ico_wqe_info;
408 u32 rate_limit;
357} ____cacheline_aligned_in_smp; 409} ____cacheline_aligned_in_smp;
358 410
359static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n) 411static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
@@ -530,6 +582,7 @@ struct mlx5e_priv {
530 u32 indir_rqtn; 582 u32 indir_rqtn;
531 u32 indir_tirn[MLX5E_NUM_INDIR_TIRS]; 583 u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
532 struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; 584 struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
585 u32 tx_rates[MLX5E_MAX_NUM_SQS];
533 586
534 struct mlx5e_flow_steering fs; 587 struct mlx5e_flow_steering fs;
535 struct mlx5e_vxlan_db vxlan; 588 struct mlx5e_vxlan_db vxlan;
@@ -540,6 +593,7 @@ struct mlx5e_priv {
540 struct work_struct set_rx_mode_work; 593 struct work_struct set_rx_mode_work;
541 struct delayed_work update_stats_work; 594 struct delayed_work update_stats_work;
542 595
596 u32 pflags;
543 struct mlx5_core_dev *mdev; 597 struct mlx5_core_dev *mdev;
544 struct net_device *netdev; 598 struct net_device *netdev;
545 struct mlx5e_stats stats; 599 struct mlx5e_stats stats;
@@ -562,6 +616,7 @@ enum mlx5e_link_mode {
562 MLX5E_10GBASE_ER = 14, 616 MLX5E_10GBASE_ER = 14,
563 MLX5E_40GBASE_SR4 = 15, 617 MLX5E_40GBASE_SR4 = 15,
564 MLX5E_40GBASE_LR4 = 16, 618 MLX5E_40GBASE_LR4 = 16,
619 MLX5E_50GBASE_SR2 = 18,
565 MLX5E_100GBASE_CR4 = 20, 620 MLX5E_100GBASE_CR4 = 20,
566 MLX5E_100GBASE_SR4 = 21, 621 MLX5E_100GBASE_SR4 = 21,
567 MLX5E_100GBASE_KR4 = 22, 622 MLX5E_100GBASE_KR4 = 22,
@@ -579,6 +634,9 @@ enum mlx5e_link_mode {
579 634
580#define MLX5E_PROT_MASK(link_mode) (1 << link_mode) 635#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
581 636
637
638void mlx5e_build_ptys2ethtool_map(void);
639
582void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw); 640void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
583u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, 641u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
584 void *accel_priv, select_queue_fallback_t fallback); 642 void *accel_priv, select_queue_fallback_t fallback);
@@ -612,6 +670,10 @@ void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
612 struct mlx5e_mpw_info *wi); 670 struct mlx5e_mpw_info *wi);
613struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); 671struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
614 672
673void mlx5e_rx_am(struct mlx5e_rq *rq);
674void mlx5e_rx_am_work(struct work_struct *work);
675struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);
676
615void mlx5e_update_stats(struct mlx5e_priv *priv); 677void mlx5e_update_stats(struct mlx5e_priv *priv);
616 678
617int mlx5e_create_flow_steering(struct mlx5e_priv *priv); 679int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
@@ -647,6 +709,9 @@ void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
647 int num_channels); 709 int num_channels);
648int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); 710int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
649 711
712void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
713 u8 cq_period_mode);
714
650static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, 715static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
651 struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz) 716 struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
652{ 717{
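The new pflags word in mlx5e_priv, together with the MLX5E_SET_PRIV_FLAG() helper in the en.h hunk above, is a plain set/clear bitmask keyed by the mlx5e_priv_flags string table. A standalone C model of the round-trip:

#include <stdbool.h>
#include <stdio.h>

#define PFLAG_RX_CQE_BASED_MODER (1u << 0)

/* Same shape as the MLX5E_SET_PRIV_FLAG() macro shown above. */
static void set_pflag(unsigned int *pflags, unsigned int flag, bool enable)
{
	if (enable)
		*pflags |= flag;
	else
		*pflags &= ~flag;
}

int main(void)
{
	unsigned int pflags = 0;

	set_pflag(&pflags, PFLAG_RX_CQE_BASED_MODER, true);
	printf("enabled: %d\n", !!(pflags & PFLAG_RX_CQE_BASED_MODER)); /* 1 */
	set_pflag(&pflags, PFLAG_RX_CQE_BASED_MODER, false);
	printf("enabled: %d\n", !!(pflags & PFLAG_RX_CQE_BASED_MODER)); /* 0 */
	return 0;
}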
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index b2db180ae2a5..e6883132b555 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -191,7 +191,6 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
191{ 191{
192 struct mlx5e_priv *priv = netdev_priv(dev); 192 struct mlx5e_priv *priv = netdev_priv(dev);
193 struct mlx5_core_dev *mdev = priv->mdev; 193 struct mlx5_core_dev *mdev = priv->mdev;
194 enum mlx5_port_status ps;
195 u8 curr_pfc_en; 194 u8 curr_pfc_en;
196 int ret; 195 int ret;
197 196
@@ -200,14 +199,8 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
200 if (pfc->pfc_en == curr_pfc_en) 199 if (pfc->pfc_en == curr_pfc_en)
201 return 0; 200 return 0;
202 201
203 mlx5_query_port_admin_status(mdev, &ps);
204 if (ps == MLX5_PORT_UP)
205 mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
206
207 ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en); 202 ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
208 203 mlx5_toggle_port_link(mdev);
209 if (ps == MLX5_PORT_UP)
210 mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
211 204
212 return ret; 205 return ret;
213} 206}
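mlx5e_dcbnl_ieee_setpfc() now delegates the port bounce to mlx5_toggle_port_link(), added elsewhere in this series. Reconstructed from the sequence removed above (a sketch only, not verified against the actual helper), it presumably looks roughly like:

/* Sketch: bounce the port's admin state if it is currently up, using
 * only the calls visible in the removed lines above. */
static void mlx5_toggle_port_link_sketch(struct mlx5_core_dev *mdev)
{
	enum mlx5_port_status ps;

	mlx5_query_port_admin_status(mdev, &ps);
	if (ps == MLX5_PORT_UP) {
		mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
		mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
	}
}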
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index e667a870e0c2..b29684d9fcd6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -48,123 +48,85 @@ static void mlx5e_get_drvinfo(struct net_device *dev,
48 sizeof(drvinfo->bus_info)); 48 sizeof(drvinfo->bus_info));
49} 49}
50 50
51static const struct { 51struct ptys2ethtool_config {
52 u32 supported; 52 __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
53 u32 advertised; 53 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
54 u32 speed; 54 u32 speed;
55} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = {
56 [MLX5E_1000BASE_CX_SGMII] = {
57 .supported = SUPPORTED_1000baseKX_Full,
58 .advertised = ADVERTISED_1000baseKX_Full,
59 .speed = 1000,
60 },
61 [MLX5E_1000BASE_KX] = {
62 .supported = SUPPORTED_1000baseKX_Full,
63 .advertised = ADVERTISED_1000baseKX_Full,
64 .speed = 1000,
65 },
66 [MLX5E_10GBASE_CX4] = {
67 .supported = SUPPORTED_10000baseKX4_Full,
68 .advertised = ADVERTISED_10000baseKX4_Full,
69 .speed = 10000,
70 },
71 [MLX5E_10GBASE_KX4] = {
72 .supported = SUPPORTED_10000baseKX4_Full,
73 .advertised = ADVERTISED_10000baseKX4_Full,
74 .speed = 10000,
75 },
76 [MLX5E_10GBASE_KR] = {
77 .supported = SUPPORTED_10000baseKR_Full,
78 .advertised = ADVERTISED_10000baseKR_Full,
79 .speed = 10000,
80 },
81 [MLX5E_20GBASE_KR2] = {
82 .supported = SUPPORTED_20000baseKR2_Full,
83 .advertised = ADVERTISED_20000baseKR2_Full,
84 .speed = 20000,
85 },
86 [MLX5E_40GBASE_CR4] = {
87 .supported = SUPPORTED_40000baseCR4_Full,
88 .advertised = ADVERTISED_40000baseCR4_Full,
89 .speed = 40000,
90 },
91 [MLX5E_40GBASE_KR4] = {
92 .supported = SUPPORTED_40000baseKR4_Full,
93 .advertised = ADVERTISED_40000baseKR4_Full,
94 .speed = 40000,
95 },
96 [MLX5E_56GBASE_R4] = {
97 .supported = SUPPORTED_56000baseKR4_Full,
98 .advertised = ADVERTISED_56000baseKR4_Full,
99 .speed = 56000,
100 },
101 [MLX5E_10GBASE_CR] = {
102 .supported = SUPPORTED_10000baseKR_Full,
103 .advertised = ADVERTISED_10000baseKR_Full,
104 .speed = 10000,
105 },
106 [MLX5E_10GBASE_SR] = {
107 .supported = SUPPORTED_10000baseKR_Full,
108 .advertised = ADVERTISED_10000baseKR_Full,
109 .speed = 10000,
110 },
111 [MLX5E_10GBASE_ER] = {
112 .supported = SUPPORTED_10000baseKR_Full,
113 .advertised = ADVERTISED_10000baseKR_Full,
114 .speed = 10000,
115 },
116 [MLX5E_40GBASE_SR4] = {
117 .supported = SUPPORTED_40000baseSR4_Full,
118 .advertised = ADVERTISED_40000baseSR4_Full,
119 .speed = 40000,
120 },
121 [MLX5E_40GBASE_LR4] = {
122 .supported = SUPPORTED_40000baseLR4_Full,
123 .advertised = ADVERTISED_40000baseLR4_Full,
124 .speed = 40000,
125 },
126 [MLX5E_100GBASE_CR4] = {
127 .speed = 100000,
128 },
129 [MLX5E_100GBASE_SR4] = {
130 .speed = 100000,
131 },
132 [MLX5E_100GBASE_KR4] = {
133 .speed = 100000,
134 },
135 [MLX5E_100GBASE_LR4] = {
136 .speed = 100000,
137 },
138 [MLX5E_100BASE_TX] = {
139 .speed = 100,
140 },
141 [MLX5E_1000BASE_T] = {
142 .supported = SUPPORTED_1000baseT_Full,
143 .advertised = ADVERTISED_1000baseT_Full,
144 .speed = 1000,
145 },
146 [MLX5E_10GBASE_T] = {
147 .supported = SUPPORTED_10000baseT_Full,
148 .advertised = ADVERTISED_10000baseT_Full,
149 .speed = 1000,
150 },
151 [MLX5E_25GBASE_CR] = {
152 .speed = 25000,
153 },
154 [MLX5E_25GBASE_KR] = {
155 .speed = 25000,
156 },
157 [MLX5E_25GBASE_SR] = {
158 .speed = 25000,
159 },
160 [MLX5E_50GBASE_CR2] = {
161 .speed = 50000,
162 },
163 [MLX5E_50GBASE_KR2] = {
164 .speed = 50000,
165 },
166}; 55};
167 56
57static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER];
58
59#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...) \
60 ({ \
61 struct ptys2ethtool_config *cfg; \
62 const unsigned int modes[] = { __VA_ARGS__ }; \
63 unsigned int i; \
64 cfg = &ptys2ethtool_table[reg_]; \
65 cfg->speed = speed_; \
66 bitmap_zero(cfg->supported, \
67 __ETHTOOL_LINK_MODE_MASK_NBITS); \
68 bitmap_zero(cfg->advertised, \
69 __ETHTOOL_LINK_MODE_MASK_NBITS); \
70 for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \
71 __set_bit(modes[i], cfg->supported); \
72 __set_bit(modes[i], cfg->advertised); \
73 } \
74 })
75
76void mlx5e_build_ptys2ethtool_map(void)
77{
78 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, SPEED_1000,
79 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
80 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, SPEED_1000,
81 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
82 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, SPEED_10000,
83 ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
84 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, SPEED_10000,
85 ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
86 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, SPEED_10000,
87 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
88 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, SPEED_20000,
89 ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
90 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, SPEED_40000,
91 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT);
92 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, SPEED_40000,
93 ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT);
94 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, SPEED_56000,
95 ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT);
96 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, SPEED_10000,
97 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
98 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, SPEED_10000,
99 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
100 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, SPEED_10000,
101 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
102 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, SPEED_40000,
103 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT);
104 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, SPEED_40000,
105 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT);
106 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, SPEED_50000,
107 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT);
108 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, SPEED_100000,
109 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT);
110 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, SPEED_100000,
111 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT);
112 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, SPEED_100000,
113 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT);
114 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, SPEED_100000,
115 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
116 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, SPEED_10000,
117 ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
118 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, SPEED_25000,
119 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT);
120 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, SPEED_25000,
121 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT);
122 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, SPEED_25000,
123 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT);
124 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, SPEED_50000,
125 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT);
126 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, SPEED_50000,
127 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
128}
129
168static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv) 130static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
169{ 131{
170 struct mlx5_core_dev *mdev = priv->mdev; 132 struct mlx5_core_dev *mdev = priv->mdev;
@@ -200,6 +162,8 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
200 MLX5E_NUM_RQ_STATS(priv) + 162 MLX5E_NUM_RQ_STATS(priv) +
201 MLX5E_NUM_SQ_STATS(priv) + 163 MLX5E_NUM_SQ_STATS(priv) +
202 MLX5E_NUM_PFC_COUNTERS(priv); 164 MLX5E_NUM_PFC_COUNTERS(priv);
165 case ETH_SS_PRIV_FLAGS:
166 return ARRAY_SIZE(mlx5e_priv_flags);
203 /* fallthrough */ 167 /* fallthrough */
204 default: 168 default:
205 return -EOPNOTSUPP; 169 return -EOPNOTSUPP;
@@ -272,9 +236,12 @@ static void mlx5e_get_strings(struct net_device *dev,
272 uint32_t stringset, uint8_t *data) 236 uint32_t stringset, uint8_t *data)
273{ 237{
274 struct mlx5e_priv *priv = netdev_priv(dev); 238 struct mlx5e_priv *priv = netdev_priv(dev);
239 int i;
275 240
276 switch (stringset) { 241 switch (stringset) {
277 case ETH_SS_PRIV_FLAGS: 242 case ETH_SS_PRIV_FLAGS:
243 for (i = 0; i < ARRAY_SIZE(mlx5e_priv_flags); i++)
244 strcpy(data + i * ETH_GSTRING_LEN, mlx5e_priv_flags[i]);
278 break; 245 break;
279 246
280 case ETH_SS_TEST: 247 case ETH_SS_TEST:
@@ -519,10 +486,11 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
519 if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) 486 if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
520 return -ENOTSUPP; 487 return -ENOTSUPP;
521 488
522 coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec; 489 coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec;
523 coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts; 490 coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
524 coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec; 491 coal->tx_coalesce_usecs = priv->params.tx_cq_moderation.usec;
525 coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts; 492 coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation.pkts;
493 coal->use_adaptive_rx_coalesce = priv->params.rx_am_enabled;
526 494
527 return 0; 495 return 0;
528} 496}
@@ -533,6 +501,10 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
533 struct mlx5e_priv *priv = netdev_priv(netdev); 501 struct mlx5e_priv *priv = netdev_priv(netdev);
534 struct mlx5_core_dev *mdev = priv->mdev; 502 struct mlx5_core_dev *mdev = priv->mdev;
535 struct mlx5e_channel *c; 503 struct mlx5e_channel *c;
504 bool restart =
505 !!coal->use_adaptive_rx_coalesce != priv->params.rx_am_enabled;
506 bool was_opened;
507 int err = 0;
536 int tc; 508 int tc;
537 int i; 509 int i;
538 510
@@ -540,12 +512,19 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
540 return -ENOTSUPP; 512 return -ENOTSUPP;
541 513
542 mutex_lock(&priv->state_lock); 514 mutex_lock(&priv->state_lock);
543 priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
544 priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
545 priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
546 priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
547 515
548 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) 516 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
517 if (was_opened && restart) {
518 mlx5e_close_locked(netdev);
519 priv->params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
520 }
521
522 priv->params.tx_cq_moderation.usec = coal->tx_coalesce_usecs;
523 priv->params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
524 priv->params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
525 priv->params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
526
527 if (!was_opened || restart)
549 goto out; 528 goto out;
550 529
551 for (i = 0; i < priv->params.num_channels; ++i) { 530 for (i = 0; i < priv->params.num_channels; ++i) {
@@ -564,35 +543,37 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
564 } 543 }
565 544
566out: 545out:
546 if (was_opened && restart)
547 err = mlx5e_open_locked(netdev);
548
567 mutex_unlock(&priv->state_lock); 549 mutex_unlock(&priv->state_lock);
568 return 0; 550 return err;
569} 551}
570 552
571static u32 ptys2ethtool_supported_link(u32 eth_proto_cap) 553static void ptys2ethtool_supported_link(unsigned long *supported_modes,
554 u32 eth_proto_cap)
572{ 555{
573 int i; 556 int proto;
574 u32 supported_modes = 0;
575 557
576 for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { 558 for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER)
577 if (eth_proto_cap & MLX5E_PROT_MASK(i)) 559 bitmap_or(supported_modes, supported_modes,
578 supported_modes |= ptys2ethtool_table[i].supported; 560 ptys2ethtool_table[proto].supported,
579 } 561 __ETHTOOL_LINK_MODE_MASK_NBITS);
580 return supported_modes;
581} 562}
582 563
583static u32 ptys2ethtool_adver_link(u32 eth_proto_cap) 564static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
565 u32 eth_proto_cap)
584{ 566{
585 int i; 567 int proto;
586 u32 advertising_modes = 0;
587 568
588 for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { 569 for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER)
589 if (eth_proto_cap & MLX5E_PROT_MASK(i)) 570 bitmap_or(advertising_modes, advertising_modes,
590 advertising_modes |= ptys2ethtool_table[i].advertised; 571 ptys2ethtool_table[proto].advertised,
591 } 572 __ETHTOOL_LINK_MODE_MASK_NBITS);
592 return advertising_modes;
593} 573}
594 574
595static u32 ptys2ethtool_supported_port(u32 eth_proto_cap) 575static void ptys2ethtool_supported_port(struct ethtool_link_ksettings *link_ksettings,
576 u32 eth_proto_cap)
596{ 577{
597 if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR) 578 if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
598 | MLX5E_PROT_MASK(MLX5E_10GBASE_SR) 579 | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
@@ -600,7 +581,7 @@ static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
600 | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4) 581 | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
601 | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4) 582 | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
602 | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) { 583 | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
603 return SUPPORTED_FIBRE; 584 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, FIBRE);
604 } 585 }
605 586
606 if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4) 587 if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
@@ -608,9 +589,8 @@ static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
608 | MLX5E_PROT_MASK(MLX5E_10GBASE_KR) 589 | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
609 | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4) 590 | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
610 | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) { 591 | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
611 return SUPPORTED_Backplane; 592 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Backplane);
612 } 593 }
613 return 0;
614} 594}
615 595
616int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) 596int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
@@ -634,7 +614,7 @@ int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
634 614
635static void get_speed_duplex(struct net_device *netdev, 615static void get_speed_duplex(struct net_device *netdev,
636 u32 eth_proto_oper, 616 u32 eth_proto_oper,
637 struct ethtool_cmd *cmd) 617 struct ethtool_link_ksettings *link_ksettings)
638{ 618{
639 int i; 619 int i;
640 u32 speed = SPEED_UNKNOWN; 620 u32 speed = SPEED_UNKNOWN;
@@ -651,23 +631,32 @@ static void get_speed_duplex(struct net_device *netdev,
651 } 631 }
652 } 632 }
653out: 633out:
654 ethtool_cmd_speed_set(cmd, speed); 634 link_ksettings->base.speed = speed;
655 cmd->duplex = duplex; 635 link_ksettings->base.duplex = duplex;
656} 636}
657 637
658static void get_supported(u32 eth_proto_cap, u32 *supported) 638static void get_supported(u32 eth_proto_cap,
639 struct ethtool_link_ksettings *link_ksettings)
659{ 640{
660 *supported |= ptys2ethtool_supported_port(eth_proto_cap); 641 unsigned long *supported = link_ksettings->link_modes.supported;
661 *supported |= ptys2ethtool_supported_link(eth_proto_cap); 642
662 *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; 643 ptys2ethtool_supported_port(link_ksettings, eth_proto_cap);
644 ptys2ethtool_supported_link(supported, eth_proto_cap);
645 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
646 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause);
663} 647}
664 648
665static void get_advertising(u32 eth_proto_cap, u8 tx_pause, 649static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
666 u8 rx_pause, u32 *advertising) 650 u8 rx_pause,
651 struct ethtool_link_ksettings *link_ksettings)
667{ 652{
668 *advertising |= ptys2ethtool_adver_link(eth_proto_cap); 653 unsigned long *advertising = link_ksettings->link_modes.advertising;
669 *advertising |= tx_pause ? ADVERTISED_Pause : 0; 654
670 *advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0; 655 ptys2ethtool_adver_link(advertising, eth_proto_cap);
656 if (tx_pause)
657 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
658 if (tx_pause ^ rx_pause)
659 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause);
671} 660}
672 661
673static u8 get_connector_port(u32 eth_proto) 662static u8 get_connector_port(u32 eth_proto)
@@ -695,13 +684,16 @@ static u8 get_connector_port(u32 eth_proto)
695 return PORT_OTHER; 684 return PORT_OTHER;
696} 685}
697 686
698static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising) 687static void get_lp_advertising(u32 eth_proto_lp,
688 struct ethtool_link_ksettings *link_ksettings)
699{ 689{
700 *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp); 690 unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
691
692 ptys2ethtool_adver_link(lp_advertising, eth_proto_lp);
701} 693}
702 694
703static int mlx5e_get_settings(struct net_device *netdev, 695static int mlx5e_get_link_ksettings(struct net_device *netdev,
704 struct ethtool_cmd *cmd) 696 struct ethtool_link_ksettings *link_ksettings)
705{ 697{
706 struct mlx5e_priv *priv = netdev_priv(netdev); 698 struct mlx5e_priv *priv = netdev_priv(netdev);
707 struct mlx5_core_dev *mdev = priv->mdev; 699 struct mlx5_core_dev *mdev = priv->mdev;
@@ -710,6 +702,8 @@ static int mlx5e_get_settings(struct net_device *netdev,
710 u32 eth_proto_admin; 702 u32 eth_proto_admin;
711 u32 eth_proto_lp; 703 u32 eth_proto_lp;
712 u32 eth_proto_oper; 704 u32 eth_proto_oper;
705 u8 an_disable_admin;
706 u8 an_status;
713 int err; 707 int err;
714 708
715 err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); 709 err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
@@ -720,35 +714,49 @@ static int mlx5e_get_settings(struct net_device *netdev,
720 goto err_query_ptys; 714 goto err_query_ptys;
721 } 715 }
722 716
723 eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); 717 eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
724 eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); 718 eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
725 eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); 719 eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
726 eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); 720 eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
721 an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
722 an_status = MLX5_GET(ptys_reg, out, an_status);
727 723
728 cmd->supported = 0; 724 ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
729 cmd->advertising = 0; 725 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
730 726
731 get_supported(eth_proto_cap, &cmd->supported); 727 get_supported(eth_proto_cap, link_ksettings);
732 get_advertising(eth_proto_admin, 0, 0, &cmd->advertising); 728 get_advertising(eth_proto_admin, 0, 0, link_ksettings);
733 get_speed_duplex(netdev, eth_proto_oper, cmd); 729 get_speed_duplex(netdev, eth_proto_oper, link_ksettings);
734 730
735 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; 731 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
736 732
737 cmd->port = get_connector_port(eth_proto_oper); 733 link_ksettings->base.port = get_connector_port(eth_proto_oper);
738 get_lp_advertising(eth_proto_lp, &cmd->lp_advertising); 734 get_lp_advertising(eth_proto_lp, link_ksettings);
735
736 if (an_status == MLX5_AN_COMPLETE)
737 ethtool_link_ksettings_add_link_mode(link_ksettings,
738 lp_advertising, Autoneg);
739 739
740 cmd->transceiver = XCVR_INTERNAL; 740 link_ksettings->base.autoneg = an_disable_admin ? AUTONEG_DISABLE :
741 AUTONEG_ENABLE;
742 ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
743 Autoneg);
744 if (!an_disable_admin)
745 ethtool_link_ksettings_add_link_mode(link_ksettings,
746 advertising, Autoneg);
741 747
742err_query_ptys: 748err_query_ptys:
743 return err; 749 return err;
744} 750}
745 751
746static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes) 752static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
747{ 753{
748 u32 i, ptys_modes = 0; 754 u32 i, ptys_modes = 0;
749 755
750 for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { 756 for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
751 if (ptys2ethtool_table[i].advertised & link_modes) 757 if (bitmap_intersects(ptys2ethtool_table[i].advertised,
758 link_modes,
759 __ETHTOOL_LINK_MODE_MASK_NBITS))
752 ptys_modes |= MLX5E_PROT_MASK(i); 760 ptys_modes |= MLX5E_PROT_MASK(i);
753 } 761 }
754 762
@@ -767,21 +775,25 @@ static u32 mlx5e_ethtool2ptys_speed_link(u32 speed)
767 return speed_links; 775 return speed_links;
768} 776}
769 777
770static int mlx5e_set_settings(struct net_device *netdev, 778static int mlx5e_set_link_ksettings(struct net_device *netdev,
771 struct ethtool_cmd *cmd) 779 const struct ethtool_link_ksettings *link_ksettings)
772{ 780{
773 struct mlx5e_priv *priv = netdev_priv(netdev); 781 struct mlx5e_priv *priv = netdev_priv(netdev);
774 struct mlx5_core_dev *mdev = priv->mdev; 782 struct mlx5_core_dev *mdev = priv->mdev;
783 u32 eth_proto_cap, eth_proto_admin;
784 bool an_changes = false;
785 u8 an_disable_admin;
786 u8 an_disable_cap;
787 bool an_disable;
775 u32 link_modes; 788 u32 link_modes;
789 u8 an_status;
776 u32 speed; 790 u32 speed;
777 u32 eth_proto_cap, eth_proto_admin;
778 enum mlx5_port_status ps;
779 int err; 791 int err;
780 792
781 speed = ethtool_cmd_speed(cmd); 793 speed = link_ksettings->base.speed;
782 794
783 link_modes = cmd->autoneg == AUTONEG_ENABLE ? 795 link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
784 mlx5e_ethtool2ptys_adver_link(cmd->advertising) : 796 mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) :
785 mlx5e_ethtool2ptys_speed_link(speed); 797 mlx5e_ethtool2ptys_speed_link(speed);
786 798
787 err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN); 799 err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
@@ -806,15 +818,18 @@ static int mlx5e_set_settings(struct net_device *netdev,
806 goto out; 818 goto out;
807 } 819 }
808 820
809 if (link_modes == eth_proto_admin) 821 mlx5_query_port_autoneg(mdev, MLX5_PTYS_EN, &an_status,
822 &an_disable_cap, &an_disable_admin);
823
824 an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE;
825 an_changes = ((!an_disable && an_disable_admin) ||
826 (an_disable && !an_disable_admin));
827
828 if (!an_changes && link_modes == eth_proto_admin)
810 goto out; 829 goto out;
811 830
812 mlx5_query_port_admin_status(mdev, &ps); 831 mlx5_set_port_ptys(mdev, an_disable, link_modes, MLX5_PTYS_EN);
813 if (ps == MLX5_PORT_UP) 832 mlx5_toggle_port_link(mdev);
814 mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
815 mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
816 if (ps == MLX5_PORT_UP)
817 mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
818 833
819out: 834out:
820 return err; 835 return err;
@@ -1272,6 +1287,87 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
1272 return 0; 1287 return 0;
1273} 1288}
1274 1289
1290typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
1291
1292static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
1293{
1294 struct mlx5e_priv *priv = netdev_priv(netdev);
1295 struct mlx5_core_dev *mdev = priv->mdev;
1296 bool rx_mode_changed;
1297 u8 rx_cq_period_mode;
1298 int err = 0;
1299 bool reset;
1300
1301 rx_cq_period_mode = enable ?
1302 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
1303 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1304 rx_mode_changed = rx_cq_period_mode != priv->params.rx_cq_period_mode;
1305
1306 if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
1307 !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
1308 return -ENOTSUPP;
1309
1310 if (!rx_mode_changed)
1311 return 0;
1312
1313 reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
1314 if (reset)
1315 mlx5e_close_locked(netdev);
1316
1317 mlx5e_set_rx_cq_mode_params(&priv->params, rx_cq_period_mode);
1318
1319 if (reset)
1320 err = mlx5e_open_locked(netdev);
1321
1322 return err;
1323}
1324
1325static int mlx5e_handle_pflag(struct net_device *netdev,
1326 u32 wanted_flags,
1327 enum mlx5e_priv_flag flag,
1328 mlx5e_pflag_handler pflag_handler)
1329{
1330 struct mlx5e_priv *priv = netdev_priv(netdev);
1331 bool enable = !!(wanted_flags & flag);
1332 u32 changes = wanted_flags ^ priv->pflags;
1333 int err;
1334
1335 if (!(changes & flag))
1336 return 0;
1337
1338 err = pflag_handler(netdev, enable);
1339 if (err) {
1340 netdev_err(netdev, "%s private flag 0x%x failed err %d\n",
1341 enable ? "Enable" : "Disable", flag, err);
1342 return err;
1343 }
1344
1345 MLX5E_SET_PRIV_FLAG(priv, flag, enable);
1346 return 0;
1347}
1348
1349static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
1350{
1351 struct mlx5e_priv *priv = netdev_priv(netdev);
1352 int err;
1353
1354 mutex_lock(&priv->state_lock);
1355
1356 err = mlx5e_handle_pflag(netdev, pflags,
1357 MLX5E_PFLAG_RX_CQE_BASED_MODER,
1358 set_pflag_rx_cqe_based_moder);
1359
1360 mutex_unlock(&priv->state_lock);
1361 return err ? -EINVAL : 0;
1362}
1363
1364static u32 mlx5e_get_priv_flags(struct net_device *netdev)
1365{
1366 struct mlx5e_priv *priv = netdev_priv(netdev);
1367
1368 return priv->pflags;
1369}
1370
1275const struct ethtool_ops mlx5e_ethtool_ops = { 1371const struct ethtool_ops mlx5e_ethtool_ops = {
1276 .get_drvinfo = mlx5e_get_drvinfo, 1372 .get_drvinfo = mlx5e_get_drvinfo,
1277 .get_link = ethtool_op_get_link, 1373 .get_link = ethtool_op_get_link,
@@ -1284,8 +1380,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
1284 .set_channels = mlx5e_set_channels, 1380 .set_channels = mlx5e_set_channels,
1285 .get_coalesce = mlx5e_get_coalesce, 1381 .get_coalesce = mlx5e_get_coalesce,
1286 .set_coalesce = mlx5e_set_coalesce, 1382 .set_coalesce = mlx5e_set_coalesce,
1287 .get_settings = mlx5e_get_settings, 1383 .get_link_ksettings = mlx5e_get_link_ksettings,
1288 .set_settings = mlx5e_set_settings, 1384 .set_link_ksettings = mlx5e_set_link_ksettings,
1289 .get_rxfh_key_size = mlx5e_get_rxfh_key_size, 1385 .get_rxfh_key_size = mlx5e_get_rxfh_key_size,
1290 .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size, 1386 .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
1291 .get_rxfh = mlx5e_get_rxfh, 1387 .get_rxfh = mlx5e_get_rxfh,
@@ -1301,4 +1397,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
1301 .set_wol = mlx5e_set_wol, 1397 .set_wol = mlx5e_set_wol,
1302 .get_module_info = mlx5e_get_module_info, 1398 .get_module_info = mlx5e_get_module_info,
1303 .get_module_eeprom = mlx5e_get_module_eeprom, 1399 .get_module_eeprom = mlx5e_get_module_eeprom,
1400 .get_priv_flags = mlx5e_get_priv_flags,
1401 .set_priv_flags = mlx5e_set_priv_flags
1304}; 1402};
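The ptys2ethtool rework above exists because ethtool link modes outgrew a u32: bits such as ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT sit above bit 31, so the driver now keeps __ETHTOOL_DECLARE_LINK_MODE_MASK bitmaps and unions them per set PTYS bit. A standalone C model of that union step, with made-up sizes and bit positions:

#include <stdio.h>

#define LINK_MODE_NBITS 92  /* illustrative; the point is only that it exceeds 32 */
#define LONGS ((LINK_MODE_NBITS + 63) / 64)

typedef unsigned long long bmap_t[LONGS];

/* Same role as bitmap_or() in the new ptys2ethtool_supported_link(). */
static void bmap_or(bmap_t dst, const bmap_t src)
{
	for (int i = 0; i < LONGS; i++)
		dst[i] |= src[i];
}

int main(void)
{
	bmap_t table[32] = {0};  /* per-PTYS-mode "supported" bitmaps */
	bmap_t supported = {0};
	unsigned int eth_proto_cap = (1u << 3) | (1u << 18); /* example caps */

	table[3][0]  = 1ull << 10; /* pretend mode 3 maps to ethtool bit 10 */
	table[18][1] = 1ull << 1;  /* mode 18 maps to bit 65, beyond any u32 */

	for (unsigned int proto = 0; proto < 32; proto++)
		if (eth_proto_cap & (1u << proto))
			bmap_or(supported, table[proto]);

	printf("words: %#llx %#llx\n", supported[0], supported[1]);
	return 0;
}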
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index cb6defd71fc1..a64ce5df5810 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -40,8 +40,9 @@
40#include "vxlan.h" 40#include "vxlan.h"
41 41
42struct mlx5e_rq_param { 42struct mlx5e_rq_param {
43 u32 rqc[MLX5_ST_SZ_DW(rqc)]; 43 u32 rqc[MLX5_ST_SZ_DW(rqc)];
44 struct mlx5_wq_param wq; 44 struct mlx5_wq_param wq;
45 bool am_enabled;
45}; 46};
46 47
47struct mlx5e_sq_param { 48struct mlx5e_sq_param {
@@ -55,6 +56,7 @@ struct mlx5e_cq_param {
55 u32 cqc[MLX5_ST_SZ_DW(cqc)]; 56 u32 cqc[MLX5_ST_SZ_DW(cqc)];
56 struct mlx5_wq_param wq; 57 struct mlx5_wq_param wq;
57 u16 eq_ix; 58 u16 eq_ix;
59 u8 cq_period_mode;
58}; 60};
59 61
60struct mlx5e_channel_param { 62struct mlx5e_channel_param {
@@ -335,6 +337,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
335 wqe->data.byte_count = cpu_to_be32(byte_count); 337 wqe->data.byte_count = cpu_to_be32(byte_count);
336 } 338 }
337 339
340 INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
341 rq->am.mode = priv->params.rx_cq_period_mode;
342
338 rq->wq_type = priv->params.rq_wq_type; 343 rq->wq_type = priv->params.rq_wq_type;
339 rq->pdev = c->pdev; 344 rq->pdev = c->pdev;
340 rq->netdev = c->netdev; 345 rq->netdev = c->netdev;
@@ -507,6 +512,9 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
507 if (err) 512 if (err)
508 goto err_disable_rq; 513 goto err_disable_rq;
509 514
515 if (param->am_enabled)
516 set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
517
510 set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); 518 set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
511 519
512 sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP; 520 sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
@@ -535,6 +543,8 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
535 /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */ 543 /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
536 napi_synchronize(&rq->channel->napi); 544 napi_synchronize(&rq->channel->napi);
537 545
546 cancel_work_sync(&rq->am.work);
547
538 mlx5e_disable_rq(rq); 548 mlx5e_disable_rq(rq);
539 mlx5e_destroy_rq(rq); 549 mlx5e_destroy_rq(rq);
540} 550}
@@ -701,7 +711,8 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
701 return err; 711 return err;
702} 712}
703 713
704static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) 714static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
715 int next_state, bool update_rl, int rl_index)
705{ 716{
706 struct mlx5e_channel *c = sq->channel; 717 struct mlx5e_channel *c = sq->channel;
707 struct mlx5e_priv *priv = c->priv; 718 struct mlx5e_priv *priv = c->priv;
@@ -721,6 +732,10 @@ static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
721 732
722 MLX5_SET(modify_sq_in, in, sq_state, curr_state); 733 MLX5_SET(modify_sq_in, in, sq_state, curr_state);
723 MLX5_SET(sqc, sqc, state, next_state); 734 MLX5_SET(sqc, sqc, state, next_state);
735 if (update_rl && next_state == MLX5_SQC_STATE_RDY) {
736 MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
737 MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
738 }
724 739
725 err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen); 740 err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
726 741
@@ -736,6 +751,8 @@ static void mlx5e_disable_sq(struct mlx5e_sq *sq)
736 struct mlx5_core_dev *mdev = priv->mdev; 751 struct mlx5_core_dev *mdev = priv->mdev;
737 752
738 mlx5_core_destroy_sq(mdev, sq->sqn); 753 mlx5_core_destroy_sq(mdev, sq->sqn);
754 if (sq->rate_limit)
755 mlx5_rl_remove_rate(mdev, sq->rate_limit);
739} 756}
740 757
741static int mlx5e_open_sq(struct mlx5e_channel *c, 758static int mlx5e_open_sq(struct mlx5e_channel *c,
@@ -753,7 +770,8 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
753 if (err) 770 if (err)
754 goto err_destroy_sq; 771 goto err_destroy_sq;
755 772
756 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY); 773 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
774 false, 0);
757 if (err) 775 if (err)
758 goto err_disable_sq; 776 goto err_disable_sq;
759 777
@@ -792,7 +810,8 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
792 if (mlx5e_sq_has_room_for(sq, 1)) 810 if (mlx5e_sq_has_room_for(sq, 1))
793 mlx5e_send_nop(sq, true); 811 mlx5e_send_nop(sq, true);
794 812
795 mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR); 813 mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR,
814 false, 0);
796 } 815 }
797 816
798 while (sq->cc != sq->pc) /* wait till sq is empty */ 817 while (sq->cc != sq->pc) /* wait till sq is empty */
@@ -886,6 +905,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
886 905
887 mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); 906 mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
888 907
908 MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
889 MLX5_SET(cqc, cqc, c_eqn, eqn); 909 MLX5_SET(cqc, cqc, c_eqn, eqn);
890 MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); 910 MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
891 MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - 911 MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
@@ -915,8 +935,7 @@ static void mlx5e_disable_cq(struct mlx5e_cq *cq)
915static int mlx5e_open_cq(struct mlx5e_channel *c, 935static int mlx5e_open_cq(struct mlx5e_channel *c,
916 struct mlx5e_cq_param *param, 936 struct mlx5e_cq_param *param,
917 struct mlx5e_cq *cq, 937 struct mlx5e_cq *cq,
918 u16 moderation_usecs, 938 struct mlx5e_cq_moder moderation)
919 u16 moderation_frames)
920{ 939{
921 int err; 940 int err;
922 struct mlx5e_priv *priv = c->priv; 941 struct mlx5e_priv *priv = c->priv;
@@ -932,8 +951,8 @@ static int mlx5e_open_cq(struct mlx5e_channel *c,
932 951
933 if (MLX5_CAP_GEN(mdev, cq_moderation)) 952 if (MLX5_CAP_GEN(mdev, cq_moderation))
934 mlx5_core_modify_cq_moderation(mdev, &cq->mcq, 953 mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
935 moderation_usecs, 954 moderation.usec,
936 moderation_frames); 955 moderation.pkts);
937 return 0; 956 return 0;
938 957
939err_destroy_cq: 958err_destroy_cq:
@@ -962,8 +981,7 @@ static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
962 981
963 for (tc = 0; tc < c->num_tc; tc++) { 982 for (tc = 0; tc < c->num_tc; tc++) {
964 err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq, 983 err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
965 priv->params.tx_cq_moderation_usec, 984 priv->params.tx_cq_moderation);
966 priv->params.tx_cq_moderation_pkts);
967 if (err) 985 if (err)
968 goto err_close_tx_cqs; 986 goto err_close_tx_cqs;
969 } 987 }
@@ -1023,14 +1041,91 @@ static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
1023 ix + i * priv->params.num_channels; 1041 ix + i * priv->params.num_channels;
1024} 1042}
1025 1043
1044static int mlx5e_set_sq_maxrate(struct net_device *dev,
1045 struct mlx5e_sq *sq, u32 rate)
1046{
1047 struct mlx5e_priv *priv = netdev_priv(dev);
1048 struct mlx5_core_dev *mdev = priv->mdev;
1049 u16 rl_index = 0;
1050 int err;
1051
1052 if (rate == sq->rate_limit)
1053 /* nothing to do */
1054 return 0;
1055
1056 if (sq->rate_limit)
1057 /* remove current rl index to free space to next ones */
1058 mlx5_rl_remove_rate(mdev, sq->rate_limit);
1059
1060 sq->rate_limit = 0;
1061
1062 if (rate) {
1063 err = mlx5_rl_add_rate(mdev, rate, &rl_index);
1064 if (err) {
1065 netdev_err(dev, "Failed configuring rate %u: %d\n",
1066 rate, err);
1067 return err;
1068 }
1069 }
1070
1071 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
1072 MLX5_SQC_STATE_RDY, true, rl_index);
1073 if (err) {
1074 netdev_err(dev, "Failed configuring rate %u: %d\n",
1075 rate, err);
1076 /* remove the rate from the table */
1077 if (rate)
1078 mlx5_rl_remove_rate(mdev, rate);
1079 return err;
1080 }
1081
1082 sq->rate_limit = rate;
1083 return 0;
1084}
1085
1086static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
1087{
1088 struct mlx5e_priv *priv = netdev_priv(dev);
1089 struct mlx5_core_dev *mdev = priv->mdev;
1090 struct mlx5e_sq *sq = priv->txq_to_sq_map[index];
1091 int err = 0;
1092
1093 if (!mlx5_rl_is_supported(mdev)) {
1094 netdev_err(dev, "Rate limiting is not supported on this device\n");
1095 return -EINVAL;
1096 }
1097
1098 /* rate is given in Mb/sec, HW config is in Kb/sec */
1099 rate = rate << 10;
1100
1101 /* Check whether rate in valid range, 0 is always valid */
1102 if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
1103 netdev_err(dev, "TX rate %u, is not in range\n", rate);
1104 return -ERANGE;
1105 }
1106
1107 mutex_lock(&priv->state_lock);
1108 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
1109 err = mlx5e_set_sq_maxrate(dev, sq, rate);
1110 if (!err)
1111 priv->tx_rates[index] = rate;
1112 mutex_unlock(&priv->state_lock);
1113
1114 return err;
1115}
1116
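ndo_set_tx_maxrate() above receives the rate in Mb/s (typically written via /sys/class/net/<dev>/queues/tx-<n>/tx_maxrate), while the mlx5 rate table is programmed in Kb/s; the driver converts with a shift (<<10, i.e. *1024) rather than *1000. A standalone C model of the conversion and range check, with made-up bounds (the driver reads the real ones from the device's rate-limit capabilities):

#include <stdio.h>

#define RATE_MIN_KBPS 1000u     /* illustrative device minimum */
#define RATE_MAX_KBPS 50000000u /* illustrative device maximum */

static int set_tx_maxrate_model(unsigned int rate_mbps)
{
	unsigned int kbps = rate_mbps << 10; /* Mb/s -> Kb/s, as the driver does */

	if (kbps && (kbps < RATE_MIN_KBPS || kbps > RATE_MAX_KBPS))
		return -1;                   /* -ERANGE in the driver */
	printf("program %u Kb/s\n", kbps);   /* 0 means "no limit" */
	return 0;
}

int main(void)
{
	set_tx_maxrate_model(100); /* e.g. echo 100 > .../tx-0/tx_maxrate */
	return 0;
}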
1026static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, 1117static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1027 struct mlx5e_channel_param *cparam, 1118 struct mlx5e_channel_param *cparam,
1028 struct mlx5e_channel **cp) 1119 struct mlx5e_channel **cp)
1029{ 1120{
1121 struct mlx5e_cq_moder icosq_cq_moder = {0, 0};
1030 struct net_device *netdev = priv->netdev; 1122 struct net_device *netdev = priv->netdev;
1123 struct mlx5e_cq_moder rx_cq_profile;
1031 int cpu = mlx5e_get_cpu(priv, ix); 1124 int cpu = mlx5e_get_cpu(priv, ix);
1032 struct mlx5e_channel *c; 1125 struct mlx5e_channel *c;
1126 struct mlx5e_sq *sq;
1033 int err; 1127 int err;
1128 int i;
1034 1129
1035 c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); 1130 c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
1036 if (!c) 1131 if (!c)
@@ -1044,11 +1139,16 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1044 c->mkey_be = cpu_to_be32(priv->mkey.key); 1139 c->mkey_be = cpu_to_be32(priv->mkey.key);
1045 c->num_tc = priv->params.num_tc; 1140 c->num_tc = priv->params.num_tc;
1046 1141
1142 if (priv->params.rx_am_enabled)
1143 rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
1144 else
1145 rx_cq_profile = priv->params.rx_cq_moderation;
1146
1047 mlx5e_build_channeltc_to_txq_map(priv, ix); 1147 mlx5e_build_channeltc_to_txq_map(priv, ix);
1048 1148
1049 netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); 1149 netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
1050 1150
1051 err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, 0, 0); 1151 err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder);
1052 if (err) 1152 if (err)
1053 goto err_napi_del; 1153 goto err_napi_del;
1054 1154
@@ -1057,8 +1157,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1057 goto err_close_icosq_cq; 1157 goto err_close_icosq_cq;
1058 1158
1059 err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq, 1159 err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
1060 priv->params.rx_cq_moderation_usec, 1160 rx_cq_profile);
1061 priv->params.rx_cq_moderation_pkts);
1062 if (err) 1161 if (err)
1063 goto err_close_tx_cqs; 1162 goto err_close_tx_cqs;
1064 1163
@@ -1072,6 +1171,16 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1072 if (err) 1171 if (err)
1073 goto err_close_icosq; 1172 goto err_close_icosq;
1074 1173
1174 for (i = 0; i < priv->params.num_tc; i++) {
1175 u32 txq_ix = priv->channeltc_to_txq_map[ix][i];
1176
1177 if (priv->tx_rates[txq_ix]) {
1178 sq = priv->txq_to_sq_map[txq_ix];
1179 mlx5e_set_sq_maxrate(priv->netdev, sq,
1180 priv->tx_rates[txq_ix]);
1181 }
1182 }
1183
1075 err = mlx5e_open_rq(c, &cparam->rq, &c->rq); 1184 err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
1076 if (err) 1185 if (err)
1077 goto err_close_sqs; 1186 goto err_close_sqs;
@@ -1148,6 +1257,8 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
1148 1257
1149 param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); 1258 param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
1150 param->wq.linear = 1; 1259 param->wq.linear = 1;
1260
1261 param->am_enabled = priv->params.rx_am_enabled;
1151} 1262}
1152 1263
1153static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param) 1264static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
@@ -1213,6 +1324,8 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
1213 } 1324 }
1214 1325
1215 mlx5e_build_common_cq_param(priv, param); 1326 mlx5e_build_common_cq_param(priv, param);
1327
1328 param->cq_period_mode = priv->params.rx_cq_period_mode;
1216} 1329}
1217 1330
1218static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, 1331static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
@@ -1223,6 +1336,8 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
1223 MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size); 1336 MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
1224 1337
1225 mlx5e_build_common_cq_param(priv, param); 1338 mlx5e_build_common_cq_param(priv, param);
1339
1340 param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1226} 1341}
1227 1342
1228static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, 1343static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
@@ -1234,6 +1349,8 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
1234 MLX5_SET(cqc, cqc, log_cq_size, log_wq_size); 1349 MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
1235 1350
1236 mlx5e_build_common_cq_param(priv, param); 1351 mlx5e_build_common_cq_param(priv, param);
1352
1353 param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1237} 1354}
1238 1355
1239static void mlx5e_build_icosq_param(struct mlx5e_priv *priv, 1356static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
@@ -2519,25 +2636,31 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
2519} 2636}
2520 2637
2521static void mlx5e_add_vxlan_port(struct net_device *netdev, 2638static void mlx5e_add_vxlan_port(struct net_device *netdev,
2522 sa_family_t sa_family, __be16 port) 2639 struct udp_tunnel_info *ti)
2523{ 2640{
2524 struct mlx5e_priv *priv = netdev_priv(netdev); 2641 struct mlx5e_priv *priv = netdev_priv(netdev);
2525 2642
2643 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2644 return;
2645
2526 if (!mlx5e_vxlan_allowed(priv->mdev)) 2646 if (!mlx5e_vxlan_allowed(priv->mdev))
2527 return; 2647 return;
2528 2648
2529 mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1); 2649 mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
2530} 2650}
2531 2651
2532static void mlx5e_del_vxlan_port(struct net_device *netdev, 2652static void mlx5e_del_vxlan_port(struct net_device *netdev,
2533 sa_family_t sa_family, __be16 port) 2653 struct udp_tunnel_info *ti)
2534{ 2654{
2535 struct mlx5e_priv *priv = netdev_priv(netdev); 2655 struct mlx5e_priv *priv = netdev_priv(netdev);
2536 2656
2657 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2658 return;
2659
2537 if (!mlx5e_vxlan_allowed(priv->mdev)) 2660 if (!mlx5e_vxlan_allowed(priv->mdev))
2538 return; 2661 return;
2539 2662
2540 mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0); 2663 mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
2541} 2664}
2542 2665
2543static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv, 2666static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
@@ -2604,6 +2727,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
2604 .ndo_set_features = mlx5e_set_features, 2727 .ndo_set_features = mlx5e_set_features,
2605 .ndo_change_mtu = mlx5e_change_mtu, 2728 .ndo_change_mtu = mlx5e_change_mtu,
2606 .ndo_do_ioctl = mlx5e_ioctl, 2729 .ndo_do_ioctl = mlx5e_ioctl,
2730 .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
2607#ifdef CONFIG_RFS_ACCEL 2731#ifdef CONFIG_RFS_ACCEL
2608 .ndo_rx_flow_steer = mlx5e_rx_flow_steer, 2732 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
2609#endif 2733#endif
@@ -2623,8 +2747,9 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
2623 .ndo_set_features = mlx5e_set_features, 2747 .ndo_set_features = mlx5e_set_features,
2624 .ndo_change_mtu = mlx5e_change_mtu, 2748 .ndo_change_mtu = mlx5e_change_mtu,
2625 .ndo_do_ioctl = mlx5e_ioctl, 2749 .ndo_do_ioctl = mlx5e_ioctl,
2626 .ndo_add_vxlan_port = mlx5e_add_vxlan_port, 2750 .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
2627 .ndo_del_vxlan_port = mlx5e_del_vxlan_port, 2751 .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
2752 .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
2628 .ndo_features_check = mlx5e_features_check, 2753 .ndo_features_check = mlx5e_features_check,
2629#ifdef CONFIG_RFS_ACCEL 2754#ifdef CONFIG_RFS_ACCEL
2630 .ndo_rx_flow_steer = mlx5e_rx_flow_steer, 2755 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
@@ -2753,6 +2878,20 @@ static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
2753 (pci_bw < 40000) && (pci_bw < link_speed)); 2878 (pci_bw < 40000) && (pci_bw < link_speed));
2754} 2879}
2755 2880
2881void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
2882{
2883 params->rx_cq_period_mode = cq_period_mode;
2884
2885 params->rx_cq_moderation.pkts =
2886 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
2887 params->rx_cq_moderation.usec =
2888 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
2889
2890 if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
2891 params->rx_cq_moderation.usec =
2892 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
2893}
2894
2756static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, 2895static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
2757 struct net_device *netdev, 2896 struct net_device *netdev,
2758 int num_channels) 2897 int num_channels)
@@ -2760,6 +2899,9 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
2760 struct mlx5e_priv *priv = netdev_priv(netdev); 2899 struct mlx5e_priv *priv = netdev_priv(netdev);
2761 u32 link_speed = 0; 2900 u32 link_speed = 0;
2762 u32 pci_bw = 0; 2901 u32 pci_bw = 0;
2902 u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
2903 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
2904 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
2763 2905
2764 priv->params.log_sq_size = 2906 priv->params.log_sq_size =
2765 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; 2907 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
@@ -2805,13 +2947,13 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
2805 2947
2806 priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, 2948 priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
2807 BIT(priv->params.log_rq_size)); 2949 BIT(priv->params.log_rq_size));
2808 priv->params.rx_cq_moderation_usec = 2950
2809 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; 2951 priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
2810 priv->params.rx_cq_moderation_pkts = 2952 mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
2811 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; 2953
2812 priv->params.tx_cq_moderation_usec = 2954 priv->params.tx_cq_moderation.usec =
2813 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; 2955 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
2814 priv->params.tx_cq_moderation_pkts = 2956 priv->params.tx_cq_moderation.pkts =
2815 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; 2957 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
2816 priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); 2958 priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
2817 priv->params.num_tc = 1; 2959 priv->params.num_tc = 1;
@@ -2826,6 +2968,10 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
2826 priv->params.lro_wqe_sz = 2968 priv->params.lro_wqe_sz =
2827 MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; 2969 MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
2828 2970
2971 /* Initialize pflags */
2972 MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
2973 priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
2974
2829 priv->mdev = mdev; 2975 priv->mdev = mdev;
2830 priv->netdev = netdev; 2976 priv->netdev = netdev;
2831 priv->params.num_channels = num_channels; 2977 priv->params.num_channels = num_channels;
@@ -3127,7 +3273,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
3127 3273
3128 if (mlx5e_vxlan_allowed(mdev)) { 3274 if (mlx5e_vxlan_allowed(mdev)) {
3129 rtnl_lock(); 3275 rtnl_lock();
3130 vxlan_get_rx_port(netdev); 3276 udp_tunnel_get_rx_info(netdev);
3131 rtnl_unlock(); 3277 rtnl_unlock();
3132 } 3278 }
3133 3279
@@ -3233,6 +3379,7 @@ static struct mlx5_interface mlx5e_interface = {
3233 3379
3234void mlx5e_init(void) 3380void mlx5e_init(void)
3235{ 3381{
3382 mlx5e_build_ptys2ethtool_map();
3236 mlx5_register_interface(&mlx5e_interface); 3383 mlx5_register_interface(&mlx5e_interface);
3237} 3384}
3238 3385
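
The two netdev_ops hunks above hook up .ndo_set_tx_maxrate, and the channel-open loop re-applies any stored per-queue rate through mlx5e_set_sq_maxrate(). Userspace drives this hook through the standard per-queue tx_maxrate sysfs attribute (value in Mb/s); a minimal illustration, with the device and queue names as placeholders:

#include <stdio.h>

int main(void)
{
	/* Placeholder path; substitute the real device and queue. */
	FILE *f = fopen("/sys/class/net/eth0/queues/tx-0/tx_maxrate", "w");

	if (!f) {
		perror("tx_maxrate");
		return 1;
	}
	fprintf(f, "%u\n", 1000u);	/* cap this queue at 1000 Mb/s */
	fclose(f);
	return 0;
}
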
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
new file mode 100644
index 000000000000..1fffe48a93cc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -0,0 +1,335 @@
1/*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "en.h"
34
35/* Adaptive moderation profiles */
36#define MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
37#define MLX5E_RX_AM_DEF_PROFILE_CQE 1
38#define MLX5E_RX_AM_DEF_PROFILE_EQE 1
39#define MLX5E_PARAMS_AM_NUM_PROFILES 5
40
 41/* All profile sizes must be MLX5E_PARAMS_AM_NUM_PROFILES */
42#define MLX5_AM_EQE_PROFILES { \
43 {1, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
44 {8, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
45 {64, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
46 {128, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
47 {256, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
48}
49
50#define MLX5_AM_CQE_PROFILES { \
51 {2, 256}, \
52 {8, 128}, \
53 {16, 64}, \
54 {32, 64}, \
55 {64, 64} \
56}
57
58static const struct mlx5e_cq_moder
59profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = {
60 MLX5_AM_EQE_PROFILES,
61 MLX5_AM_CQE_PROFILES,
62};
63
64static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix)
65{
66 return profile[cq_period_mode][ix];
67}
68
69struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
70{
71 int default_profile_ix;
72
73 if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
74 default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_CQE;
75 else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */
76 default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE;
77
78 return profile[rx_cq_period_mode][default_profile_ix];
79}
80
81/* Adaptive moderation logic */
82enum {
83 MLX5E_AM_START_MEASURE,
84 MLX5E_AM_MEASURE_IN_PROGRESS,
85 MLX5E_AM_APPLY_NEW_PROFILE,
86};
87
88enum {
89 MLX5E_AM_PARKING_ON_TOP,
90 MLX5E_AM_PARKING_TIRED,
91 MLX5E_AM_GOING_RIGHT,
92 MLX5E_AM_GOING_LEFT,
93};
94
95enum {
96 MLX5E_AM_STATS_WORSE,
97 MLX5E_AM_STATS_SAME,
98 MLX5E_AM_STATS_BETTER,
99};
100
101enum {
102 MLX5E_AM_STEPPED,
103 MLX5E_AM_TOO_TIRED,
104 MLX5E_AM_ON_EDGE,
105};
106
107static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
108{
109 switch (am->tune_state) {
110 case MLX5E_AM_PARKING_ON_TOP:
111 case MLX5E_AM_PARKING_TIRED:
112 WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n");
113 return true;
114 case MLX5E_AM_GOING_RIGHT:
115 return (am->steps_left > 1) && (am->steps_right == 1);
116 default: /* MLX5E_AM_GOING_LEFT */
117 return (am->steps_right > 1) && (am->steps_left == 1);
118 }
119}
120
121static void mlx5e_am_turn(struct mlx5e_rx_am *am)
122{
123 switch (am->tune_state) {
124 case MLX5E_AM_PARKING_ON_TOP:
125 case MLX5E_AM_PARKING_TIRED:
126 WARN_ONCE(true, "mlx5e_am_turn: PARKING\n");
127 break;
128 case MLX5E_AM_GOING_RIGHT:
129 am->tune_state = MLX5E_AM_GOING_LEFT;
130 am->steps_left = 0;
131 break;
132 case MLX5E_AM_GOING_LEFT:
133 am->tune_state = MLX5E_AM_GOING_RIGHT;
134 am->steps_right = 0;
135 break;
136 }
137}
138
139static int mlx5e_am_step(struct mlx5e_rx_am *am)
140{
141 if (am->tired == (MLX5E_PARAMS_AM_NUM_PROFILES * 2))
142 return MLX5E_AM_TOO_TIRED;
143
144 switch (am->tune_state) {
145 case MLX5E_AM_PARKING_ON_TOP:
146 case MLX5E_AM_PARKING_TIRED:
147 WARN_ONCE(true, "mlx5e_am_step: PARKING\n");
148 break;
149 case MLX5E_AM_GOING_RIGHT:
150 if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
151 return MLX5E_AM_ON_EDGE;
152 am->profile_ix++;
153 am->steps_right++;
154 break;
155 case MLX5E_AM_GOING_LEFT:
156 if (am->profile_ix == 0)
157 return MLX5E_AM_ON_EDGE;
158 am->profile_ix--;
159 am->steps_left++;
160 break;
161 }
162
163 am->tired++;
164 return MLX5E_AM_STEPPED;
165}
166
167static void mlx5e_am_park_on_top(struct mlx5e_rx_am *am)
168{
169 am->steps_right = 0;
170 am->steps_left = 0;
171 am->tired = 0;
172 am->tune_state = MLX5E_AM_PARKING_ON_TOP;
173}
174
175static void mlx5e_am_park_tired(struct mlx5e_rx_am *am)
176{
177 am->steps_right = 0;
178 am->steps_left = 0;
179 am->tune_state = MLX5E_AM_PARKING_TIRED;
180}
181
182static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
183{
184 am->tune_state = am->profile_ix ? MLX5E_AM_GOING_LEFT :
185 MLX5E_AM_GOING_RIGHT;
186 mlx5e_am_step(am);
187}
188
189static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
190 struct mlx5e_rx_am_stats *prev)
191{
192 int diff;
193
194 if (!prev->ppms)
195 return curr->ppms ? MLX5E_AM_STATS_BETTER :
196 MLX5E_AM_STATS_SAME;
197
198 diff = curr->ppms - prev->ppms;
199 if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */
200 return (diff > 0) ? MLX5E_AM_STATS_BETTER :
201 MLX5E_AM_STATS_WORSE;
202
203 if (!prev->epms)
204 return curr->epms ? MLX5E_AM_STATS_WORSE :
205 MLX5E_AM_STATS_SAME;
206
207 diff = curr->epms - prev->epms;
208 if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */
209 return (diff < 0) ? MLX5E_AM_STATS_BETTER :
210 MLX5E_AM_STATS_WORSE;
211
212 return MLX5E_AM_STATS_SAME;
213}
214
215static bool mlx5e_am_decision(struct mlx5e_rx_am_stats *curr_stats,
216 struct mlx5e_rx_am *am)
217{
218 int prev_state = am->tune_state;
219 int prev_ix = am->profile_ix;
220 int stats_res;
221 int step_res;
222
223 switch (am->tune_state) {
224 case MLX5E_AM_PARKING_ON_TOP:
225 stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
226 if (stats_res != MLX5E_AM_STATS_SAME)
227 mlx5e_am_exit_parking(am);
228 break;
229
230 case MLX5E_AM_PARKING_TIRED:
231 am->tired--;
232 if (!am->tired)
233 mlx5e_am_exit_parking(am);
234 break;
235
236 case MLX5E_AM_GOING_RIGHT:
237 case MLX5E_AM_GOING_LEFT:
238 stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
239 if (stats_res != MLX5E_AM_STATS_BETTER)
240 mlx5e_am_turn(am);
241
242 if (mlx5e_am_on_top(am)) {
243 mlx5e_am_park_on_top(am);
244 break;
245 }
246
247 step_res = mlx5e_am_step(am);
248 switch (step_res) {
249 case MLX5E_AM_ON_EDGE:
250 mlx5e_am_park_on_top(am);
251 break;
252 case MLX5E_AM_TOO_TIRED:
253 mlx5e_am_park_tired(am);
254 break;
255 }
256
257 break;
258 }
259
260 if ((prev_state != MLX5E_AM_PARKING_ON_TOP) ||
261 (am->tune_state != MLX5E_AM_PARKING_ON_TOP))
262 am->prev_stats = *curr_stats;
263
264 return am->profile_ix != prev_ix;
265}
266
267static void mlx5e_am_sample(struct mlx5e_rq *rq,
268 struct mlx5e_rx_am_sample *s)
269{
270 s->time = ktime_get();
271 s->pkt_ctr = rq->stats.packets;
272 s->event_ctr = rq->cq.event_ctr;
273}
274
275#define MLX5E_AM_NEVENTS 64
276
277static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
278 struct mlx5e_rx_am_sample *end,
279 struct mlx5e_rx_am_stats *curr_stats)
280{
281 /* u32 holds up to 71 minutes, should be enough */
282 u32 delta_us = ktime_us_delta(end->time, start->time);
283 unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
284
285 if (!delta_us) {
286 WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n");
287 return;
288 }
289
290 curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us;
291 curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
292}
293
294void mlx5e_rx_am_work(struct work_struct *work)
295{
296 struct mlx5e_rx_am *am = container_of(work, struct mlx5e_rx_am,
297 work);
298 struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am);
299 struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix];
300
301 mlx5_core_modify_cq_moderation(rq->priv->mdev, &rq->cq.mcq,
302 cur_profile.usec, cur_profile.pkts);
303
304 am->state = MLX5E_AM_START_MEASURE;
305}
306
307void mlx5e_rx_am(struct mlx5e_rq *rq)
308{
309 struct mlx5e_rx_am *am = &rq->am;
310 struct mlx5e_rx_am_sample end_sample;
311 struct mlx5e_rx_am_stats curr_stats;
312 u16 nevents;
313
314 switch (am->state) {
315 case MLX5E_AM_MEASURE_IN_PROGRESS:
316 nevents = rq->cq.event_ctr - am->start_sample.event_ctr;
317 if (nevents < MLX5E_AM_NEVENTS)
318 break;
319 mlx5e_am_sample(rq, &end_sample);
320 mlx5e_am_calc_stats(&am->start_sample, &end_sample,
321 &curr_stats);
322 if (mlx5e_am_decision(&curr_stats, am)) {
323 am->state = MLX5E_AM_APPLY_NEW_PROFILE;
324 schedule_work(&am->work);
325 break;
326 }
327 /* fall through */
328 case MLX5E_AM_START_MEASURE:
329 mlx5e_am_sample(rq, &am->start_sample);
330 am->state = MLX5E_AM_MEASURE_IN_PROGRESS;
331 break;
332 case MLX5E_AM_APPLY_NEW_PROFILE:
333 break;
334 }
335}
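
en_rx_am.c above implements the RX adaptive-moderation state machine: every MLX5E_AM_NEVENTS completions it samples packet and event rates, compares them with the previous window, and walks the profile table, turning around when the stats stop improving. Below is a standalone, simplified simulation of that walk; the thresholds mirror the driver, but the parking/tired states and the profile contents are omitted, so treat it as an intuition aid rather than driver code:

#include <stdio.h>
#include <stdlib.h>

#define NUM_PROFILES 5

enum { GOING_RIGHT, GOING_LEFT };
enum { STATS_WORSE, STATS_SAME, STATS_BETTER };

struct stats { int ppms; int epms; };	/* packets and events per msec */

static int stats_compare(const struct stats *curr, const struct stats *prev)
{
	int diff;

	if (!prev->ppms)
		return curr->ppms ? STATS_BETTER : STATS_SAME;

	diff = curr->ppms - prev->ppms;
	if ((100 * abs(diff)) / prev->ppms > 10)	/* >10% throughput change */
		return diff > 0 ? STATS_BETTER : STATS_WORSE;

	if (!prev->epms)
		return curr->epms ? STATS_WORSE : STATS_SAME;

	diff = curr->epms - prev->epms;
	if ((100 * abs(diff)) / prev->epms > 10)	/* >10% event-rate change */
		return diff < 0 ? STATS_BETTER : STATS_WORSE;

	return STATS_SAME;
}

int main(void)
{
	/* Fake measurements: throughput improves while stepping right,
	 * then regresses, so the walk turns around - the core AM behaviour.
	 */
	struct stats samples[] = {
		{1000, 64}, {1200, 64}, {1500, 64}, {1400, 64}, {1380, 64},
	};
	struct stats prev = {0, 0};
	int dir = GOING_RIGHT, ix = 0;
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		if (stats_compare(&samples[i], &prev) != STATS_BETTER)
			dir = (dir == GOING_RIGHT) ? GOING_LEFT : GOING_RIGHT;
		if (dir == GOING_RIGHT && ix < NUM_PROFILES - 1)
			ix++;
		else if (dir == GOING_LEFT && ix > 0)
			ix--;
		prev = samples[i];
		printf("sample %zu: profile_ix=%d dir=%s\n", i, ix,
		       dir == GOING_RIGHT ? "right" : "left");
	}
	return 0;
}
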
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index c38781fa567d..64ae2e800daa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -136,6 +136,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
136 136
137 for (i = 0; i < c->num_tc; i++) 137 for (i = 0; i < c->num_tc; i++)
138 mlx5e_cq_arm(&c->sq[i].cq); 138 mlx5e_cq_arm(&c->sq[i].cq);
139
140 if (test_bit(MLX5E_RQ_STATE_AM, &c->rq.state))
141 mlx5e_rx_am(&c->rq);
142
139 mlx5e_cq_arm(&c->rq.cq); 143 mlx5e_cq_arm(&c->rq.cq);
140 mlx5e_cq_arm(&c->icosq.cq); 144 mlx5e_cq_arm(&c->icosq.cq);
141 145
@@ -146,6 +150,7 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq)
146{ 150{
147 struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); 151 struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
148 152
153 cq->event_ctr++;
149 set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags); 154 set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
150 napi_schedule(cq->napi); 155 napi_schedule(cq->napi);
151} 156}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 75c7ae6a5cc4..77fc1aa26114 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -151,6 +151,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
151 return err; 151 return err;
152 } 152 }
153 153
154 if (MLX5_CAP_GEN(dev, qos)) {
155 err = mlx5_core_get_caps(dev, MLX5_CAP_QOS);
156 if (err)
157 return err;
158 }
159
154 return 0; 160 return 0;
155} 161}
156 162
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c65f4a13e17e..1f3b6d6a852e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1144,6 +1144,13 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
1144 dev_err(&pdev->dev, "Failed to init flow steering\n"); 1144 dev_err(&pdev->dev, "Failed to init flow steering\n");
1145 goto err_fs; 1145 goto err_fs;
1146 } 1146 }
1147
1148 err = mlx5_init_rl_table(dev);
1149 if (err) {
1150 dev_err(&pdev->dev, "Failed to init rate limiting\n");
1151 goto err_rl;
1152 }
1153
1147#ifdef CONFIG_MLX5_CORE_EN 1154#ifdef CONFIG_MLX5_CORE_EN
1148 err = mlx5_eswitch_init(dev); 1155 err = mlx5_eswitch_init(dev);
1149 if (err) { 1156 if (err) {
@@ -1183,6 +1190,8 @@ err_sriov:
1183 mlx5_eswitch_cleanup(dev->priv.eswitch); 1190 mlx5_eswitch_cleanup(dev->priv.eswitch);
1184#endif 1191#endif
1185err_reg_dev: 1192err_reg_dev:
1193 mlx5_cleanup_rl_table(dev);
1194err_rl:
1186 mlx5_cleanup_fs(dev); 1195 mlx5_cleanup_fs(dev);
1187err_fs: 1196err_fs:
1188 mlx5_cleanup_mkey_table(dev); 1197 mlx5_cleanup_mkey_table(dev);
@@ -1253,6 +1262,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
1253 mlx5_eswitch_cleanup(dev->priv.eswitch); 1262 mlx5_eswitch_cleanup(dev->priv.eswitch);
1254#endif 1263#endif
1255 1264
1265 mlx5_cleanup_rl_table(dev);
1256 mlx5_cleanup_fs(dev); 1266 mlx5_cleanup_fs(dev);
1257 mlx5_cleanup_mkey_table(dev); 1267 mlx5_cleanup_mkey_table(dev);
1258 mlx5_cleanup_srq_table(dev); 1268 mlx5_cleanup_srq_table(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 3e35611b19c3..752c08127138 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -202,15 +202,24 @@ int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
202} 202}
203EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper); 203EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper);
204 204
205int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, 205int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
206 int proto_mask) 206 u32 proto_admin, int proto_mask)
207{ 207{
208 u32 in[MLX5_ST_SZ_DW(ptys_reg)];
209 u32 out[MLX5_ST_SZ_DW(ptys_reg)]; 208 u32 out[MLX5_ST_SZ_DW(ptys_reg)];
209 u32 in[MLX5_ST_SZ_DW(ptys_reg)];
210 u8 an_disable_admin;
211 u8 an_disable_cap;
212 u8 an_status;
213
214 mlx5_query_port_autoneg(dev, proto_mask, &an_status,
215 &an_disable_cap, &an_disable_admin);
216 if (!an_disable_cap && an_disable)
217 return -EPERM;
210 218
211 memset(in, 0, sizeof(in)); 219 memset(in, 0, sizeof(in));
212 220
213 MLX5_SET(ptys_reg, in, local_port, 1); 221 MLX5_SET(ptys_reg, in, local_port, 1);
222 MLX5_SET(ptys_reg, in, an_disable_admin, an_disable);
214 MLX5_SET(ptys_reg, in, proto_mask, proto_mask); 223 MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
215 if (proto_mask == MLX5_PTYS_EN) 224 if (proto_mask == MLX5_PTYS_EN)
216 MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin); 225 MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
@@ -220,7 +229,19 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
220 return mlx5_core_access_reg(dev, in, sizeof(in), out, 229 return mlx5_core_access_reg(dev, in, sizeof(in), out,
221 sizeof(out), MLX5_REG_PTYS, 0, 1); 230 sizeof(out), MLX5_REG_PTYS, 0, 1);
222} 231}
223EXPORT_SYMBOL_GPL(mlx5_set_port_proto); 232EXPORT_SYMBOL_GPL(mlx5_set_port_ptys);
233
 234/* This function should only be used after setting a port register */
235void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
236{
237 enum mlx5_port_status ps;
238
239 mlx5_query_port_admin_status(dev, &ps);
240 mlx5_set_port_admin_status(dev, MLX5_PORT_DOWN);
241 if (ps == MLX5_PORT_UP)
242 mlx5_set_port_admin_status(dev, MLX5_PORT_UP);
243}
244EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
224 245
225int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, 246int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
226 enum mlx5_port_status status) 247 enum mlx5_port_status status)
@@ -518,6 +539,25 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
518} 539}
519EXPORT_SYMBOL_GPL(mlx5_query_port_pfc); 540EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
520 541
542void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
543 u8 *an_status,
544 u8 *an_disable_cap, u8 *an_disable_admin)
545{
546 u32 out[MLX5_ST_SZ_DW(ptys_reg)];
547
548 *an_status = 0;
549 *an_disable_cap = 0;
550 *an_disable_admin = 0;
551
552 if (mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1))
553 return;
554
555 *an_status = MLX5_GET(ptys_reg, out, an_status);
556 *an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap);
557 *an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
558}
559EXPORT_SYMBOL_GPL(mlx5_query_port_autoneg);
560
521int mlx5_max_tc(struct mlx5_core_dev *mdev) 561int mlx5_max_tc(struct mlx5_core_dev *mdev)
522{ 562{
523 u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? : 8; 563 u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? : 8;
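
The reworked mlx5_set_port_ptys() above refuses to disable autoneg (returning -EPERM) when the queried an_disable_cap is not set, and the new mlx5_toggle_port_link() bounces the admin state so a freshly written PTYS register takes effect. A caller-side sketch of how the two are meant to combine; the helper name is hypothetical and not part of this patch:

/* Hypothetical caller: apply new link settings, then bounce the port. */
static int apply_link_settings(struct mlx5_core_dev *dev, bool an_disable,
			       u32 proto_admin)
{
	int err;

	err = mlx5_set_port_ptys(dev, an_disable, proto_admin, MLX5_PTYS_EN);
	if (err)
		return err;	/* -EPERM when autoneg cannot be disabled */

	/* per the comment above, toggle only after setting the register */
	mlx5_toggle_port_link(dev);
	return 0;
}
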
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
new file mode 100644
index 000000000000..c07c28bd3d55
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -0,0 +1,209 @@
1/*
2 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/mlx5/driver.h>
36#include <linux/mlx5/cmd.h>
37#include "mlx5_core.h"
38
 39/* Finds an entry where we can register the given rate.
 40 * If the rate already exists, return the entry where it is registered;
 41 * otherwise return the first available entry.
 42 * If the table is full, return NULL.
 43 */
44static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
45 u32 rate)
46{
47 struct mlx5_rl_entry *ret_entry = NULL;
48 bool empty_found = false;
49 int i;
50
51 for (i = 0; i < table->max_size; i++) {
52 if (table->rl_entry[i].rate == rate)
53 return &table->rl_entry[i];
54 if (!empty_found && !table->rl_entry[i].rate) {
55 empty_found = true;
56 ret_entry = &table->rl_entry[i];
57 }
58 }
59
60 return ret_entry;
61}
62
63static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
64 u32 rate, u16 index)
65{
66 u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)];
67 u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)];
68
69 memset(in, 0, sizeof(in));
70 memset(out, 0, sizeof(out));
71
72 MLX5_SET(set_rate_limit_in, in, opcode,
73 MLX5_CMD_OP_SET_RATE_LIMIT);
74 MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
75 MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
76
77 return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
78 out, sizeof(out));
79}
80
81bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate)
82{
83 struct mlx5_rl_table *table = &dev->priv.rl_table;
84
85 return (rate <= table->max_rate && rate >= table->min_rate);
86}
87EXPORT_SYMBOL(mlx5_rl_is_in_range);
88
89int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
90{
91 struct mlx5_rl_table *table = &dev->priv.rl_table;
92 struct mlx5_rl_entry *entry;
93 int err = 0;
94
95 mutex_lock(&table->rl_lock);
96
97 if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
98 mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
99 rate, table->min_rate, table->max_rate);
100 err = -EINVAL;
101 goto out;
102 }
103
104 entry = find_rl_entry(table, rate);
105 if (!entry) {
106 mlx5_core_err(dev, "Max number of %u rates reached\n",
107 table->max_size);
108 err = -ENOSPC;
109 goto out;
110 }
111 if (entry->refcount) {
112 /* rate already configured */
113 entry->refcount++;
114 } else {
115 /* new rate limit */
116 err = mlx5_set_rate_limit_cmd(dev, rate, entry->index);
117 if (err) {
118 mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
119 rate, err);
120 goto out;
121 }
122 entry->rate = rate;
123 entry->refcount = 1;
124 }
125 *index = entry->index;
126
127out:
128 mutex_unlock(&table->rl_lock);
129 return err;
130}
131EXPORT_SYMBOL(mlx5_rl_add_rate);
132
133void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)
134{
135 struct mlx5_rl_table *table = &dev->priv.rl_table;
136 struct mlx5_rl_entry *entry = NULL;
137
138 /* 0 is a reserved value for unlimited rate */
139 if (rate == 0)
140 return;
141
142 mutex_lock(&table->rl_lock);
143 entry = find_rl_entry(table, rate);
144 if (!entry || !entry->refcount) {
145 mlx5_core_warn(dev, "Rate %u is not configured\n", rate);
146 goto out;
147 }
148
149 entry->refcount--;
150 if (!entry->refcount) {
151 /* need to remove rate */
152 mlx5_set_rate_limit_cmd(dev, 0, entry->index);
153 entry->rate = 0;
154 }
155
156out:
157 mutex_unlock(&table->rl_lock);
158}
159EXPORT_SYMBOL(mlx5_rl_remove_rate);
160
161int mlx5_init_rl_table(struct mlx5_core_dev *dev)
162{
163 struct mlx5_rl_table *table = &dev->priv.rl_table;
164 int i;
165
166 mutex_init(&table->rl_lock);
167 if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) {
168 table->max_size = 0;
169 return 0;
170 }
171
172 /* First entry is reserved for unlimited rate */
173 table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1;
174 table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate);
175 table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate);
176
177 table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry),
178 GFP_KERNEL);
179 if (!table->rl_entry)
180 return -ENOMEM;
181
 182 /* The index represents the index in the HW rate limit table
183 * Index 0 is reserved for unlimited rate
184 */
185 for (i = 0; i < table->max_size; i++)
186 table->rl_entry[i].index = i + 1;
187
188 /* Index 0 is reserved */
189 mlx5_core_info(dev, "Rate limit: %u rates are supported, range: %uMbps to %uMbps\n",
190 table->max_size,
191 table->min_rate >> 10,
192 table->max_rate >> 10);
193
194 return 0;
195}
196
197void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
198{
199 struct mlx5_rl_table *table = &dev->priv.rl_table;
200 int i;
201
202 /* Clear all configured rates */
203 for (i = 0; i < table->max_size; i++)
204 if (table->rl_entry[i].rate)
205 mlx5_set_rate_limit_cmd(dev, 0,
206 table->rl_entry[i].index);
207
208 kfree(dev->priv.rl_table.rl_entry);
209}
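
rl.c above implements a refcounted rate table: equal rates share one hardware index, the SET_RATE_LIMIT command is issued only for the first user, and the entry is cleared again when the last user releases it (index 0 stays reserved for "unlimited"). A standalone simulation of that pattern, with the hardware command elided and sizes chosen arbitrarily:

#include <stdio.h>

#define MAX_RATES 4

struct rl_entry { unsigned rate; unsigned refcount; unsigned index; };
static struct rl_entry table[MAX_RATES];

static struct rl_entry *find_entry(unsigned rate)
{
	struct rl_entry *free_slot = NULL;
	int i;

	for (i = 0; i < MAX_RATES; i++) {
		if (table[i].rate == rate)
			return &table[i];	/* rate already registered */
		if (!free_slot && !table[i].rate)
			free_slot = &table[i];
	}
	return free_slot;			/* NULL when the table is full */
}

static int add_rate(unsigned rate, unsigned *index)
{
	struct rl_entry *e = find_entry(rate);

	if (!e)
		return -1;			/* table full */
	if (!e->refcount)
		e->rate = rate;			/* first user programs HW here */
	e->refcount++;
	*index = e->index;
	return 0;
}

static void remove_rate(unsigned rate)
{
	struct rl_entry *e = find_entry(rate);

	if (!e || !e->refcount)
		return;
	if (--e->refcount == 0)
		e->rate = 0;			/* last user clears the HW entry */
}

int main(void)
{
	unsigned ix, i;

	for (i = 0; i < MAX_RATES; i++)
		table[i].index = i + 1;		/* index 0 reserved: unlimited */

	add_rate(1000, &ix);
	add_rate(1000, &ix);			/* shares the same entry */
	printf("rate 1000 -> index %u, refcount %u\n", ix, table[0].refcount);
	remove_rate(1000);
	remove_rate(1000);			/* entry freed on last release */
	printf("entry 0 rate after release: %u\n", table[0].rate);
	return 0;
}
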
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index b0a0b01bb4ef..01ae54826d5c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1736,7 +1736,7 @@ static int __init mlxsw_core_module_init(void)
1736{ 1736{
1737 int err; 1737 int err;
1738 1738
1739 mlxsw_wq = create_workqueue(mlxsw_core_driver_name); 1739 mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
1740 if (!mlxsw_wq) 1740 if (!mlxsw_wq)
1741 return -ENOMEM; 1741 return -ENOMEM;
1742 mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL); 1742 mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 660429ebfbe1..a453fffaa1a4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -49,6 +49,7 @@
49#include <linux/jiffies.h> 49#include <linux/jiffies.h>
50#include <linux/bitops.h> 50#include <linux/bitops.h>
51#include <linux/list.h> 51#include <linux/list.h>
52#include <linux/notifier.h>
52#include <linux/dcbnl.h> 53#include <linux/dcbnl.h>
53#include <net/switchdev.h> 54#include <net/switchdev.h>
54#include <generated/utsrelease.h> 55#include <generated/utsrelease.h>
@@ -636,14 +637,14 @@ static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
636 return 0; 637 return 0;
637} 638}
638 639
639static struct mlxsw_sp_vfid * 640static struct mlxsw_sp_fid *
640mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid) 641mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
641{ 642{
642 struct mlxsw_sp_vfid *vfid; 643 struct mlxsw_sp_fid *f;
643 644
644 list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) { 645 list_for_each_entry(f, &mlxsw_sp->port_vfids.list, list) {
645 if (vfid->vid == vid) 646 if (f->vid == vid)
646 return vfid; 647 return f;
647 } 648 }
648 649
649 return NULL; 650 return NULL;
@@ -655,75 +656,70 @@ static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
655 MLXSW_SP_VFID_PORT_MAX); 656 MLXSW_SP_VFID_PORT_MAX);
656} 657}
657 658
658static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid) 659static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
659{ 660{
660 u16 fid = mlxsw_sp_vfid_to_fid(vfid);
661 char sfmr_pl[MLXSW_REG_SFMR_LEN]; 661 char sfmr_pl[MLXSW_REG_SFMR_LEN];
662 662
663 mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0); 663 mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
664 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); 664 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
665} 665}
666 666
667static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid) 667static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
668{
669 u16 fid = mlxsw_sp_vfid_to_fid(vfid);
670 char sfmr_pl[MLXSW_REG_SFMR_LEN];
671 668
672 mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0); 669static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
673 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); 670 u16 vid)
674}
675
676static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
677 u16 vid)
678{ 671{
679 struct device *dev = mlxsw_sp->bus_info->dev; 672 struct device *dev = mlxsw_sp->bus_info->dev;
680 struct mlxsw_sp_vfid *vfid; 673 struct mlxsw_sp_fid *f;
681 u16 n_vfid; 674 u16 vfid, fid;
682 int err; 675 int err;
683 676
684 n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp); 677 vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
685 if (n_vfid == MLXSW_SP_VFID_PORT_MAX) { 678 if (vfid == MLXSW_SP_VFID_PORT_MAX) {
686 dev_err(dev, "No available vFIDs\n"); 679 dev_err(dev, "No available vFIDs\n");
687 return ERR_PTR(-ERANGE); 680 return ERR_PTR(-ERANGE);
688 } 681 }
689 682
690 err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); 683 fid = mlxsw_sp_vfid_to_fid(vfid);
684 err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
691 if (err) { 685 if (err) {
692 dev_err(dev, "Failed to create vFID=%d\n", n_vfid); 686 dev_err(dev, "Failed to create FID=%d\n", fid);
693 return ERR_PTR(err); 687 return ERR_PTR(err);
694 } 688 }
695 689
696 vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); 690 f = kzalloc(sizeof(*f), GFP_KERNEL);
697 if (!vfid) 691 if (!f)
698 goto err_allocate_vfid; 692 goto err_allocate_vfid;
699 693
700 vfid->vfid = n_vfid; 694 f->leave = mlxsw_sp_vport_vfid_leave;
701 vfid->vid = vid; 695 f->fid = fid;
696 f->vid = vid;
702 697
703 list_add(&vfid->list, &mlxsw_sp->port_vfids.list); 698 list_add(&f->list, &mlxsw_sp->port_vfids.list);
704 set_bit(n_vfid, mlxsw_sp->port_vfids.mapped); 699 set_bit(vfid, mlxsw_sp->port_vfids.mapped);
705 700
706 return vfid; 701 return f;
707 702
708err_allocate_vfid: 703err_allocate_vfid:
709 __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); 704 mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
710 return ERR_PTR(-ENOMEM); 705 return ERR_PTR(-ENOMEM);
711} 706}
712 707
713static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, 708static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
714 struct mlxsw_sp_vfid *vfid) 709 struct mlxsw_sp_fid *f)
715{ 710{
716 clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped); 711 u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
717 list_del(&vfid->list); 712
713 clear_bit(vfid, mlxsw_sp->port_vfids.mapped);
714 list_del(&f->list);
718 715
719 __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); 716 mlxsw_sp_vfid_op(mlxsw_sp, f->fid, false);
720 717
721 kfree(vfid); 718 kfree(f);
722} 719}
723 720
724static struct mlxsw_sp_port * 721static struct mlxsw_sp_port *
725mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, 722mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
726 struct mlxsw_sp_vfid *vfid)
727{ 723{
728 struct mlxsw_sp_port *mlxsw_sp_vport; 724 struct mlxsw_sp_port *mlxsw_sp_vport;
729 725
@@ -741,8 +737,7 @@ mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
741 mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING; 737 mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
742 mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged; 738 mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
743 mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id; 739 mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
744 mlxsw_sp_vport->vport.vfid = vfid; 740 mlxsw_sp_vport->vport.vid = vid;
745 mlxsw_sp_vport->vport.vid = vfid->vid;
746 741
747 list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list); 742 list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
748 743
@@ -755,13 +750,72 @@ static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
755 kfree(mlxsw_sp_vport); 750 kfree(mlxsw_sp_vport);
756} 751}
757 752
753static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
754 bool valid)
755{
756 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
757 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
758
759 return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
760 vid);
761}
762
763static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport)
764{
765 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
766 struct mlxsw_sp_fid *f;
767 int err;
768
769 f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, vid);
770 if (!f) {
771 f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, vid);
772 if (IS_ERR(f))
773 return PTR_ERR(f);
774 }
775
776 if (!f->ref_count) {
777 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
778 if (err)
779 goto err_vport_flood_set;
780 }
781
782 err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
783 if (err)
784 goto err_vport_fid_map;
785
786 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
787 f->ref_count++;
788
789 return 0;
790
791err_vport_fid_map:
792 if (!f->ref_count)
793 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
794err_vport_flood_set:
795 if (!f->ref_count)
796 mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
797 return err;
798}
799
800static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
801{
802 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
803
804 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
805
806 mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
807
808 if (--f->ref_count == 0) {
809 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
810 mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
811 }
812}
813
758int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, 814int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
759 u16 vid) 815 u16 vid)
760{ 816{
761 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 817 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
762 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
763 struct mlxsw_sp_port *mlxsw_sp_vport; 818 struct mlxsw_sp_port *mlxsw_sp_vport;
764 struct mlxsw_sp_vfid *vfid;
765 int err; 819 int err;
766 820
767 /* VLAN 0 is added to HW filter when device goes up, but it is 821 /* VLAN 0 is added to HW filter when device goes up, but it is
@@ -775,31 +829,10 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
775 return 0; 829 return 0;
776 } 830 }
777 831
778 vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); 832 mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
779 if (!vfid) {
780 vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
781 if (IS_ERR(vfid)) {
782 netdev_err(dev, "Failed to create vFID for VID=%d\n",
783 vid);
784 return PTR_ERR(vfid);
785 }
786 }
787
788 mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
789 if (!mlxsw_sp_vport) { 833 if (!mlxsw_sp_vport) {
790 netdev_err(dev, "Failed to create vPort for VID=%d\n", vid); 834 netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
791 err = -ENOMEM; 835 return -ENOMEM;
792 goto err_port_vport_create;
793 }
794
795 if (!vfid->nr_vports) {
796 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
797 true, false);
798 if (err) {
799 netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
800 vfid->vfid);
801 goto err_vport_flood_set;
802 }
803 } 836 }
804 837
805 /* When adding the first VLAN interface on a bridged port we need to 838 /* When adding the first VLAN interface on a bridged port we need to
@@ -814,15 +847,10 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
814 } 847 }
815 } 848 }
816 849
817 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, 850 err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport);
818 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
819 true,
820 mlxsw_sp_vfid_to_fid(vfid->vfid),
821 vid);
822 if (err) { 851 if (err) {
823 netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n", 852 netdev_err(dev, "Failed to join vFID\n");
824 vid, vfid->vfid); 853 goto err_vport_vfid_join;
825 goto err_port_vid_to_fid_set;
826 } 854 }
827 855
828 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); 856 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
@@ -845,8 +873,6 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
845 goto err_port_stp_state_set; 873 goto err_port_stp_state_set;
846 } 874 }
847 875
848 vfid->nr_vports++;
849
850 return 0; 876 return 0;
851 877
852err_port_stp_state_set: 878err_port_stp_state_set:
@@ -854,21 +880,12 @@ err_port_stp_state_set:
854err_port_add_vid: 880err_port_add_vid:
855 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); 881 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
856err_port_vid_learning_set: 882err_port_vid_learning_set:
857 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, 883 mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
858 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, 884err_vport_vfid_join:
859 mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
860err_port_vid_to_fid_set:
861 if (list_is_singular(&mlxsw_sp_port->vports_list)) 885 if (list_is_singular(&mlxsw_sp_port->vports_list))
862 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); 886 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
863err_port_vp_mode_trans: 887err_port_vp_mode_trans:
864 if (!vfid->nr_vports)
865 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
866 false);
867err_vport_flood_set:
868 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); 888 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
869err_port_vport_create:
870 if (!vfid->nr_vports)
871 mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
872 return err; 889 return err;
873} 890}
874 891
@@ -877,7 +894,7 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
877{ 894{
878 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 895 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
879 struct mlxsw_sp_port *mlxsw_sp_vport; 896 struct mlxsw_sp_port *mlxsw_sp_vport;
880 struct mlxsw_sp_vfid *vfid; 897 struct mlxsw_sp_fid *f;
881 int err; 898 int err;
882 899
883 /* VLAN 0 is removed from HW filter when device goes down, but 900 /* VLAN 0 is removed from HW filter when device goes down, but
@@ -892,8 +909,6 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
892 return 0; 909 return 0;
893 } 910 }
894 911
895 vfid = mlxsw_sp_vport->vport.vfid;
896
897 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, 912 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
898 MLXSW_REG_SPMS_STATE_DISCARDING); 913 MLXSW_REG_SPMS_STATE_DISCARDING);
899 if (err) { 914 if (err) {
@@ -914,16 +929,12 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
914 return err; 929 return err;
915 } 930 }
916 931
 917 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, 932 /* Drop FID reference. If this was the last reference, the
918 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, 933 * resources will be freed.
919 false, 934 */
920 mlxsw_sp_vfid_to_fid(vfid->vfid), 935 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
921 vid); 936 if (f && !WARN_ON(!f->leave))
922 if (err) { 937 f->leave(mlxsw_sp_vport);
923 netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
924 vid, vfid->vfid);
925 return err;
926 }
927 938
928 /* When removing the last VLAN interface on a bridged port we need to 939 /* When removing the last VLAN interface on a bridged port we need to
929 * transition all active 802.1Q bridge VLANs to use VID to FID 940 * transition all active 802.1Q bridge VLANs to use VID to FID
@@ -937,13 +948,8 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
937 } 948 }
938 } 949 }
939 950
940 vfid->nr_vports--;
941 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); 951 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
942 952
943 /* Destroy the vFID if no vPorts are assigned to it anymore. */
944 if (!vfid->nr_vports)
945 mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);
946
947 return 0; 953 return 0;
948} 954}
949 955
@@ -2403,6 +2409,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
2403 2409
2404 mlxsw_sp->core = mlxsw_core; 2410 mlxsw_sp->core = mlxsw_core;
2405 mlxsw_sp->bus_info = mlxsw_bus_info; 2411 mlxsw_sp->bus_info = mlxsw_bus_info;
2412 INIT_LIST_HEAD(&mlxsw_sp->fids);
2406 INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list); 2413 INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
2407 INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list); 2414 INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
2408 INIT_LIST_HEAD(&mlxsw_sp->br_mids.list); 2415 INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
@@ -2479,6 +2486,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
2479 mlxsw_sp_traps_fini(mlxsw_sp); 2486 mlxsw_sp_traps_fini(mlxsw_sp);
2480 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); 2487 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2481 mlxsw_sp_ports_remove(mlxsw_sp); 2488 mlxsw_sp_ports_remove(mlxsw_sp);
2489 WARN_ON(!list_empty(&mlxsw_sp->fids));
2482} 2490}
2483 2491
2484static struct mlxsw_config_profile mlxsw_sp_config_profile = { 2492static struct mlxsw_config_profile mlxsw_sp_config_profile = {
@@ -2540,16 +2548,37 @@ static struct mlxsw_driver mlxsw_sp_driver = {
2540 .profile = &mlxsw_sp_config_profile, 2548 .profile = &mlxsw_sp_config_profile,
2541}; 2549};
2542 2550
2543static int 2551static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
2544mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port) 2552 u16 fid)
2553{
2554 if (mlxsw_sp_fid_is_vfid(fid))
2555 return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
2556 else
2557 return test_bit(fid, lag_port->active_vlans);
2558}
2559
2560static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
2561 u16 fid)
2545{ 2562{
2546 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2563 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2547 char sfdf_pl[MLXSW_REG_SFDF_LEN]; 2564 u8 local_port = mlxsw_sp_port->local_port;
2565 u16 lag_id = mlxsw_sp_port->lag_id;
2566 int i, count = 0;
2548 2567
2549 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT); 2568 if (!mlxsw_sp_port->lagged)
2550 mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port); 2569 return true;
2551 2570
2552 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); 2571 for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2572 struct mlxsw_sp_port *lag_port;
2573
2574 lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
2575 if (!lag_port || lag_port->local_port == local_port)
2576 continue;
2577 if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
2578 count++;
2579 }
2580
2581 return !count;
2553} 2582}
2554 2583
2555static int 2584static int
@@ -2564,17 +2593,8 @@ mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2564 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, 2593 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
2565 mlxsw_sp_port->local_port); 2594 mlxsw_sp_port->local_port);
2566 2595
2567 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); 2596 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
2568} 2597 mlxsw_sp_port->local_port, fid);
2569
2570static int
2571mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
2572{
2573 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2574 char sfdf_pl[MLXSW_REG_SFDF_LEN];
2575
2576 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
2577 mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2578 2598
2579 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); 2599 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2580} 2600}
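
mlxsw_sp_port_fdb_should_flush(), added in the hunk above, avoids flushing FDB records for a {LAG, FID} pair while any other member of the same LAG still uses that FID, since those records are keyed by LAG ID rather than by local port. A standalone sketch of the rule; the member count and the per-member FID bitmap are illustrative stand-ins for the driver's port iteration:

#include <stdio.h>
#include <stdbool.h>

#define LAG_MEMBERS 4

/* member_fids[i][fid] is true when LAG member i has the FID configured */
static bool member_fids[LAG_MEMBERS][8];

static bool should_flush(int local_member, int fid)
{
	int i;

	for (i = 0; i < LAG_MEMBERS; i++) {
		if (i == local_member)
			continue;
		if (member_fids[i][fid])
			return false;	/* another member still uses the FID */
	}
	return true;
}

int main(void)
{
	member_fids[0][5] = true;
	member_fids[1][5] = true;

	printf("member 0 leaves FID 5: flush=%d\n", should_flush(0, 5));
	member_fids[1][5] = false;
	printf("last member leaves FID 5: flush=%d\n", should_flush(0, 5));
	return 0;
}
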
@@ -2590,71 +2610,51 @@ mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2590 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); 2610 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2591 mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); 2611 mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2592 2612
2613 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
2614 mlxsw_sp_port->lag_id, fid);
2615
2593 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); 2616 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2594} 2617}
2595 2618
2596static int 2619int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
2597__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
2598{ 2620{
2599 int err, last_err = 0; 2621 if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
2600 u16 vid; 2622 return 0;
2601
2602 for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2603 err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
2604 if (err)
2605 last_err = err;
2606 }
2607 2623
2608 return last_err; 2624 if (mlxsw_sp_port->lagged)
2625 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
2626 fid);
2627 else
2628 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
2609} 2629}
2610 2630
2611static int 2631static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
2612__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
2613{ 2632{
2614 int err, last_err = 0; 2633 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
2615 u16 vid;
2616
2617 for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2618 err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
2619 if (err)
2620 last_err = err;
2621 }
2622
2623 return last_err;
2624} 2634}
2625 2635
2626static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port) 2636static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
2637 struct net_device *br_dev)
2627{ 2638{
2628 if (!list_empty(&mlxsw_sp_port->vports_list)) 2639 return !mlxsw_sp->master_bridge.dev ||
2629 if (mlxsw_sp_port->lagged) 2640 mlxsw_sp->master_bridge.dev == br_dev;
2630 return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
2631 else
2632 return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
2633 else
2634 if (mlxsw_sp_port->lagged)
2635 return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
2636 else
2637 return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
2638} 2641}
2639 2642
2640static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport) 2643static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
2644 struct net_device *br_dev)
2641{ 2645{
2642 u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport); 2646 mlxsw_sp->master_bridge.dev = br_dev;
2643 u16 fid = mlxsw_sp_vfid_to_fid(vfid); 2647 mlxsw_sp->master_bridge.ref_count++;
2644
2645 if (mlxsw_sp_vport->lagged)
2646 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
2647 fid);
2648 else
2649 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
2650} 2648}
2651 2649
2652static bool mlxsw_sp_port_dev_check(const struct net_device *dev) 2650static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
2653{ 2651{
2654 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 2652 if (--mlxsw_sp->master_bridge.ref_count == 0)
2653 mlxsw_sp->master_bridge.dev = NULL;
2655} 2654}
2656 2655
2657static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) 2656static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
2657 struct net_device *br_dev)
2658{ 2658{
2659 struct net_device *dev = mlxsw_sp_port->dev; 2659 struct net_device *dev = mlxsw_sp_port->dev;
2660 int err; 2660 int err;
@@ -2668,6 +2668,8 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
2668 if (err) 2668 if (err)
2669 return err; 2669 return err;
2670 2670
2671 mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);
2672
2671 mlxsw_sp_port->learning = 1; 2673 mlxsw_sp_port->learning = 1;
2672 mlxsw_sp_port->learning_sync = 1; 2674 mlxsw_sp_port->learning_sync = 1;
2673 mlxsw_sp_port->uc_flood = 1; 2675 mlxsw_sp_port->uc_flood = 1;
@@ -2676,16 +2678,14 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
2676 return 0; 2678 return 0;
2677} 2679}
2678 2680
2679static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, 2681static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
2680 bool flush_fdb)
2681{ 2682{
2682 struct net_device *dev = mlxsw_sp_port->dev; 2683 struct net_device *dev = mlxsw_sp_port->dev;
2683 2684
2684 if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
2685 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2686
2687 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); 2685 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
2688 2686
2687 mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);
2688
2689 mlxsw_sp_port->learning = 0; 2689 mlxsw_sp_port->learning = 0;
2690 mlxsw_sp_port->learning_sync = 0; 2690 mlxsw_sp_port->learning_sync = 0;
2691 mlxsw_sp_port->uc_flood = 0; 2691 mlxsw_sp_port->uc_flood = 0;
@@ -2694,28 +2694,7 @@ static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2694 /* Add implicit VLAN interface in the device, so that untagged 2694 /* Add implicit VLAN interface in the device, so that untagged
2695 * packets will be classified to the default vFID. 2695 * packets will be classified to the default vFID.
2696 */ 2696 */
2697 return mlxsw_sp_port_add_vid(dev, 0, 1); 2697 mlxsw_sp_port_add_vid(dev, 0, 1);
2698}
2699
2700static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
2701 struct net_device *br_dev)
2702{
2703 return !mlxsw_sp->master_bridge.dev ||
2704 mlxsw_sp->master_bridge.dev == br_dev;
2705}
2706
2707static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
2708 struct net_device *br_dev)
2709{
2710 mlxsw_sp->master_bridge.dev = br_dev;
2711 mlxsw_sp->master_bridge.ref_count++;
2712}
2713
2714static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
2715 struct net_device *br_dev)
2716{
2717 if (--mlxsw_sp->master_bridge.ref_count == 0)
2718 mlxsw_sp->master_bridge.dev = NULL;
2719} 2698}
2720 2699
2721static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 2700static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
@@ -2876,65 +2855,33 @@ err_col_port_add:
 	return err;
 }
 
-static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
-				       struct net_device *br_dev,
-				       bool flush_fdb);
-
-static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
-				   struct net_device *lag_dev)
+static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+				    struct net_device *lag_dev)
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-	struct mlxsw_sp_port *mlxsw_sp_vport;
-	struct mlxsw_sp_upper *lag;
 	u16 lag_id = mlxsw_sp_port->lag_id;
-	int err;
+	struct mlxsw_sp_upper *lag;
 
 	if (!mlxsw_sp_port->lagged)
-		return 0;
+		return;
 	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
 	WARN_ON(lag->ref_count == 0);
 
-	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
-	if (err)
-		return err;
-	err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
-	if (err)
-		return err;
-
-	/* In case we leave a LAG device that has bridges built on top,
-	 * then their teardown sequence is never issued and we need to
-	 * invoke the necessary cleanup routines ourselves.
-	 */
-	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
-			    vport.list) {
-		struct net_device *br_dev;
-
-		if (!mlxsw_sp_vport->bridged)
-			continue;
-
-		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
-		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
-	}
+	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
+	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
 
 	if (mlxsw_sp_port->bridged) {
 		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
-		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
-		mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
+		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
 	}
 
-	if (lag->ref_count == 1) {
-		if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
-			netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
-		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
-		if (err)
-			return err;
-	}
+	if (lag->ref_count == 1)
+		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
 
 	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
 				     mlxsw_sp_port->local_port);
 	mlxsw_sp_port->lagged = 0;
 	lag->ref_count--;
-	return 0;
 }
 
 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
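
Worth noting in the hunk above: the LAG leave path drops its int return entirely. Teardown that cannot be aborted halfway has no caller able to act on an error, so failures from the device are at most logged and the rest of the cleanup still runs. A minimal standalone sketch of the pattern (hypothetical names, not driver code):

#include <stdio.h>

/* Hypothetical device handle; a stand-in for the driver's real objects. */
struct port { int lagged; };

static int hw_col_port_disable(struct port *p) { (void)p; return 0; }
static int hw_col_port_remove(struct port *p)  { (void)p; return 0; }

/* Teardown returns void: failures are logged, never propagated, so the
 * caller's own cleanup sequence always runs to completion. */
static void port_lag_leave(struct port *p)
{
	if (!p->lagged)
		return;
	if (hw_col_port_disable(p))
		fprintf(stderr, "disable failed, continuing teardown\n");
	if (hw_col_port_remove(p))
		fprintf(stderr, "remove failed, continuing teardown\n");
	p->lagged = 0;
}

int main(void)
{
	struct port p = { .lagged = 1 };

	port_lag_leave(&p);
	return 0;
}
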
@@ -2983,42 +2930,25 @@ static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
 	u16 vid = vlan_dev_vlan_id(vlan_dev);
 
 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
-	if (!mlxsw_sp_vport) {
-		WARN_ON(!mlxsw_sp_vport);
+	if (WARN_ON(!mlxsw_sp_vport))
 		return -EINVAL;
-	}
 
 	mlxsw_sp_vport->dev = vlan_dev;
 
 	return 0;
 }
 
-static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
-				     struct net_device *vlan_dev)
+static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
+				      struct net_device *vlan_dev)
 {
 	struct mlxsw_sp_port *mlxsw_sp_vport;
 	u16 vid = vlan_dev_vlan_id(vlan_dev);
 
 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
-	if (!mlxsw_sp_vport) {
-		WARN_ON(!mlxsw_sp_vport);
-		return -EINVAL;
-	}
-
-	/* When removing a VLAN device while still bridged we should first
-	 * remove it from the bridge, as we receive the bridge's notification
-	 * when the vPort is already gone.
-	 */
-	if (mlxsw_sp_vport->bridged) {
-		struct net_device *br_dev;
-
-		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
-		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
-	}
+	if (WARN_ON(!mlxsw_sp_vport))
+		return;
 
 	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
-
-	return 0;
 }
 
 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
@@ -3028,7 +2958,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
 	struct mlxsw_sp_port *mlxsw_sp_port;
 	struct net_device *upper_dev;
 	struct mlxsw_sp *mlxsw_sp;
-	int err;
+	int err = 0;
 
 	mlxsw_sp_port = netdev_priv(dev);
 	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -3037,73 +2967,56 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
 	switch (event) {
 	case NETDEV_PRECHANGEUPPER:
 		upper_dev = info->upper_dev;
-		if (!info->master || !info->linking)
+		if (!is_vlan_dev(upper_dev) &&
+		    !netif_is_lag_master(upper_dev) &&
+		    !netif_is_bridge_master(upper_dev))
+			return -EINVAL;
+		if (!info->linking)
 			break;
 		/* HW limitation forbids to put ports to multiple bridges. */
 		if (netif_is_bridge_master(upper_dev) &&
 		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
-			return NOTIFY_BAD;
+			return -EINVAL;
 		if (netif_is_lag_master(upper_dev) &&
 		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
 					       info->upper_info))
-			return NOTIFY_BAD;
+			return -EINVAL;
+		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
+			return -EINVAL;
+		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
+		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
+			return -EINVAL;
 		break;
 	case NETDEV_CHANGEUPPER:
 		upper_dev = info->upper_dev;
 		if (is_vlan_dev(upper_dev)) {
-			if (info->linking) {
+			if (info->linking)
 				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
 							      upper_dev);
-				if (err) {
-					netdev_err(dev, "Failed to link VLAN device\n");
-					return NOTIFY_BAD;
-				}
-			} else {
-				err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
-								upper_dev);
-				if (err) {
-					netdev_err(dev, "Failed to unlink VLAN device\n");
-					return NOTIFY_BAD;
-				}
-			}
+			else
+				mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
+							  upper_dev);
 		} else if (netif_is_bridge_master(upper_dev)) {
-			if (info->linking) {
-				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
-				if (err) {
-					netdev_err(dev, "Failed to join bridge\n");
-					return NOTIFY_BAD;
-				}
-				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
-			} else {
-				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
-								 true);
-				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
-				if (err) {
-					netdev_err(dev, "Failed to leave bridge\n");
-					return NOTIFY_BAD;
-				}
-			}
+			if (info->linking)
+				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
+								upper_dev);
+			else
+				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
 		} else if (netif_is_lag_master(upper_dev)) {
-			if (info->linking) {
+			if (info->linking)
 				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
 							     upper_dev);
-				if (err) {
-					netdev_err(dev, "Failed to join link aggregation\n");
-					return NOTIFY_BAD;
-				}
-			} else {
-				err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
-							      upper_dev);
-				if (err) {
-					netdev_err(dev, "Failed to leave link aggregation\n");
-					return NOTIFY_BAD;
-				}
-			}
+			else
+				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
+							upper_dev);
+		} else {
+			err = -EINVAL;
+			WARN_ON(1);
 		}
 		break;
 	}
 
-	return NOTIFY_DONE;
+	return err;
 }
 
 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
@@ -3127,7 +3040,7 @@ static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
 		break;
 	}
 
-	return NOTIFY_DONE;
+	return 0;
 }
 
 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
3141 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr); 3054 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
3142 } 3055 }
3143 3056
3144 return NOTIFY_DONE; 3057 return 0;
3145} 3058}
3146 3059
3147static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 3060static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
@@ -3154,23 +3067,23 @@ static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
3154 netdev_for_each_lower_dev(lag_dev, dev, iter) { 3067 netdev_for_each_lower_dev(lag_dev, dev, iter) {
3155 if (mlxsw_sp_port_dev_check(dev)) { 3068 if (mlxsw_sp_port_dev_check(dev)) {
3156 ret = mlxsw_sp_netdevice_port_event(dev, event, ptr); 3069 ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
3157 if (ret == NOTIFY_BAD) 3070 if (ret)
3158 return ret; 3071 return ret;
3159 } 3072 }
3160 } 3073 }
3161 3074
3162 return NOTIFY_DONE; 3075 return 0;
3163} 3076}
3164 3077
3165static struct mlxsw_sp_vfid * 3078static struct mlxsw_sp_fid *
3166mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp, 3079mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
3167 const struct net_device *br_dev) 3080 const struct net_device *br_dev)
3168{ 3081{
3169 struct mlxsw_sp_vfid *vfid; 3082 struct mlxsw_sp_fid *f;
3170 3083
3171 list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) { 3084 list_for_each_entry(f, &mlxsw_sp->br_vfids.list, list) {
3172 if (vfid->br_dev == br_dev) 3085 if (f->dev == br_dev)
3173 return vfid; 3086 return f;
3174 } 3087 }
3175 3088
3176 return NULL; 3089 return NULL;
@@ -3192,180 +3105,127 @@ static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
 			      MLXSW_SP_VFID_BR_MAX);
 }
 
-static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
-						     struct net_device *br_dev)
+static void mlxsw_sp_vport_br_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
+
+static struct mlxsw_sp_fid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
+						    struct net_device *br_dev)
 {
 	struct device *dev = mlxsw_sp->bus_info->dev;
-	struct mlxsw_sp_vfid *vfid;
-	u16 n_vfid;
+	struct mlxsw_sp_fid *f;
+	u16 vfid, fid;
 	int err;
 
-	n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
-	if (n_vfid == MLXSW_SP_VFID_MAX) {
+	vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
+	if (vfid == MLXSW_SP_VFID_MAX) {
 		dev_err(dev, "No available vFIDs\n");
 		return ERR_PTR(-ERANGE);
 	}
 
-	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
+	fid = mlxsw_sp_vfid_to_fid(vfid);
+	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
 	if (err) {
-		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
+		dev_err(dev, "Failed to create FID=%d\n", fid);
 		return ERR_PTR(err);
 	}
 
-	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
-	if (!vfid)
+	f = kzalloc(sizeof(*f), GFP_KERNEL);
+	if (!f)
 		goto err_allocate_vfid;
 
-	vfid->vfid = n_vfid;
-	vfid->br_dev = br_dev;
+	f->leave = mlxsw_sp_vport_br_vfid_leave;
+	f->fid = fid;
+	f->dev = br_dev;
 
-	list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
-	set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);
+	list_add(&f->list, &mlxsw_sp->br_vfids.list);
+	set_bit(mlxsw_sp_vfid_to_br_vfid(vfid), mlxsw_sp->br_vfids.mapped);
 
-	return vfid;
+	return f;
 
 err_allocate_vfid:
-	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
+	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
 	return ERR_PTR(-ENOMEM);
 }
 
 static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
-				     struct mlxsw_sp_vfid *vfid)
+				     struct mlxsw_sp_fid *f)
 {
-	u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);
+	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
+	u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid);
 
 	clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
-	list_del(&vfid->list);
+	list_del(&f->list);
 
-	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
+	mlxsw_sp_vfid_op(mlxsw_sp, f->fid, false);
 
-	kfree(vfid);
+	kfree(f);
 }
 
-static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
-				       struct net_device *br_dev,
-				       bool flush_fdb)
+static int mlxsw_sp_vport_br_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
+				       struct net_device *br_dev)
 {
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
-	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
-	struct net_device *dev = mlxsw_sp_vport->dev;
-	struct mlxsw_sp_vfid *vfid, *new_vfid;
+	struct mlxsw_sp_fid *f;
 	int err;
 
-	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
-	if (!vfid) {
-		WARN_ON(!vfid);
-		return -EINVAL;
+	f = mlxsw_sp_br_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
+	if (!f) {
+		f = mlxsw_sp_br_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
+		if (IS_ERR(f))
+			return PTR_ERR(f);
 	}
 
-	/* We need a vFID to go back to after leaving the bridge's vFID. */
-	new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
-	if (!new_vfid) {
-		new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
-		if (IS_ERR(new_vfid)) {
-			netdev_err(dev, "Failed to create vFID for VID=%d\n",
-				   vid);
-			return PTR_ERR(new_vfid);
-		}
-	}
+	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
+	if (err)
+		goto err_vport_flood_set;
 
-	/* Invalidate existing {Port, VID} to vFID mapping and create a new
-	 * one for the new vFID.
-	 */
-	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
-					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
-					   false,
-					   mlxsw_sp_vfid_to_fid(vfid->vfid),
-					   vid);
-	if (err) {
-		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
-			   vfid->vfid);
-		goto err_port_vid_to_fid_invalidate;
-	}
+	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
+	if (err)
+		goto err_vport_fid_map;
 
-	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
-					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
-					   true,
-					   mlxsw_sp_vfid_to_fid(new_vfid->vfid),
-					   vid);
-	if (err) {
-		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
-			   new_vfid->vfid);
-		goto err_port_vid_to_fid_validate;
-	}
+	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
+	f->ref_count++;
 
-	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
-	if (err) {
-		netdev_err(dev, "Failed to disable learning\n");
-		goto err_port_vid_learning_set;
-	}
+	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);
 
-	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
-				       false);
-	if (err) {
-		netdev_err(dev, "Failed clear to clear flooding\n");
-		goto err_vport_flood_set;
-	}
+	return 0;
 
-	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
-					  MLXSW_REG_SPMS_STATE_FORWARDING);
-	if (err) {
-		netdev_err(dev, "Failed to set STP state\n");
-		goto err_port_stp_state_set;
-	}
+err_vport_fid_map:
+	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
+err_vport_flood_set:
+	if (!f->ref_count)
+		mlxsw_sp_br_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
+	return err;
+}
+
+static void mlxsw_sp_vport_br_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
 
-	if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
-		netdev_err(dev, "Failed to flush FDB\n");
+	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
 
-	/* Switch between the vFIDs and destroy the old one if needed. */
-	new_vfid->nr_vports++;
-	mlxsw_sp_vport->vport.vfid = new_vfid;
-	vfid->nr_vports--;
-	if (!vfid->nr_vports)
-		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
+	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
 
-	mlxsw_sp_vport->learning = 0;
-	mlxsw_sp_vport->learning_sync = 0;
-	mlxsw_sp_vport->uc_flood = 0;
-	mlxsw_sp_vport->bridged = 0;
+	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
 
-	return 0;
+	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);
 
-err_port_stp_state_set:
-err_vport_flood_set:
-err_port_vid_learning_set:
-err_port_vid_to_fid_validate:
-err_port_vid_to_fid_invalidate:
-	/* Rollback vFID only if new. */
-	if (!new_vfid->nr_vports)
-		mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
-	return err;
+	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
+	if (--f->ref_count == 0)
+		mlxsw_sp_br_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
 }
 
 static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
 				      struct net_device *br_dev)
 {
-	struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
 	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
 	struct net_device *dev = mlxsw_sp_vport->dev;
-	struct mlxsw_sp_vfid *vfid;
 	int err;
 
-	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
-	if (!vfid) {
-		vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
-		if (IS_ERR(vfid)) {
-			netdev_err(dev, "Failed to create bridge vFID\n");
-			return PTR_ERR(vfid);
-		}
-	}
+	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
 
-	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
+	err = mlxsw_sp_vport_br_vfid_join(mlxsw_sp_vport, br_dev);
 	if (err) {
-		netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
-			   vfid->vfid);
-		goto err_port_flood_set;
+		netdev_err(dev, "Failed to join vFID\n");
+		goto err_vport_br_vfid_join;
 	}
 
 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
@@ -3374,38 +3234,6 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
 		goto err_port_vid_learning_set;
 	}
 
-	/* We need to invalidate existing {Port, VID} to vFID mapping and
-	 * create a new one for the bridge's vFID.
-	 */
-	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
-					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
-					   false,
-					   mlxsw_sp_vfid_to_fid(old_vfid->vfid),
-					   vid);
-	if (err) {
-		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
-			   old_vfid->vfid);
-		goto err_port_vid_to_fid_invalidate;
-	}
-
-	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
-					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
-					   true,
-					   mlxsw_sp_vfid_to_fid(vfid->vfid),
-					   vid);
-	if (err) {
-		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
-			   vfid->vfid);
-		goto err_port_vid_to_fid_validate;
-	}
-
-	/* Switch between the vFIDs and destroy the old one if needed. */
-	vfid->nr_vports++;
-	mlxsw_sp_vport->vport.vfid = vfid;
-	old_vfid->nr_vports--;
-	if (!old_vfid->nr_vports)
-		mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);
-
 	mlxsw_sp_vport->learning = 1;
 	mlxsw_sp_vport->learning_sync = 1;
 	mlxsw_sp_vport->uc_flood = 1;
@@ -3413,20 +3241,32 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
 
 	return 0;
 
-err_port_vid_to_fid_validate:
-	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
-				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
-				     mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
-err_port_vid_to_fid_invalidate:
-	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
 err_port_vid_learning_set:
-	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
-err_port_flood_set:
-	if (!vfid->nr_vports)
-		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
+	mlxsw_sp_vport_br_vfid_leave(mlxsw_sp_vport);
+err_vport_br_vfid_join:
+	mlxsw_sp_vport_vfid_join(mlxsw_sp_vport);
 	return err;
 }
 
+static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+
+	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
+
+	mlxsw_sp_vport_br_vfid_leave(mlxsw_sp_vport);
+
+	mlxsw_sp_vport_vfid_join(mlxsw_sp_vport);
+
+	mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
+				    MLXSW_REG_SPMS_STATE_FORWARDING);
+
+	mlxsw_sp_vport->learning = 0;
+	mlxsw_sp_vport->learning_sync = 0;
+	mlxsw_sp_vport->uc_flood = 0;
+	mlxsw_sp_vport->bridged = 0;
+}
+
 static bool
 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
 				  const struct net_device *br_dev)
@@ -3435,7 +3275,9 @@ mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
 
 	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
 			    vport.list) {
-		if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
+		struct net_device *dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
+
+		if (dev && dev == br_dev)
 			return false;
 	}
 
@@ -3450,56 +3292,39 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
 	struct netdev_notifier_changeupper_info *info = ptr;
 	struct mlxsw_sp_port *mlxsw_sp_vport;
 	struct net_device *upper_dev;
-	int err;
+	int err = 0;
 
 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
 
 	switch (event) {
 	case NETDEV_PRECHANGEUPPER:
 		upper_dev = info->upper_dev;
-		if (!info->master || !info->linking)
-			break;
 		if (!netif_is_bridge_master(upper_dev))
-			return NOTIFY_BAD;
+			return -EINVAL;
+		if (!info->linking)
+			break;
 		/* We can't have multiple VLAN interfaces configured on
 		 * the same port and being members in the same bridge.
 		 */
 		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
 						       upper_dev))
-			return NOTIFY_BAD;
+			return -EINVAL;
 		break;
 	case NETDEV_CHANGEUPPER:
 		upper_dev = info->upper_dev;
-		if (!info->master)
-			break;
 		if (info->linking) {
-			if (!mlxsw_sp_vport) {
-				WARN_ON(!mlxsw_sp_vport);
-				return NOTIFY_BAD;
-			}
+			if (WARN_ON(!mlxsw_sp_vport))
+				return -EINVAL;
 			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
 							 upper_dev);
-			if (err) {
-				netdev_err(dev, "Failed to join bridge\n");
-				return NOTIFY_BAD;
-			}
 		} else {
-			/* We ignore bridge's unlinking notifications if vPort
-			 * is gone, since we already left the bridge when the
-			 * VLAN device was unlinked from the real device.
-			 */
 			if (!mlxsw_sp_vport)
-				return NOTIFY_DONE;
-			err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
-							  upper_dev, true);
-			if (err) {
-				netdev_err(dev, "Failed to leave bridge\n");
-				return NOTIFY_BAD;
-			}
+				return 0;
+			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
 		}
 	}
 
-	return NOTIFY_DONE;
+	return err;
 }
 
 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
@@ -3514,12 +3339,12 @@ static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
 		if (mlxsw_sp_port_dev_check(dev)) {
 			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
 							     vid);
-			if (ret == NOTIFY_BAD)
+			if (ret)
 				return ret;
 		}
 	}
 
-	return NOTIFY_DONE;
+	return 0;
 }
 
 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
@@ -3535,24 +3360,23 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
 		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
 							   vid);
 
-	return NOTIFY_DONE;
+	return 0;
 }
 
 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
 				    unsigned long event, void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	int err = 0;
 
 	if (mlxsw_sp_port_dev_check(dev))
-		return mlxsw_sp_netdevice_port_event(dev, event, ptr);
-
-	if (netif_is_lag_master(dev))
-		return mlxsw_sp_netdevice_lag_event(dev, event, ptr);
-
-	if (is_vlan_dev(dev))
-		return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
+		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
+	else if (netif_is_lag_master(dev))
+		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
+	else if (is_vlan_dev(dev))
+		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
 
-	return NOTIFY_DONE;
+	return notifier_from_errno(err);
 }
 
 static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
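
The last hunk shows where the driver-internal convention changes sides: the handlers now return 0 or a negative errno, and only the outermost notifier callback converts that into the NOTIFY_* codes the netdev notifier chain expects, via notifier_from_errno(). The sketch below is a self-contained model of that encoding under the standard NOTIFY_* values (the kernel's own helpers live in include/linux/notifier.h); it is not a verbatim copy:

#include <stdio.h>

/* Standard notifier return codes (values as in include/linux/notifier.h). */
#define NOTIFY_DONE      0x0000
#define NOTIFY_OK        0x0001
#define NOTIFY_STOP_MASK 0x8000
#define NOTIFY_BAD       (NOTIFY_STOP_MASK | 0x0002)

/* Model of notifier_from_errno(): fold a -errno into one "stop" code. */
static int model_from_errno(int err)
{
	if (err)
		return NOTIFY_STOP_MASK | (NOTIFY_BAD - err);
	return NOTIFY_OK;
}

/* Model inverse: a stop-flagged value carries the original -errno.
 * (A bare NOTIFY_BAD decodes to 0 in this simplified model.) */
static int model_to_errno(int ret)
{
	return (ret & NOTIFY_STOP_MASK) ? NOTIFY_BAD - ret : 0;
}

int main(void)
{
	int err = -22;	/* -EINVAL */
	int ret = model_from_errno(err);

	printf("encoded=0x%x decoded=%d\n", ret, model_to_errno(ret));
	printf("success encodes to 0x%x\n", model_from_errno(0));
	return 0;
}

The point of the scheme is that a single bit (NOTIFY_STOP_MASK) says "stop the chain" while the remaining bits carry the errno, so every layer below the notifier boundary can stay errno-only.
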
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 13b30eaa13d4..36c9835ea20b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -87,11 +87,12 @@ struct mlxsw_sp_upper {
 	unsigned int ref_count;
 };
 
-struct mlxsw_sp_vfid {
+struct mlxsw_sp_fid {
+	void (*leave)(struct mlxsw_sp_port *mlxsw_sp_vport);
 	struct list_head list;
-	u16 nr_vports;
-	u16 vfid; /* Starting at 0 */
-	struct net_device *br_dev;
+	unsigned int ref_count;
+	struct net_device *dev;
+	u16 fid;
 	u16 vid;
 };
 
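
The reworked struct above folds the old per-type vFID record into one reference-counted FID object with a leave() hook supplied by whoever created it, so shared code can release any FID flavor through the same two steps: run the hook, drop the reference. A standalone sketch of that shape, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct fid {
	void (*leave)(struct fid *f);	/* type-specific detach hook */
	unsigned int ref_count;
	unsigned int fid;
};

static void br_fid_leave(struct fid *f)
{
	printf("bridge-specific teardown for FID=%u\n", f->fid);
}

static struct fid *fid_get(unsigned int id, void (*leave)(struct fid *))
{
	struct fid *f = calloc(1, sizeof(*f));

	if (!f)
		return NULL;
	f->fid = id;
	f->leave = leave;
	f->ref_count = 1;
	return f;
}

/* Generic release: run the type hook, free on the last reference. */
static void fid_put(struct fid *f)
{
	f->leave(f);
	if (--f->ref_count == 0)
		free(f);
}

int main(void)
{
	struct fid *f = fid_get(5000, br_fid_leave);

	if (f)
		fid_put(f);
	return 0;
}
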
@@ -155,17 +156,17 @@ struct mlxsw_sp_sb {
 struct mlxsw_sp {
 	struct {
 		struct list_head list;
-		unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_PORT_MAX)];
+		DECLARE_BITMAP(mapped, MLXSW_SP_VFID_PORT_MAX);
 	} port_vfids;
 	struct {
 		struct list_head list;
-		unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_BR_MAX)];
+		DECLARE_BITMAP(mapped, MLXSW_SP_VFID_BR_MAX);
 	} br_vfids;
 	struct {
 		struct list_head list;
-		unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_MID_MAX)];
+		DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
 	} br_mids;
-	unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)];
+	struct list_head fids;	/* VLAN-aware bridge FIDs */
 	struct mlxsw_sp_port **ports;
 	struct mlxsw_core *core;
 	const struct mlxsw_bus_info *bus_info;
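
DECLARE_BITMAP(mapped, N) declares exactly the storage the old lines spelled out by hand, an array of unsigned long sized by BITS_TO_LONGS(N); only the intent becomes explicit. A userspace re-creation of the macros (the kernel's versions live in include/linux/types.h and the bitops headers) to make the equivalence concrete:

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	  (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

#define VFID_MAX 1024

int main(void)
{
	/* Same storage as "unsigned long mapped[BITS_TO_LONGS(VFID_MAX)]";
	 * the macro only states the intent directly. */
	DECLARE_BITMAP(mapped, VFID_MAX) = { 0 };

	mapped[42 / BITS_PER_LONG] |= 1UL << (42 % BITS_PER_LONG); /* set_bit(42) */
	printf("words=%zu bit42=%d\n", sizeof(mapped) / sizeof(mapped[0]),
	       !!(mapped[42 / BITS_PER_LONG] & (1UL << (42 % BITS_PER_LONG))));
	return 0;
}
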
@@ -217,7 +218,7 @@ struct mlxsw_sp_port {
 	u16 lag_id;
 	struct {
 		struct list_head list;
-		struct mlxsw_sp_vfid *vfid;
+		struct mlxsw_sp_fid *f;
 		u16 vid;
 	} vport;
 	struct {
@@ -259,28 +260,38 @@ mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
 	return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
 }
 
+static inline u16
+mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+	return mlxsw_sp_vport->vport.vid;
+}
+
 static inline bool
 mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
 {
-	return mlxsw_sp_port->vport.vfid;
+	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
+
+	return vid != 0;
 }
 
-static inline struct net_device *
-mlxsw_sp_vport_br_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport,
+					  struct mlxsw_sp_fid *f)
 {
-	return mlxsw_sp_vport->vport.vfid->br_dev;
+	mlxsw_sp_vport->vport.f = f;
 }
 
-static inline u16
-mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+static inline struct mlxsw_sp_fid *
+mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
 {
-	return mlxsw_sp_vport->vport.vid;
+	return mlxsw_sp_vport->vport.f;
 }
 
-static inline u16
-mlxsw_sp_vport_vfid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+static inline struct net_device *
+mlxsw_sp_vport_br_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
 {
-	return mlxsw_sp_vport->vport.vfid->vfid;
+	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+	return f ? f->dev : NULL;
 }
 
 static inline struct mlxsw_sp_port *
@@ -298,14 +309,16 @@ mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
 }
 
 static inline struct mlxsw_sp_port *
-mlxsw_sp_port_vport_find_by_vfid(const struct mlxsw_sp_port *mlxsw_sp_port,
-				 u16 vfid)
+mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
+				u16 fid)
 {
 	struct mlxsw_sp_port *mlxsw_sp_vport;
 
 	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
 			    vport.list) {
-		if (mlxsw_sp_vport_vfid_get(mlxsw_sp_vport) == vfid)
+		struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+		if (f && f->fid == fid)
 			return mlxsw_sp_vport;
 	}
 
@@ -366,10 +379,11 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
 			  u16 vid);
 int mlxsw_sp_port_kill_vid(struct net_device *dev,
 			   __be16 __always_unused proto, u16 vid);
-int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
-			     bool set, bool only_uc);
+int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
+			     bool set);
 void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
 			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
 			  bool dwrr, u8 dwrr_weight);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 3710f19ed6bb..a0c7376ee517 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -55,13 +55,10 @@
 static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
 					u16 vid)
 {
+	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
 	u16 fid = vid;
 
-	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
-		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
-
-		fid = mlxsw_sp_vfid_to_fid(vfid);
-	}
+	fid = f ? f->fid : fid;
 
 	if (!fid)
 		fid = mlxsw_sp_port->pvid;
@@ -236,7 +233,8 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
 	int err;
 
 	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
-		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
+		u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
+		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
 
 		return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
 						 set, true);
@@ -260,14 +258,17 @@ err_port_flood_set:
 	return err;
 }
 
-int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
-			     bool set, bool only_uc)
+int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
+			     bool set)
 {
+	u16 vfid;
+
 	/* In case of vFIDs, index into the flooding table is relative to
 	 * the start of the vFIDs range.
 	 */
+	vfid = mlxsw_sp_fid_to_vfid(fid);
 	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
-					 only_uc);
+					 false);
 }
 
 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
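
The only_uc parameter is gone, and the comment retained above carries the remaining subtlety: the flooding table is indexed per vFID, so a global FID must first be rebased against the start of the vFID range. A standalone sketch of that arithmetic, under the assumption (taken from this driver) that vFIDs sit directly above the 4K VLAN FIDs:

#include <assert.h>
#include <stdio.h>

/* Assumption from this driver: FIDs 0..4095 back regular VLANs and vFIDs
 * are allocated directly above them (MLXSW_SP_VFID_BASE == 4096). */
#define VLAN_N_VID   4096
#define SP_VFID_BASE VLAN_N_VID

static unsigned int fid_to_vfid(unsigned int fid)
{
	return fid - SP_VFID_BASE;	/* index relative to the vFID range */
}

static unsigned int vfid_to_fid(unsigned int vfid)
{
	return vfid + SP_VFID_BASE;
}

int main(void)
{
	unsigned int fid = 4100;

	/* The flooding table is indexed per vFID: FID 4100 lands in row 4. */
	printf("FID %u -> flood table row %u\n", fid, fid_to_vfid(fid));
	assert(vfid_to_fid(fid_to_vfid(fid)) == fid);
	return 0;
}
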
@@ -383,6 +384,198 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
 	return err;
 }
 
+static struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
+					      u16 fid)
+{
+	struct mlxsw_sp_fid *f;
+
+	list_for_each_entry(f, &mlxsw_sp->fids, list)
+		if (f->fid == fid)
+			return f;
+
+	return NULL;
+}
+
+static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
+{
+	char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
+{
+	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+	char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+	mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
+{
+	struct mlxsw_sp_fid *f;
+
+	f = kzalloc(sizeof(*f), GFP_KERNEL);
+	if (!f)
+		return NULL;
+
+	f->fid = fid;
+
+	return f;
+}
+
+static struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp,
+						u16 fid)
+{
+	struct mlxsw_sp_fid *f;
+	int err;
+
+	err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
+	if (err)
+		return ERR_PTR(err);
+
+	/* Although all the ports member in the FID might be using a
+	 * {Port, VID} to FID mapping, we create a global VID-to-FID
+	 * mapping. This allows a port to transition to VLAN mode,
+	 * knowing the global mapping exists.
+	 */
+	err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
+	if (err)
+		goto err_fid_map;
+
+	f = mlxsw_sp_fid_alloc(fid);
+	if (!f) {
+		err = -ENOMEM;
+		goto err_allocate_fid;
+	}
+
+	list_add(&f->list, &mlxsw_sp->fids);
+
+	return f;
+
+err_allocate_fid:
+	mlxsw_sp_fid_map(mlxsw_sp, fid, false);
+err_fid_map:
+	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
+	return ERR_PTR(err);
+}
+
+static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp,
+				 struct mlxsw_sp_fid *f)
+{
+	u16 fid = f->fid;
+
+	list_del(&f->list);
+
+	kfree(f);
+
+	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
+}
+
+static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
+				    u16 fid)
+{
+	struct mlxsw_sp_fid *f;
+
+	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
+	if (!f) {
+		f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
+		if (IS_ERR(f))
+			return PTR_ERR(f);
+	}
+
+	f->ref_count++;
+
+	netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);
+
+	return 0;
+}
+
+static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+				      u16 fid)
+{
+	struct mlxsw_sp_fid *f;
+
+	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
+	if (WARN_ON(!f))
+		return;
+
+	netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);
+
+	mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);
+
+	if (--f->ref_count == 0)
+		mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
+}
+
+static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
+				 bool valid)
+{
+	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+
+	/* If port doesn't have vPorts, then it can use the global
+	 * VID-to-FID mapping.
+	 */
+	if (list_empty(&mlxsw_sp_port->vports_list))
+		return 0;
+
+	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
+}
+
+static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
+				  u16 fid_begin, u16 fid_end)
+{
+	int fid, err;
+
+	for (fid = fid_begin; fid <= fid_end; fid++) {
+		err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
+		if (err)
+			goto err_port_fid_join;
+	}
+
+	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
+					true, false);
+	if (err)
+		goto err_port_flood_set;
+
+	for (fid = fid_begin; fid <= fid_end; fid++) {
+		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
+		if (err)
+			goto err_port_fid_map;
+	}
+
+	return 0;
+
+err_port_fid_map:
+	for (fid--; fid >= fid_begin; fid--)
+		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
+	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
+				  false);
+err_port_flood_set:
+	fid = fid_end;
+err_port_fid_join:
+	for (fid--; fid >= fid_begin; fid--)
+		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
+	return err;
+}
+
+static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+				    u16 fid_begin, u16 fid_end)
+{
+	int fid;
+
+	for (fid = fid_begin; fid <= fid_end; fid++)
+		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
+
+	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
+				  false);
+
+	for (fid = fid_begin; fid <= fid_end; fid++)
+		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
+}
+
 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
 				    u16 vid)
 {
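
mlxsw_sp_port_fid_join() in the hunk above is a textbook range-unwind: each loop that can fail records how far it got, and the error ladder rolls back exactly the entries already applied, in reverse. Reduced to its skeleton as standalone C:

#include <stdio.h>

static int op_apply(int id) { return id == 13 ? -1 : 0; } /* fails at 13 */
static void op_undo(int id) { printf("undo %d\n", id); }

/* Same shape as mlxsw_sp_port_fid_join(): apply an operation across a
 * range; on failure, unwind exactly the entries already applied. */
static int range_join(int begin, int end)
{
	int id, err;

	for (id = begin; id <= end; id++) {
		err = op_apply(id);
		if (err)
			goto err_apply;
	}
	return 0;

err_apply:
	/* "id" is the entry that failed; roll back the ones before it. */
	for (id--; id >= begin; id--)
		op_undo(id);
	return err;
}

int main(void)
{
	return range_join(10, 20) ? 1 : 0;
}
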
@@ -440,55 +633,6 @@ err_port_allow_untagged_set:
 	return err;
 }
 
-static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
-{
-	char sfmr_pl[MLXSW_REG_SFMR_LEN];
-	int err;
-
-	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
-	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-
-	if (err)
-		return err;
-
-	set_bit(fid, mlxsw_sp->active_fids);
-	return 0;
-}
-
-static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
-{
-	char sfmr_pl[MLXSW_REG_SFMR_LEN];
-
-	clear_bit(fid, mlxsw_sp->active_fids);
-
-	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
-			    fid, fid);
-	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-}
-
-static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
-{
-	enum mlxsw_reg_svfa_mt mt;
-
-	if (!list_empty(&mlxsw_sp_port->vports_list))
-		mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
-	else
-		mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
-
-	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
-}
-
-static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
-{
-	enum mlxsw_reg_svfa_mt mt;
-
-	if (list_empty(&mlxsw_sp_port->vports_list))
-		return 0;
-
-	mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
-	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
-}
-
 static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
 				  u16 vid_end)
 {
@@ -533,10 +677,8 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
 				     u16 vid_begin, u16 vid_end,
 				     bool flag_untagged, bool flag_pvid)
 {
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	struct net_device *dev = mlxsw_sp_port->dev;
-	u16 vid, last_visited_vid, old_pvid;
-	enum mlxsw_reg_svfa_mt mt;
+	u16 vid, old_pvid;
 	int err;
 
 	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
@@ -546,44 +688,10 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
 	if (!mlxsw_sp_port->bridged)
 		return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);
 
-	for (vid = vid_begin; vid <= vid_end; vid++) {
-		if (!test_bit(vid, mlxsw_sp->active_fids)) {
-			err = mlxsw_sp_fid_create(mlxsw_sp, vid);
-			if (err) {
-				netdev_err(dev, "Failed to create FID=%d\n",
-					   vid);
-				return err;
-			}
-
-			/* When creating a FID, we set a VID to FID mapping
-			 * regardless of the port's mode.
-			 */
-			mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
-			err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
-							   true, vid, vid);
-			if (err) {
-				netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
-					   vid);
-				goto err_port_vid_to_fid_set;
-			}
-		}
-	}
-
-	/* Set FID mapping according to port's mode */
-	for (vid = vid_begin; vid <= vid_end; vid++) {
-		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
-		if (err) {
-			netdev_err(dev, "Failed to map FID=%d", vid);
-			last_visited_vid = --vid;
-			goto err_port_fid_map;
-		}
-	}
-
-	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
-					true, false);
+	err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
 	if (err) {
-		netdev_err(dev, "Failed to configure flooding\n");
-		goto err_port_flood_set;
+		netdev_err(dev, "Failed to join FIDs\n");
+		return err;
 	}
 
 	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
@@ -628,10 +736,6 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
 
 	return 0;
 
-err_port_vid_to_fid_set:
-	mlxsw_sp_fid_destroy(mlxsw_sp, vid);
-	return err;
-
 err_port_stp_state_set:
 	for (vid = vid_begin; vid <= vid_end; vid++)
 		clear_bit(vid, mlxsw_sp_port->active_vlans);
@@ -641,13 +745,7 @@ err_port_pvid_set:
 	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
 				  false);
 err_port_vlans_set:
-	__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false,
-				  false);
-err_port_flood_set:
-	last_visited_vid = vid_end;
-err_port_fid_map:
-	for (vid = last_visited_vid; vid >= vid_begin; vid--)
-		mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
+	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
 	return err;
 }
 
@@ -970,21 +1068,7 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
 		}
 	}
 
-	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
-					false, false);
-	if (err) {
-		netdev_err(dev, "Failed to clear flooding\n");
-		return err;
-	}
-
-	for (vid = vid_begin; vid <= vid_end; vid++) {
-		/* Remove FID mapping in case of Virtual mode */
-		err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
-		if (err) {
-			netdev_err(dev, "Failed to unmap FID=%d", vid);
-			return err;
-		}
-	}
+	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
 
 out:
 	/* Changing activity bits only if HW operation succeded */
@@ -1118,7 +1202,8 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	struct mlxsw_sp_port *tmp;
-	u16 vport_fid = 0;
+	struct mlxsw_sp_fid *f;
+	u16 vport_fid;
 	char *sfd_pl;
 	char mac[ETH_ALEN];
 	u16 fid;
@@ -1133,12 +1218,8 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
 	if (!sfd_pl)
 		return -ENOMEM;
 
-	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
-		u16 tmp;
-
-		tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
-		vport_fid = mlxsw_sp_vfid_to_fid(tmp);
-	}
+	f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
+	vport_fid = f ? f->fid : 0;
 
 	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
 	do {
@@ -1310,11 +1391,10 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
 	}
 
 	if (mlxsw_sp_fid_is_vfid(fid)) {
-		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
 		struct mlxsw_sp_port *mlxsw_sp_vport;
 
-		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
-								  vfid);
+		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
+								 fid);
 		if (!mlxsw_sp_vport) {
 			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
 			goto just_remove;
@@ -1370,11 +1450,10 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
 	}
 
 	if (mlxsw_sp_fid_is_vfid(fid)) {
-		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
 		struct mlxsw_sp_port *mlxsw_sp_vport;
 
-		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
-								  vfid);
+		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
+								 fid);
 		if (!mlxsw_sp_vport) {
 			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
 			goto just_remove;
@@ -1495,14 +1574,6 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
 	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
 }
 
-static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
-{
-	u16 fid;
-
-	for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
-		mlxsw_sp_fid_destroy(mlxsw_sp, fid);
-}
-
 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
 {
 	return mlxsw_sp_fdb_init(mlxsw_sp);
@@ -1511,7 +1582,6 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
 {
 	mlxsw_sp_fdb_fini(mlxsw_sp);
-	mlxsw_sp_fids_fini(mlxsw_sp);
 }
 
 int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index ba26bb356b8d..c25a8ba6cf9f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1979,7 +1979,7 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
 	if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) {
 		memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
 		memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
-		vxlan_get_rx_port(nn->netdev);
+		udp_tunnel_get_rx_info(nn->netdev);
 	}
 
 	return err;
@@ -2551,26 +2551,32 @@ static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
 }
 
 static void nfp_net_add_vxlan_port(struct net_device *netdev,
-				   sa_family_t sa_family, __be16 port)
+				   struct udp_tunnel_info *ti)
 {
 	struct nfp_net *nn = netdev_priv(netdev);
 	int idx;
 
-	idx = nfp_net_find_vxlan_idx(nn, port);
+	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+		return;
+
+	idx = nfp_net_find_vxlan_idx(nn, ti->port);
 	if (idx == -ENOSPC)
 		return;
 
 	if (!nn->vxlan_usecnt[idx]++)
-		nfp_net_set_vxlan_port(nn, idx, port);
+		nfp_net_set_vxlan_port(nn, idx, ti->port);
 }
 
 static void nfp_net_del_vxlan_port(struct net_device *netdev,
-				   sa_family_t sa_family, __be16 port)
+				   struct udp_tunnel_info *ti)
 {
 	struct nfp_net *nn = netdev_priv(netdev);
 	int idx;
 
-	idx = nfp_net_find_vxlan_idx(nn, port);
+	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+		return;
+
+	idx = nfp_net_find_vxlan_idx(nn, ti->port);
 	if (!nn->vxlan_usecnt[idx] || idx == -ENOSPC)
 		return;
 
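
This is the mechanical part of the ndo_add_vxlan_port to ndo_udp_tunnel_add conversion: one callback now serves every UDP tunnel type, so a VXLAN-only driver must filter on ti->type before touching hardware, which is exactly what the two early returns above add. A standalone model of the dispatch (struct and enum names follow the kernel's udp_tunnel.h, but this is a sketch, not the kernel definitions):

#include <stdio.h>

enum udp_tunnel_type { UDP_TUNNEL_TYPE_VXLAN, UDP_TUNNEL_TYPE_GENEVE };

struct udp_tunnel_info {
	enum udp_tunnel_type type;
	unsigned int port;	/* network byte order in the kernel */
};

static void add_vxlan_port(const struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;		/* one callback for all tunnel types */
	printf("offload VXLAN port %u\n", ti->port);
}

int main(void)
{
	struct udp_tunnel_info vxlan = { UDP_TUNNEL_TYPE_VXLAN, 4789 };
	struct udp_tunnel_info geneve = { UDP_TUNNEL_TYPE_GENEVE, 6081 };

	add_vxlan_port(&vxlan);		/* programmed */
	add_vxlan_port(&geneve);	/* ignored */
	return 0;
}
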
@@ -2589,8 +2595,8 @@ static const struct net_device_ops nfp_net_netdev_ops = {
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_set_features	= nfp_net_set_features,
 	.ndo_features_check	= nfp_net_features_check,
-	.ndo_add_vxlan_port	= nfp_net_add_vxlan_port,
-	.ndo_del_vxlan_port	= nfp_net_del_vxlan_port,
+	.ndo_udp_tunnel_add	= nfp_net_add_vxlan_port,
+	.ndo_udp_tunnel_del	= nfp_net_del_vxlan_port,
 };
 
 /**
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index b1ce7aaa8f8b..01b50ff7c708 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -425,7 +425,6 @@ struct netdata_local {
 	unsigned int		last_tx_idx;
 	unsigned int		num_used_tx_buffs;
 	struct mii_bus		*mii_bus;
-	struct phy_device	*phy_dev;
 	struct clk		*clk;
 	dma_addr_t		dma_buff_base_p;
 	void			*dma_buff_base_v;
@@ -750,7 +749,7 @@ static int lpc_mdio_reset(struct mii_bus *bus)
 static void lpc_handle_link_change(struct net_device *ndev)
 {
 	struct netdata_local *pldat = netdev_priv(ndev);
-	struct phy_device *phydev = pldat->phy_dev;
+	struct phy_device *phydev = ndev->phydev;
 	unsigned long flags;
 
 	bool status_change = false;
@@ -814,7 +813,6 @@ static int lpc_mii_probe(struct net_device *ndev)
 	pldat->link = 0;
 	pldat->speed = 0;
 	pldat->duplex = -1;
-	pldat->phy_dev = phydev;
 
 	phy_attached_info(phydev);
 
@@ -1048,8 +1046,8 @@ static int lpc_eth_close(struct net_device *ndev)
 	napi_disable(&pldat->napi);
 	netif_stop_queue(ndev);
 
-	if (pldat->phy_dev)
-		phy_stop(pldat->phy_dev);
+	if (ndev->phydev)
+		phy_stop(ndev->phydev);
 
 	spin_lock_irqsave(&pldat->lock, flags);
 	__lpc_eth_reset(pldat);
@@ -1186,7 +1184,7 @@ static void lpc_eth_set_multicast_list(struct net_device *ndev)
 static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
 {
 	struct netdata_local *pldat = netdev_priv(ndev);
-	struct phy_device *phydev = pldat->phy_dev;
+	struct phy_device *phydev = ndev->phydev;
 
 	if (!netif_running(ndev))
 		return -EINVAL;
@@ -1207,14 +1205,14 @@ static int lpc_eth_open(struct net_device *ndev)
 	__lpc_eth_clock_enable(pldat, true);
 
 	/* Suspended PHY makes LPC ethernet core block, so resume now */
-	phy_resume(pldat->phy_dev);
+	phy_resume(ndev->phydev);
 
 	/* Reset and initialize */
 	__lpc_eth_reset(pldat);
 	__lpc_eth_init(pldat);
 
 	/* schedule a link state check */
-	phy_start(pldat->phy_dev);
+	phy_start(ndev->phydev);
 	netif_start_queue(ndev);
 	napi_enable(&pldat->napi);
 
@@ -1247,37 +1245,13 @@ static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level)
 	pldat->msg_enable = level;
 }
 
-static int lpc_eth_ethtool_getsettings(struct net_device *ndev,
-				       struct ethtool_cmd *cmd)
-{
-	struct netdata_local *pldat = netdev_priv(ndev);
-	struct phy_device *phydev = pldat->phy_dev;
-
-	if (!phydev)
-		return -EOPNOTSUPP;
-
-	return phy_ethtool_gset(phydev, cmd);
-}
-
-static int lpc_eth_ethtool_setsettings(struct net_device *ndev,
-				       struct ethtool_cmd *cmd)
-{
-	struct netdata_local *pldat = netdev_priv(ndev);
-	struct phy_device *phydev = pldat->phy_dev;
-
-	if (!phydev)
-		return -EOPNOTSUPP;
-
-	return phy_ethtool_sset(phydev, cmd);
-}
-
 static const struct ethtool_ops lpc_eth_ethtool_ops = {
 	.get_drvinfo	= lpc_eth_ethtool_getdrvinfo,
-	.get_settings	= lpc_eth_ethtool_getsettings,
-	.set_settings	= lpc_eth_ethtool_setsettings,
 	.get_msglevel	= lpc_eth_ethtool_getmsglevel,
 	.set_msglevel	= lpc_eth_ethtool_setmsglevel,
 	.get_link	= ethtool_op_get_link,
+	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
 
 static const struct net_device_ops lpc_netdev_ops = {
@@ -1460,7 +1434,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
1460 netdev_info(ndev, "LPC mac at 0x%08x irq %d\n", 1434 netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
1461 res->start, ndev->irq); 1435 res->start, ndev->irq);
1462 1436
1463 phydev = pldat->phy_dev; 1437 phydev = ndev->phydev;
1464 1438
1465 device_init_wakeup(&pdev->dev, 1); 1439 device_init_wakeup(&pdev->dev, 1);
1466 device_set_wakeup_enable(&pdev->dev, 0); 1440 device_set_wakeup_enable(&pdev->dev, 0);
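
Note: the lpc_eth hunks above follow the tree-wide conversion pattern visible in this diff: the driver-private phy_dev pointer duplicated state the core already keeps in net_device->phydev, and once every access goes through ndev->phydev, the get_settings/set_settings callbacks can be dropped in favor of the generic phy_ethtool_get_link_ksettings/phy_ethtool_set_link_ksettings helpers. A standalone sketch of the idea; the structs below are simplified stand-ins, not the kernel definitions:

#include <stdio.h>

struct phy_device { int speed; };              /* stand-in, not the kernel struct */
struct net_device { struct phy_device *phydev; };

/* With the PHY pointer owned by the core, an ethtool callback needs no
 * driver-private state at all - it can be one shared helper. */
static int get_link_speed(struct net_device *ndev)
{
	if (!ndev->phydev)
		return -95;	/* models -EOPNOTSUPP */
	return ndev->phydev->speed;
}

int main(void)
{
	struct phy_device phy = { .speed = 100 };
	struct net_device ndev = { .phydev = &phy };

	printf("link speed: %d Mb/s\n", get_link_speed(&ndev));
	return 0;
}
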
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 680d8c736d2b..6ba48406899e 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -54,16 +54,6 @@ config QLCNIC_DCB
54 mode of DCB is supported. PG and PFC values are related only 54 mode of DCB is supported. PG and PFC values are related only
55 to Tx. 55 to Tx.
56 56
57config QLCNIC_VXLAN
58 bool "Virtual eXtensible Local Area Network (VXLAN) offload support"
59 default n
60 depends on QLCNIC && VXLAN && !(QLCNIC=y && VXLAN=m)
61 ---help---
62 This enables hardware offload support for VXLAN protocol over QLogic's
63 84XX series adapters.
64 Say Y here if you want to enable hardware offload support for
65 Virtual eXtensible Local Area Network (VXLAN) in the driver.
66
67config QLCNIC_HWMON 57config QLCNIC_HWMON
68 bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support" 58 bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support"
69 depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m) 59 depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m)
@@ -114,24 +104,4 @@ config QEDE
114 ---help--- 104 ---help---
115 This enables the support for ... 105 This enables the support for ...
116 106
117config QEDE_VXLAN
118 bool "Virtual eXtensible Local Area Network support"
119 default n
120 depends on QEDE && VXLAN && !(QEDE=y && VXLAN=m)
121 ---help---
122 This enables hardware offload support for VXLAN protocol over
123 qede module. Say Y here if you want to enable hardware offload
124 support for Virtual eXtensible Local Area Network (VXLAN)
125 in the driver.
126
127config QEDE_GENEVE
128 bool "Generic Network Virtualization Encapsulation (GENEVE) support"
129 depends on QEDE && GENEVE && !(QEDE=y && GENEVE=m)
130 ---help---
131 This allows one to create GENEVE virtual interfaces that provide
132 Layer 2 Networks over Layer 3 Networks. GENEVE is often used
133 to tunnel virtual network infrastructure in virtualized environments.
134 Say Y here if you want to enable hardware offload support for
135 Generic Network Virtualization Encapsulation (GENEVE) in the driver.
136
137endif # NET_VENDOR_QLOGIC 107endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 1042f2af854a..35e53771533f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -127,6 +127,8 @@ struct qed_tunn_update_params {
127 */ 127 */
128enum qed_pci_personality { 128enum qed_pci_personality {
129 QED_PCI_ETH, 129 QED_PCI_ETH,
130 QED_PCI_ISCSI,
131 QED_PCI_ETH_ROCE,
130 QED_PCI_DEFAULT /* default in shmem */ 132 QED_PCI_DEFAULT /* default in shmem */
131}; 133};
132 134
@@ -170,6 +172,8 @@ enum QED_PORT_MODE {
170 172
171enum qed_dev_cap { 173enum qed_dev_cap {
172 QED_DEV_CAP_ETH, 174 QED_DEV_CAP_ETH,
175 QED_DEV_CAP_ISCSI,
176 QED_DEV_CAP_ROCE,
173}; 177};
174 178
175struct qed_hw_info { 179struct qed_hw_info {
@@ -183,6 +187,8 @@ struct qed_hw_info {
183 187
184#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc]) 188#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
185#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc]) 189#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
190#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
191 RESC_NUM(_p_hwfn, resc))
186#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc]) 192#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
187 193
188 u8 num_tc; 194 u8 num_tc;
@@ -255,6 +261,7 @@ struct qed_qm_info {
255 u8 pure_lb_pq; 261 u8 pure_lb_pq;
256 u8 offload_pq; 262 u8 offload_pq;
257 u8 pure_ack_pq; 263 u8 pure_ack_pq;
264 u8 ooo_pq;
258 u8 vf_queues_offset; 265 u8 vf_queues_offset;
259 u16 num_pqs; 266 u16 num_pqs;
260 u16 num_vf_pqs; 267 u16 num_vf_pqs;
@@ -267,6 +274,7 @@ struct qed_qm_info {
267 u8 pf_wfq; 274 u8 pf_wfq;
268 u32 pf_rl; 275 u32 pf_rl;
269 struct qed_wfq_data *wfq_data; 276 struct qed_wfq_data *wfq_data;
277 u8 num_pf_rls;
270}; 278};
271 279
272struct storm_stats { 280struct storm_stats {
@@ -312,6 +320,7 @@ struct qed_hwfn {
312 bool hw_init_done; 320 bool hw_init_done;
313 321
314 u8 num_funcs_on_engine; 322 u8 num_funcs_on_engine;
323 u8 enabled_func_idx;
315 324
316 /* BAR access */ 325 /* BAR access */
317 void __iomem *regview; 326 void __iomem *regview;
@@ -350,6 +359,9 @@ struct qed_hwfn {
350 /* Protocol related */ 359 /* Protocol related */
351 struct qed_pf_params pf_params; 360 struct qed_pf_params pf_params;
352 361
362 bool b_rdma_enabled_in_prs;
363 u32 rdma_prs_search_reg;
364
353 /* Array of sb_info of all status blocks */ 365 /* Array of sb_info of all status blocks */
354 struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD]; 366 struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
355 u16 num_sbs; 367 u16 num_sbs;
@@ -477,8 +489,8 @@ struct qed_dev {
477 489
478 u32 int_mode; 490 u32 int_mode;
479 enum qed_coalescing_mode int_coalescing_mode; 491 enum qed_coalescing_mode int_coalescing_mode;
480 u8 rx_coalesce_usecs; 492 u16 rx_coalesce_usecs;
481 u8 tx_coalesce_usecs; 493 u16 tx_coalesce_usecs;
482 494
483 /* Start Bar offset of first hwfn */ 495 /* Start Bar offset of first hwfn */
484 void __iomem *regview; 496 void __iomem *regview;
@@ -555,6 +567,7 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
555} 567}
556 568
557#define PURE_LB_TC 8 569#define PURE_LB_TC 8
570#define OOO_LB_TC 9
558 571
559int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate); 572int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
560void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate); 573void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
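
Note: the RESC_END() macro added above is pure convenience - the end of a resource range is its start plus its length. A minimal model with invented values:

#include <stdio.h>

struct hw_info { unsigned int resc_start[1]; unsigned int resc_num[1]; };

#define RESC_START(p, r) ((p)->resc_start[r])
#define RESC_NUM(p, r)   ((p)->resc_num[r])
#define RESC_END(p, r)   (RESC_START(p, r) + RESC_NUM(p, r))

int main(void)
{
	/* e.g. a PF that owns 40 ILT lines starting at line 100 */
	struct hw_info hw = { { 100 }, { 40 } };

	/* valid lines are [RESC_START, RESC_END), i.e. 100..139 */
	printf("lines %u..%u\n", RESC_START(&hw, 0), RESC_END(&hw, 0) - 1);
	return 0;
}
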
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index ac284c58d8c2..1c35f376143e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -39,6 +39,14 @@
39#define DQ_RANGE_SHIFT 4 39#define DQ_RANGE_SHIFT 4
40#define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT) 40#define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT)
41 41
42/* Searcher constants */
43#define SRC_MIN_NUM_ELEMS 256
44
45/* Timers constants */
46#define TM_SHIFT 7
47#define TM_ALIGN BIT(TM_SHIFT)
48#define TM_ELEM_SIZE 4
49
42/* ILT constants */ 50/* ILT constants */
43#define ILT_DEFAULT_HW_P_SIZE 3 51#define ILT_DEFAULT_HW_P_SIZE 3
44#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) 52#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
@@ -56,26 +64,71 @@
56union conn_context { 64union conn_context {
57 struct core_conn_context core_ctx; 65 struct core_conn_context core_ctx;
58 struct eth_conn_context eth_ctx; 66 struct eth_conn_context eth_ctx;
67 struct iscsi_conn_context iscsi_ctx;
68 struct roce_conn_context roce_ctx;
69};
70
71/* TYPE-0 task context - iSCSI */
72union type0_task_context {
73 struct iscsi_task_context iscsi_ctx;
59}; 74};
60 75
76/* TYPE-1 task context - ROCE */
77union type1_task_context {
78 struct rdma_task_context roce_ctx;
79};
80
81struct src_ent {
82 u8 opaque[56];
83 u64 next;
84};
85
86#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
87#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
88
61#define CONN_CXT_SIZE(p_hwfn) \ 89#define CONN_CXT_SIZE(p_hwfn) \
62 ALIGNED_TYPE_SIZE(union conn_context, p_hwfn) 90 ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
63 91
92#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
93
94#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
95 ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
96
97/* Alignment is inherent to the type1_task_context structure */
98#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
99
64/* PF per protocol configuration object */ 100
101#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
102#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
103
104struct qed_tid_seg {
105 u32 count;
106 u8 type;
107 bool has_fl_mem;
108};
109
65struct qed_conn_type_cfg { 110struct qed_conn_type_cfg {
66 u32 cid_count; 111 u32 cid_count;
67 u32 cid_start; 112 u32 cid_start;
68 u32 cids_per_vf; 113 u32 cids_per_vf;
114 struct qed_tid_seg tid_seg[TASK_SEGMENTS];
69}; 115};
70 116
71/* ILT Client configuration, Per connection type (protocol) resources. */ 117/* ILT Client configuration, Per connection type (protocol) resources. */
72#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2) 118#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
73#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2) 119#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
74#define CDUC_BLK (0) 120#define CDUC_BLK (0)
121#define SRQ_BLK (0)
122#define CDUT_SEG_BLK(n) (1 + (u8)(n))
123#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
75 124
76enum ilt_clients { 125enum ilt_clients {
77 ILT_CLI_CDUC, 126 ILT_CLI_CDUC,
127 ILT_CLI_CDUT,
78 ILT_CLI_QM, 128 ILT_CLI_QM,
129 ILT_CLI_TM,
130 ILT_CLI_SRC,
131 ILT_CLI_TSDM,
79 ILT_CLI_MAX 132 ILT_CLI_MAX
80}; 133};
81 134
@@ -88,6 +141,7 @@ struct qed_ilt_cli_blk {
88 u32 total_size; /* 0 means not active */ 141 u32 total_size; /* 0 means not active */
89 u32 real_size_in_page; 142 u32 real_size_in_page;
90 u32 start_line; 143 u32 start_line;
144 u32 dynamic_line_cnt;
91}; 145};
92 146
93struct qed_ilt_client_cfg { 147struct qed_ilt_client_cfg {
@@ -131,18 +185,44 @@ struct qed_cxt_mngr {
131 /* computed ILT structure */ 185 /* computed ILT structure */
132 struct qed_ilt_client_cfg clients[ILT_CLI_MAX]; 186 struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
133 187
188 /* Task type sizes */
189 u32 task_type_size[NUM_TASK_TYPES];
190
134 /* total number of VFs for this hwfn - 191 /* total number of VFs for this hwfn -
135 * ALL VFs are symmetric in terms of HW resources 192 * ALL VFs are symmetric in terms of HW resources
136 */ 193 */
137 u32 vf_count; 194 u32 vf_count;
138 195
196 /* total number of SRQ's for this hwfn */
197 u32 srq_count;
198
139 /* Acquired CIDs */ 199 /* Acquired CIDs */
140 struct qed_cid_acquired_map acquired[MAX_CONN_TYPES]; 200 struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
141 201
142 /* ILT shadow table */ 202 /* ILT shadow table */
143 struct qed_dma_mem *ilt_shadow; 203 struct qed_dma_mem *ilt_shadow;
144 u32 pf_start_line; 204 u32 pf_start_line;
205
206 /* Mutex for a dynamic ILT allocation */
207 struct mutex mutex;
208
209 /* SRC T2 */
210 struct qed_dma_mem *t2;
211 u32 t2_num_pages;
212 u64 first_free;
213 u64 last_free;
145}; 214};
215static bool src_proto(enum protocol_type type)
216{
217 return type == PROTOCOLID_ISCSI ||
218 type == PROTOCOLID_ROCE;
219}
220
221static bool tm_cid_proto(enum protocol_type type)
222{
223 return type == PROTOCOLID_ISCSI ||
224 type == PROTOCOLID_ROCE;
225}
146 226
147/* counts the iids for the CDU/CDUC ILT client configuration */ 227/* counts the iids for the CDU/CDUC ILT client configuration */
148struct qed_cdu_iids { 228struct qed_cdu_iids {
@@ -161,21 +241,120 @@ static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
161 } 241 }
162} 242}
163 243
244/* counts the iids for the Searcher block configuration */
245struct qed_src_iids {
246 u32 pf_cids;
247 u32 per_vf_cids;
248};
249
250static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
251 struct qed_src_iids *iids)
252{
253 u32 i;
254
255 for (i = 0; i < MAX_CONN_TYPES; i++) {
256 if (!src_proto(i))
257 continue;
258
259 iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
260 iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
261 }
262}
263
264/* counts the iids for the Timers block configuration */
265struct qed_tm_iids {
266 u32 pf_cids;
267 u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */
268 u32 pf_tids_total;
269 u32 per_vf_cids;
270 u32 per_vf_tids;
271};
272
273static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr,
274 struct qed_tm_iids *iids)
275{
276 u32 i, j;
277
278 for (i = 0; i < MAX_CONN_TYPES; i++) {
279 struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
280
281 if (tm_cid_proto(i)) {
282 iids->pf_cids += p_cfg->cid_count;
283 iids->per_vf_cids += p_cfg->cids_per_vf;
284 }
285 }
286
287 iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
288 iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN);
289 iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN);
290
291 for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
292 iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN);
293 iids->pf_tids_total += iids->pf_tids[j];
294 }
295}
296
164static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn, 297static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
165 struct qed_qm_iids *iids) 298 struct qed_qm_iids *iids)
166{ 299{
167 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 300 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
168 u32 vf_cids = 0, type; 301 struct qed_tid_seg *segs;
302 u32 vf_cids = 0, type, j;
303 u32 vf_tids = 0;
169 304
170 for (type = 0; type < MAX_CONN_TYPES; type++) { 305 for (type = 0; type < MAX_CONN_TYPES; type++) {
171 iids->cids += p_mngr->conn_cfg[type].cid_count; 306 iids->cids += p_mngr->conn_cfg[type].cid_count;
172 vf_cids += p_mngr->conn_cfg[type].cids_per_vf; 307 vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
308
309 segs = p_mngr->conn_cfg[type].tid_seg;
310 /* for each segment there is at most one
311 * protocol for which count is not 0.
312 */
313 for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
314 iids->tids += segs[j].count;
315
316 /* The last array element is for the VFs. As for PF
317 * segments there can be only one protocol for
318 * which this value is not 0.
319 */
320 vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
173 } 321 }
174 322
175 iids->vf_cids += vf_cids * p_mngr->vf_count; 323 iids->vf_cids += vf_cids * p_mngr->vf_count;
324 iids->tids += vf_tids * p_mngr->vf_count;
325
176 DP_VERBOSE(p_hwfn, QED_MSG_ILT, 326 DP_VERBOSE(p_hwfn, QED_MSG_ILT,
177 "iids: CIDS %08x vf_cids %08x\n", 327 "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
178 iids->cids, iids->vf_cids); 328 iids->cids, iids->vf_cids, iids->tids, vf_tids);
329}
330
331static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
332 u32 seg)
333{
334 struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
335 u32 i;
336
337 /* Find the protocol with tid count > 0 for this segment.
338 * Note: there can only be one and this is already validated.
339 */
340 for (i = 0; i < MAX_CONN_TYPES; i++)
341 if (p_cfg->conn_cfg[i].tid_seg[seg].count)
342 return &p_cfg->conn_cfg[i].tid_seg[seg];
343 return NULL;
344}
345
346void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
347{
348 struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
349
350 p_mgr->srq_count = num_srqs;
351}
352
353u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
354{
355 struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
356
357 return p_mgr->srq_count;
179} 358}
180 359
181/* set the iids count per protocol */ 360/* set the iids count per protocol */
@@ -188,6 +367,14 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
188 367
189 p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN); 368 p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
190 p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN); 369 p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);
370
371 if (type == PROTOCOLID_ROCE) {
372 u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
373 u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
374 u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
375
376 p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page);
377 }
191} 378}
192 379
193u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, 380u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
@@ -200,6 +387,37 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
200 return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count; 387 return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
201} 388}
202 389
390u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
391 enum protocol_type type)
392{
393 return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
394}
395
396u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
397 enum protocol_type type)
398{
399 u32 cnt = 0;
400 int i;
401
402 for (i = 0; i < TASK_SEGMENTS; i++)
403 cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;
404
405 return cnt;
406}
407
408static void
409qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
410 enum protocol_type proto,
411 u8 seg, u8 seg_type, u32 count, bool has_fl)
412{
413 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
414 struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
415
416 p_seg->count = count;
417 p_seg->has_fl_mem = has_fl;
418 p_seg->type = seg_type;
419}
420
203static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli, 421static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
204 struct qed_ilt_cli_blk *p_blk, 422 struct qed_ilt_cli_blk *p_blk,
205 u32 start_line, u32 total_size, 423 u32 start_line, u32 total_size,
@@ -241,17 +459,42 @@ static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
241 p_blk->real_size_in_page, p_blk->start_line); 459 p_blk->real_size_in_page, p_blk->start_line);
242} 460}
243 461
462static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
463 enum ilt_clients ilt_client)
464{
465 u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
466 struct qed_ilt_client_cfg *p_cli;
467 u32 lines_to_skip = 0;
468 u32 cxts_per_p;
469
470 if (ilt_client == ILT_CLI_CDUC) {
471 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
472
473 cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
474 (u32) CONN_CXT_SIZE(p_hwfn);
475
476 lines_to_skip = cid_count / cxts_per_p;
477 }
478
479 return lines_to_skip;
480}
481
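
Note: qed_ilt_get_dynamic_line_cnt() above only skips ILT lines that are fully covered by RoCE contexts; that works because qed_cxt_set_proto_cid_count() rounds the RoCE cid count up to a whole ILT page. The arithmetic, with assumed sizes (the real CONN_CXT_SIZE depends on the device):

#include <stdio.h>

int main(void)
{
	unsigned int page_bytes = 1u << (3 + 12);  /* ILT_PAGE_IN_BYTES(3) = 32K */
	unsigned int cxt_size = 320;               /* assumed connection context */
	unsigned int cxts_per_page = page_bytes / cxt_size;  /* 102 */
	unsigned int roce_cids = 1000;

	/* round the cid count up to a page multiple, as the CDUC setup does */
	unsigned int rounded = (roce_cids + cxts_per_page - 1) /
			       cxts_per_page * cxts_per_page;       /* 1020 */
	unsigned int lines_to_skip = rounded / cxts_per_page;       /* 10 */

	printf("%u cids -> %u, %u dynamic lines\n",
	       roce_cids, rounded, lines_to_skip);
	return 0;
}
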
244int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) 482int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
245{ 483{
246 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 484 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
485 u32 curr_line, total, i, task_size, line;
247 struct qed_ilt_client_cfg *p_cli; 486 struct qed_ilt_client_cfg *p_cli;
248 struct qed_ilt_cli_blk *p_blk; 487 struct qed_ilt_cli_blk *p_blk;
249 struct qed_cdu_iids cdu_iids; 488 struct qed_cdu_iids cdu_iids;
489 struct qed_src_iids src_iids;
250 struct qed_qm_iids qm_iids; 490 struct qed_qm_iids qm_iids;
251 u32 curr_line, total, i; 491 struct qed_tm_iids tm_iids;
492 struct qed_tid_seg *p_seg;
252 493
253 memset(&qm_iids, 0, sizeof(qm_iids)); 494 memset(&qm_iids, 0, sizeof(qm_iids));
254 memset(&cdu_iids, 0, sizeof(cdu_iids)); 495 memset(&cdu_iids, 0, sizeof(cdu_iids));
496 memset(&src_iids, 0, sizeof(src_iids));
497 memset(&tm_iids, 0, sizeof(tm_iids));
255 498
256 p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT); 499 p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
257 500
@@ -279,6 +522,9 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
279 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); 522 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
280 p_cli->pf_total_lines = curr_line - p_blk->start_line; 523 p_cli->pf_total_lines = curr_line - p_blk->start_line;
281 524
525 p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn,
526 ILT_CLI_CDUC);
527
282 /* CDUC VF */ 528 /* CDUC VF */
283 p_blk = &p_cli->vf_blks[CDUC_BLK]; 529 p_blk = &p_cli->vf_blks[CDUC_BLK];
284 total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn); 530 total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
@@ -293,21 +539,128 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
293 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, 539 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
294 ILT_CLI_CDUC); 540 ILT_CLI_CDUC);
295 541
542 /* CDUT PF */
543 p_cli = &p_mngr->clients[ILT_CLI_CDUT];
544 p_cli->first.val = curr_line;
545
546 /* first the 'working' task memory */
547 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
548 p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
549 if (!p_seg || p_seg->count == 0)
550 continue;
551
552 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
553 total = p_seg->count * p_mngr->task_type_size[p_seg->type];
554 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
555 p_mngr->task_type_size[p_seg->type]);
556
557 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
558 ILT_CLI_CDUT);
559 }
560
561 /* next the 'init' task memory (forced load memory) */
562 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
563 p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
564 if (!p_seg || p_seg->count == 0)
565 continue;
566
567 p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
568
569 if (!p_seg->has_fl_mem) {
570 /* The segment is active (total size of 'working'
571 * memory is > 0) but has no FL (forced-load, Init)
572 * memory. Thus:
573 *
574 * 1. The total-size in the corresponding FL block of
575 * the ILT client is set to 0 - no ILT lines are
576 * provisioned and no ILT memory allocated.
577 *
578 * 2. The start-line of said block is set to the
579 * start line of the matching working memory
580 * block in the ILT client. This is later used to
581 * configure the CDU segment offset registers, so
582 * an FL command for TIDs of this segment behaves
583 * as a regular load command (loading TIDs from
584 * the working memory).
585 */
586 line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;
587
588 qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
589 continue;
590 }
591 total = p_seg->count * p_mngr->task_type_size[p_seg->type];
592
593 qed_ilt_cli_blk_fill(p_cli, p_blk,
594 curr_line, total,
595 p_mngr->task_type_size[p_seg->type]);
596
597 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
598 ILT_CLI_CDUT);
599 }
600 p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
601
602 /* CDUT VF */
603 p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
604 if (p_seg && p_seg->count) {
605 /* Strictly speaking we need to iterate over all VF
606 * task segment types, but a VF has only 1 segment
607 */
608
609 /* 'working' memory */
610 total = p_seg->count * p_mngr->task_type_size[p_seg->type];
611
612 p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
613 qed_ilt_cli_blk_fill(p_cli, p_blk,
614 curr_line, total,
615 p_mngr->task_type_size[p_seg->type]);
616
617 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
618 ILT_CLI_CDUT);
619
620 /* 'init' memory */
621 p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
622 if (!p_seg->has_fl_mem) {
623 /* see comment above */
624 line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
625 qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
626 } else {
627 task_size = p_mngr->task_type_size[p_seg->type];
628 qed_ilt_cli_blk_fill(p_cli, p_blk,
629 curr_line, total, task_size);
630 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
631 ILT_CLI_CDUT);
632 }
633 p_cli->vf_total_lines = curr_line -
634 p_cli->vf_blks[0].start_line;
635
636 /* Now for the rest of the VFs */
637 for (i = 1; i < p_mngr->vf_count; i++) {
638 p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
639 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
640 ILT_CLI_CDUT);
641
642 p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
643 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
644 ILT_CLI_CDUT);
645 }
646 }
647
296 /* QM */ 648 /* QM */
297 p_cli = &p_mngr->clients[ILT_CLI_QM]; 649 p_cli = &p_mngr->clients[ILT_CLI_QM];
298 p_blk = &p_cli->pf_blks[0]; 650 p_blk = &p_cli->pf_blks[0];
299 651
300 qed_cxt_qm_iids(p_hwfn, &qm_iids); 652 qed_cxt_qm_iids(p_hwfn, &qm_iids);
301 total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 653 total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
302 qm_iids.vf_cids, 0, 654 qm_iids.vf_cids, qm_iids.tids,
303 p_hwfn->qm_info.num_pqs, 655 p_hwfn->qm_info.num_pqs,
304 p_hwfn->qm_info.num_vf_pqs); 656 p_hwfn->qm_info.num_vf_pqs);
305 657
306 DP_VERBOSE(p_hwfn, 658 DP_VERBOSE(p_hwfn,
307 QED_MSG_ILT, 659 QED_MSG_ILT,
308 "QM ILT Info, (cids=%d, vf_cids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n", 660 "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
309 qm_iids.cids, 661 qm_iids.cids,
310 qm_iids.vf_cids, 662 qm_iids.vf_cids,
663 qm_iids.tids,
311 p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total); 664 p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
312 665
313 qed_ilt_cli_blk_fill(p_cli, p_blk, 666 qed_ilt_cli_blk_fill(p_cli, p_blk,
@@ -317,6 +670,75 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
317 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM); 670 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
318 p_cli->pf_total_lines = curr_line - p_blk->start_line; 671 p_cli->pf_total_lines = curr_line - p_blk->start_line;
319 672
673 /* SRC */
674 p_cli = &p_mngr->clients[ILT_CLI_SRC];
675 qed_cxt_src_iids(p_mngr, &src_iids);
676
677 /* Both the PF and VFs searcher connections are stored in the per PF
678 * database. Thus sum the PF searcher cids and all the VFs searcher
679 * cids.
680 */
681 total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
682 if (total) {
683 u32 local_max = max_t(u32, total,
684 SRC_MIN_NUM_ELEMS);
685
686 total = roundup_pow_of_two(local_max);
687
688 p_blk = &p_cli->pf_blks[0];
689 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
690 total * sizeof(struct src_ent),
691 sizeof(struct src_ent));
692
693 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
694 ILT_CLI_SRC);
695 p_cli->pf_total_lines = curr_line - p_blk->start_line;
696 }
697
698 /* TM PF */
699 p_cli = &p_mngr->clients[ILT_CLI_TM];
700 qed_cxt_tm_iids(p_mngr, &tm_iids);
701 total = tm_iids.pf_cids + tm_iids.pf_tids_total;
702 if (total) {
703 p_blk = &p_cli->pf_blks[0];
704 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
705 total * TM_ELEM_SIZE, TM_ELEM_SIZE);
706
707 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
708 ILT_CLI_TM);
709 p_cli->pf_total_lines = curr_line - p_blk->start_line;
710 }
711
712 /* TM VF */
713 total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
714 if (total) {
715 p_blk = &p_cli->vf_blks[0];
716 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
717 total * TM_ELEM_SIZE, TM_ELEM_SIZE);
718
719 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
720 ILT_CLI_TM);
721 p_cli->pf_total_lines = curr_line - p_blk->start_line;
722
723 for (i = 1; i < p_mngr->vf_count; i++)
724 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
725 ILT_CLI_TM);
726 }
727
728 /* TSDM (SRQ CONTEXT) */
729 total = qed_cxt_get_srq_count(p_hwfn);
730
731 if (total) {
732 p_cli = &p_mngr->clients[ILT_CLI_TSDM];
733 p_blk = &p_cli->pf_blks[SRQ_BLK];
734 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
735 total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
736
737 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
738 ILT_CLI_TSDM);
739 p_cli->pf_total_lines = curr_line - p_blk->start_line;
740 }
741
320 if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line > 742 if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
321 RESC_NUM(p_hwfn, QED_ILT)) { 743 RESC_NUM(p_hwfn, QED_ILT)) {
322 DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n", 744 DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
@@ -327,8 +749,122 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
327 return 0; 749 return 0;
328} 750}
329 751
752static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
753{
754 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
755 u32 i;
756
757 if (!p_mngr->t2)
758 return;
759
760 for (i = 0; i < p_mngr->t2_num_pages; i++)
761 if (p_mngr->t2[i].p_virt)
762 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
763 p_mngr->t2[i].size,
764 p_mngr->t2[i].p_virt,
765 p_mngr->t2[i].p_phys);
766
767 kfree(p_mngr->t2);
768 p_mngr->t2 = NULL;
769}
770
771static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
772{
773 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
774 u32 conn_num, total_size, ent_per_page, psz, i;
775 struct qed_ilt_client_cfg *p_src;
776 struct qed_src_iids src_iids;
777 struct qed_dma_mem *p_t2;
778 int rc;
779
780 memset(&src_iids, 0, sizeof(src_iids));
781
782 /* if the SRC ILT client is inactive - there are no connections
783 * requiring the searcher, leave.
784 */
785 p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
786 if (!p_src->active)
787 return 0;
788
789 qed_cxt_src_iids(p_mngr, &src_iids);
790 conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
791 total_size = conn_num * sizeof(struct src_ent);
792
793 /* use the same page size as the SRC ILT client */
794 psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
795 p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
796
797 /* allocate t2 */
798 p_mngr->t2 = kzalloc(p_mngr->t2_num_pages * sizeof(struct qed_dma_mem),
799 GFP_KERNEL);
800 if (!p_mngr->t2) {
801 DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
802 rc = -ENOMEM;
803 goto t2_fail;
804 }
805
806 /* allocate t2 pages */
807 for (i = 0; i < p_mngr->t2_num_pages; i++) {
808 u32 size = min_t(u32, total_size, psz);
809 void **p_virt = &p_mngr->t2[i].p_virt;
810
811 *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
812 size,
813 &p_mngr->t2[i].p_phys, GFP_KERNEL);
814 if (!p_mngr->t2[i].p_virt) {
815 rc = -ENOMEM;
816 goto t2_fail;
817 }
818 memset(*p_virt, 0, size);
819 p_mngr->t2[i].size = size;
820 total_size -= size;
821 }
822
823 /* Set the t2 pointers */
824
825 /* entries per page - must be a power of two */
826 ent_per_page = psz / sizeof(struct src_ent);
827
828 p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;
829
830 p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
831 p_mngr->last_free = (u64) p_t2->p_phys +
832 ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
833
834 for (i = 0; i < p_mngr->t2_num_pages; i++) {
835 u32 ent_num = min_t(u32,
836 ent_per_page,
837 conn_num);
838 struct src_ent *entries = p_mngr->t2[i].p_virt;
839 u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
840 u32 j;
841
842 for (j = 0; j < ent_num - 1; j++) {
843 val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
844 entries[j].next = cpu_to_be64(val);
845 }
846
847 if (i < p_mngr->t2_num_pages - 1)
848 val = (u64) p_mngr->t2[i + 1].p_phys;
849 else
850 val = 0;
851 entries[j].next = cpu_to_be64(val);
852
853 conn_num -= ent_num;
854 }
855
856 return 0;
857
858t2_fail:
859 qed_cxt_src_t2_free(p_hwfn);
860 return rc;
861}
862
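
Note: the T2 table built above is one big free list: each 64-byte src_ent stores the bus address of the next entry (big-endian on the wire, hence the cpu_to_be64()), the last entry of a page points at the first entry of the next page, and the very last entry is 0. A host-side model of the chaining with fake addresses:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define ENT_SIZE 64u	/* sizeof(struct src_ent): 56 opaque bytes + next */

int main(void)
{
	uint64_t page_phys[] = { 0x10000, 0x20000, 0x30000 };	/* invented */
	unsigned int num_pages = 3, ent_per_page = 4, conn_num = 10;
	unsigned int i, j;

	for (i = 0; i < num_pages; i++) {
		unsigned int n = conn_num < ent_per_page ? conn_num
							 : ent_per_page;
		uint64_t next;

		for (j = 0; j + 1 < n; j++)	/* chain inside the page */
			printf("page %u ent %u -> %#" PRIx64 "\n",
			       i, j, page_phys[i] + (j + 1) * ENT_SIZE);

		/* last entry: next page's first entry, or 0 to terminate */
		next = (i + 1 < num_pages) ? page_phys[i + 1] : 0;
		printf("page %u ent %u -> %#" PRIx64 "\n", i, j, next);
		conn_num -= n;
	}
	return 0;
}
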
330#define for_each_ilt_valid_client(pos, clients) \ 863#define for_each_ilt_valid_client(pos, clients) \
331 for (pos = 0; pos < ILT_CLI_MAX; pos++) 864 for (pos = 0; pos < ILT_CLI_MAX; pos++) \
865 if (!clients[pos].active) { \
866 continue; \
867 } else \
332 868
333/* Total number of ILT lines used by this PF */ 869/* Total number of ILT lines used by this PF */
334static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients) 870static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
@@ -336,12 +872,8 @@ static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
336 u32 size = 0; 872 u32 size = 0;
337 u32 i; 873 u32 i;
338 874
339 for_each_ilt_valid_client(i, ilt_clients) { 875 for_each_ilt_valid_client(i, ilt_clients)
340 if (!ilt_clients[i].active) 876 size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1);
341 continue;
342 size += (ilt_clients[i].last.val -
343 ilt_clients[i].first.val + 1);
344 }
345 877
346 return size; 878 return size;
347} 879}
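
Note: the reworked for_each_ilt_valid_client() folds the activity check into the macro itself. The trailing if/else is the standard trick for a filtering iterator: the statement after the macro invocation binds to the dangling else, so callers can drop their open-coded "if (!active) continue;" lines, as the hunks around this one do. A standalone demo with invented data:

#include <stdio.h>

struct client { int active; };

#define for_each_active_client(pos, arr, n)	\
	for ((pos) = 0; (pos) < (n); (pos)++)	\
		if (!(arr)[(pos)].active) {	\
			continue;		\
		} else				/* loop body binds here */

int main(void)
{
	struct client clients[4] = { { 1 }, { 0 }, { 1 }, { 0 } };
	int i;

	for_each_active_client(i, clients, 4)
		printf("client %d is active\n", i);	/* prints 0 and 2 */
	return 0;
}
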
@@ -372,15 +904,22 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
372 u32 start_line_offset) 904 u32 start_line_offset)
373{ 905{
374 struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow; 906 struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
375 u32 lines, line, sz_left; 907 u32 lines, line, sz_left, lines_to_skip = 0;
908
909 /* Special handling for RoCE that supports dynamic allocation */
910 if ((p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) &&
911 ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
912 return 0;
913
914 lines_to_skip = p_blk->dynamic_line_cnt;
376 915
377 if (!p_blk->total_size) 916 if (!p_blk->total_size)
378 return 0; 917 return 0;
379 918
380 sz_left = p_blk->total_size; 919 sz_left = p_blk->total_size;
381 lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page); 920 lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
382 line = p_blk->start_line + start_line_offset - 921 line = p_blk->start_line + start_line_offset -
383 p_hwfn->p_cxt_mngr->pf_start_line; 922 p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
384 923
385 for (; lines; lines--) { 924 for (; lines; lines--) {
386 dma_addr_t p_phys; 925 dma_addr_t p_phys;
@@ -434,8 +973,6 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
434 (u32)(size * sizeof(struct qed_dma_mem))); 973 (u32)(size * sizeof(struct qed_dma_mem)));
435 974
436 for_each_ilt_valid_client(i, clients) { 975 for_each_ilt_valid_client(i, clients) {
437 if (!clients[i].active)
438 continue;
439 for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) { 976 for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
440 p_blk = &clients[i].pf_blks[j]; 977 p_blk = &clients[i].pf_blks[j];
441 rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0); 978 rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
@@ -514,6 +1051,7 @@ cid_map_fail:
514 1051
515int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn) 1052int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
516{ 1053{
1054 struct qed_ilt_client_cfg *clients;
517 struct qed_cxt_mngr *p_mngr; 1055 struct qed_cxt_mngr *p_mngr;
518 u32 i; 1056 u32 i;
519 1057
@@ -524,20 +1062,42 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
524 } 1062 }
525 1063
526 /* Initialize ILT client registers */ 1064 /* Initialize ILT client registers */
527 p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT); 1065 clients = p_mngr->clients;
528 p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT); 1066 clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
529 p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE); 1067 clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
530 1068 clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
531 p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT); 1069
532 p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT); 1070 clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
533 p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE); 1071 clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
534 1072 clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
1073
1074 clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
1075 clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
1076 clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
1077
1078 clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
1079 clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
1080 clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
1081
1082 clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
1083 clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
1084 clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
1085
1086 clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
1087 clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
1088 clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
535 /* default ILT page size for all clients is 32K */ 1089 /* default ILT page size for all clients is 32K */
536 for (i = 0; i < ILT_CLI_MAX; i++) 1090 for (i = 0; i < ILT_CLI_MAX; i++)
537 p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE; 1091 p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
538 1092
1093 /* Initialize task sizes */
1094 p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
1095 p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);
1096
539 if (p_hwfn->cdev->p_iov_info) 1097 if (p_hwfn->cdev->p_iov_info)
540 p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs; 1098 p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
1099 /* Initialize the dynamic ILT allocation mutex */
1100 mutex_init(&p_mngr->mutex);
541 1101
542 /* Set the cxt manager pointer prior to further allocations */ 1102
543 p_hwfn->p_cxt_mngr = p_mngr; 1103 p_hwfn->p_cxt_mngr = p_mngr;
@@ -556,6 +1116,13 @@ int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
556 goto tables_alloc_fail; 1116 goto tables_alloc_fail;
557 } 1117 }
558 1118
1119 /* Allocate the T2 table */
1120 rc = qed_cxt_src_t2_alloc(p_hwfn);
1121 if (rc) {
1122 DP_NOTICE(p_hwfn, "Failed to allocate T2 memory\n");
1123 goto tables_alloc_fail;
1124 }
1125
559 /* Allocate and initialize the acquired cids bitmaps */ 1126 /* Allocate and initialize the acquired cids bitmaps */
560 rc = qed_cid_map_alloc(p_hwfn); 1127 rc = qed_cid_map_alloc(p_hwfn);
561 if (rc) { 1128 if (rc) {
@@ -576,6 +1143,7 @@ void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
576 return; 1143 return;
577 1144
578 qed_cid_map_free(p_hwfn); 1145 qed_cid_map_free(p_hwfn);
1146 qed_cxt_src_t2_free(p_hwfn);
579 qed_ilt_shadow_free(p_hwfn); 1147 qed_ilt_shadow_free(p_hwfn);
580 kfree(p_hwfn->p_cxt_mngr); 1148 kfree(p_hwfn->p_cxt_mngr);
581 1149
@@ -620,6 +1188,48 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
620#define CDUC_NCIB_MASK \ 1188#define CDUC_NCIB_MASK \
621 (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT) 1189 (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
622 1190
1191#define CDUT_TYPE0_CXT_SIZE_SHIFT \
1192 CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT
1193
1194#define CDUT_TYPE0_CXT_SIZE_MASK \
1195 (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
1196 CDUT_TYPE0_CXT_SIZE_SHIFT)
1197
1198#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
1199 CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT
1200
1201#define CDUT_TYPE0_BLOCK_WASTE_MASK \
1202 (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
1203 CDUT_TYPE0_BLOCK_WASTE_SHIFT)
1204
1205#define CDUT_TYPE0_NCIB_SHIFT \
1206 CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT
1207
1208#define CDUT_TYPE0_NCIB_MASK \
1209 (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
1210 CDUT_TYPE0_NCIB_SHIFT)
1211
1212#define CDUT_TYPE1_CXT_SIZE_SHIFT \
1213 CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT
1214
1215#define CDUT_TYPE1_CXT_SIZE_MASK \
1216 (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
1217 CDUT_TYPE1_CXT_SIZE_SHIFT)
1218
1219#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
1220 CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT
1221
1222#define CDUT_TYPE1_BLOCK_WASTE_MASK \
1223 (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
1224 CDUT_TYPE1_BLOCK_WASTE_SHIFT)
1225
1226#define CDUT_TYPE1_NCIB_SHIFT \
1227 CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT
1228
1229#define CDUT_TYPE1_NCIB_MASK \
1230 (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
1231 CDUT_TYPE1_NCIB_SHIFT)
1232
623static void qed_cdu_init_common(struct qed_hwfn *p_hwfn) 1233static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
624{ 1234{
625 u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0; 1235 u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
@@ -634,6 +1244,92 @@ static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
634 SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste); 1244 SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
635 SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page); 1245 SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
636 STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params); 1246 STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
1247
1248 /* CDUT - type-0 tasks configuration */
1249 page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
1250 cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
1251 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1252 block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1253
1254 /* cxt size and block-waste are multiples of 8 */
1255 cdu_params = 0;
1256 SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
1257 SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
1258 SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
1259 STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
1260
1261 /* CDUT - type-1 tasks configuration */
1262 cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
1263 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1264 block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1265
1266 /* cxt size and block-waste are multiples of 8 */
1267 cdu_params = 0;
1268 SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
1269 SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
1270 SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
1271 STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
1272}
1273
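
Note: block_waste above is the per-page remainder once whole contexts are packed into a 32K ILT page, and both the context size and the waste are programmed in 8-byte granules (the >> 3). A sketch with assumed sizes:

#include <stdio.h>

int main(void)
{
	unsigned int page = 1u << (3 + 12);	/* 32K ILT page */
	unsigned int cxt_size = 120;		/* assumed task context size */
	unsigned int per_page = page / cxt_size;		/* 273 */
	unsigned int waste = page - per_page * cxt_size;	/* 8 */

	/* the CDU segment registers take both values in 8-byte units */
	printf("NCIB=%u cxt=%u waste=%u\n",
	       per_page, cxt_size >> 3, waste >> 3);
	return 0;
}
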
1274/* CDU PF */
1275#define CDU_SEG_REG_TYPE_SHIFT CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
1276#define CDU_SEG_REG_TYPE_MASK 0x1
1277#define CDU_SEG_REG_OFFSET_SHIFT 0
1278#define CDU_SEG_REG_OFFSET_MASK CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
1279
1280static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
1281{
1282 struct qed_ilt_client_cfg *p_cli;
1283 struct qed_tid_seg *p_seg;
1284 u32 cdu_seg_params, offset;
1285 int i;
1286
1287 static const u32 rt_type_offset_arr[] = {
1288 CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
1289 CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
1290 CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
1291 CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
1292 };
1293
1294 static const u32 rt_type_offset_fl_arr[] = {
1295 CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
1296 CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
1297 CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
1298 CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
1299 };
1300
1301 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1302
1303 /* There are initializations only for CDUT during the PF phase */
1304 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1305 /* Segment 0 */
1306 p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
1307 if (!p_seg)
1308 continue;
1309
1310 /* Note: start_line is already adjusted for the CDU
1311 * segment register granularity, so we just need to
1312 * divide. Adjustment is implicit as we assume ILT
1313 * Page size is larger than 32K!
1314 */
1315 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1316 (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
1317 p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1318
1319 cdu_seg_params = 0;
1320 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1321 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1322 STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
1323
1324 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1325 (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
1326 p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1327
1328 cdu_seg_params = 0;
1329 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1330 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1331 STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
1332 }
637} 1333}
638 1334
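
Note: the segment offset written by qed_cdu_init_pf() above is the byte distance from the CDUT client's first line to the segment's first line, expressed in CDUT_SEG_ALIGNMET_IN_BYTES (32K) granules. A sketch with invented line numbers:

#include <stdio.h>

int main(void)
{
	unsigned int page_bytes = 1u << (3 + 12);   /* 32K ILT page */
	unsigned int align_bytes = 1u << (3 + 12);  /* CDUT_SEG_ALIGNMET_IN_BYTES */
	unsigned int first_line = 200;              /* p_cli->first.val */
	unsigned int seg_start_line = 212;          /* segment block start */

	unsigned int offset = page_bytes * (seg_start_line - first_line) /
			      align_bytes;
	printf("segment offset = %u granules\n", offset);	/* 12 */
	return 0;
}
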
639void qed_qm_init_pf(struct qed_hwfn *p_hwfn) 1335void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
@@ -742,14 +1438,11 @@ static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
742 1438
743 ilt_clients = p_hwfn->p_cxt_mngr->clients; 1439 ilt_clients = p_hwfn->p_cxt_mngr->clients;
744 for_each_ilt_valid_client(i, ilt_clients) { 1440 for_each_ilt_valid_client(i, ilt_clients) {
745 if (!ilt_clients[i].active)
746 continue;
747 STORE_RT_REG(p_hwfn, 1441 STORE_RT_REG(p_hwfn,
748 ilt_clients[i].first.reg, 1442 ilt_clients[i].first.reg,
749 ilt_clients[i].first.val); 1443 ilt_clients[i].first.val);
750 STORE_RT_REG(p_hwfn, 1444 STORE_RT_REG(p_hwfn,
751 ilt_clients[i].last.reg, 1445 ilt_clients[i].last.reg, ilt_clients[i].last.val);
752 ilt_clients[i].last.val);
753 STORE_RT_REG(p_hwfn, 1446 STORE_RT_REG(p_hwfn,
754 ilt_clients[i].p_size.reg, 1447 ilt_clients[i].p_size.reg,
755 ilt_clients[i].p_size.val); 1448 ilt_clients[i].p_size.val);
@@ -786,6 +1479,33 @@ static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
786 PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET, 1479 PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
787 p_cli->vf_total_lines); 1480 p_cli->vf_total_lines);
788 } 1481 }
1482
1483 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1484 blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1485 if (p_cli->active) {
1486 STORE_RT_REG(p_hwfn,
1487 PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
1488 blk_factor);
1489 STORE_RT_REG(p_hwfn,
1490 PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1491 p_cli->pf_total_lines);
1492 STORE_RT_REG(p_hwfn,
1493 PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
1494 p_cli->vf_total_lines);
1495 }
1496
1497 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
1498 blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1499 if (p_cli->active) {
1500 STORE_RT_REG(p_hwfn,
1501 PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
1502 STORE_RT_REG(p_hwfn,
1503 PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1504 p_cli->pf_total_lines);
1505 STORE_RT_REG(p_hwfn,
1506 PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
1507 p_cli->vf_total_lines);
1508 }
789} 1509}
790 1510
791/* ILT (PSWRQ2) PF */ 1511/* ILT (PSWRQ2) PF */
@@ -804,9 +1524,6 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
804 clients = p_hwfn->p_cxt_mngr->clients; 1524 clients = p_hwfn->p_cxt_mngr->clients;
805 1525
806 for_each_ilt_valid_client(i, clients) { 1526 for_each_ilt_valid_client(i, clients) {
807 if (!clients[i].active)
808 continue;
809
810 /** Client's 1st val and RT array are absolute, ILT shadows' 1527 /** Client's 1st val and RT array are absolute, ILT shadows'
811 * lines are relative. 1528 * lines are relative.
812 */ 1529 */
@@ -837,6 +1554,137 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
837 } 1554 }
838} 1555}
839 1556
1557/* SRC (Searcher) PF */
1558static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
1559{
1560 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1561 u32 rounded_conn_num, conn_num, conn_max;
1562 struct qed_src_iids src_iids;
1563
1564 memset(&src_iids, 0, sizeof(src_iids));
1565 qed_cxt_src_iids(p_mngr, &src_iids);
1566 conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
1567 if (!conn_num)
1568 return;
1569
1570 conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
1571 rounded_conn_num = roundup_pow_of_two(conn_max);
1572
1573 STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
1574 STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
1575 ilog2(rounded_conn_num));
1576
1577 STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
1578 p_hwfn->p_cxt_mngr->first_free);
1579 STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
1580 p_hwfn->p_cxt_mngr->last_free);
1581}
1582
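
Note: the searcher hash sized by qed_src_init_pf() above must cover at least SRC_MIN_NUM_ELEMS elements and be a power of two, and the register takes the log2 of the rounded size. The sketch below reproduces the roundup_pow_of_two()/ilog2() pair in plain C; the numbers are invented:

#include <stdio.h>

static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int conn_num = 700;
	unsigned int min_elems = 256;		/* SRC_MIN_NUM_ELEMS */
	unsigned int conn_max = conn_num > min_elems ? conn_num : min_elems;
	unsigned int rounded = roundup_pow2(conn_max);	/* 1024 */
	unsigned int bits = 0;

	while ((1u << bits) < rounded)
		bits++;				/* ilog2(1024) = 10 */

	printf("COUNTFREE=%u NUMBER_HASH_BITS=%u\n", conn_num, bits);
	return 0;
}
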
1583/* Timers PF */
1584#define TM_CFG_NUM_IDS_SHIFT 0
1585#define TM_CFG_NUM_IDS_MASK 0xFFFFULL
1586#define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16
1587#define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL
1588#define TM_CFG_PARENT_PF_SHIFT 25
1589#define TM_CFG_PARENT_PF_MASK 0x7ULL
1590
1591#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30
1592#define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL
1593
1594#define TM_CFG_TID_OFFSET_SHIFT 30
1595#define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL
1596#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49
1597#define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL
1598
1599static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
1600{
1601 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1602 u32 active_seg_mask = 0, tm_offset, rt_reg;
1603 struct qed_tm_iids tm_iids;
1604 u64 cfg_word;
1605 u8 i;
1606
1607 memset(&tm_iids, 0, sizeof(tm_iids));
1608 qed_cxt_tm_iids(p_mngr, &tm_iids);
1609
1610 /* @@@TBD No pre-scan for now */
1611
1612 /* Note: We assume consecutive VFs for a PF */
1613 for (i = 0; i < p_mngr->vf_count; i++) {
1614 cfg_word = 0;
1615 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
1616 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1617 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1618 SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
1619 rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1620 (sizeof(cfg_word) / sizeof(u32)) *
1621 (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1622 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1623 }
1624
1625 cfg_word = 0;
1626 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
1627 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1628 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */
1629 SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
1630
1631 rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1632 (sizeof(cfg_word) / sizeof(u32)) *
1633 (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
1634 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1635
1636 /* enable scan */
1637 STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
1638 tm_iids.pf_cids ? 0x1 : 0x0);
1639
1640 /* @@@TBD how to enable the scan for the VFs */
1641
1642 tm_offset = tm_iids.per_vf_cids;
1643
1644 /* Note: We assume consecutive VFs for a PF */
1645 for (i = 0; i < p_mngr->vf_count; i++) {
1646 cfg_word = 0;
1647 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
1648 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1649 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1650 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1651 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1652
1653 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1654 (sizeof(cfg_word) / sizeof(u32)) *
1655 (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1656
1657 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1658 }
1659
1660 tm_offset = tm_iids.pf_cids;
1661 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1662 cfg_word = 0;
1663 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
1664 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1665 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
1666 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1667 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1668
1669 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1670 (sizeof(cfg_word) / sizeof(u32)) *
1671 (NUM_OF_VFS(p_hwfn->cdev) +
1672 p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
1673
1674 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1675 active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
1676
1677 tm_offset += tm_iids.pf_tids[i];
1678 }
1679
1680 if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE)
1681 active_seg_mask = 0;
1682
1683 STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
1684
1685 /* @@@TBD how to enable the scan for the VFs */
1686}
1687
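
Note: each Timers config word packs several fields into 64 bits using the shift/mask pairs defined above. A minimal reimplementation of the packing; the SET_FIELD below only ORs fields into a zeroed word, which is all qed_tm_init_pf() needs, and the values are illustrative:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define TM_CFG_NUM_IDS_SHIFT	0
#define TM_CFG_NUM_IDS_MASK	0xFFFFULL
#define TM_CFG_PARENT_PF_SHIFT	25
#define TM_CFG_PARENT_PF_MASK	0x7ULL

#define SET_FIELD(word, name, val) \
	((word) |= (((uint64_t)(val) & name##_MASK) << name##_SHIFT))

int main(void)
{
	uint64_t cfg_word = 0;

	SET_FIELD(cfg_word, TM_CFG_NUM_IDS, 4096);	/* per-VF cids */
	SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 2);	/* owning PF */

	printf("cfg_word = %#" PRIx64 "\n", cfg_word);	/* 0x4001000 */
	return 0;
}
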
840void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn) 1688void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
841{ 1689{
842 qed_cdu_init_common(p_hwfn); 1690 qed_cdu_init_common(p_hwfn);
@@ -847,7 +1695,10 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
847 qed_qm_init_pf(p_hwfn); 1695 qed_qm_init_pf(p_hwfn);
848 qed_cm_init_pf(p_hwfn); 1696 qed_cm_init_pf(p_hwfn);
849 qed_dq_init_pf(p_hwfn); 1697 qed_dq_init_pf(p_hwfn);
1698 qed_cdu_init_pf(p_hwfn);
850 qed_ilt_init_pf(p_hwfn); 1699 qed_ilt_init_pf(p_hwfn);
1700 qed_src_init_pf(p_hwfn);
1701 qed_tm_init_pf(p_hwfn);
851} 1702}
852 1703
853int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, 1704int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
@@ -968,17 +1819,439 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
968 return 0; 1819 return 0;
969} 1820}
970 1821
971int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn) 1822void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
1823 struct qed_rdma_pf_params *p_params)
972{ 1824{
973 struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params; 1825 u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
1826 enum protocol_type proto;
1827
1828 num_mrs = min_t(u32, RDMA_MAX_TIDS, p_params->num_mrs);
1829 num_tasks = num_mrs; /* each mr uses a single task id */
1830 num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
1831
1832 switch (p_hwfn->hw_info.personality) {
1833 case QED_PCI_ETH_ROCE:
1834 num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
1835 num_cons = num_qps * 2; /* each QP requires two connections */
1836 proto = PROTOCOLID_ROCE;
1837 break;
1838 default:
1839 return;
1840 }
1841
1842 if (num_cons && num_tasks) {
1843 qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
1844
1845 /* Deliberately passing ROCE for the task id. This is because
1846 * iWARP / RoCE share the task id.
1847 */
1848 qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
1849 QED_CXT_ROCE_TID_SEG, 1,
1850 num_tasks, false);
1851 qed_cxt_set_srq_count(p_hwfn, num_srqs);
1852 } else {
1853 DP_INFO(p_hwfn->cdev,
1854 "RDMA personality used without setting params!\n");
1855 }
1856}
974 1857
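
Note: qed_rdma_set_pf_params() above encodes simple sizing rules: two connections per RoCE QP (per the comment in the diff), one task id per memory region, and an SRQ count clamped to 32K. The arithmetic, with invented inputs:

#include <stdio.h>

int main(void)
{
	unsigned int num_qps = 8192, num_mrs = 16384, num_srqs = 50000;
	unsigned int srq_cap = 32 * 1024;

	unsigned int num_cons = num_qps * 2;	/* two cids per RoCE QP */
	unsigned int num_tasks = num_mrs;	/* one tid per MR */
	unsigned int srqs = num_srqs < srq_cap ? num_srqs : srq_cap;

	printf("cons=%u tasks=%u srqs=%u\n", num_cons, num_tasks, srqs);
	return 0;
}
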
1858int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
1859{
975 /* Set the number of required CORE connections */ 1860 /* Set the number of required CORE connections */
976 u32 core_cids = 1; /* SPQ */ 1861 u32 core_cids = 1; /* SPQ */
977 1862
978 qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0); 1863 qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
979 1864
980 qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, 1865 switch (p_hwfn->hw_info.personality) {
981 p_params->num_cons, 1); 1866 case QED_PCI_ETH_ROCE:
1867 {
1868 qed_rdma_set_pf_params(p_hwfn,
1869 &p_hwfn->
1870 pf_params.rdma_pf_params);
1871 /* no need for break since RoCE coexists with Ethernet */
1872 }
1873 case QED_PCI_ETH:
1874 {
1875 struct qed_eth_pf_params *p_params =
1876 &p_hwfn->pf_params.eth_pf_params;
1877
1878 qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
1879 p_params->num_cons, 1);
1880 break;
1881 }
1882 case QED_PCI_ISCSI:
1883 {
1884 struct qed_iscsi_pf_params *p_params;
1885
1886 p_params = &p_hwfn->pf_params.iscsi_pf_params;
1887
1888 if (p_params->num_cons && p_params->num_tasks) {
1889 qed_cxt_set_proto_cid_count(p_hwfn,
1890 PROTOCOLID_ISCSI,
1891 p_params->num_cons,
1892 0);
1893
1894 qed_cxt_set_proto_tid_count(p_hwfn,
1895 PROTOCOLID_ISCSI,
1896 QED_CXT_ISCSI_TID_SEG,
1897 0,
1898 p_params->num_tasks,
1899 true);
1900 } else {
1901 DP_INFO(p_hwfn->cdev,
1902 "Iscsi personality used without setting params!\n");
1903 }
1904 break;
1905 }
1906 default:
1907 return -EINVAL;
1908 }
1909
1910 return 0;
1911}
1912
1913int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
1914 struct qed_tid_mem *p_info)
1915{
1916 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1917 u32 proto, seg, total_lines, i, shadow_line;
1918 struct qed_ilt_client_cfg *p_cli;
1919 struct qed_ilt_cli_blk *p_fl_seg;
1920 struct qed_tid_seg *p_seg_info;
1921
1922 /* Verify the personality */
1923 switch (p_hwfn->hw_info.personality) {
1924 case QED_PCI_ISCSI:
1925 proto = PROTOCOLID_ISCSI;
1926 seg = QED_CXT_ISCSI_TID_SEG;
1927 break;
1928 default:
1929 return -EINVAL;
1930 }
1931
1932 p_cli = &p_mngr->clients[ILT_CLI_CDUT];
1933 if (!p_cli->active)
1934 return -EINVAL;
1935
1936 p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
1937 if (!p_seg_info->has_fl_mem)
1938 return -EINVAL;
1939
1940 p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
1941 total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
1942 p_fl_seg->real_size_in_page);
1943
1944 for (i = 0; i < total_lines; i++) {
1945 shadow_line = i + p_fl_seg->start_line -
1946 p_hwfn->p_cxt_mngr->pf_start_line;
1947 p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
1948 }
1949 p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
1950 p_fl_seg->real_size_in_page;
1951 p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
1952 p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
1953 p_info->tid_size;
1954
1955 return 0;
1956}
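
To make the waste arithmetic concrete: with a hypothetical 4 KB ILT page
and a 96-byte TID, a block holds 4096 / 96 = 42 TIDs occupying 4032 bytes,
so real_size_in_page would be 4032 and 64 bytes per page are reported as
waste. As standalone arithmetic (made-up sizes, no driver types):

	#include <stdio.h>

	int main(void)
	{
		unsigned int page_bytes = 4096;	/* hypothetical ILT page */
		unsigned int tid_size = 96;	/* hypothetical TID size */
		unsigned int tids_per_block = page_bytes / tid_size; /* 42 */
		unsigned int real_size = tids_per_block * tid_size;  /* 4032 */
		unsigned int waste = page_bytes - real_size;         /* 64 */

		printf("%u tids/block, %u bytes wasted per page\n",
		       tids_per_block, waste);
		return 0;
	}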
1957
1958/* This function is very RoCE-oriented; if another protocol wants this
1959 * feature in the future, we'll need to make the function more generic.
1960 */
1961int
1962qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
1963 enum qed_cxt_elem_type elem_type, u32 iid)
1964{
1965 u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
1966 struct qed_ilt_client_cfg *p_cli;
1967 struct qed_ilt_cli_blk *p_blk;
1968 struct qed_ptt *p_ptt;
1969 dma_addr_t p_phys;
1970 u64 ilt_hw_entry;
1971 void *p_virt;
1972 int rc = 0;
1973
1974 switch (elem_type) {
1975 case QED_ELEM_CXT:
1976 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
1977 elem_size = CONN_CXT_SIZE(p_hwfn);
1978 p_blk = &p_cli->pf_blks[CDUC_BLK];
1979 break;
1980 case QED_ELEM_SRQ:
1981 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
1982 elem_size = SRQ_CXT_SIZE;
1983 p_blk = &p_cli->pf_blks[SRQ_BLK];
1984 break;
1985 case QED_ELEM_TASK:
1986 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1987 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
1988 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
1989 break;
1990 default:
1991 DP_NOTICE(p_hwfn, "Invalid elem type = %d", elem_type);
1992 return -EINVAL;
1993 }
1994
1995 /* Calculate line in ilt */
1996 hw_p_size = p_cli->p_size.val;
1997 elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
1998 line = p_blk->start_line + (iid / elems_per_p);
1999 shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
2000
2001 /* If the line is already allocated, do nothing; otherwise allocate
2002 * it and write it to the PSWRQ2 registers.
2003 * This section can run in parallel from different contexts, so
2004 * mutex protection is needed.
2005 */
2006
2007 mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
2008
2009 if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
2010 goto out0;
2011
2012 p_ptt = qed_ptt_acquire(p_hwfn);
2013 if (!p_ptt) {
2014 DP_NOTICE(p_hwfn,
2015 "QED_TIME_OUT on ptt acquire - dynamic allocation");
2016 rc = -EBUSY;
2017 goto out0;
2018 }
2019
2020 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2021 p_blk->real_size_in_page,
2022 &p_phys, GFP_KERNEL);
2023 if (!p_virt) {
2024 rc = -ENOMEM;
2025 goto out1;
2026 }
2027 memset(p_virt, 0, p_blk->real_size_in_page);
2028
2029 /* Configuring refTagMask to 0xF is required for RoCE DIF MRs only,
2030 * to compensate for a HW bug, but it is configured even if DIF is not
2031 * enabled. This is harmless and allows us to avoid a dedicated API. We
2032 * configure the field for all of the contexts on the newly allocated
2033 * page.
2034 */
2035 if (elem_type == QED_ELEM_TASK) {
2036 u32 elem_i;
2037 u8 *elem_start = (u8 *)p_virt;
2038 union type1_task_context *elem;
2039
2040 for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
2041 elem = (union type1_task_context *)elem_start;
2042 SET_FIELD(elem->roce_ctx.tdif_context.flags1,
2043 TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
2044 elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
2045 }
2046 }
2047
2048 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
2049 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
2050 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
2051 p_blk->real_size_in_page;
2052
2053 /* compute absolute offset */
2054 reg_offset = PSWRQ2_REG_ILT_MEMORY +
2055 (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
2056
2057 ilt_hw_entry = 0;
2058 SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
2059 SET_FIELD(ilt_hw_entry,
2060 ILT_ENTRY_PHY_ADDR,
2061 (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
2062
2063 /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
2064 qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
2065 reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), 0);
2066
2067 if (elem_type == QED_ELEM_CXT) {
2068 u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
2069 elems_per_p;
2070
2071 /* Update the relevant register in the parser */
2072 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
2073 last_cid_allocated - 1);
2074
2075 if (!p_hwfn->b_rdma_enabled_in_prs) {
2076 /* Enable RoCE search */
2077 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
2078 p_hwfn->b_rdma_enabled_in_prs = true;
2079 }
2080 }
2081
2082out1:
2083 qed_ptt_release(p_hwfn, p_ptt);
2084out0:
2085 mutex_unlock(&p_hwfn->p_cxt_mngr->mutex);
2086
2087 return rc;
2088}
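
The line calculation at the top of the function is plain integer division.
With made-up numbers - a 32 KB ILT page holding 512-byte connection
contexts gives 64 elements per page - iid 1000 lands 15 pages into the
block:

	/* Pure arithmetic; all values are illustrative. */
	static unsigned int ilt_shadow_line(unsigned int iid)
	{
		unsigned int page_bytes = 32768, elem_size = 512;
		unsigned int start_line = 100, pf_start_line = 80;
		unsigned int elems_per_p = page_bytes / elem_size;  /* 64 */
		unsigned int line = start_line + iid / elems_per_p; /* 115 */

		return line - pf_start_line; /* shadow line 35 for iid 1000 */
	}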
2089
2090/* This function is very RoCE-oriented; if another protocol wants this
2091 * feature in the future, we'll need to make the function more generic.
2092 */
2093static int
2094qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
2095 enum qed_cxt_elem_type elem_type,
2096 u32 start_iid, u32 count)
2097{
2098 u32 start_line, end_line, shadow_start_line, shadow_end_line;
2099 u32 reg_offset, elem_size, hw_p_size, elems_per_p;
2100 struct qed_ilt_client_cfg *p_cli;
2101 struct qed_ilt_cli_blk *p_blk;
2102 u32 end_iid = start_iid + count;
2103 struct qed_ptt *p_ptt;
2104 u64 ilt_hw_entry = 0;
2105 u32 i;
2106
2107 switch (elem_type) {
2108 case QED_ELEM_CXT:
2109 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2110 elem_size = CONN_CXT_SIZE(p_hwfn);
2111 p_blk = &p_cli->pf_blks[CDUC_BLK];
2112 break;
2113 case QED_ELEM_SRQ:
2114 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2115 elem_size = SRQ_CXT_SIZE;
2116 p_blk = &p_cli->pf_blks[SRQ_BLK];
2117 break;
2118 case QED_ELEM_TASK:
2119 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2120 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2121 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
2122 break;
2123 default:
2124 DP_NOTICE(p_hwfn, "Invalid elem type = %d", elem_type);
2125 return -EINVAL;
2126 }
2127
2128 /* Calculate line in ilt */
2129 hw_p_size = p_cli->p_size.val;
2130 elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2131 start_line = p_blk->start_line + (start_iid / elems_per_p);
2132 end_line = p_blk->start_line + (end_iid / elems_per_p);
2133 if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
2134 end_line--;
2135
2136 shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
2137 shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
2138
2139 p_ptt = qed_ptt_acquire(p_hwfn);
2140 if (!p_ptt) {
2141 DP_NOTICE(p_hwfn,
2142 "QED_TIME_OUT on ptt acquire - dynamic allocation");
2143 return -EBUSY;
2144 }
2145
2146 for (i = shadow_start_line; i < shadow_end_line; i++) {
2147 if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
2148 continue;
2149
2150 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2151 p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
2152 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
2153 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys);
2154
2155 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL;
2156 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
2157 p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
2158
2159 /* compute absolute offset */
2160 reg_offset = PSWRQ2_REG_ILT_MEMORY +
2161 ((start_line++) * ILT_REG_SIZE_IN_BYTES *
2162 ILT_ENTRY_IN_REGS);
2163
2164 /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
2165 * wide-bus.
2166 */
2167 qed_dmae_host2grc(p_hwfn, p_ptt,
2168 (u64) (uintptr_t) &ilt_hw_entry,
2169 reg_offset,
2170 sizeof(ilt_hw_entry) / sizeof(u32),
2171 0);
2172 }
2173
2174 qed_ptt_release(p_hwfn, p_ptt);
2175
2176 return 0;
2177}
2178
2179int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
2180{
2181 int rc;
2182 u32 cid;
2183
2184 /* Free Connection CXT */
2185 rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT,
2186 qed_cxt_get_proto_cid_start(p_hwfn,
2187 proto),
2188 qed_cxt_get_proto_cid_count(p_hwfn,
2189 proto, &cid));
2190
2191 if (rc)
2192 return rc;
2193
2194 /* Free Task CXT */
2195 rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
2196 qed_cxt_get_proto_tid_count(p_hwfn, proto));
2197 if (rc)
2198 return rc;
2199
2200 /* Free TSDM CXT */
2201 rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0,
2202 qed_cxt_get_srq_count(p_hwfn));
2203
2204 return rc;
2205}
2206
2207int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
2208 u32 tid, u8 ctx_type, void **pp_task_ctx)
2209{
2210 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
2211 struct qed_ilt_client_cfg *p_cli;
2212 struct qed_ilt_cli_blk *p_seg;
2213 struct qed_tid_seg *p_seg_info;
2214 u32 proto, seg;
2215 u32 total_lines;
2216 u32 tid_size, ilt_idx;
2217 u32 num_tids_per_block;
2218
2219 /* Verify the personality */
2220 switch (p_hwfn->hw_info.personality) {
2221 case QED_PCI_ISCSI:
2222 proto = PROTOCOLID_ISCSI;
2223 seg = QED_CXT_ISCSI_TID_SEG;
2224 break;
2225 default:
2226 return -EINVAL;
2227 }
2228
2229 p_cli = &p_mngr->clients[ILT_CLI_CDUT];
2230 if (!p_cli->active)
2231 return -EINVAL;
2232
2233 p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
2234
2235 if (ctx_type == QED_CTX_WORKING_MEM) {
2236 p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
2237 } else if (ctx_type == QED_CTX_FL_MEM) {
2238 if (!p_seg_info->has_fl_mem)
2239 return -EINVAL;
2240 p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
2241 } else {
2242 return -EINVAL;
2243 }
2244 total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
2245 tid_size = p_mngr->task_type_size[p_seg_info->type];
2246 num_tids_per_block = p_seg->real_size_in_page / tid_size;
2247
2248 if (total_lines < tid / num_tids_per_block)
2249 return -EINVAL;
2250
2251 ilt_idx = tid / num_tids_per_block + p_seg->start_line -
2252 p_mngr->pf_start_line;
2253 *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
2254 (tid % num_tids_per_block) * tid_size;
982 2255
983 return 0; 2256 return 0;
984} 2257}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 234c0fa8db2a..c6f6f2e8192d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -21,6 +21,14 @@ struct qed_cxt_info {
21 enum protocol_type type; 21 enum protocol_type type;
22}; 22};
23 23
24#define MAX_TID_BLOCKS 512
25struct qed_tid_mem {
26 u32 tid_size;
27 u32 num_tids_per_block;
28 u32 waste;
29 u8 *blocks[MAX_TID_BLOCKS]; /* 4K */
30};
31
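
A consumer of the filled-in struct would locate an individual TID with
block/offset math like the following (hypothetical helper, not part of the
patch):

	static void *tid_addr(struct qed_tid_mem *m, u32 tid)
	{
		u32 block = tid / m->num_tids_per_block;
		u32 offset = (tid % m->num_tids_per_block) * m->tid_size;

		return m->blocks[block] + offset;
	}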
24/** 32/**
25 * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type 33 * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
26 * 34 *
@@ -46,8 +54,22 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
46int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, 54int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
47 struct qed_cxt_info *p_info); 55 struct qed_cxt_info *p_info);
48 56
57/**
58 * @brief qed_cxt_get_tid_mem_info
59 *
60 * @param p_hwfn
61 * @param p_info
62 *
63 * @return int
64 */
65int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
66 struct qed_tid_mem *p_info);
67
68#define QED_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI
69#define QED_CXT_ROCE_TID_SEG PROTOCOLID_ROCE
49enum qed_cxt_elem_type { 70enum qed_cxt_elem_type {
50 QED_ELEM_CXT, 71 QED_ELEM_CXT,
72 QED_ELEM_SRQ,
51 QED_ELEM_TASK 73 QED_ELEM_TASK
52}; 74};
53 75
@@ -149,4 +171,6 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
149void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, 171void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
150 u32 cid); 172 u32 cid);
151 173
174#define QED_CTX_WORKING_MEM 0
175#define QED_CTX_FL_MEM 1
152#endif 176#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 21ec1c2df2c7..d0dc28f93c0e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -9,6 +9,7 @@
9#include <linux/types.h> 9#include <linux/types.h>
10#include <asm/byteorder.h> 10#include <asm/byteorder.h>
11#include <linux/bitops.h> 11#include <linux/bitops.h>
12#include <linux/dcbnl.h>
12#include <linux/errno.h> 13#include <linux/errno.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
@@ -18,6 +19,9 @@
18#include "qed_dcbx.h" 19#include "qed_dcbx.h"
19#include "qed_hsi.h" 20#include "qed_hsi.h"
20#include "qed_sp.h" 21#include "qed_sp.h"
22#ifdef CONFIG_DCB
23#include <linux/qed/qed_eth_if.h>
24#endif
21 25
22#define QED_DCBX_MAX_MIB_READ_TRY (100) 26#define QED_DCBX_MAX_MIB_READ_TRY (100)
23#define QED_ETH_TYPE_DEFAULT (0) 27#define QED_ETH_TYPE_DEFAULT (0)
@@ -252,7 +256,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
252 if (p_data->arr[type].update) 256 if (p_data->arr[type].update)
253 continue; 257 continue;
254 258
255 enable = (type == DCBX_PROTOCOL_ETH) ? false : dcbx_enabled; 259 enable = !(type == DCBX_PROTOCOL_ETH);
256 qed_dcbx_update_app_info(p_data, p_hwfn, enable, true, 260 qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
257 priority, tc, type); 261 priority, tc, type);
258 } 262 }
@@ -351,6 +355,293 @@ qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn,
351 return rc; 355 return rc;
352} 356}
353 357
358#ifdef CONFIG_DCB
359static void
360qed_dcbx_get_priority_info(struct qed_hwfn *p_hwfn,
361 struct qed_dcbx_app_prio *p_prio,
362 struct qed_dcbx_results *p_results)
363{
364 u8 val;
365
366 p_prio->roce = QED_DCBX_INVALID_PRIORITY;
367 p_prio->roce_v2 = QED_DCBX_INVALID_PRIORITY;
368 p_prio->iscsi = QED_DCBX_INVALID_PRIORITY;
369 p_prio->fcoe = QED_DCBX_INVALID_PRIORITY;
370
371 if (p_results->arr[DCBX_PROTOCOL_ROCE].update &&
372 p_results->arr[DCBX_PROTOCOL_ROCE].enable)
373 p_prio->roce = p_results->arr[DCBX_PROTOCOL_ROCE].priority;
374
375 if (p_results->arr[DCBX_PROTOCOL_ROCE_V2].update &&
376 p_results->arr[DCBX_PROTOCOL_ROCE_V2].enable) {
377 val = p_results->arr[DCBX_PROTOCOL_ROCE_V2].priority;
378 p_prio->roce_v2 = val;
379 }
380
381 if (p_results->arr[DCBX_PROTOCOL_ISCSI].update &&
382 p_results->arr[DCBX_PROTOCOL_ISCSI].enable)
383 p_prio->iscsi = p_results->arr[DCBX_PROTOCOL_ISCSI].priority;
384
385 if (p_results->arr[DCBX_PROTOCOL_FCOE].update &&
386 p_results->arr[DCBX_PROTOCOL_FCOE].enable)
387 p_prio->fcoe = p_results->arr[DCBX_PROTOCOL_FCOE].priority;
388
389 if (p_results->arr[DCBX_PROTOCOL_ETH].update &&
390 p_results->arr[DCBX_PROTOCOL_ETH].enable)
391 p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority;
392
393 DP_VERBOSE(p_hwfn, QED_MSG_DCB,
394 "Priorities: iscsi %d, roce %d, roce v2 %d, fcoe %d, eth %d\n",
395 p_prio->iscsi, p_prio->roce, p_prio->roce_v2, p_prio->fcoe,
396 p_prio->eth);
397}
398
399static void
400qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
401 struct dcbx_app_priority_feature *p_app,
402 struct dcbx_app_priority_entry *p_tbl,
403 struct qed_dcbx_params *p_params)
404{
405 struct qed_app_entry *entry;
406 u8 pri_map;
407 int i;
408
409 p_params->app_willing = QED_MFW_GET_FIELD(p_app->flags,
410 DCBX_APP_WILLING);
411 p_params->app_valid = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ENABLED);
412 p_params->app_error = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR);
413 p_params->num_app_entries = QED_MFW_GET_FIELD(p_app->flags,
414 DCBX_APP_NUM_ENTRIES);
415 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
416 entry = &p_params->app_entry[i];
417 entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry,
418 DCBX_APP_SF));
419 pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
420 entry->prio = ffs(pri_map) - 1;
421 entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
422 DCBX_APP_PROTOCOL_ID);
423 qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
424 entry->proto_id,
425 &entry->proto_type);
426 }
427
428 DP_VERBOSE(p_hwfn, QED_MSG_DCB,
429 "APP params: willing %d, valid %d error = %d\n",
430 p_params->app_willing, p_params->app_valid,
431 p_params->app_error);
432}
433
434static void
435qed_dcbx_get_pfc_data(struct qed_hwfn *p_hwfn,
436 u32 pfc, struct qed_dcbx_params *p_params)
437{
438 u8 pfc_map;
439
440 p_params->pfc.willing = QED_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING);
441 p_params->pfc.max_tc = QED_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS);
442 p_params->pfc.enabled = QED_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED);
443 pfc_map = QED_MFW_GET_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP);
444 p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0);
445 p_params->pfc.prio[1] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_1);
446 p_params->pfc.prio[2] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_2);
447 p_params->pfc.prio[3] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_3);
448 p_params->pfc.prio[4] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_4);
449 p_params->pfc.prio[5] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_5);
450 p_params->pfc.prio[6] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_6);
451 p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7);
452
453 DP_VERBOSE(p_hwfn, QED_MSG_DCB,
454 "PFC params: willing %d, pfc_bitmap %d\n",
455 p_params->pfc.willing, pfc_map);
456}
457
458static void
459qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
460 struct dcbx_ets_feature *p_ets,
461 struct qed_dcbx_params *p_params)
462{
463 u32 bw_map[2], tsa_map[2], pri_map;
464 int i;
465
466 p_params->ets_willing = QED_MFW_GET_FIELD(p_ets->flags,
467 DCBX_ETS_WILLING);
468 p_params->ets_enabled = QED_MFW_GET_FIELD(p_ets->flags,
469 DCBX_ETS_ENABLED);
470 p_params->ets_cbs = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_CBS);
471 p_params->max_ets_tc = QED_MFW_GET_FIELD(p_ets->flags,
472 DCBX_ETS_MAX_TCS);
473 DP_VERBOSE(p_hwfn, QED_MSG_DCB,
474 "ETS params: willing %d, ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
475 p_params->ets_willing,
476 p_params->ets_cbs,
477 p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
478
479 /* The 8-bit TSA and BW values corresponding to each of the 8 TCs are
480 * encoded in a u32 array of size 2.
481 */
482 bw_map[0] = be32_to_cpu(p_ets->tc_bw_tbl[0]);
483 bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]);
484 tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]);
485 tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]);
486 pri_map = be32_to_cpu(p_ets->pri_tc_tbl[0]);
487 for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
488 p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
489 p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
490 p_params->ets_pri_tc_tbl[i] = QED_DCBX_PRIO2TC(pri_map, i);
491 DP_VERBOSE(p_hwfn, QED_MSG_DCB,
492 "elem %d bw_tbl %x tsa_tbl %x\n",
493 i, p_params->ets_tc_bw_tbl[i],
494 p_params->ets_tc_tsa_tbl[i]);
495 }
496}
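
A standalone sketch of that unpacking, with userspace ntohl() standing in
for be32_to_cpu() (illustrative only):

	#include <stdint.h>
	#include <arpa/inet.h>

	static void unpack_tc_bytes(const uint32_t be_words[2], uint8_t out[8])
	{
		uint32_t host[2] = { ntohl(be_words[0]), ntohl(be_words[1]) };
		const uint8_t *bytes = (const uint8_t *)host;
		int i;

		for (i = 0; i < 8; i++)
			out[i] = bytes[i];	/* one byte per TC */
	}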
497
498static void
499qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn,
500 struct dcbx_app_priority_feature *p_app,
501 struct dcbx_app_priority_entry *p_tbl,
502 struct dcbx_ets_feature *p_ets,
503 u32 pfc, struct qed_dcbx_params *p_params)
504{
505 qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params);
506 qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
507 qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
508}
509
510static void
511qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn,
512 struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
513{
514 struct dcbx_features *p_feat;
515
516 p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
517 qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
518 p_feat->app.app_pri_tbl, &p_feat->ets,
519 p_feat->pfc, &params->local.params);
520 params->local.valid = true;
521}
522
523static void
524qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn,
525 struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
526{
527 struct dcbx_features *p_feat;
528
529 p_feat = &p_hwfn->p_dcbx_info->remote.features;
530 qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
531 p_feat->app.app_pri_tbl, &p_feat->ets,
532 p_feat->pfc, &params->remote.params);
533 params->remote.valid = true;
534}
535
536static void
537qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
538 struct qed_ptt *p_ptt,
539 struct qed_dcbx_get *params)
540{
541 struct qed_dcbx_operational_params *p_operational;
542 struct qed_dcbx_results *p_results;
543 struct dcbx_features *p_feat;
544 bool enabled, err;
545 u32 flags;
546 bool val;
547
548 flags = p_hwfn->p_dcbx_info->operational.flags;
549
550 /* If the DCBX version is non-zero, then negotiation
551 * was successfully performed.
552 */
553 p_operational = &params->operational;
554 enabled = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) !=
555 DCBX_CONFIG_VERSION_DISABLED);
556 if (!enabled) {
557 p_operational->enabled = enabled;
558 p_operational->valid = false;
559 return;
560 }
561
562 p_feat = &p_hwfn->p_dcbx_info->operational.features;
563 p_results = &p_hwfn->p_dcbx_info->results;
564
565 val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
566 DCBX_CONFIG_VERSION_IEEE);
567 p_operational->ieee = val;
568 val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
569 DCBX_CONFIG_VERSION_CEE);
570 p_operational->cee = val;
571
572 DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Version support: ieee %d, cee %d\n",
573 p_operational->ieee, p_operational->cee);
574
575 qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
576 p_feat->app.app_pri_tbl, &p_feat->ets,
577 p_feat->pfc, &params->operational.params);
578 qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results);
579 err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
580 p_operational->err = err;
581 p_operational->enabled = enabled;
582 p_operational->valid = true;
583}
584
585static void
586qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
587 struct qed_ptt *p_ptt,
588 struct qed_dcbx_get *params)
589{
590 struct lldp_config_params_s *p_local;
591
592 p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
593
594 memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
595 ARRAY_SIZE(p_local->local_chassis_id));
596 memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
597 ARRAY_SIZE(p_local->local_port_id));
598}
599
600static void
601qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
602 struct qed_ptt *p_ptt,
603 struct qed_dcbx_get *params)
604{
605 struct lldp_status_params_s *p_remote;
606
607 p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
608
609 memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
610 ARRAY_SIZE(p_remote->peer_chassis_id));
611 memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
612 ARRAY_SIZE(p_remote->peer_port_id));
613}
614
615static int
616qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
617 struct qed_dcbx_get *p_params,
618 enum qed_mib_read_type type)
619{
620 switch (type) {
621 case QED_DCBX_REMOTE_MIB:
622 qed_dcbx_get_remote_params(p_hwfn, p_ptt, p_params);
623 break;
624 case QED_DCBX_LOCAL_MIB:
625 qed_dcbx_get_local_params(p_hwfn, p_ptt, p_params);
626 break;
627 case QED_DCBX_OPERATIONAL_MIB:
628 qed_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
629 break;
630 case QED_DCBX_REMOTE_LLDP_MIB:
631 qed_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
632 break;
633 case QED_DCBX_LOCAL_LLDP_MIB:
634 qed_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
635 break;
636 default:
637 DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
638 return -EINVAL;
639 }
640
641 return 0;
642}
643#endif
644
354static int 645static int
355qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 646qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
356{ 647{
@@ -561,3 +852,1333 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
561 p_dcb_data = &p_dest->eth_dcb_data; 852 p_dcb_data = &p_dest->eth_dcb_data;
562 qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH); 853 qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
563} 854}
855
856#ifdef CONFIG_DCB
857static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
858 struct qed_dcbx_get *p_get,
859 enum qed_mib_read_type type)
860{
861 struct qed_ptt *p_ptt;
862 int rc;
863
864 p_ptt = qed_ptt_acquire(p_hwfn);
865 if (!p_ptt)
866 return -EBUSY;
867
868 rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type);
869 if (rc)
870 goto out;
871
872 rc = qed_dcbx_get_params(p_hwfn, p_ptt, p_get, type);
873
874out:
875 qed_ptt_release(p_hwfn, p_ptt);
876 return rc;
877}
878
879static void
880qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
881 u32 *pfc, struct qed_dcbx_params *p_params)
882{
883 u8 pfc_map = 0;
884 int i;
885
886 if (p_params->pfc.willing)
887 *pfc |= DCBX_PFC_WILLING_MASK;
888 else
889 *pfc &= ~DCBX_PFC_WILLING_MASK;
890
891 if (p_params->pfc.enabled)
892 *pfc |= DCBX_PFC_ENABLED_MASK;
893 else
894 *pfc &= ~DCBX_PFC_ENABLED_MASK;
895
896 *pfc &= ~DCBX_PFC_CAPS_MASK;
897 *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_SHIFT;
898
899 for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
900 if (p_params->pfc.prio[i])
901 pfc_map |= BIT(i);
902
903 *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
904
905 DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc);
906}
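
The PFC bitmap carries one enable bit per priority, so enabling priorities
0, 3 and 4 yields pfc_map = 0x19. As a standalone helper (illustrative):

	#include <stdint.h>

	static uint8_t pfc_prio_bitmap(const uint8_t prio_enabled[8])
	{
		uint8_t map = 0;
		int i;

		for (i = 0; i < 8; i++)
			if (prio_enabled[i])
				map |= 1u << i;	/* BIT(i) */
		return map;
	}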
907
908static void
909qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
910 struct dcbx_ets_feature *p_ets,
911 struct qed_dcbx_params *p_params)
912{
913 u8 *bw_map, *tsa_map;
914 u32 val;
915 int i;
916
917 if (p_params->ets_willing)
918 p_ets->flags |= DCBX_ETS_WILLING_MASK;
919 else
920 p_ets->flags &= ~DCBX_ETS_WILLING_MASK;
921
922 if (p_params->ets_cbs)
923 p_ets->flags |= DCBX_ETS_CBS_MASK;
924 else
925 p_ets->flags &= ~DCBX_ETS_CBS_MASK;
926
927 if (p_params->ets_enabled)
928 p_ets->flags |= DCBX_ETS_ENABLED_MASK;
929 else
930 p_ets->flags &= ~DCBX_ETS_ENABLED_MASK;
931
932 p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK;
933 p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_SHIFT;
934
935 bw_map = (u8 *)&p_ets->tc_bw_tbl[0];
936 tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0];
937 p_ets->pri_tc_tbl[0] = 0;
938 for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
939 bw_map[i] = p_params->ets_tc_bw_tbl[i];
940 tsa_map[i] = p_params->ets_tc_tsa_tbl[i];
941 /* Copy the priority value to the corresponding 4 bits in the
942 * traffic class table.
943 */
944 val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
945 p_ets->pri_tc_tbl[0] |= val;
946 }
947 p_ets->pri_tc_tbl[0] = cpu_to_be32(p_ets->pri_tc_tbl[0]);
948 for (i = 0; i < 2; i++) {
949 p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]);
950 p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]);
951 }
952}
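
The nibble packing gives priority i bits [31 - 4i .. 28 - 4i], so the
table {0, 1, 2, 3, 4, 5, 6, 7} packs to 0x01234567 before the
cpu_to_be32() conversion. As standalone arithmetic (illustrative):

	#include <stdint.h>

	static uint32_t pack_pri_tc_tbl(const uint8_t pri[8])
	{
		uint32_t word = 0;
		int i;

		for (i = 0; i < 8; i++)
			word |= (uint32_t)pri[i] << ((7 - i) * 4);
		return word;
	}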
953
954static void
955qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
956 struct dcbx_app_priority_feature *p_app,
957 struct qed_dcbx_params *p_params)
958{
959 u32 *entry;
960 int i;
961
962 if (p_params->app_willing)
963 p_app->flags |= DCBX_APP_WILLING_MASK;
964 else
965 p_app->flags &= ~DCBX_APP_WILLING_MASK;
966
967 if (p_params->app_valid)
968 p_app->flags |= DCBX_APP_ENABLED_MASK;
969 else
970 p_app->flags &= ~DCBX_APP_ENABLED_MASK;
971
972 p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK;
973 p_app->flags |= (u32)p_params->num_app_entries <<
974 DCBX_APP_NUM_ENTRIES_SHIFT;
975
976 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
977 entry = &p_app->app_pri_tbl[i].entry;
978 *entry &= ~DCBX_APP_SF_MASK;
979 if (p_params->app_entry[i].ethtype)
980 *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
981 DCBX_APP_SF_SHIFT);
982 else
983 *entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT);
984 *entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
985 *entry |= ((u32)p_params->app_entry[i].proto_id <<
986 DCBX_APP_PROTOCOL_ID_SHIFT);
987 *entry &= ~DCBX_APP_PRI_MAP_MASK;
988 *entry |= ((u32)(p_params->app_entry[i].prio) <<
989 DCBX_APP_PRI_MAP_SHIFT);
990 }
991}
992
993static void
994qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
995 struct dcbx_local_params *local_admin,
996 struct qed_dcbx_set *params)
997{
998 local_admin->flags = 0;
999 memcpy(&local_admin->features,
1000 &p_hwfn->p_dcbx_info->operational.features,
1001 sizeof(local_admin->features));
1002
1003 if (params->enabled)
1004 local_admin->config = params->ver_num;
1005 else
1006 local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
1007
1008 if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG)
1009 qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
1010 &params->config.params);
1011
1012 if (params->override_flags & QED_DCBX_OVERRIDE_ETS_CFG)
1013 qed_dcbx_set_ets_data(p_hwfn, &local_admin->features.ets,
1014 &params->config.params);
1015
1016 if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG)
1017 qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
1018 &params->config.params);
1019}
1020
1021int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1022 struct qed_dcbx_set *params, bool hw_commit)
1023{
1024 struct dcbx_local_params local_admin;
1025 struct qed_dcbx_mib_meta_data data;
1026 u32 resp = 0, param = 0;
1027 int rc = 0;
1028
1029 if (!hw_commit) {
1030 memcpy(&p_hwfn->p_dcbx_info->set, params,
1031 sizeof(struct qed_dcbx_set));
1032 return 0;
1033 }
1034
1035 /* clear the set-params cache */
1036 memset(&p_hwfn->p_dcbx_info->set, 0, sizeof(p_hwfn->p_dcbx_info->set));
1037
1038 memset(&local_admin, 0, sizeof(local_admin));
1039 qed_dcbx_set_local_params(p_hwfn, &local_admin, params);
1040
1041 data.addr = p_hwfn->mcp_info->port_addr +
1042 offsetof(struct public_port, local_admin_dcbx_mib);
1043 data.local_admin = &local_admin;
1044 data.size = sizeof(struct dcbx_local_params);
1045 qed_memcpy_to(p_hwfn, p_ptt, data.addr, data.local_admin, data.size);
1046
1047 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX,
1048 1 << DRV_MB_PARAM_LLDP_SEND_SHIFT, &resp, &param);
1049 if (rc)
1050 DP_NOTICE(p_hwfn, "Failed to send DCBX update request\n");
1051
1052 return rc;
1053}
1054
1055int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
1056 struct qed_dcbx_set *params)
1057{
1058 struct qed_dcbx_get *dcbx_info;
1059 int rc;
1060
1061 if (p_hwfn->p_dcbx_info->set.config.valid) {
1062 memcpy(params, &p_hwfn->p_dcbx_info->set,
1063 sizeof(struct qed_dcbx_set));
1064 return 0;
1065 }
1066
1067 dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
1068 if (!dcbx_info) {
1069 DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_get\n");
1070 return -ENOMEM;
1071 }
1072
1073 rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
1074 if (rc) {
1075 kfree(dcbx_info);
1076 return rc;
1077 }
1078
1079 p_hwfn->p_dcbx_info->set.override_flags = 0;
1080 p_hwfn->p_dcbx_info->set.ver_num = DCBX_CONFIG_VERSION_DISABLED;
1081 if (dcbx_info->operational.cee)
1082 p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_CEE;
1083 if (dcbx_info->operational.ieee)
1084 p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
1085
1086 p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
1087 memcpy(&p_hwfn->p_dcbx_info->set.config.params,
1088 &dcbx_info->operational.params,
1089 sizeof(struct qed_dcbx_admin_params));
1090 p_hwfn->p_dcbx_info->set.config.valid = true;
1091
1092 memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
1093
1094 kfree(dcbx_info);
1095
1096 return 0;
1097}
1098
1099static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
1100 enum qed_mib_read_type type)
1101{
1102 struct qed_dcbx_get *dcbx_info;
1103
1104 dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
1105 if (!dcbx_info) {
1106 DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n");
1107 return NULL;
1108 }
1109
1110 if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
1111 kfree(dcbx_info);
1112 return NULL;
1113 }
1114
1115 if ((type == QED_DCBX_OPERATIONAL_MIB) &&
1116 !dcbx_info->operational.enabled) {
1117 DP_INFO(hwfn, "DCBX is not enabled/operational\n");
1118 kfree(dcbx_info);
1119 return NULL;
1120 }
1121
1122 return dcbx_info;
1123}
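
Every dcbnl getter below follows the same ownership contract: NULL on
failure, otherwise the caller must kfree() the returned buffer. An
illustrative caller (hypothetical, mirroring the shape of the getters):

	static u8 example_getter(struct qed_dev *cdev)
	{
		struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
		struct qed_dcbx_get *info;
		u8 val;

		info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
		if (!info)
			return 0;

		val = info->operational.params.pfc.max_tc;
		kfree(info);
		return val;
	}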
1124
1125static u8 qed_dcbnl_getstate(struct qed_dev *cdev)
1126{
1127 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1128 struct qed_dcbx_get *dcbx_info;
1129 bool enabled;
1130
1131 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
1132 if (!dcbx_info)
1133 return 0;
1134
1135 enabled = dcbx_info->operational.enabled;
1136 DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", enabled);
1137 kfree(dcbx_info);
1138
1139 return enabled;
1140}
1141
1142static u8 qed_dcbnl_setstate(struct qed_dev *cdev, u8 state)
1143{
1144 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1145 struct qed_dcbx_set dcbx_set;
1146 struct qed_ptt *ptt;
1147 int rc;
1148
1149 DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", state);
1150
1151 memset(&dcbx_set, 0, sizeof(dcbx_set));
1152 rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
1153 if (rc)
1154 return 1;
1155
1156 dcbx_set.enabled = !!state;
1157
1158 ptt = qed_ptt_acquire(hwfn);
1159 if (!ptt)
1160 return 1;
1161
1162 rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
1163
1164 qed_ptt_release(hwfn, ptt);
1165
1166 return rc ? 1 : 0;
1167}
1168
1169static void qed_dcbnl_getpgtccfgtx(struct qed_dev *cdev, int tc, u8 *prio_type,
1170 u8 *pgid, u8 *bw_pct, u8 *up_map)
1171{
1172 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1173 struct qed_dcbx_get *dcbx_info;
1174
1175 DP_VERBOSE(hwfn, QED_MSG_DCB, "tc = %d\n", tc);
1176 *prio_type = *pgid = *bw_pct = *up_map = 0;
1177 if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) {
1178 DP_INFO(hwfn, "Invalid tc %d\n", tc);
1179 return;
1180 }
1181
1182 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
1183 if (!dcbx_info)
1184 return;
1185
1186 *pgid = dcbx_info->operational.params.ets_pri_tc_tbl[tc];
1187 kfree(dcbx_info);
1188}
1189
1190static void qed_dcbnl_getpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 *bw_pct)
1191{
1192 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1193 struct qed_dcbx_get *dcbx_info;
1194
1195 *bw_pct = 0;
1196 DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d\n", pgid);
1197 if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) {
1198 DP_INFO(hwfn, "Invalid pgid %d\n", pgid);
1199 return;
1200 }
1201
1202 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
1203 if (!dcbx_info)
1204 return;
1205
1206 *bw_pct = dcbx_info->operational.params.ets_tc_bw_tbl[pgid];
1207 DP_VERBOSE(hwfn, QED_MSG_DCB, "bw_pct = %d\n", *bw_pct);
1208 kfree(dcbx_info);
1209}
1210
1211static void qed_dcbnl_getpgtccfgrx(struct qed_dev *cdev, int tc, u8 *prio,
1212 u8 *bwg_id, u8 *bw_pct, u8 *up_map)
1213{
1214 DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
1215 *prio = *bwg_id = *bw_pct = *up_map = 0;
1216}
1217
1218static void qed_dcbnl_getpgbwgcfgrx(struct qed_dev *cdev,
1219 int bwg_id, u8 *bw_pct)
1220{
1221 DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
1222 *bw_pct = 0;
1223}
1224
1225static void qed_dcbnl_getpfccfg(struct qed_dev *cdev,
1226 int priority, u8 *setting)
1227{
1228 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1229 struct qed_dcbx_get *dcbx_info;
1230
1231 DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d\n", priority);
1232 if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) {
1233 DP_INFO(hwfn, "Invalid priority %d\n", priority);
1234 return;
1235 }
1236
1237 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
1238 if (!dcbx_info)
1239 return;
1240
1241 *setting = dcbx_info->operational.params.pfc.prio[priority];
1242 DP_VERBOSE(hwfn, QED_MSG_DCB, "setting = %d\n", *setting);
1243 kfree(dcbx_info);
1244}
1245
1246static void qed_dcbnl_setpfccfg(struct qed_dev *cdev, int priority, u8 setting)
1247{
1248 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1249 struct qed_dcbx_set dcbx_set;
1250 struct qed_ptt *ptt;
1251 int rc;
1252
1253 DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d setting = %d\n",
1254 priority, setting);
1255 if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) {
1256 DP_INFO(hwfn, "Invalid priority %d\n", priority);
1257 return;
1258 }
1259
1260 memset(&dcbx_set, 0, sizeof(dcbx_set));
1261 rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
1262 if (rc)
1263 return;
1264
1265 dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
1266 dcbx_set.config.params.pfc.prio[priority] = !!setting;
1267
1268 ptt = qed_ptt_acquire(hwfn);
1269 if (!ptt)
1270 return;
1271
1272 rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
1273
1274 qed_ptt_release(hwfn, ptt);
1275}
1276
1277static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap)
1278{
1279 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1280 struct qed_dcbx_get *dcbx_info;
1281 int rc = 0;
1282
1283 DP_VERBOSE(hwfn, QED_MSG_DCB, "capid = %d\n", capid);
1284 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
1285 if (!dcbx_info)
1286 return 1;
1287
1288 switch (capid) {
1289 case DCB_CAP_ATTR_PG:
1290 case DCB_CAP_ATTR_PFC:
1291 case DCB_CAP_ATTR_UP2TC:
1292 case DCB_CAP_ATTR_GSP:
1293 *cap = true;
1294 break;
1295 case DCB_CAP_ATTR_PG_TCS:
1296 case DCB_CAP_ATTR_PFC_TCS:
1297 *cap = 0x80;
1298 break;
1299 case DCB_CAP_ATTR_DCBX:
1300 *cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE |
1301 DCB_CAP_DCBX_VER_IEEE);
1302 break;
1303 default:
1304 *cap = false;
1305 rc = 1;
1306 }
1307
1308 DP_VERBOSE(hwfn, QED_MSG_DCB, "id = %d caps = %d\n", capid, *cap);
1309 kfree(dcbx_info);
1310
1311 return rc;
1312}
1313
1314static int qed_dcbnl_getnumtcs(struct qed_dev *cdev, int tcid, u8 *num)
1315{
1316 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1317 struct qed_dcbx_get *dcbx_info;
1318 int rc = 0;
1319
1320 DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d\n", tcid);
1321 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
1322 if (!dcbx_info)
1323 return -EINVAL;
1324
1325 switch (tcid) {
1326 case DCB_NUMTCS_ATTR_PG:
1327 *num = dcbx_info->operational.params.max_ets_tc;
1328 break;
1329 case DCB_NUMTCS_ATTR_PFC:
1330 *num = dcbx_info->operational.params.pfc.max_tc;
1331 break;
1332 default:
1333 rc = -EINVAL;
1334 }
1335
1336 kfree(dcbx_info);
1337 DP_VERBOSE(hwfn, QED_MSG_DCB, "numtcs = %d\n", *num);
1338
1339 return rc;
1340}
1341
1342static u8 qed_dcbnl_getpfcstate(struct qed_dev *cdev)
1343{
1344 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1345 struct qed_dcbx_get *dcbx_info;
1346 bool enabled;
1347
1348 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
1349 if (!dcbx_info)
1350 return 0;
1351
1352 enabled = dcbx_info->operational.params.pfc.enabled;
1353 DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d\n", enabled);
1354 kfree(dcbx_info);
1355
1356 return enabled;
1357}
1358
1359static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev)
1360{
1361 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1362 struct qed_dcbx_get *dcbx_info;
1363 u8 mode = 0;
1364
1365 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
1366 if (!dcbx_info)
1367 return 0;
1368
1369 if (dcbx_info->operational.enabled)
1370 mode |= DCB_CAP_DCBX_LLD_MANAGED;
1371 if (dcbx_info->operational.ieee)
1372 mode |= DCB_CAP_DCBX_VER_IEEE;
1373 if (dcbx_info->operational.cee)
1374 mode |= DCB_CAP_DCBX_VER_CEE;
1375
1376 DP_VERBOSE(hwfn, QED_MSG_DCB, "dcb mode = %d\n", mode);
1377 kfree(dcbx_info);
1378
1379 return mode;
1380}
1381
1382static void qed_dcbnl_setpgtccfgtx(struct qed_dev *cdev,
1383 int tc,
1384 u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
1385{
1386 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1387 struct qed_dcbx_set dcbx_set;
1388 struct qed_ptt *ptt;
1389 int rc;
1390
1391 DP_VERBOSE(hwfn, QED_MSG_DCB,
1392 "tc = %d pri_type = %d pgid = %d bw_pct = %d up_map = %d\n",
1393 tc, pri_type, pgid, bw_pct, up_map);
1394
1395 if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) {
1396 DP_INFO(hwfn, "Invalid tc %d\n", tc);
1397 return;
1398 }
1399
1400 memset(&dcbx_set, 0, sizeof(dcbx_set));
1401 rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
1402 if (rc)
1403 return;
1404
1405 dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
1406 dcbx_set.config.params.ets_pri_tc_tbl[tc] = pgid;
1407
1408 ptt = qed_ptt_acquire(hwfn);
1409 if (!ptt)
1410 return;
1411
1412 rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
1413
1414 qed_ptt_release(hwfn, ptt);
1415}
1416
1417static void qed_dcbnl_setpgtccfgrx(struct qed_dev *cdev, int prio,
1418 u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
1419{
1420 DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
1421}
1422
1423static void qed_dcbnl_setpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 bw_pct)
1424{
1425 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1426 struct qed_dcbx_set dcbx_set;
1427 struct qed_ptt *ptt;
1428 int rc;
1429
1430 DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d bw_pct = %d\n", pgid, bw_pct);
1431 if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) {
1432 DP_INFO(hwfn, "Invalid pgid %d\n", pgid);
1433 return;
1434 }
1435
1436 memset(&dcbx_set, 0, sizeof(dcbx_set));
1437 rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
1438 if (rc)
1439 return;
1440
1441 dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
1442 dcbx_set.config.params.ets_tc_bw_tbl[pgid] = bw_pct;
1443
1444 ptt = qed_ptt_acquire(hwfn);
1445 if (!ptt)
1446 return;
1447
1448 rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
1449
1450 qed_ptt_release(hwfn, ptt);
1451}
1452
1453static void qed_dcbnl_setpgbwgcfgrx(struct qed_dev *cdev, int pgid, u8 bw_pct)
1454{
1455 DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
1456}
1457
1458static u8 qed_dcbnl_setall(struct qed_dev *cdev)
1459{
1460 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1461 struct qed_dcbx_set dcbx_set;
1462 struct qed_ptt *ptt;
1463 int rc;
1464
1465 memset(&dcbx_set, 0, sizeof(dcbx_set));
1466 rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
1467 if (rc)
1468 return 1;
1469
1470 ptt = qed_ptt_acquire(hwfn);
1471 if (!ptt)
1472 return 1;
1473
1474 rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 1);
1475
1476 qed_ptt_release(hwfn, ptt);
1477
1478 return rc;
1479}
1480
1481static int qed_dcbnl_setnumtcs(struct qed_dev *cdev, int tcid, u8 num)
1482{
1483 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1484 struct qed_dcbx_set dcbx_set;
1485 struct qed_ptt *ptt;
1486 int rc;
1487
1488 DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d num = %d\n", tcid, num);
1489 memset(&dcbx_set, 0, sizeof(dcbx_set));
1490 rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
1491 if (rc)
1492 return 1;
1493
1494 switch (tcid) {
1495 case DCB_NUMTCS_ATTR_PG:
1496 dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
1497 dcbx_set.config.params.max_ets_tc = num;
1498 break;
1499 case DCB_NUMTCS_ATTR_PFC:
1500 dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
1501 dcbx_set.config.params.pfc.max_tc = num;
1502 break;
1503 default:
1504 DP_INFO(hwfn, "Invalid tcid %d\n", tcid);
1505 return -EINVAL;
1506 }
1507
1508 ptt = qed_ptt_acquire(hwfn);
1509 if (!ptt)
1510 return -EINVAL;
1511
1512 rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
1513
1514 qed_ptt_release(hwfn, ptt);
1515
1516 return rc;
1517}
1518
1519static void qed_dcbnl_setpfcstate(struct qed_dev *cdev, u8 state)
1520{
1521 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1522 struct qed_dcbx_set dcbx_set;
1523 struct qed_ptt *ptt;
1524 int rc;
1525
1526 DP_VERBOSE(hwfn, QED_MSG_DCB, "new state = %d\n", state);
1527
1528 memset(&dcbx_set, 0, sizeof(dcbx_set));
1529 rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
1530 if (rc)
1531 return;
1532
1533 dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
1534 dcbx_set.config.params.pfc.enabled = !!state;
1535
1536 ptt = qed_ptt_acquire(hwfn);
1537 if (!ptt)
1538 return;
1539
1540 rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
1541
1542 qed_ptt_release(hwfn, ptt);
1543}
1544
1545static int qed_dcbnl_getapp(struct qed_dev *cdev, u8 idtype, u16 idval)
1546{
1547 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1548 struct qed_dcbx_get *dcbx_info;
1549 struct qed_app_entry *entry;
1550 bool ethtype;
1551 u8 prio = 0;
1552 int i;
1553
1554 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
1555 if (!dcbx_info)
1556 return -EINVAL;
1557
1558 ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE);
1559 for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
1560 entry = &dcbx_info->operational.params.app_entry[i];
1561 if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) {
1562 prio = entry->prio;
1563 break;
1564 }
1565 }
1566
1567 if (i == QED_DCBX_MAX_APP_PROTOCOL) {
1568 DP_ERR(cdev, "App entry (%d, %d) not found\n", idtype, idval);
1569 kfree(dcbx_info);
1570 return -EINVAL;
1571 }
1572
1573 kfree(dcbx_info);
1574
1575 return prio;
1576}
1577
1578static int qed_dcbnl_setapp(struct qed_dev *cdev,
1579 u8 idtype, u16 idval, u8 pri_map)
1580{
1581 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1582 struct qed_dcbx_set dcbx_set;
1583 struct qed_app_entry *entry;
1584 struct qed_ptt *ptt;
1585 bool ethtype;
1586 int rc, i;
1587
1588 memset(&dcbx_set, 0, sizeof(dcbx_set));
1589 rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
1590 if (rc)
1591 return -EINVAL;
1592
1593 ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE);
1594 for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
1595 entry = &dcbx_set.config.params.app_entry[i];
1596 if ((entry->ethtype == ethtype) && (entry->proto_id == idval))
1597 break;
1598 /* First empty slot */
1599 if (!entry->proto_id)
1600 break;
1601 }
1602
1603 if (i == QED_DCBX_MAX_APP_PROTOCOL) {
1604 DP_ERR(cdev, "App table is full\n");
1605 return -EBUSY;
1606 }
1607
1608 dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
1609 dcbx_set.config.params.app_entry[i].ethtype = ethtype;
1610 dcbx_set.config.params.app_entry[i].proto_id = idval;
1611 dcbx_set.config.params.app_entry[i].prio = pri_map;
1612
1613 ptt = qed_ptt_acquire(hwfn);
1614 if (!ptt)
1615 return -EBUSY;
1616
1617 rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
1618
1619 qed_ptt_release(hwfn, ptt);
1620
1621 return rc;
1622}
1623
1624static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode)
1625{
1626 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1627 struct qed_dcbx_set dcbx_set;
1628 struct qed_ptt *ptt;
1629 int rc;
1630
1631 DP_VERBOSE(hwfn, QED_MSG_DCB, "new mode = %x\n", mode);
1632
1633 if (!(mode & DCB_CAP_DCBX_VER_IEEE) && !(mode & DCB_CAP_DCBX_VER_CEE)) {
1634 DP_INFO(hwfn, "Allowed mode is cee, ieee or both\n");
1635 return 1;
1636 }
1637
1638 memset(&dcbx_set, 0, sizeof(dcbx_set));
1639 rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
1640 if (rc)
1641 return 1;
1642
1643 dcbx_set.ver_num = 0;
1644 if (mode & DCB_CAP_DCBX_VER_CEE) {
1645 dcbx_set.ver_num |= DCBX_CONFIG_VERSION_CEE;
1646 dcbx_set.enabled = true;
1647 }
1648
1649 if (mode & DCB_CAP_DCBX_VER_IEEE) {
1650 dcbx_set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
1651 dcbx_set.enabled = true;
1652 }
1653
1654 ptt = qed_ptt_acquire(hwfn);
1655 if (!ptt)
1656 return 1;
1657
1658 rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
1659
1660 qed_ptt_release(hwfn, ptt);
1661
1662 return 0;
1663}
1664
1665static u8 qed_dcbnl_getfeatcfg(struct qed_dev *cdev, int featid, u8 *flags)
1666{
1667 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1668 struct qed_dcbx_get *dcbx_info;
1669
1670 DP_VERBOSE(hwfn, QED_MSG_DCB, "Feature id = %d\n", featid);
1671 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
1672 if (!dcbx_info)
1673 return 1;
1674
1675 *flags = 0;
1676 switch (featid) {
1677 case DCB_FEATCFG_ATTR_PG:
1678 if (dcbx_info->operational.params.ets_enabled)
1679 *flags = DCB_FEATCFG_ENABLE;
1680 else
1681 *flags = DCB_FEATCFG_ERROR;
1682 break;
1683 case DCB_FEATCFG_ATTR_PFC:
1684 if (dcbx_info->operational.params.pfc.enabled)
1685 *flags = DCB_FEATCFG_ENABLE;
1686 else
1687 *flags = DCB_FEATCFG_ERROR;
1688 break;
1689 case DCB_FEATCFG_ATTR_APP:
1690 if (dcbx_info->operational.params.app_valid)
1691 *flags = DCB_FEATCFG_ENABLE;
1692 else
1693 *flags = DCB_FEATCFG_ERROR;
1694 break;
1695 default:
1696 DP_INFO(hwfn, "Invalid feature-ID %d\n", featid);
1697 kfree(dcbx_info);
1698 return 1;
1699 }
1700
1701 DP_VERBOSE(hwfn, QED_MSG_DCB, "flags = %d\n", *flags);
1702 kfree(dcbx_info);
1703
1704 return 0;
1705}
1706
1707static u8 qed_dcbnl_setfeatcfg(struct qed_dev *cdev, int featid, u8 flags)
1708{
1709 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1710 struct qed_dcbx_set dcbx_set;
1711 bool enabled, willing;
1712 struct qed_ptt *ptt;
1713 int rc;
1714
1715 DP_VERBOSE(hwfn, QED_MSG_DCB, "featid = %d flags = %d\n",
1716 featid, flags);
1717 memset(&dcbx_set, 0, sizeof(dcbx_set));
1718 rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
1719 if (rc)
1720 return 1;
1721
1722 enabled = !!(flags & DCB_FEATCFG_ENABLE);
1723 willing = !!(flags & DCB_FEATCFG_WILLING);
1724 switch (featid) {
1725 case DCB_FEATCFG_ATTR_PG:
1726 dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
1727 dcbx_set.config.params.ets_enabled = enabled;
1728 dcbx_set.config.params.ets_willing = willing;
1729 break;
1730 case DCB_FEATCFG_ATTR_PFC:
1731 dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
1732 dcbx_set.config.params.pfc.enabled = enabled;
1733 dcbx_set.config.params.pfc.willing = willing;
1734 break;
1735 case DCB_FEATCFG_ATTR_APP:
1736 dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
1737 dcbx_set.config.params.app_willing = willing;
1738 break;
1739 default:
1740 DP_INFO(hwfn, "Invalid feature-ID %d\n", featid);
1741 return 1;
1742 }
1743
1744 ptt = qed_ptt_acquire(hwfn);
1745 if (!ptt)
1746 return 1;
1747
1748 rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
1749
1750 qed_ptt_release(hwfn, ptt);
1751
1752 return 0;
1753}
1754
1755static int qed_dcbnl_peer_getappinfo(struct qed_dev *cdev,
1756 struct dcb_peer_app_info *info,
1757 u16 *app_count)
1758{
1759 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1760 struct qed_dcbx_get *dcbx_info;
1761
1762 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
1763 if (!dcbx_info)
1764 return -EINVAL;
1765
1766 info->willing = dcbx_info->remote.params.app_willing;
1767 info->error = dcbx_info->remote.params.app_error;
1768 *app_count = dcbx_info->remote.params.num_app_entries;
1769 kfree(dcbx_info);
1770
1771 return 0;
1772}
1773
1774static int qed_dcbnl_peer_getapptable(struct qed_dev *cdev,
1775 struct dcb_app *table)
1776{
1777 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1778 struct qed_dcbx_get *dcbx_info;
1779 int i;
1780
1781 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
1782 if (!dcbx_info)
1783 return -EINVAL;
1784
1785 for (i = 0; i < dcbx_info->remote.params.num_app_entries; i++) {
1786 if (dcbx_info->remote.params.app_entry[i].ethtype)
1787 table[i].selector = DCB_APP_IDTYPE_ETHTYPE;
1788 else
1789 table[i].selector = DCB_APP_IDTYPE_PORTNUM;
1790 table[i].priority = dcbx_info->remote.params.app_entry[i].prio;
1791 table[i].protocol =
1792 dcbx_info->remote.params.app_entry[i].proto_id;
1793 }
1794
1795 kfree(dcbx_info);
1796
1797 return 0;
1798}
1799
1800static int qed_dcbnl_cee_peer_getpfc(struct qed_dev *cdev, struct cee_pfc *pfc)
1801{
1802 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1803 struct qed_dcbx_get *dcbx_info;
1804 int i;
1805
1806 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
1807 if (!dcbx_info)
1808 return -EINVAL;
1809
1810 for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
1811 if (dcbx_info->remote.params.pfc.prio[i])
1812 pfc->pfc_en |= BIT(i);
1813
1814 pfc->tcs_supported = dcbx_info->remote.params.pfc.max_tc;
1815 DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d tcs_supported = %d\n",
1816 pfc->pfc_en, pfc->tcs_supported);
1817 kfree(dcbx_info);
1818
1819 return 0;
1820}
1821
1822static int qed_dcbnl_cee_peer_getpg(struct qed_dev *cdev, struct cee_pg *pg)
1823{
1824 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1825 struct qed_dcbx_get *dcbx_info;
1826 int i;
1827
1828 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
1829 if (!dcbx_info)
1830 return -EINVAL;
1831
1832 pg->willing = dcbx_info->remote.params.ets_willing;
1833 for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
1834 pg->pg_bw[i] = dcbx_info->remote.params.ets_tc_bw_tbl[i];
1835 pg->prio_pg[i] = dcbx_info->remote.params.ets_pri_tc_tbl[i];
1836 }
1837
1838 DP_VERBOSE(hwfn, QED_MSG_DCB, "willing = %d", pg->willing);
1839 kfree(dcbx_info);
1840
1841 return 0;
1842}
1843
1844static int qed_dcbnl_get_ieee_pfc(struct qed_dev *cdev,
1845 struct ieee_pfc *pfc, bool remote)
1846{
1847 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1848 struct qed_dcbx_params *params;
1849 struct qed_dcbx_get *dcbx_info;
1850 int rc, i;
1851
1852 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
1853 if (!dcbx_info)
1854 return -EINVAL;
1855
1856 if (!dcbx_info->operational.ieee) {
1857 DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
     kfree(dcbx_info);	/* don't leak the query buffer on this path */
1858 return -EINVAL;
1859 }
1860
1861 if (remote) {
1862 memset(dcbx_info, 0, sizeof(*dcbx_info));
1863 rc = qed_dcbx_query_params(hwfn, dcbx_info,
1864 QED_DCBX_REMOTE_MIB);
1865 if (rc) {
1866 kfree(dcbx_info);
1867 return -EINVAL;
1868 }
1869
1870 params = &dcbx_info->remote.params;
1871 } else {
1872 params = &dcbx_info->operational.params;
1873 }
1874
1875 pfc->pfc_cap = params->pfc.max_tc;
1876 pfc->pfc_en = 0;
1877 for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
1878 if (params->pfc.prio[i])
1879 pfc->pfc_en |= BIT(i);
1880
1881 kfree(dcbx_info);
1882
1883 return 0;
1884}
1885
1886static int qed_dcbnl_ieee_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
1887{
1888 return qed_dcbnl_get_ieee_pfc(cdev, pfc, false);
1889}
1890
1891static int qed_dcbnl_ieee_setpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
1892{
1893 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1894 struct qed_dcbx_get *dcbx_info;
1895 struct qed_dcbx_set dcbx_set;
1896 struct qed_ptt *ptt;
1897 int rc, i;
1898
1899 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
1900 if (!dcbx_info)
1901 return -EINVAL;
1902
1903 if (!dcbx_info->operational.ieee) {
1904 DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
1905 kfree(dcbx_info);
1906 return -EINVAL;
1907 }
1908
1909 kfree(dcbx_info);
1910
1911 memset(&dcbx_set, 0, sizeof(dcbx_set));
1912 rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
1913 if (rc)
1914 return -EINVAL;
1915
1916 dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
1917 for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
1918 dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i));
1919
1920 ptt = qed_ptt_acquire(hwfn);
1921 if (!ptt)
1922 return -EINVAL;
1923
1924 rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
1925
1926 qed_ptt_release(hwfn, ptt);
1927
1928 return rc;
1929}
1930
1931static int qed_dcbnl_get_ieee_ets(struct qed_dev *cdev,
1932 struct ieee_ets *ets, bool remote)
1933{
1934 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1935 struct qed_dcbx_get *dcbx_info;
1936 struct qed_dcbx_params *params;
1937 int rc;
1938
1939 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
1940 if (!dcbx_info)
1941 return -EINVAL;
1942
1943 if (!dcbx_info->operational.ieee) {
1944 DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
1945 kfree(dcbx_info);
1946 return -EINVAL;
1947 }
1948
1949 if (remote) {
1950 memset(dcbx_info, 0, sizeof(*dcbx_info));
1951 rc = qed_dcbx_query_params(hwfn, dcbx_info,
1952 QED_DCBX_REMOTE_MIB);
1953 if (rc) {
1954 kfree(dcbx_info);
1955 return -EINVAL;
1956 }
1957
1958 params = &dcbx_info->remote.params;
1959 } else {
1960 params = &dcbx_info->operational.params;
1961 }
1962
1963 ets->ets_cap = params->max_ets_tc;
1964 ets->willing = params->ets_willing;
1965 ets->cbs = params->ets_cbs;
1966 memcpy(ets->tc_tx_bw, params->ets_tc_bw_tbl, sizeof(ets->tc_tx_bw));
1967 memcpy(ets->tc_tsa, params->ets_tc_tsa_tbl, sizeof(ets->tc_tsa));
1968 memcpy(ets->prio_tc, params->ets_pri_tc_tbl, sizeof(ets->prio_tc));
1969 kfree(dcbx_info);
1970
1971 return 0;
1972}
1973
1974static int qed_dcbnl_ieee_getets(struct qed_dev *cdev, struct ieee_ets *ets)
1975{
1976 return qed_dcbnl_get_ieee_ets(cdev, ets, false);
1977}
1978
1979static int qed_dcbnl_ieee_setets(struct qed_dev *cdev, struct ieee_ets *ets)
1980{
1981 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1982 struct qed_dcbx_get *dcbx_info;
1983 struct qed_dcbx_set dcbx_set;
1984 struct qed_ptt *ptt;
1985 int rc;
1986
1987 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
1988 if (!dcbx_info)
1989 return -EINVAL;
1990
1991 if (!dcbx_info->operational.ieee) {
1992 DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
1993 kfree(dcbx_info);
1994 return -EINVAL;
1995 }
1996
1997 kfree(dcbx_info);
1998
1999 memset(&dcbx_set, 0, sizeof(dcbx_set));
2000 rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
2001 if (rc)
2002 return -EINVAL;
2003
2004 dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
2005 dcbx_set.config.params.max_ets_tc = ets->ets_cap;
2006 dcbx_set.config.params.ets_willing = ets->willing;
2007 dcbx_set.config.params.ets_cbs = ets->cbs;
2008 memcpy(dcbx_set.config.params.ets_tc_bw_tbl, ets->tc_tx_bw,
2009 sizeof(ets->tc_tx_bw));
2010 memcpy(dcbx_set.config.params.ets_tc_tsa_tbl, ets->tc_tsa,
2011 sizeof(ets->tc_tsa));
2012 memcpy(dcbx_set.config.params.ets_pri_tc_tbl, ets->prio_tc,
2013 sizeof(ets->prio_tc));
2014
2015 ptt = qed_ptt_acquire(hwfn);
2016 if (!ptt)
2017 return -EINVAL;
2018
2019 rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
2020
2021 qed_ptt_release(hwfn, ptt);
2022
2023 return rc;
2024}
2025
2026int qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets)
2027{
2028 return qed_dcbnl_get_ieee_ets(cdev, ets, true);
2029}
2030
2031int qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
2032{
2033 return qed_dcbnl_get_ieee_pfc(cdev, pfc, true);
2034}
2035
2036int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
2037{
2038 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2039 struct qed_dcbx_get *dcbx_info;
2040 struct qed_app_entry *entry;
2041 bool ethtype;
2042 u8 prio = 0;
2043 int i;
2044
2045 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
2046 if (!dcbx_info)
2047 return -EINVAL;
2048
2049 if (!dcbx_info->operational.ieee) {
2050 DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
2051 kfree(dcbx_info);
2052 return -EINVAL;
2053 }
2054
2055 /* ieee defines the selector field value for ethertype to be 1 */
2056 ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE);
2057 for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
2058 entry = &dcbx_info->operational.params.app_entry[i];
2059 if ((entry->ethtype == ethtype) &&
2060 (entry->proto_id == app->protocol)) {
2061 prio = entry->prio;
2062 break;
2063 }
2064 }
2065
2066 if (i == QED_DCBX_MAX_APP_PROTOCOL) {
2067 DP_ERR(cdev, "App entry (%d, %d) not found\n", app->selector,
2068 app->protocol);
2069 kfree(dcbx_info);
2070 return -EINVAL;
2071 }
2072
2073 app->priority = ffs(prio) - 1;
2074
2075 kfree(dcbx_info);
2076
2077 return 0;
2078}
2079
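/* Note on the 'prio' field: the app table stores a one-hot priority
 * bitmap rather than an index. qed_dcbnl_ieee_setapp() below writes
 * BIT(app->priority), and the getter above recovers the index with
 * ffs() - 1. Worked example (illustrative):
 *
 *	app->priority = 5  ->  entry->prio = BIT(5) = 0x20
 *	ffs(0x20) = 6      ->  app->priority = 6 - 1 = 5
 *
 * ffs() is 1-based, hence the '- 1'.
 */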
2080int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
2081{
2082 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2083 struct qed_dcbx_get *dcbx_info;
2084 struct qed_dcbx_set dcbx_set;
2085 struct qed_app_entry *entry;
2086 struct qed_ptt *ptt;
2087 bool ethtype;
2088 int rc, i;
2089
2090 if (app->priority < 0 || app->priority >= QED_MAX_PFC_PRIORITIES) {
2091 DP_INFO(hwfn, "Invalid priority %d\n", app->priority);
2092 return -EINVAL;
2093 }
2094
2095 dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
2096 if (!dcbx_info)
2097 return -EINVAL;
2098
2099 if (!dcbx_info->operational.ieee) {
2100 DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
2101 kfree(dcbx_info);
2102 return -EINVAL;
2103 }
2104
2105 kfree(dcbx_info);
2106
2107 memset(&dcbx_set, 0, sizeof(dcbx_set));
2108 rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
2109 if (rc)
2110 return -EINVAL;
2111
2112 /* ieee defines the selector field value for ethertype to be 1 */
2113 ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE);
2114 for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
2115 entry = &dcbx_set.config.params.app_entry[i];
2116 if ((entry->ethtype == ethtype) &&
2117 (entry->proto_id == app->protocol))
2118 break;
2119 /* First empty slot */
2120 if (!entry->proto_id)
2121 break;
2122 }
2123
2124 if (i == QED_DCBX_MAX_APP_PROTOCOL) {
2125 DP_ERR(cdev, "App table is full\n");
2126 return -EBUSY;
2127 }
2128
2129 dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
2130 dcbx_set.config.params.app_entry[i].ethtype = ethtype;
2131 dcbx_set.config.params.app_entry[i].proto_id = app->protocol;
2132 dcbx_set.config.params.app_entry[i].prio = BIT(app->priority);
2133
2134 ptt = qed_ptt_acquire(hwfn);
2135 if (!ptt)
2136 return -EBUSY;
2137
2138 rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
2139
2140 qed_ptt_release(hwfn, ptt);
2141
2142 return rc;
2143}
2144
2145const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass = {
2146 .getstate = qed_dcbnl_getstate,
2147 .setstate = qed_dcbnl_setstate,
2148 .getpgtccfgtx = qed_dcbnl_getpgtccfgtx,
2149 .getpgbwgcfgtx = qed_dcbnl_getpgbwgcfgtx,
2150 .getpgtccfgrx = qed_dcbnl_getpgtccfgrx,
2151 .getpgbwgcfgrx = qed_dcbnl_getpgbwgcfgrx,
2152 .getpfccfg = qed_dcbnl_getpfccfg,
2153 .setpfccfg = qed_dcbnl_setpfccfg,
2154 .getcap = qed_dcbnl_getcap,
2155 .getnumtcs = qed_dcbnl_getnumtcs,
2156 .getpfcstate = qed_dcbnl_getpfcstate,
2157 .getdcbx = qed_dcbnl_getdcbx,
2158 .setpgtccfgtx = qed_dcbnl_setpgtccfgtx,
2159 .setpgtccfgrx = qed_dcbnl_setpgtccfgrx,
2160 .setpgbwgcfgtx = qed_dcbnl_setpgbwgcfgtx,
2161 .setpgbwgcfgrx = qed_dcbnl_setpgbwgcfgrx,
2162 .setall = qed_dcbnl_setall,
2163 .setnumtcs = qed_dcbnl_setnumtcs,
2164 .setpfcstate = qed_dcbnl_setpfcstate,
2165 .setapp = qed_dcbnl_setapp,
2166 .setdcbx = qed_dcbnl_setdcbx,
2167 .setfeatcfg = qed_dcbnl_setfeatcfg,
2168 .getfeatcfg = qed_dcbnl_getfeatcfg,
2169 .getapp = qed_dcbnl_getapp,
2170 .peer_getappinfo = qed_dcbnl_peer_getappinfo,
2171 .peer_getapptable = qed_dcbnl_peer_getapptable,
2172 .cee_peer_getpfc = qed_dcbnl_cee_peer_getpfc,
2173 .cee_peer_getpg = qed_dcbnl_cee_peer_getpg,
2174 .ieee_getpfc = qed_dcbnl_ieee_getpfc,
2175 .ieee_setpfc = qed_dcbnl_ieee_setpfc,
2176 .ieee_getets = qed_dcbnl_ieee_getets,
2177 .ieee_setets = qed_dcbnl_ieee_setets,
2178 .ieee_peer_getpfc = qed_dcbnl_ieee_peer_getpfc,
2179 .ieee_peer_getets = qed_dcbnl_ieee_peer_getets,
2180 .ieee_getapp = qed_dcbnl_ieee_getapp,
2181 .ieee_setapp = qed_dcbnl_ieee_setapp,
2182};
2183
2184#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index e7f834dbda2d..9ba681643d05 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -33,6 +33,24 @@ struct qed_dcbx_app_data {
33 u8 tc; /* Traffic Class */ 33 u8 tc; /* Traffic Class */
34}; 34};
35 35
36#ifdef CONFIG_DCB
37#define QED_DCBX_VERSION_DISABLED 0
38#define QED_DCBX_VERSION_IEEE 1
39#define QED_DCBX_VERSION_CEE 2
40
41struct qed_dcbx_set {
42#define QED_DCBX_OVERRIDE_STATE BIT(0)
43#define QED_DCBX_OVERRIDE_PFC_CFG BIT(1)
44#define QED_DCBX_OVERRIDE_ETS_CFG BIT(2)
45#define QED_DCBX_OVERRIDE_APP_CFG BIT(3)
46#define QED_DCBX_OVERRIDE_DSCP_CFG BIT(4)
47 u32 override_flags;
48 bool enabled;
49 struct qed_dcbx_admin_params config;
50 u32 ver_num;
51};
52#endif
53
36struct qed_dcbx_results { 54struct qed_dcbx_results {
37 bool dcbx_enabled; 55 bool dcbx_enabled;
38 u8 pf_id; 56 u8 pf_id;
@@ -55,6 +73,9 @@ struct qed_dcbx_info {
55 struct qed_dcbx_results results; 73 struct qed_dcbx_results results;
56 struct dcbx_mib operational; 74 struct dcbx_mib operational;
57 struct dcbx_mib remote; 75 struct dcbx_mib remote;
76#ifdef CONFIG_DCB
77 struct qed_dcbx_set set;
78#endif
58 u8 dcbx_cap; 79 u8 dcbx_cap;
59}; 80};
60 81
@@ -67,6 +88,13 @@ struct qed_dcbx_mib_meta_data {
67 u32 addr; 88 u32 addr;
68}; 89};
69 90
91#ifdef CONFIG_DCB
92int qed_dcbx_get_config_params(struct qed_hwfn *, struct qed_dcbx_set *);
93
94int qed_dcbx_config_params(struct qed_hwfn *,
95 struct qed_ptt *, struct qed_dcbx_set *, bool);
96#endif
97
70/* QED local interface routines */ 98/* QED local interface routines */
71int 99int
72qed_dcbx_mib_update_event(struct qed_hwfn *, 100qed_dcbx_mib_update_event(struct qed_hwfn *,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 2d89e8c16b32..b26fe267a150 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -17,6 +17,7 @@
17#include <linux/pci.h> 17#include <linux/pci.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/string.h> 19#include <linux/string.h>
20#include <linux/vmalloc.h>
20#include <linux/etherdevice.h> 21#include <linux/etherdevice.h>
21#include <linux/qed/qed_chain.h> 22#include <linux/qed/qed_chain.h>
22#include <linux/qed/qed_if.h> 23#include <linux/qed/qed_if.h>
@@ -160,9 +161,13 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
160 u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0; 161 u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
161 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 162 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
162 struct init_qm_port_params *p_qm_port; 163 struct init_qm_port_params *p_qm_port;
164 bool init_rdma_offload_pq = false;
165 bool init_pure_ack_pq = false;
166 bool init_ooo_pq = false;
163 u16 num_pqs, multi_cos_tcs = 1; 167 u16 num_pqs, multi_cos_tcs = 1;
164 u8 pf_wfq = qm_info->pf_wfq; 168 u8 pf_wfq = qm_info->pf_wfq;
165 u32 pf_rl = qm_info->pf_rl; 169 u32 pf_rl = qm_info->pf_rl;
170 u16 num_pf_rls = 0;
166 u16 num_vfs = 0; 171 u16 num_vfs = 0;
167 172
168#ifdef CONFIG_QED_SRIOV 173#ifdef CONFIG_QED_SRIOV
@@ -174,6 +179,25 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
174 num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */ 179 num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */
175 num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT); 180 num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
176 181
182 if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
183 num_pqs++; /* for RoCE queue */
184 init_rdma_offload_pq = true;
 185 /* we subtract num_vfs because each requires a rate limiter,
 186 * and one more for the default rate limiter
187 */
188 if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn)
189 num_pf_rls = RESC_NUM(p_hwfn, QED_RL) - num_vfs - 1;
190
191 num_pqs += num_pf_rls;
192 qm_info->num_pf_rls = (u8) num_pf_rls;
193 }
194
195 if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
196 num_pqs += 2; /* for iSCSI pure-ACK / OOO queue */
197 init_pure_ack_pq = true;
198 init_ooo_pq = true;
199 }
200
177 /* Sanity checking that setup requires legal number of resources */ 201 /* Sanity checking that setup requires legal number of resources */
178 if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) { 202 if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
179 DP_ERR(p_hwfn, 203 DP_ERR(p_hwfn,
@@ -211,12 +235,22 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
211 235
212 vport_id = (u8)RESC_START(p_hwfn, QED_VPORT); 236 vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
213 237
238 /* First init rate limited queues */
239 for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
240 qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
241 qm_info->qm_pq_params[curr_queue].tc_id =
242 p_hwfn->hw_info.non_offload_tc;
243 qm_info->qm_pq_params[curr_queue].wrr_group = 1;
244 qm_info->qm_pq_params[curr_queue].rl_valid = 1;
245 }
246
214 /* First init per-TC PQs */ 247 /* First init per-TC PQs */
215 for (i = 0; i < multi_cos_tcs; i++) { 248 for (i = 0; i < multi_cos_tcs; i++) {
216 struct init_qm_pq_params *params = 249 struct init_qm_pq_params *params =
217 &qm_info->qm_pq_params[curr_queue++]; 250 &qm_info->qm_pq_params[curr_queue++];
218 251
219 if (p_hwfn->hw_info.personality == QED_PCI_ETH) { 252 if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
253 p_hwfn->hw_info.personality == QED_PCI_ETH) {
220 params->vport_id = vport_id; 254 params->vport_id = vport_id;
221 params->tc_id = p_hwfn->hw_info.non_offload_tc; 255 params->tc_id = p_hwfn->hw_info.non_offload_tc;
222 params->wrr_group = 1; 256 params->wrr_group = 1;
@@ -236,6 +270,32 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
236 curr_queue++; 270 curr_queue++;
237 271
238 qm_info->offload_pq = 0; 272 qm_info->offload_pq = 0;
273 if (init_rdma_offload_pq) {
274 qm_info->offload_pq = curr_queue;
275 qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
276 qm_info->qm_pq_params[curr_queue].tc_id =
277 p_hwfn->hw_info.offload_tc;
278 qm_info->qm_pq_params[curr_queue].wrr_group = 1;
279 curr_queue++;
280 }
281
282 if (init_pure_ack_pq) {
283 qm_info->pure_ack_pq = curr_queue;
284 qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
285 qm_info->qm_pq_params[curr_queue].tc_id =
286 p_hwfn->hw_info.offload_tc;
287 qm_info->qm_pq_params[curr_queue].wrr_group = 1;
288 curr_queue++;
289 }
290
291 if (init_ooo_pq) {
292 qm_info->ooo_pq = curr_queue;
293 qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
294 qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
295 qm_info->qm_pq_params[curr_queue].wrr_group = 1;
296 curr_queue++;
297 }
298
239 /* Then init per-VF PQs */ 299 /* Then init per-VF PQs */
240 vf_offset = curr_queue; 300 vf_offset = curr_queue;
241 for (i = 0; i < num_vfs; i++) { 301 for (i = 0; i < num_vfs; i++) {
@@ -244,6 +304,7 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
244 qm_info->qm_pq_params[curr_queue].tc_id = 304 qm_info->qm_pq_params[curr_queue].tc_id =
245 p_hwfn->hw_info.non_offload_tc; 305 p_hwfn->hw_info.non_offload_tc;
246 qm_info->qm_pq_params[curr_queue].wrr_group = 1; 306 qm_info->qm_pq_params[curr_queue].wrr_group = 1;
307 qm_info->qm_pq_params[curr_queue].rl_valid = 1;
247 curr_queue++; 308 curr_queue++;
248 } 309 }
249 310
@@ -256,7 +317,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
256 for (i = 0; i < num_ports; i++) { 317 for (i = 0; i < num_ports; i++) {
257 p_qm_port = &qm_info->qm_port_params[i]; 318 p_qm_port = &qm_info->qm_port_params[i];
258 p_qm_port->active = 1; 319 p_qm_port->active = 1;
259 p_qm_port->num_active_phys_tcs = 4; 320 if (num_ports == 4)
321 p_qm_port->active_phys_tcs = 0x7;
322 else
323 p_qm_port->active_phys_tcs = 0x9f;
260 p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports; 324 p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
261 p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports; 325 p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
262 } 326 }
@@ -366,21 +430,20 @@ int qed_resc_alloc(struct qed_dev *cdev)
366 if (!p_hwfn->p_tx_cids) { 430 if (!p_hwfn->p_tx_cids) {
367 DP_NOTICE(p_hwfn, 431 DP_NOTICE(p_hwfn,
368 "Failed to allocate memory for Tx Cids\n"); 432 "Failed to allocate memory for Tx Cids\n");
369 rc = -ENOMEM; 433 goto alloc_no_mem;
370 goto alloc_err;
371 } 434 }
372 435
373 p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL); 436 p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
374 if (!p_hwfn->p_rx_cids) { 437 if (!p_hwfn->p_rx_cids) {
375 DP_NOTICE(p_hwfn, 438 DP_NOTICE(p_hwfn,
376 "Failed to allocate memory for Rx Cids\n"); 439 "Failed to allocate memory for Rx Cids\n");
377 rc = -ENOMEM; 440 goto alloc_no_mem;
378 goto alloc_err;
379 } 441 }
380 } 442 }
381 443
382 for_each_hwfn(cdev, i) { 444 for_each_hwfn(cdev, i) {
383 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 445 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
446 u32 n_eqes, num_cons;
384 447
385 /* First allocate the context manager structure */ 448 /* First allocate the context manager structure */
386 rc = qed_cxt_mngr_alloc(p_hwfn); 449 rc = qed_cxt_mngr_alloc(p_hwfn);
@@ -429,18 +492,34 @@ int qed_resc_alloc(struct qed_dev *cdev)
429 goto alloc_err; 492 goto alloc_err;
430 493
431 /* EQ */ 494 /* EQ */
432 p_eq = qed_eq_alloc(p_hwfn, 256); 495 n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
433 if (!p_eq) { 496 if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
434 rc = -ENOMEM; 497 num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
498 PROTOCOLID_ROCE,
499 0) * 2;
500 n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
501 } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
502 num_cons =
503 qed_cxt_get_proto_cid_count(p_hwfn,
504 PROTOCOLID_ISCSI, 0);
505 n_eqes += 2 * num_cons;
506 }
507
508 if (n_eqes > 0xFFFF) {
509 DP_ERR(p_hwfn,
510 "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
511 n_eqes, 0xFFFF);
435 goto alloc_err; 512 goto alloc_err;
436 } 513 }
514
515 p_eq = qed_eq_alloc(p_hwfn, (u16) n_eqes);
516 if (!p_eq)
517 goto alloc_no_mem;
437 p_hwfn->p_eq = p_eq; 518 p_hwfn->p_eq = p_eq;
438 519
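/* Illustrative sizing for the n_eqes bound above: with an SPQ chain
 * capacity of 1024, 512 RoCE connections (each CID counted twice)
 * and MAX_NUM_VFS_BB assumed to be 240, the RoCE branch yields
 * n_eqes = 1024 + 1024 + 2 * 240 = 2528, comfortably below the
 * 0xFFFF limit of a u16 chain. (All figures here are hypothetical
 * examples, not values mandated by the hardware.)
 */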
439 p_consq = qed_consq_alloc(p_hwfn); 520 p_consq = qed_consq_alloc(p_hwfn);
440 if (!p_consq) { 521 if (!p_consq)
441 rc = -ENOMEM; 522 goto alloc_no_mem;
442 goto alloc_err;
443 }
444 p_hwfn->p_consq = p_consq; 523 p_hwfn->p_consq = p_consq;
445 524
446 /* DMA info initialization */ 525 /* DMA info initialization */
@@ -469,6 +548,8 @@ int qed_resc_alloc(struct qed_dev *cdev)
469 548
470 return 0; 549 return 0;
471 550
551alloc_no_mem:
552 rc = -ENOMEM;
472alloc_err: 553alloc_err:
473 qed_resc_free(cdev); 554 qed_resc_free(cdev);
474 return rc; 555 return rc;
@@ -634,6 +715,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
634 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 715 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
635 struct qed_qm_common_rt_init_params params; 716 struct qed_qm_common_rt_init_params params;
636 struct qed_dev *cdev = p_hwfn->cdev; 717 struct qed_dev *cdev = p_hwfn->cdev;
718 u16 num_pfs, pf_id;
637 u32 concrete_fid; 719 u32 concrete_fid;
638 int rc = 0; 720 int rc = 0;
639 u8 vf_id; 721 u8 vf_id;
@@ -682,9 +764,16 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
682 qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); 764 qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
683 qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); 765 qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
684 766
685 /* Disable relaxed ordering in the PCI config space */ 767 if (QED_IS_BB(p_hwfn->cdev)) {
686 qed_wr(p_hwfn, p_ptt, 0x20b4, 768 num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev);
687 qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10); 769 for (pf_id = 0; pf_id < num_pfs; pf_id++) {
770 qed_fid_pretend(p_hwfn, p_ptt, pf_id);
771 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
772 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
773 }
774 /* pretend to original PF */
775 qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
776 }
688 777
689 for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) { 778 for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
690 concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); 779 concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
@@ -703,8 +792,31 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
703{ 792{
704 int rc = 0; 793 int rc = 0;
705 794
706 rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, 795 rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
707 hw_mode); 796 if (rc != 0)
797 return rc;
798
799 if (hw_mode & (1 << MODE_MF_SI)) {
800 u8 pf_id = 0;
801
802 if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
803 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
804 "PF[%08x] is first eth on engine\n", pf_id);
805
 806 /* We should have set the bit for the ppfid, i.e., the
807 * relative function number in the port. But there's a
808 * bug in LLH in BB where the ppfid is actually engine
809 * based, so we need to take this into account.
810 */
811 qed_wr(p_hwfn, p_ptt,
812 NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);
813 }
814
815 /* Take the protocol-based hit vector if there is a hit,
816 * otherwise take the other vector.
817 */
818 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2);
819 }
708 return rc; 820 return rc;
709} 821}
710 822
@@ -751,7 +863,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
751 } 863 }
752 864
 753 /* Protocol Configuration */ 865
754 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0); 866 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
867 (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
755 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0); 868 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
756 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); 869 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
757 870
@@ -773,6 +886,21 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
773 /* Pure runtime initializations - directly to the HW */ 886 /* Pure runtime initializations - directly to the HW */
774 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); 887 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
775 888
889 if (hw_mode & (1 << MODE_MF_SI)) {
890 u8 pf_id = 0;
 891 u32 val = 0;
892
893 if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
894 if (p_hwfn->rel_pf_id == pf_id) {
895 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
896 "PF[%d] is first ETH on engine\n",
897 pf_id);
898 val = 1;
899 }
900 qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val);
901 }
902 }
903
776 if (b_hw_start) { 904 if (b_hw_start) {
777 /* enable interrupts */ 905 /* enable interrupts */
778 qed_int_igu_enable(p_hwfn, p_ptt, int_mode); 906 qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
@@ -1213,8 +1341,9 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
1213 num_features); 1341 num_features);
1214} 1342}
1215 1343
1216static void qed_hw_get_resc(struct qed_hwfn *p_hwfn) 1344static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
1217{ 1345{
1346 u8 enabled_func_idx = p_hwfn->enabled_func_idx;
1218 u32 *resc_start = p_hwfn->hw_info.resc_start; 1347 u32 *resc_start = p_hwfn->hw_info.resc_start;
1219 u8 num_funcs = p_hwfn->num_funcs_on_engine; 1348 u8 num_funcs = p_hwfn->num_funcs_on_engine;
1220 u32 *resc_num = p_hwfn->hw_info.resc_num; 1349 u32 *resc_num = p_hwfn->hw_info.resc_num;
@@ -1238,14 +1367,22 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
1238 resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs; 1367 resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
1239 resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs; 1368 resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
1240 resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs; 1369 resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
1241 resc_num[QED_RL] = 8; 1370 resc_num[QED_RL] = min_t(u32, 64, resc_num[QED_VPORT]);
1242 resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs; 1371 resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
1243 resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) / 1372 resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
1244 num_funcs; 1373 num_funcs;
1245 resc_num[QED_ILT] = 950; 1374 resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs;
1246 1375
1247 for (i = 0; i < QED_MAX_RESC; i++) 1376 for (i = 0; i < QED_MAX_RESC; i++)
1248 resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id; 1377 resc_start[i] = resc_num[i] * enabled_func_idx;
1378
1379 /* Sanity for ILT */
1380 if (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB) {
1381 DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
1382 RESC_START(p_hwfn, QED_ILT),
1383 RESC_END(p_hwfn, QED_ILT) - 1);
1384 return -EINVAL;
1385 }
1249 1386
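/* The carve-up above is an equal split among the enabled PFs on the
 * engine: resc_num[i] = TOTAL / num_funcs, starting at
 * resc_num[i] * enabled_func_idx. Worked example (illustrative;
 * PXP_NUM_ILT_RECORDS_BB is assumed here to be 7600):
 *
 *	num_funcs = 4 -> resc_num[QED_ILT] = 7600 / 4 = 1900
 *	enabled_func_idx = 2 -> ILT records [3800 .. 5699]
 *
 * RESC_END() for the last PF is then 7600, so the sanity check
 * above passes.
 */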
1250 qed_hw_set_feat(p_hwfn); 1387 qed_hw_set_feat(p_hwfn);
1251 1388
@@ -1275,6 +1412,8 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
1275 p_hwfn->hw_info.resc_start[QED_VLAN], 1412 p_hwfn->hw_info.resc_start[QED_VLAN],
1276 p_hwfn->hw_info.resc_num[QED_ILT], 1413 p_hwfn->hw_info.resc_num[QED_ILT],
1277 p_hwfn->hw_info.resc_start[QED_ILT]); 1414 p_hwfn->hw_info.resc_start[QED_ILT]);
1415
1416 return 0;
1278} 1417}
1279 1418
1280static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, 1419static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
@@ -1304,31 +1443,31 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
1304 1443
1305 switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> 1444 switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
1306 NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { 1445 NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
1307 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G: 1446 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
1308 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G; 1447 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
1309 break; 1448 break;
1310 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G: 1449 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
1311 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G; 1450 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
1312 break; 1451 break;
1313 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G: 1452 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
1314 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G; 1453 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
1315 break; 1454 break;
1316 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F: 1455 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
1317 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F; 1456 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
1318 break; 1457 break;
1319 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E: 1458 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
1320 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E; 1459 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
1321 break; 1460 break;
1322 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G: 1461 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
1323 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G; 1462 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
1324 break; 1463 break;
1325 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G: 1464 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
1326 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G; 1465 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
1327 break; 1466 break;
1328 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G: 1467 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
1329 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G; 1468 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
1330 break; 1469 break;
1331 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G: 1470 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
1332 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G; 1471 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
1333 break; 1472 break;
1334 default: 1473 default:
@@ -1373,7 +1512,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
1373 case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: 1512 case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
1374 link->speed.forced_speed = 50000; 1513 link->speed.forced_speed = 50000;
1375 break; 1514 break;
1376 case NVM_CFG1_PORT_DRV_LINK_SPEED_100G: 1515 case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
1377 link->speed.forced_speed = 100000; 1516 link->speed.forced_speed = 100000;
1378 break; 1517 break;
1379 default: 1518 default:
@@ -1429,14 +1568,20 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
1429 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) 1568 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
1430 __set_bit(QED_DEV_CAP_ETH, 1569 __set_bit(QED_DEV_CAP_ETH,
1431 &p_hwfn->hw_info.device_capabilities); 1570 &p_hwfn->hw_info.device_capabilities);
1571 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
1572 __set_bit(QED_DEV_CAP_ISCSI,
1573 &p_hwfn->hw_info.device_capabilities);
1574 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
1575 __set_bit(QED_DEV_CAP_ROCE,
1576 &p_hwfn->hw_info.device_capabilities);
1432 1577
1433 return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); 1578 return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
1434} 1579}
1435 1580
1436static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1581static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1437{ 1582{
1438 u32 reg_function_hide, tmp, eng_mask; 1583 u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
1439 u8 num_funcs; 1584 u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
1440 1585
1441 num_funcs = MAX_NUM_PFS_BB; 1586 num_funcs = MAX_NUM_PFS_BB;
1442 1587
@@ -1466,9 +1611,19 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1466 num_funcs++; 1611 num_funcs++;
1467 tmp >>= 0x1; 1612 tmp >>= 0x1;
1468 } 1613 }
1614
1615 /* Get the PF index within the enabled functions */
1616 low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
1617 tmp = reg_function_hide & eng_mask & low_pfs_mask;
1618 while (tmp) {
1619 if (tmp & 0x1)
1620 enabled_func_idx--;
1621 tmp >>= 0x1;
1622 }
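/* Worked example (illustrative, assuming rel_pf_id == abs_pf_id == 5):
 * low_pfs_mask = (1 << 5) - 1 = 0x1F. If reg_function_hide marks
 * PFs 1 and 3 as hidden, two bits survive the mask and
 * enabled_func_idx drops from 5 to 3 -- the PF's rank among the
 * enabled functions, which qed_hw_get_resc() uses as the multiplier
 * for its per-PF resource windows.
 */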
1469 } 1623 }
1470 1624
1471 p_hwfn->num_funcs_on_engine = num_funcs; 1625 p_hwfn->num_funcs_on_engine = num_funcs;
1626 p_hwfn->enabled_func_idx = enabled_func_idx;
1472 1627
1473 DP_VERBOSE(p_hwfn, 1628 DP_VERBOSE(p_hwfn,
1474 NETIF_MSG_PROBE, 1629 NETIF_MSG_PROBE,
@@ -1538,9 +1693,7 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
1538 1693
1539 qed_get_num_funcs(p_hwfn, p_ptt); 1694 qed_get_num_funcs(p_hwfn, p_ptt);
1540 1695
1541 qed_hw_get_resc(p_hwfn); 1696 return qed_hw_get_resc(p_hwfn);
1542
1543 return rc;
1544} 1697}
1545 1698
1546static int qed_get_dev_info(struct qed_dev *cdev) 1699static int qed_get_dev_info(struct qed_dev *cdev)
@@ -1737,92 +1890,285 @@ void qed_hw_remove(struct qed_dev *cdev)
1737 qed_iov_free_hw_info(cdev); 1890 qed_iov_free_hw_info(cdev);
1738} 1891}
1739 1892
1740int qed_chain_alloc(struct qed_dev *cdev, 1893static void qed_chain_free_next_ptr(struct qed_dev *cdev,
1741 enum qed_chain_use_mode intended_use, 1894 struct qed_chain *p_chain)
1742 enum qed_chain_mode mode, 1895{
1743 u16 num_elems, 1896 void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL;
1744 size_t elem_size, 1897 dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
1745 struct qed_chain *p_chain) 1898 struct qed_chain_next *p_next;
1899 u32 size, i;
1900
1901 if (!p_virt)
1902 return;
1903
1904 size = p_chain->elem_size * p_chain->usable_per_page;
1905
1906 for (i = 0; i < p_chain->page_cnt; i++) {
1907 if (!p_virt)
1908 break;
1909
1910 p_next = (struct qed_chain_next *)((u8 *)p_virt + size);
1911 p_virt_next = p_next->next_virt;
1912 p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
1913
1914 dma_free_coherent(&cdev->pdev->dev,
1915 QED_CHAIN_PAGE_SIZE, p_virt, p_phys);
1916
1917 p_virt = p_virt_next;
1918 p_phys = p_phys_next;
1919 }
1920}
1921
1922static void qed_chain_free_single(struct qed_dev *cdev,
1923 struct qed_chain *p_chain)
1924{
1925 if (!p_chain->p_virt_addr)
1926 return;
1927
1928 dma_free_coherent(&cdev->pdev->dev,
1929 QED_CHAIN_PAGE_SIZE,
1930 p_chain->p_virt_addr, p_chain->p_phys_addr);
1931}
1932
1933static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
1934{
1935 void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
1936 u32 page_cnt = p_chain->page_cnt, i, pbl_size;
1937 u8 *p_pbl_virt = p_chain->pbl.p_virt_table;
1938
1939 if (!pp_virt_addr_tbl)
1940 return;
1941
1942 if (!p_chain->pbl.p_virt_table)
1943 goto out;
1944
1945 for (i = 0; i < page_cnt; i++) {
1946 if (!pp_virt_addr_tbl[i])
1947 break;
1948
1949 dma_free_coherent(&cdev->pdev->dev,
1950 QED_CHAIN_PAGE_SIZE,
1951 pp_virt_addr_tbl[i],
1952 *(dma_addr_t *)p_pbl_virt);
1953
1954 p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
1955 }
1956
1957 pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1958 dma_free_coherent(&cdev->pdev->dev,
1959 pbl_size,
1960 p_chain->pbl.p_virt_table, p_chain->pbl.p_phys_table);
1961out:
1962 vfree(p_chain->pbl.pp_virt_addr_tbl);
1963}
1964
1965void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
1746{ 1966{
1747 dma_addr_t p_pbl_phys = 0; 1967 switch (p_chain->mode) {
1748 void *p_pbl_virt = NULL; 1968 case QED_CHAIN_MODE_NEXT_PTR:
1969 qed_chain_free_next_ptr(cdev, p_chain);
1970 break;
1971 case QED_CHAIN_MODE_SINGLE:
1972 qed_chain_free_single(cdev, p_chain);
1973 break;
1974 case QED_CHAIN_MODE_PBL:
1975 qed_chain_free_pbl(cdev, p_chain);
1976 break;
1977 }
1978}
1979
1980static int
1981qed_chain_alloc_sanity_check(struct qed_dev *cdev,
1982 enum qed_chain_cnt_type cnt_type,
1983 size_t elem_size, u32 page_cnt)
1984{
1985 u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
1986
1987 /* The actual chain size can be larger than the maximal possible value
1988 * after rounding up the requested elements number to pages, and after
 1989 * taking into account the unusable elements (next-ptr elements).
1990 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
1991 * size/capacity fields are of a u32 type.
1992 */
1993 if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
1994 chain_size > 0x10000) ||
1995 (cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
1996 chain_size > 0x100000000ULL)) {
1997 DP_NOTICE(cdev,
1998 "The actual chain size (0x%llx) is larger than the maximal possible value\n",
1999 chain_size);
2000 return -EINVAL;
2001 }
2002
2003 return 0;
2004}
2005
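/* Worked example for the check above (assuming the usual
 * QED_CHAIN_PAGE_SIZE of 4096 bytes):
 *
 *	elem_size = 8   -> ELEMS_PER_PAGE = 4096 / 8 = 512
 *	page_cnt  = 128 -> chain_size = 512 * 128 = 0x10000
 *
 * 0x10000 still passes for QED_CHAIN_CNT_TYPE_U16 (the size and
 * capacity fields are u32, so U16_MAX + 1 is representable); one
 * page more would exceed the 16-bit producer/consumer range and be
 * rejected with -EINVAL.
 */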
2006static int
2007qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
2008{
2009 void *p_virt = NULL, *p_virt_prev = NULL;
1749 dma_addr_t p_phys = 0; 2010 dma_addr_t p_phys = 0;
1750 void *p_virt = NULL; 2011 u32 i;
1751 u16 page_cnt = 0;
1752 size_t size;
1753 2012
1754 if (mode == QED_CHAIN_MODE_SINGLE) 2013 for (i = 0; i < p_chain->page_cnt; i++) {
1755 page_cnt = 1; 2014 p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1756 else 2015 QED_CHAIN_PAGE_SIZE,
1757 page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode); 2016 &p_phys, GFP_KERNEL);
2017 if (!p_virt) {
2018 DP_NOTICE(cdev, "Failed to allocate chain memory\n");
2019 return -ENOMEM;
2020 }
2021
2022 if (i == 0) {
2023 qed_chain_init_mem(p_chain, p_virt, p_phys);
2024 qed_chain_reset(p_chain);
2025 } else {
2026 qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
2027 p_virt, p_phys);
2028 }
2029
2030 p_virt_prev = p_virt;
2031 }
2032 /* Last page's next element should point to the beginning of the
2033 * chain.
2034 */
2035 qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
2036 p_chain->p_virt_addr,
2037 p_chain->p_phys_addr);
2038
2039 return 0;
2040}
2041
2042static int
2043qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
2044{
2045 dma_addr_t p_phys = 0;
2046 void *p_virt = NULL;
1758 2047
1759 size = page_cnt * QED_CHAIN_PAGE_SIZE;
1760 p_virt = dma_alloc_coherent(&cdev->pdev->dev, 2048 p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1761 size, &p_phys, GFP_KERNEL); 2049 QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
1762 if (!p_virt) { 2050 if (!p_virt) {
1763 DP_NOTICE(cdev, "Failed to allocate chain mem\n"); 2051 DP_NOTICE(cdev, "Failed to allocate chain memory\n");
1764 goto nomem; 2052 return -ENOMEM;
1765 } 2053 }
1766 2054
1767 if (mode == QED_CHAIN_MODE_PBL) { 2055 qed_chain_init_mem(p_chain, p_virt, p_phys);
1768 size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; 2056 qed_chain_reset(p_chain);
1769 p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
1770 size, &p_pbl_phys,
1771 GFP_KERNEL);
1772 if (!p_pbl_virt) {
1773 DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
1774 goto nomem;
1775 }
1776 2057
1777 qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt, 2058 return 0;
1778 (u8)elem_size, intended_use, 2059}
1779 p_pbl_phys, p_pbl_virt); 2060
1780 } else { 2061static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
1781 qed_chain_init(p_chain, p_virt, p_phys, page_cnt, 2062{
1782 (u8)elem_size, intended_use, mode); 2063 u32 page_cnt = p_chain->page_cnt, size, i;
2064 dma_addr_t p_phys = 0, p_pbl_phys = 0;
2065 void **pp_virt_addr_tbl = NULL;
2066 u8 *p_pbl_virt = NULL;
2067 void *p_virt = NULL;
2068
2069 size = page_cnt * sizeof(*pp_virt_addr_tbl);
2070 pp_virt_addr_tbl = vmalloc(size);
2071 if (!pp_virt_addr_tbl) {
2072 DP_NOTICE(cdev,
2073 "Failed to allocate memory for the chain virtual addresses table\n");
2074 return -ENOMEM;
1783 } 2075 }
2076 memset(pp_virt_addr_tbl, 0, size);
1784 2077
1785 return 0; 2078 /* The allocation of the PBL table is done with its full size, since it
 2079 * is expected to be contiguous.
2080 * qed_chain_init_pbl_mem() is called even in a case of an allocation
2081 * failure, since pp_virt_addr_tbl was previously allocated, and it
2082 * should be saved to allow its freeing during the error flow.
2083 */
2084 size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
2085 p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
2086 size, &p_pbl_phys, GFP_KERNEL);
2087 qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
2088 pp_virt_addr_tbl);
2089 if (!p_pbl_virt) {
2090 DP_NOTICE(cdev, "Failed to allocate chain pbl memory\n");
2091 return -ENOMEM;
2092 }
1786 2093
1787nomem: 2094 for (i = 0; i < page_cnt; i++) {
1788 dma_free_coherent(&cdev->pdev->dev, 2095 p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1789 page_cnt * QED_CHAIN_PAGE_SIZE, 2096 QED_CHAIN_PAGE_SIZE,
1790 p_virt, p_phys); 2097 &p_phys, GFP_KERNEL);
1791 dma_free_coherent(&cdev->pdev->dev, 2098 if (!p_virt) {
1792 page_cnt * QED_CHAIN_PBL_ENTRY_SIZE, 2099 DP_NOTICE(cdev, "Failed to allocate chain memory\n");
1793 p_pbl_virt, p_pbl_phys); 2100 return -ENOMEM;
2101 }
1794 2102
1795 return -ENOMEM; 2103 if (i == 0) {
2104 qed_chain_init_mem(p_chain, p_virt, p_phys);
2105 qed_chain_reset(p_chain);
2106 }
2107
2108 /* Fill the PBL table with the physical address of the page */
2109 *(dma_addr_t *)p_pbl_virt = p_phys;
2110 /* Keep the virtual address of the page */
2111 p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
2112
2113 p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
2114 }
2115
2116 return 0;
1796} 2117}
1797 2118
1798void qed_chain_free(struct qed_dev *cdev, 2119int qed_chain_alloc(struct qed_dev *cdev,
1799 struct qed_chain *p_chain) 2120 enum qed_chain_use_mode intended_use,
2121 enum qed_chain_mode mode,
2122 enum qed_chain_cnt_type cnt_type,
2123 u32 num_elems, size_t elem_size, struct qed_chain *p_chain)
1800{ 2124{
1801 size_t size; 2125 u32 page_cnt;
2126 int rc = 0;
1802 2127
1803 if (!p_chain->p_virt_addr) 2128 if (mode == QED_CHAIN_MODE_SINGLE)
1804 return; 2129 page_cnt = 1;
2130 else
2131 page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
2132
2133 rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
2134 if (rc) {
2135 DP_NOTICE(cdev,
2136 "Cannot allocate a chain with the given arguments:\n"
2137 "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
2138 intended_use, mode, cnt_type, num_elems, elem_size);
2139 return rc;
2140 }
2141
2142 qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use,
2143 mode, cnt_type);
1805 2144
1806 if (p_chain->mode == QED_CHAIN_MODE_PBL) { 2145 switch (mode) {
1807 size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; 2146 case QED_CHAIN_MODE_NEXT_PTR:
1808 dma_free_coherent(&cdev->pdev->dev, size, 2147 rc = qed_chain_alloc_next_ptr(cdev, p_chain);
1809 p_chain->pbl.p_virt_table, 2148 break;
1810 p_chain->pbl.p_phys_table); 2149 case QED_CHAIN_MODE_SINGLE:
2150 rc = qed_chain_alloc_single(cdev, p_chain);
2151 break;
2152 case QED_CHAIN_MODE_PBL:
2153 rc = qed_chain_alloc_pbl(cdev, p_chain);
2154 break;
1811 } 2155 }
2156 if (rc)
2157 goto nomem;
2158
2159 return 0;
1812 2160
1813 size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE; 2161nomem:
1814 dma_free_coherent(&cdev->pdev->dev, size, 2162 qed_chain_free(cdev, p_chain);
1815 p_chain->p_virt_addr, 2163 return rc;
1816 p_chain->p_phys_addr);
1817} 2164}
1818 2165
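/* A caller-side sketch of the reworked allocator: a PBL chain of
 * 4096 16-byte elements with u16 producer/consumer counters. The
 * intended-use value is assumed to be one of the existing
 * qed_chain_use_mode entries; 'my_chain' is hypothetical.
 *
 *	struct qed_chain my_chain;
 *	int rc;
 *
 *	rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *			     QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16,
 *			     4096, 16, &my_chain);
 *	if (rc)
 *		return rc;
 *	...
 *	qed_chain_free(cdev, &my_chain);
 */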
1819int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, 2166int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
1820 u16 src_id, u16 *dst_id)
1821{ 2167{
1822 if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { 2168 if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
1823 u16 min, max; 2169 u16 min, max;
1824 2170
1825 min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); 2171 min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE);
1826 max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE); 2172 max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
1827 DP_NOTICE(p_hwfn, 2173 DP_NOTICE(p_hwfn,
1828 "l2_queue id [%d] is not valid, available indices [%d - %d]\n", 2174 "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
@@ -1876,6 +2222,110 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
1876 return 0; 2222 return 0;
1877} 2223}
1878 2224
2225static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2226 u32 hw_addr, void *p_eth_qzone,
2227 size_t eth_qzone_size, u8 timeset)
2228{
2229 struct coalescing_timeset *p_coal_timeset;
2230
2231 if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) {
2232 DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n");
2233 return -EINVAL;
2234 }
2235
2236 p_coal_timeset = p_eth_qzone;
2237 memset(p_coal_timeset, 0, eth_qzone_size);
2238 SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
2239 SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
2240 qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
2241
2242 return 0;
2243}
2244
2245int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2246 u16 coalesce, u8 qid, u16 sb_id)
2247{
2248 struct ustorm_eth_queue_zone eth_qzone;
2249 u8 timeset, timer_res;
2250 u16 fw_qid = 0;
2251 u32 address;
2252 int rc;
2253
2254 /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
2255 if (coalesce <= 0x7F) {
2256 timer_res = 0;
2257 } else if (coalesce <= 0xFF) {
2258 timer_res = 1;
2259 } else if (coalesce <= 0x1FF) {
2260 timer_res = 2;
2261 } else {
2262 DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
2263 return -EINVAL;
2264 }
2265 timeset = (u8)(coalesce >> timer_res);
2266
2267 rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
2268 if (rc)
2269 return rc;
2270
2271 rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
2272 if (rc)
2273 goto out;
2274
2275 address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
2276
2277 rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
2278 sizeof(struct ustorm_eth_queue_zone), timeset);
2279 if (rc)
2280 goto out;
2281
2282 p_hwfn->cdev->rx_coalesce_usecs = coalesce;
2283out:
2284 return rc;
2285}
2286
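/* Worked examples of the timeset/timer_res encoding (illustrative):
 *
 *	coalesce = 100 -> <= 0x7F  -> timer_res = 0, timeset = 100
 *	coalesce = 200 -> <= 0xFF  -> timer_res = 1, timeset = 100
 *	coalesce = 301 -> <= 0x1FF -> timer_res = 2, timeset = 75
 *
 * The hardware applies timeset << timer_res, so 301 usec rounds
 * down to 300 -- the "up to 3 usec" inaccuracy documented for the
 * highest range in qed_dev_api.h.
 */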
2287int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2288 u16 coalesce, u8 qid, u16 sb_id)
2289{
2290 struct xstorm_eth_queue_zone eth_qzone;
2291 u8 timeset, timer_res;
2292 u16 fw_qid = 0;
2293 u32 address;
2294 int rc;
2295
2296 /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
2297 if (coalesce <= 0x7F) {
2298 timer_res = 0;
2299 } else if (coalesce <= 0xFF) {
2300 timer_res = 1;
2301 } else if (coalesce <= 0x1FF) {
2302 timer_res = 2;
2303 } else {
2304 DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
2305 return -EINVAL;
2306 }
2307 timeset = (u8)(coalesce >> timer_res);
2308
2309 rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
2310 if (rc)
2311 return rc;
2312
2313 rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true);
2314 if (rc)
2315 goto out;
2316
2317 address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
2318
2319 rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
2320 sizeof(struct xstorm_eth_queue_zone), timeset);
2321 if (rc)
2322 goto out;
2323
2324 p_hwfn->cdev->tx_coalesce_usecs = coalesce;
2325out:
2326 return rc;
2327}
2328
1879/* Calculate final WFQ values for all vports and configure them. 2329/* Calculate final WFQ values for all vports and configure them.
1880 * After this configuration each vport will have 2330 * After this configuration each vport will have
1881 * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT) 2331 * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index dde364d6f502..343bb0344f62 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -212,6 +212,20 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
212 u32 size_in_dwords, 212 u32 size_in_dwords,
213 u32 flags); 213 u32 flags);
214 214
215 /**
 216 * @brief qed_dmae_grc2host - Read data from a GRC address (dmae data
 217 * offset) into a host destination address using the given ptt
218 *
219 * @param p_ptt
220 * @param grc_addr (dmae_data_offset)
221 * @param dest_addr
222 * @param size_in_dwords
223 * @param flags - one of the flags defined above
224 */
225int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
226 u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords,
227 u32 flags);
228
215/** 229/**
 216 * @brief qed_dmae_host2host - copy data from a source address 230 * @brief qed_dmae_host2host - copy data from a source address
 217 * to a destination address (for SRIOV) using the given ptt 231 * to a destination address (for SRIOV) using the given ptt
@@ -245,9 +259,8 @@ int
245qed_chain_alloc(struct qed_dev *cdev, 259qed_chain_alloc(struct qed_dev *cdev,
246 enum qed_chain_use_mode intended_use, 260 enum qed_chain_use_mode intended_use,
247 enum qed_chain_mode mode, 261 enum qed_chain_mode mode,
248 u16 num_elems, 262 enum qed_chain_cnt_type cnt_type,
249 size_t elem_size, 263 u32 num_elems, size_t elem_size, struct qed_chain *p_chain);
250 struct qed_chain *p_chain);
251 264
252/** 265/**
253 * @brief qed_chain_free - Free chain DMA memory 266 * @brief qed_chain_free - Free chain DMA memory
@@ -255,8 +268,7 @@ qed_chain_alloc(struct qed_dev *cdev,
255 * @param p_hwfn 268 * @param p_hwfn
256 * @param p_chain 269 * @param p_chain
257 */ 270 */
258void qed_chain_free(struct qed_dev *cdev, 271void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain);
259 struct qed_chain *p_chain);
260 272
261/** 273/**
262 * @@brief qed_fw_l2_queue - Get absolute L2 queue ID 274 * @@brief qed_fw_l2_queue - Get absolute L2 queue ID
@@ -310,4 +322,37 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
310int qed_final_cleanup(struct qed_hwfn *p_hwfn, 322int qed_final_cleanup(struct qed_hwfn *p_hwfn,
311 struct qed_ptt *p_ptt, u16 id, bool is_vf); 323 struct qed_ptt *p_ptt, u16 id, bool is_vf);
312 324
325/**
326 * @brief qed_set_rxq_coalesce - Configure coalesce parameters for an Rx queue
327 * The fact that we can configure coalescing to up to 511, but on varying
328 * accuracy [the bigger the value the less accurate] up to a mistake of 3usec
329 * for the highest values.
330 *
331 * @param p_hwfn
332 * @param p_ptt
 333 * @param coalesce - Coalesce value in microseconds.
334 * @param qid - Queue index.
 335 * @param sb_id - SB Id
336 *
337 * @return int
338 */
339int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
340 u16 coalesce, u8 qid, u16 sb_id);
341
342/**
343 * @brief qed_set_txq_coalesce - Configure coalesce parameters for a Tx queue
 344 * While the API allows setting coalescing per-qid, all Tx queues sharing an
 345 * SB should be in the same range [i.e., either 0-0x7f, 0x80-0xff or
 346 * 0x100-0x1ff], otherwise the configuration would break.
347 *
348 * @param p_hwfn
349 * @param p_ptt
350 * @param coalesce - Coalesce value in micro seconds.
 352 * @param sb_id - SB Id
352 * @param qid - SB Id
353 *
354 * @return int
355 */
356int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
357 u16 coalesce, u8 qid, u16 sb_id);
313#endif 358#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index e29ed5a69566..592784019994 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -17,13 +17,15 @@
17#include <linux/list.h> 17#include <linux/list.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/qed/common_hsi.h> 19#include <linux/qed/common_hsi.h>
20#include <linux/qed/storage_common.h>
21#include <linux/qed/tcp_common.h>
20#include <linux/qed/eth_common.h> 22#include <linux/qed/eth_common.h>
23#include <linux/qed/iscsi_common.h>
24#include <linux/qed/rdma_common.h>
25#include <linux/qed/roce_common.h>
21 26
22struct qed_hwfn; 27struct qed_hwfn;
23struct qed_ptt; 28struct qed_ptt;
24/********************************/
25/* Add include to common target */
26/********************************/
27 29
28/* opcodes for the event ring */ 30/* opcodes for the event ring */
29enum common_event_opcode { 31enum common_event_opcode {
@@ -32,9 +34,10 @@ enum common_event_opcode {
32 COMMON_EVENT_VF_START, 34 COMMON_EVENT_VF_START,
33 COMMON_EVENT_VF_STOP, 35 COMMON_EVENT_VF_STOP,
34 COMMON_EVENT_VF_PF_CHANNEL, 36 COMMON_EVENT_VF_PF_CHANNEL,
35 COMMON_EVENT_RESERVED4, 37 COMMON_EVENT_VF_FLR,
36 COMMON_EVENT_RESERVED5, 38 COMMON_EVENT_PF_UPDATE,
37 COMMON_EVENT_RESERVED6, 39 COMMON_EVENT_MALICIOUS_VF,
40 COMMON_EVENT_RL_UPDATE,
38 COMMON_EVENT_EMPTY, 41 COMMON_EVENT_EMPTY,
39 MAX_COMMON_EVENT_OPCODE 42 MAX_COMMON_EVENT_OPCODE
40}; 43};
@@ -42,11 +45,12 @@ enum common_event_opcode {
42/* Common Ramrod Command IDs */ 45/* Common Ramrod Command IDs */
43enum common_ramrod_cmd_id { 46enum common_ramrod_cmd_id {
44 COMMON_RAMROD_UNUSED, 47 COMMON_RAMROD_UNUSED,
45 COMMON_RAMROD_PF_START /* PF Function Start Ramrod */, 48 COMMON_RAMROD_PF_START,
46 COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */, 49 COMMON_RAMROD_PF_STOP,
47 COMMON_RAMROD_VF_START, 50 COMMON_RAMROD_VF_START,
48 COMMON_RAMROD_VF_STOP, 51 COMMON_RAMROD_VF_STOP,
49 COMMON_RAMROD_PF_UPDATE, 52 COMMON_RAMROD_PF_UPDATE,
53 COMMON_RAMROD_RL_UPDATE,
50 COMMON_RAMROD_EMPTY, 54 COMMON_RAMROD_EMPTY,
51 MAX_COMMON_RAMROD_CMD_ID 55 MAX_COMMON_RAMROD_CMD_ID
52}; 56};
@@ -63,448 +67,448 @@ struct pstorm_core_conn_st_ctx {
63 67
64/* Core Slowpath Connection storm context of Xstorm */ 68/* Core Slowpath Connection storm context of Xstorm */
65struct xstorm_core_conn_st_ctx { 69struct xstorm_core_conn_st_ctx {
66 __le32 spq_base_lo /* SPQ Ring Base Address low dword */; 70 __le32 spq_base_lo;
67 __le32 spq_base_hi /* SPQ Ring Base Address high dword */; 71 __le32 spq_base_hi;
68 struct regpair consolid_base_addr; 72 struct regpair consolid_base_addr;
69 __le16 spq_cons /* SPQ Ring Consumer */; 73 __le16 spq_cons;
70 __le16 consolid_cons /* Consolidation Ring Consumer */; 74 __le16 consolid_cons;
71 __le32 reserved0[55] /* Pad to 15 cycles */; 75 __le32 reserved0[55];
72}; 76};
73 77
74struct xstorm_core_conn_ag_ctx { 78struct xstorm_core_conn_ag_ctx {
75 u8 reserved0 /* cdu_validation */; 79 u8 reserved0;
76 u8 core_state /* state */; 80 u8 core_state;
77 u8 flags0; 81 u8 flags0;
78#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 82#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
79#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 83#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
80#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 84#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
81#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 85#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
82#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 86#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
83#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 87#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
84#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 88#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
85#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 89#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
86#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 90#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
87#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 91#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
88#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 92#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
89#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 93#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
90#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */ 94#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
91#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 95#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
92#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */ 96#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
93#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 97#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
94 u8 flags1; 98 u8 flags1;
95#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */ 99#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
96#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 100#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
97#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */ 101#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
98#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 102#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
99#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */ 103#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
100#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 104#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
101#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */ 105#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
102#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 106#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
103#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */ 107#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
104#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 108#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
105#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */ 109#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
106#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 110#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
107#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */ 111#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
108#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 112#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
109#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */ 113#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
110#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 114#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
111 u8 flags2; 115 u8 flags2;
112#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ 116#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
113#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 117#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
114#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ 118#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
115#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 119#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
116#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ 120#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
117#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 121#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
118#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 122#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
119#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 123#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
120 u8 flags3; 124 u8 flags3;
121#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ 125#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
122#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 126#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
123#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ 127#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
124#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 128#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
125#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ 129#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
126#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 130#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
127#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ 131#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
128#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 132#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
129 u8 flags4; 133 u8 flags4;
130#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ 134#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
131#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 135#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
132#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ 136#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
133#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 137#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
134#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ 138#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
135#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 139#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
136#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */ 140#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
137#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 141#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
138 u8 flags5; 142 u8 flags5;
139#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */ 143#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
140#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 144#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
141#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */ 145#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
142#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 146#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
143#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */ 147#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
144#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 148#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
145#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */ 149#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
146#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 150#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
147 u8 flags6; 151 u8 flags6;
148#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 /* cf16 */ 152#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
149#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 153#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
150#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 154#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
151#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 155#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
152#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */ 156#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
153#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 157#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
154#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */ 158#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
155#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 159#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
156 u8 flags7; 160 u8 flags7;
157#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */ 161#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
158#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 162#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
159#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */ 163#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
160#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 164#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
161#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */ 165#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
162#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 166#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
163#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ 167#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
164#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 168#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
165#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ 169#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
166#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 170#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
167 u8 flags8; 171 u8 flags8;
168#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ 172#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
169#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 173#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
170#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ 174#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
171#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 175#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
172#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ 176#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
173#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 177#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
174#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ 178#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
175#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 179#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
176#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ 180#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
177#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 181#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
178#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ 182#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
179#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 183#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
180#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ 184#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
181#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 185#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
182#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ 186#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
183#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 187#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
184 u8 flags9; 188 u8 flags9;
185#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ 189#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
186#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 190#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
187#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */ 191#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
188#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 192#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
189#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */ 193#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
190#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 194#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
191#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */ 195#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
192#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 196#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
193#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */ 197#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
194#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 198#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
195#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */ 199#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
196#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 200#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
197#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 /* cf16en */ 201#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
198#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 202#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
199#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 203#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
200#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 204#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
201 u8 flags10; 205 u8 flags10;
202#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */ 206#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
203#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 207#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
204#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */ 208#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
205#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 209#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
206#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */ 210#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
207#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 211#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
208#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */ 212#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
209#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 213#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
210#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */ 214#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
211#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 215#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
212#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 /* cf23en */ 216#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
213#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 217#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
214#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */ 218#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
215#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 219#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
216#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */ 220#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
217#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 221#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
218 u8 flags11; 222 u8 flags11;
219#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */ 223#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
220#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 224#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
221#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */ 225#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
222#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 226#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
223#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */ 227#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
224#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 228#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
225#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ 229#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
226#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 230#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
227#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ 231#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
228#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 232#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
229#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ 233#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
230#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 234#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
231#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */ 235#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
232#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 236#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
233#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */ 237#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
234#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 238#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
235 u8 flags12; 239 u8 flags12;
236#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */ 240#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
237#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 241#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
238#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */ 242#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
239#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 243#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
240#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */ 244#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
241#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 245#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
242#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */ 246#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
243#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 247#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
244#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */ 248#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
245#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 249#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
246#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */ 250#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
247#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 251#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
248#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */ 252#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
249#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 253#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
250#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */ 254#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
251#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 255#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
252 u8 flags13; 256 u8 flags13;
253#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */ 257#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
254#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 258#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
255#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */ 259#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
256#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 260#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
257#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */ 261#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
258#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 262#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
259#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */ 263#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
260#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 264#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
261#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */ 265#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
262#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 266#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
263#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */ 267#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
264#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 268#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
265#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */ 269#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
266#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 270#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
267#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */ 271#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
268#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 272#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
269 u8 flags14; 273 u8 flags14;
270#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 /* bit16 */ 274#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
271#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 275#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
272#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 /* bit17 */ 276#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
273#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 277#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
274#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 /* bit18 */ 278#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
275#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 279#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
276#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 /* bit19 */ 280#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
277#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 281#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
278#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 /* bit20 */ 282#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
279#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 283#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
280#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 /* bit21 */ 284#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
281#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 285#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
282#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 /* cf23 */ 286#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
283#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 287#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
284 u8 byte2 /* byte2 */; 288 u8 byte2;
285 __le16 physical_q0 /* physical_q0 */; 289 __le16 physical_q0;
286 __le16 consolid_prod /* physical_q1 */; 290 __le16 consolid_prod;
287 __le16 reserved16 /* physical_q2 */; 291 __le16 reserved16;
288 __le16 tx_bd_cons /* word3 */; 292 __le16 tx_bd_cons;
289 __le16 tx_bd_or_spq_prod /* word4 */; 293 __le16 tx_bd_or_spq_prod;
290 __le16 word5 /* word5 */; 294 __le16 word5;
291 __le16 conn_dpi /* conn_dpi */; 295 __le16 conn_dpi;
292 u8 byte3 /* byte3 */; 296 u8 byte3;
293 u8 byte4 /* byte4 */; 297 u8 byte4;
294 u8 byte5 /* byte5 */; 298 u8 byte5;
295 u8 byte6 /* byte6 */; 299 u8 byte6;
296 __le32 reg0 /* reg0 */; 300 __le32 reg0;
297 __le32 reg1 /* reg1 */; 301 __le32 reg1;
298 __le32 reg2 /* reg2 */; 302 __le32 reg2;
299 __le32 reg3 /* reg3 */; 303 __le32 reg3;
300 __le32 reg4 /* reg4 */; 304 __le32 reg4;
301 __le32 reg5 /* cf_array0 */; 305 __le32 reg5;
302 __le32 reg6 /* cf_array1 */; 306 __le32 reg6;
303 __le16 word7 /* word7 */; 307 __le16 word7;
304 __le16 word8 /* word8 */; 308 __le16 word8;
305 __le16 word9 /* word9 */; 309 __le16 word9;
306 __le16 word10 /* word10 */; 310 __le16 word10;
307 __le32 reg7 /* reg7 */; 311 __le32 reg7;
308 __le32 reg8 /* reg8 */; 312 __le32 reg8;
309 __le32 reg9 /* reg9 */; 313 __le32 reg9;
310 u8 byte7 /* byte7 */; 314 u8 byte7;
311 u8 byte8 /* byte8 */; 315 u8 byte8;
312 u8 byte9 /* byte9 */; 316 u8 byte9;
313 u8 byte10 /* byte10 */; 317 u8 byte10;
314 u8 byte11 /* byte11 */; 318 u8 byte11;
315 u8 byte12 /* byte12 */; 319 u8 byte12;
316 u8 byte13 /* byte13 */; 320 u8 byte13;
317 u8 byte14 /* byte14 */; 321 u8 byte14;
318 u8 byte15 /* byte15 */; 322 u8 byte15;
319 u8 byte16 /* byte16 */; 323 u8 byte16;
320 __le16 word11 /* word11 */; 324 __le16 word11;
321 __le32 reg10 /* reg10 */; 325 __le32 reg10;
322 __le32 reg11 /* reg11 */; 326 __le32 reg11;
323 __le32 reg12 /* reg12 */; 327 __le32 reg12;
324 __le32 reg13 /* reg13 */; 328 __le32 reg13;
325 __le32 reg14 /* reg14 */; 329 __le32 reg14;
326 __le32 reg15 /* reg15 */; 330 __le32 reg15;
327 __le32 reg16 /* reg16 */; 331 __le32 reg16;
328 __le32 reg17 /* reg17 */; 332 __le32 reg17;
329 __le32 reg18 /* reg18 */; 333 __le32 reg18;
330 __le32 reg19 /* reg19 */; 334 __le32 reg19;
331 __le16 word12 /* word12 */; 335 __le16 word12;
332 __le16 word13 /* word13 */; 336 __le16 word13;
333 __le16 word14 /* word14 */; 337 __le16 word14;
334 __le16 word15 /* word15 */; 338 __le16 word15;
335}; 339};
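
Each *_MASK/*_SHIFT pair above names a sub-field packed into one of the flagsN bytes. A minimal sketch of generic accessors for such fields, assuming the driver defines equivalents (the GET_FIELD/SET_FIELD helpers and the example function below are illustrative, not taken from this patch):

/* Illustrative generic accessors for the MASK/SHIFT pairs above. */
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, val)				\
	do {							\
		(value) &= ~(name##_MASK << name##_SHIFT);	\
		(value) |= ((val) & name##_MASK) << name##_SHIFT; \
	} while (0)

/* Example: arm the FLUSH_Q0 completion field (cf20) in flags7. */
static void xstorm_arm_flush_q0(struct xstorm_core_conn_ag_ctx *ctx)
{
	SET_FIELD(ctx->flags7, XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0, 1);
}
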
336 340
337struct tstorm_core_conn_ag_ctx { 341struct tstorm_core_conn_ag_ctx {
338 u8 byte0 /* cdu_validation */; 342 u8 byte0;
339 u8 byte1 /* state */; 343 u8 byte1;
340 u8 flags0; 344 u8 flags0;
341#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ 345#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
342#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 346#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
343#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ 347#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
344#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 348#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
345#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ 349#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
346#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 350#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
347#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ 351#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
348#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 352#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
349#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ 353#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
350#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 354#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
351#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ 355#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
352#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 356#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
353#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ 357#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
354#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 358#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
355 u8 flags1; 359 u8 flags1;
356#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ 360#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
357#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 361#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
358#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ 362#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
359#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 363#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
360#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ 364#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
361#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 365#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
362#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ 366#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
363#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 367#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
364 u8 flags2; 368 u8 flags2;
365#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ 369#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
366#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 370#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
367#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ 371#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
368#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 372#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
369#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ 373#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
370#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 374#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
371#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ 375#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
372#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 376#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
373 u8 flags3; 377 u8 flags3;
374#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ 378#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
375#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 379#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
376#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ 380#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
377#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 381#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
378#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ 382#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
379#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 383#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
380#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ 384#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
381#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 385#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
382#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ 386#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
383#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 387#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
384#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ 388#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
385#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 389#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
386 u8 flags4; 390 u8 flags4;
387#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ 391#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
388#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 392#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
389#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ 393#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
390#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 394#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
391#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ 395#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
392#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 396#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
393#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ 397#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
394#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 398#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
395#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ 399#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
396#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 400#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
397#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ 401#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
398#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 402#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
399#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ 403#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
400#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 404#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
401#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ 405#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
402#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 406#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
403 u8 flags5; 407 u8 flags5;
404#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ 408#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
405#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 409#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
406#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ 410#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
407#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 411#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
408#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ 412#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
409#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 413#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
410#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ 414#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
411#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 415#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
412#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ 416#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
413#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 417#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
414#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ 418#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
415#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 419#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
416#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ 420#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
417#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 421#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
418#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ 422#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
419#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 423#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
420 __le32 reg0 /* reg0 */; 424 __le32 reg0;
421 __le32 reg1 /* reg1 */; 425 __le32 reg1;
422 __le32 reg2 /* reg2 */; 426 __le32 reg2;
423 __le32 reg3 /* reg3 */; 427 __le32 reg3;
424 __le32 reg4 /* reg4 */; 428 __le32 reg4;
425 __le32 reg5 /* reg5 */; 429 __le32 reg5;
426 __le32 reg6 /* reg6 */; 430 __le32 reg6;
427 __le32 reg7 /* reg7 */; 431 __le32 reg7;
428 __le32 reg8 /* reg8 */; 432 __le32 reg8;
429 u8 byte2 /* byte2 */; 433 u8 byte2;
430 u8 byte3 /* byte3 */; 434 u8 byte3;
431 __le16 word0 /* word0 */; 435 __le16 word0;
432 u8 byte4 /* byte4 */; 436 u8 byte4;
433 u8 byte5 /* byte5 */; 437 u8 byte5;
434 __le16 word1 /* word1 */; 438 __le16 word1;
435 __le16 word2 /* conn_dpi */; 439 __le16 word2;
436 __le16 word3 /* word3 */; 440 __le16 word3;
437 __le32 reg9 /* reg9 */; 441 __le32 reg9;
438 __le32 reg10 /* reg10 */; 442 __le32 reg10;
439}; 443};
440 444
441struct ustorm_core_conn_ag_ctx { 445struct ustorm_core_conn_ag_ctx {
442 u8 reserved /* cdu_validation */; 446 u8 reserved;
443 u8 byte1 /* state */; 447 u8 byte1;
444 u8 flags0; 448 u8 flags0;
445#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ 449#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
446#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 450#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
447#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ 451#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
448#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 452#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
449#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ 453#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
450#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 454#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
451#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ 455#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
452#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 456#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
453#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ 457#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
454#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 458#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
455 u8 flags1; 459 u8 flags1;
456#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ 460#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
457#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 461#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
458#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ 462#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
459#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 463#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
460#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ 464#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
461#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 465#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
462#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ 466#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
463#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 467#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
464 u8 flags2; 468 u8 flags2;
465#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ 469#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
466#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 470#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
467#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ 471#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
468#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 472#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
469#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ 473#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
470#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 474#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
471#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ 475#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
472#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 476#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
473#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ 477#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
474#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 478#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
475#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ 479#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
476#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 480#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
477#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ 481#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
478#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 482#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
479#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ 483#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
480#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 484#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
481 u8 flags3; 485 u8 flags3;
482#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ 486#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
483#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 487#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
484#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ 488#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
485#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 489#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
486#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ 490#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
487#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 491#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
488#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ 492#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
489#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 493#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
490#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ 494#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
491#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 495#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
492#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ 496#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
493#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 497#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
494#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ 498#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
495#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 499#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
496#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ 500#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
497#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 501#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
498 u8 byte2 /* byte2 */; 502 u8 byte2;
499 u8 byte3 /* byte3 */; 503 u8 byte3;
500 __le16 word0 /* conn_dpi */; 504 __le16 word0;
501 __le16 word1 /* word1 */; 505 __le16 word1;
502 __le32 rx_producers /* reg0 */; 506 __le32 rx_producers;
503 __le32 reg1 /* reg1 */; 507 __le32 reg1;
504 __le32 reg2 /* reg2 */; 508 __le32 reg2;
505 __le32 reg3 /* reg3 */; 509 __le32 reg3;
506 __le16 word2 /* word2 */; 510 __le16 word2;
507 __le16 word3 /* word3 */; 511 __le16 word3;
508}; 512};
509 513
510/* The core storm context for the Mstorm */ 514/* The core storm context for the Mstorm */
@@ -519,122 +523,186 @@ struct ustorm_core_conn_st_ctx {
519 523
520/* core connection context */ 524/* core connection context */
521struct core_conn_context { 525struct core_conn_context {
522 struct ystorm_core_conn_st_ctx ystorm_st_context; 526 struct ystorm_core_conn_st_ctx ystorm_st_context;
523 struct regpair ystorm_st_padding[2] /* padding */; 527 struct regpair ystorm_st_padding[2];
524 struct pstorm_core_conn_st_ctx pstorm_st_context; 528 struct pstorm_core_conn_st_ctx pstorm_st_context;
525 struct regpair pstorm_st_padding[2]; 529 struct regpair pstorm_st_padding[2];
526 struct xstorm_core_conn_st_ctx xstorm_st_context; 530 struct xstorm_core_conn_st_ctx xstorm_st_context;
527 struct xstorm_core_conn_ag_ctx xstorm_ag_context; 531 struct xstorm_core_conn_ag_ctx xstorm_ag_context;
528 struct tstorm_core_conn_ag_ctx tstorm_ag_context; 532 struct tstorm_core_conn_ag_ctx tstorm_ag_context;
529 struct ustorm_core_conn_ag_ctx ustorm_ag_context; 533 struct ustorm_core_conn_ag_ctx ustorm_ag_context;
530 struct mstorm_core_conn_st_ctx mstorm_st_context; 534 struct mstorm_core_conn_st_ctx mstorm_st_context;
531 struct ustorm_core_conn_st_ctx ustorm_st_context; 535 struct ustorm_core_conn_st_ctx ustorm_st_context;
532 struct regpair ustorm_st_padding[2] /* padding */; 536 struct regpair ustorm_st_padding[2];
537};
538
539struct eth_mstorm_per_pf_stat {
540 struct regpair gre_discard_pkts;
541 struct regpair vxlan_discard_pkts;
542 struct regpair geneve_discard_pkts;
543 struct regpair lb_discard_pkts;
533}; 544};
534 545
535struct eth_mstorm_per_queue_stat { 546struct eth_mstorm_per_queue_stat {
536 struct regpair ttl0_discard; 547 struct regpair ttl0_discard;
537 struct regpair packet_too_big_discard; 548 struct regpair packet_too_big_discard;
538 struct regpair no_buff_discard; 549 struct regpair no_buff_discard;
539 struct regpair not_active_discard; 550 struct regpair not_active_discard;
540 struct regpair tpa_coalesced_pkts; 551 struct regpair tpa_coalesced_pkts;
541 struct regpair tpa_coalesced_events; 552 struct regpair tpa_coalesced_events;
542 struct regpair tpa_aborts_num; 553 struct regpair tpa_aborts_num;
543 struct regpair tpa_coalesced_bytes; 554 struct regpair tpa_coalesced_bytes;
555};
556
557/* Ethernet TX Per PF */
558struct eth_pstorm_per_pf_stat {
559 struct regpair sent_lb_ucast_bytes;
560 struct regpair sent_lb_mcast_bytes;
561 struct regpair sent_lb_bcast_bytes;
562 struct regpair sent_lb_ucast_pkts;
563 struct regpair sent_lb_mcast_pkts;
564 struct regpair sent_lb_bcast_pkts;
565 struct regpair sent_gre_bytes;
566 struct regpair sent_vxlan_bytes;
567 struct regpair sent_geneve_bytes;
568 struct regpair sent_gre_pkts;
569 struct regpair sent_vxlan_pkts;
570 struct regpair sent_geneve_pkts;
571 struct regpair gre_drop_pkts;
572 struct regpair vxlan_drop_pkts;
573 struct regpair geneve_drop_pkts;
574};
575
576/* Ethernet TX Per Queue Stats */
577struct eth_pstorm_per_queue_stat {
578 struct regpair sent_ucast_bytes;
579 struct regpair sent_mcast_bytes;
580 struct regpair sent_bcast_bytes;
581 struct regpair sent_ucast_pkts;
582 struct regpair sent_mcast_pkts;
583 struct regpair sent_bcast_pkts;
584 struct regpair error_drop_pkts;
585};
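
All counters in these per-queue statistics structures are struct regpair values. Assuming regpair is the usual pair of little-endian 32-bit halves with lo first, a sketch of folding one into a host u64 and summing a queue's TX packet counters (regpair_to_u64 and queue_tx_pkts are illustrative helpers):

/* regpair assumed to be { __le32 lo; __le32 hi; }. */
static u64 regpair_to_u64(struct regpair rp)
{
	return ((u64)le32_to_cpu(rp.hi) << 32) | le32_to_cpu(rp.lo);
}

/* Example: total packets sent on one TX queue. */
static u64 queue_tx_pkts(const struct eth_pstorm_per_queue_stat *s)
{
	return regpair_to_u64(s->sent_ucast_pkts) +
	       regpair_to_u64(s->sent_mcast_pkts) +
	       regpair_to_u64(s->sent_bcast_pkts);
}
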
586
 587/* ETH Rx rate limit data */
588struct eth_rx_rate_limit {
589 __le16 mult;
590 __le16 cnst;
591 u8 add_sub_cnst;
592 u8 reserved0;
593 __le16 reserved1;
544}; 594};
545 595
546struct eth_pstorm_per_queue_stat { 596struct eth_ustorm_per_pf_stat {
547 struct regpair sent_ucast_bytes; 597 struct regpair rcv_lb_ucast_bytes;
548 struct regpair sent_mcast_bytes; 598 struct regpair rcv_lb_mcast_bytes;
549 struct regpair sent_bcast_bytes; 599 struct regpair rcv_lb_bcast_bytes;
550 struct regpair sent_ucast_pkts; 600 struct regpair rcv_lb_ucast_pkts;
551 struct regpair sent_mcast_pkts; 601 struct regpair rcv_lb_mcast_pkts;
552 struct regpair sent_bcast_pkts; 602 struct regpair rcv_lb_bcast_pkts;
553 struct regpair error_drop_pkts; 603 struct regpair rcv_gre_bytes;
604 struct regpair rcv_vxlan_bytes;
605 struct regpair rcv_geneve_bytes;
606 struct regpair rcv_gre_pkts;
607 struct regpair rcv_vxlan_pkts;
608 struct regpair rcv_geneve_pkts;
554}; 609};
555 610
556struct eth_ustorm_per_queue_stat { 611struct eth_ustorm_per_queue_stat {
557 struct regpair rcv_ucast_bytes; 612 struct regpair rcv_ucast_bytes;
558 struct regpair rcv_mcast_bytes; 613 struct regpair rcv_mcast_bytes;
559 struct regpair rcv_bcast_bytes; 614 struct regpair rcv_bcast_bytes;
560 struct regpair rcv_ucast_pkts; 615 struct regpair rcv_ucast_pkts;
561 struct regpair rcv_mcast_pkts; 616 struct regpair rcv_mcast_pkts;
562 struct regpair rcv_bcast_pkts; 617 struct regpair rcv_bcast_pkts;
563}; 618};
564 619
565/* Event Ring Next Page Address */ 620/* Event Ring Next Page Address */
566struct event_ring_next_addr { 621struct event_ring_next_addr {
567 struct regpair addr /* Next Page Address */; 622 struct regpair addr;
568 __le32 reserved[2] /* Reserved */; 623 __le32 reserved[2];
569}; 624};
570 625
626/* Event Ring Element */
571union event_ring_element { 627union event_ring_element {
572 struct event_ring_entry entry /* Event Ring Entry */; 628 struct event_ring_entry entry;
573 struct event_ring_next_addr next_addr; 629 struct event_ring_next_addr next_addr;
630};
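
The union above lets one ring slot hold either an event or, in the last slot of a page, the address of the next page. A hedged sketch of a consumer-side walk under that assumption; ELEMENTS_PER_PAGE and the dma_to_cpu translation callback are placeholders, and regpair_to_u64 is the helper sketched earlier:

#define ELEMENTS_PER_PAGE 256	/* assumed ring-page geometry */

static union event_ring_element *
event_ring_next(union event_ring_element *page, u32 idx,
		void *(*dma_to_cpu)(u64 dma_addr))
{
	if (idx == ELEMENTS_PER_PAGE - 1)
		/* Last slot chains to the next page via its DMA address. */
		return dma_to_cpu(regpair_to_u64(page[idx].next_addr.addr));
	return &page[idx + 1];
}
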
631
632/* Major and Minor hsi Versions */
633struct hsi_fp_ver_struct {
634 u8 minor_ver_arr[2];
635 u8 major_ver_arr[2];
574}; 636};
575 637
638/* Mstorm non-triggering VF zone */
576struct mstorm_non_trigger_vf_zone { 639struct mstorm_non_trigger_vf_zone {
577 struct eth_mstorm_per_queue_stat eth_queue_stat; 640 struct eth_mstorm_per_queue_stat eth_queue_stat;
641 struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF];
578}; 642};
579 643
644/* Mstorm VF zone */
580struct mstorm_vf_zone { 645struct mstorm_vf_zone {
581 struct mstorm_non_trigger_vf_zone non_trigger; 646 struct mstorm_non_trigger_vf_zone non_trigger;
647
582}; 648};
583 649
650/* personality per PF */
584enum personality_type { 651enum personality_type {
585 BAD_PERSONALITY_TYP, 652 BAD_PERSONALITY_TYP,
586 PERSONALITY_RESERVED, 653 PERSONALITY_ISCSI,
587 PERSONALITY_RESERVED2, 654 PERSONALITY_RESERVED2,
588 PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp */, 655 PERSONALITY_RDMA_AND_ETH,
589 PERSONALITY_RESERVED3, 656 PERSONALITY_RESERVED3,
590 PERSONALITY_CORE, 657 PERSONALITY_CORE,
591 PERSONALITY_ETH /* Ethernet */, 658 PERSONALITY_ETH,
592 PERSONALITY_RESERVED4, 659 PERSONALITY_RESERVED4,
593 MAX_PERSONALITY_TYPE 660 MAX_PERSONALITY_TYPE
594}; 661};
595 662
663/* tunnel configuration */
596struct pf_start_tunnel_config { 664struct pf_start_tunnel_config {
597 u8 set_vxlan_udp_port_flg; 665 u8 set_vxlan_udp_port_flg;
598 u8 set_geneve_udp_port_flg; 666 u8 set_geneve_udp_port_flg;
599 u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */; 667 u8 tx_enable_vxlan;
600 u8 tx_enable_l2geneve; 668 u8 tx_enable_l2geneve;
601 u8 tx_enable_ipgeneve; 669 u8 tx_enable_ipgeneve;
602 u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */; 670 u8 tx_enable_l2gre;
603 u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */; 671 u8 tx_enable_ipgre;
604 u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */; 672 u8 tunnel_clss_vxlan;
605 u8 tunnel_clss_l2geneve; 673 u8 tunnel_clss_l2geneve;
606 u8 tunnel_clss_ipgeneve; 674 u8 tunnel_clss_ipgeneve;
607 u8 tunnel_clss_l2gre; 675 u8 tunnel_clss_l2gre;
608 u8 tunnel_clss_ipgre; 676 u8 tunnel_clss_ipgre;
609 __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */; 677 __le16 vxlan_udp_port;
610 __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */; 678 __le16 geneve_udp_port;
611}; 679};
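
Each set_*_udp_port_flg byte gates whether its paired port value is applied, and the tx_enable_* / tunnel_clss_* bytes control the TX path and RX classification per tunnel type. A minimal sketch of requesting a VXLAN port at PF start (pf_start_set_vxlan is illustrative; 4789 is simply the IANA VXLAN port, and TUNNEL_CLSS_MAC_VNI is one legal classification scheme from the enum later in this file):

static void pf_start_set_vxlan(struct pf_start_tunnel_config *cfg)
{
	cfg->set_vxlan_udp_port_flg = 1;		/* apply the port below */
	cfg->vxlan_udp_port = cpu_to_le16(4789);	/* example port */
	cfg->tx_enable_vxlan = 1;			/* VXLAN on the TX path */
	cfg->tunnel_clss_vxlan = TUNNEL_CLSS_MAC_VNI;
}
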
612 680
613/* Ramrod data for PF start ramrod */ 681/* Ramrod data for PF start ramrod */
614struct pf_start_ramrod_data { 682struct pf_start_ramrod_data {
615 struct regpair event_ring_pbl_addr; 683 struct regpair event_ring_pbl_addr;
616 struct regpair consolid_q_pbl_addr; 684 struct regpair consolid_q_pbl_addr;
617 struct pf_start_tunnel_config tunnel_config; 685 struct pf_start_tunnel_config tunnel_config;
618 __le16 event_ring_sb_id; 686 __le16 event_ring_sb_id;
619 u8 base_vf_id; 687 u8 base_vf_id;
620 u8 num_vfs; 688 u8 num_vfs;
621 u8 event_ring_num_pages; 689 u8 event_ring_num_pages;
622 u8 event_ring_sb_index; 690 u8 event_ring_sb_index;
623 u8 path_id; 691 u8 path_id;
624 u8 warning_as_error; 692 u8 warning_as_error;
625 u8 dont_log_ramrods; 693 u8 dont_log_ramrods;
626 u8 personality; 694 u8 personality;
627 __le16 log_type_mask; 695 __le16 log_type_mask;
628 u8 mf_mode /* Multi function mode */; 696 u8 mf_mode;
629 u8 integ_phase /* Integration phase */; 697 u8 integ_phase;
630 u8 allow_npar_tx_switching; 698 u8 allow_npar_tx_switching;
631 u8 inner_to_outer_pri_map[8]; 699 u8 inner_to_outer_pri_map[8];
632 u8 pri_map_valid; 700 u8 pri_map_valid;
633 u32 outer_tag; 701 __le32 outer_tag;
634 u8 reserved0[4]; 702 struct hsi_fp_ver_struct hsi_fp_ver;
635}; 703
636 704};
637/* Data for port update ramrod */ 705
638struct protocol_dcb_data { 706struct protocol_dcb_data {
639 u8 dcb_enable_flag; 707 u8 dcb_enable_flag;
640 u8 dcb_priority; 708 u8 dcb_priority;
@@ -642,25 +710,24 @@ struct protocol_dcb_data {
642 u8 reserved; 710 u8 reserved;
643}; 711};
644 712
645/* tunnel configuration */
646struct pf_update_tunnel_config { 713struct pf_update_tunnel_config {
647 u8 update_rx_pf_clss; 714 u8 update_rx_pf_clss;
648 u8 update_tx_pf_clss; 715 u8 update_tx_pf_clss;
649 u8 set_vxlan_udp_port_flg; 716 u8 set_vxlan_udp_port_flg;
650 u8 set_geneve_udp_port_flg; 717 u8 set_geneve_udp_port_flg;
651 u8 tx_enable_vxlan; 718 u8 tx_enable_vxlan;
652 u8 tx_enable_l2geneve; 719 u8 tx_enable_l2geneve;
653 u8 tx_enable_ipgeneve; 720 u8 tx_enable_ipgeneve;
654 u8 tx_enable_l2gre; 721 u8 tx_enable_l2gre;
655 u8 tx_enable_ipgre; 722 u8 tx_enable_ipgre;
656 u8 tunnel_clss_vxlan; 723 u8 tunnel_clss_vxlan;
657 u8 tunnel_clss_l2geneve; 724 u8 tunnel_clss_l2geneve;
658 u8 tunnel_clss_ipgeneve; 725 u8 tunnel_clss_ipgeneve;
659 u8 tunnel_clss_l2gre; 726 u8 tunnel_clss_l2gre;
660 u8 tunnel_clss_ipgre; 727 u8 tunnel_clss_ipgre;
661 __le16 vxlan_udp_port; 728 __le16 vxlan_udp_port;
662 __le16 geneve_udp_port; 729 __le16 geneve_udp_port;
663 __le16 reserved[3]; 730 __le16 reserved[3];
664}; 731};
665 732
666struct pf_update_ramrod_data { 733struct pf_update_ramrod_data {
@@ -669,38 +736,43 @@ struct pf_update_ramrod_data {
669 u8 update_fcoe_dcb_data_flag; 736 u8 update_fcoe_dcb_data_flag;
670 u8 update_iscsi_dcb_data_flag; 737 u8 update_iscsi_dcb_data_flag;
671 u8 update_roce_dcb_data_flag; 738 u8 update_roce_dcb_data_flag;
739 u8 update_iwarp_dcb_data_flag;
672 u8 update_mf_vlan_flag; 740 u8 update_mf_vlan_flag;
673 __le16 mf_vlan; 741 u8 reserved;
674 struct protocol_dcb_data eth_dcb_data; 742 struct protocol_dcb_data eth_dcb_data;
675 struct protocol_dcb_data fcoe_dcb_data; 743 struct protocol_dcb_data fcoe_dcb_data;
676 struct protocol_dcb_data iscsi_dcb_data; 744 struct protocol_dcb_data iscsi_dcb_data;
677 struct protocol_dcb_data roce_dcb_data; 745 struct protocol_dcb_data roce_dcb_data;
678 struct pf_update_tunnel_config tunnel_config; 746 struct protocol_dcb_data iwarp_dcb_data;
679}; 747 __le16 mf_vlan;
680 748 __le16 reserved2;
681/* Tunnel classification scheme */ 749 struct pf_update_tunnel_config tunnel_config;
682enum tunnel_clss {
683 TUNNEL_CLSS_MAC_VLAN = 0,
684 TUNNEL_CLSS_MAC_VNI,
685 TUNNEL_CLSS_INNER_MAC_VLAN,
686 TUNNEL_CLSS_INNER_MAC_VNI,
687 MAX_TUNNEL_CLSS
688}; 750};
689 751
752/* Ports mode */
690enum ports_mode { 753enum ports_mode {
691 ENGX2_PORTX1 /* 2 engines x 1 port */, 754 ENGX2_PORTX1,
692 ENGX2_PORTX2 /* 2 engines x 2 ports */, 755 ENGX2_PORTX2,
693 ENGX1_PORTX1 /* 1 engine x 1 port */, 756 ENGX1_PORTX1,
694 ENGX1_PORTX2 /* 1 engine x 2 ports */, 757 ENGX1_PORTX2,
695 ENGX1_PORTX4 /* 1 engine x 4 ports */, 758 ENGX1_PORTX4,
696 MAX_PORTS_MODE 759 MAX_PORTS_MODE
697}; 760};
698 761
 762/* used to index in hsi_fp_[major|minor]_ver_arr per protocol */
763enum protocol_version_array_key {
764 ETH_VER_KEY = 0,
765 ROCE_VER_KEY,
766 MAX_PROTOCOL_VERSION_ARRAY_KEY
767};
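
protocol_version_array_key indexes the minor_ver_arr/major_ver_arr bytes of hsi_fp_ver_struct, so a driver can advertise one fast-path HSI version per protocol. A sketch of filling the Ethernet slot in pf_start_ramrod_data (the 8.10 numbers are placeholders, not taken from this patch):

static void pf_start_set_eth_hsi_ver(struct pf_start_ramrod_data *p)
{
	p->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = 8;	/* placeholder */
	p->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = 10;	/* placeholder */
}
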
768
769/* Pstorm non-triggering VF zone */
699struct pstorm_non_trigger_vf_zone { 770struct pstorm_non_trigger_vf_zone {
700 struct eth_pstorm_per_queue_stat eth_queue_stat; 771 struct eth_pstorm_per_queue_stat eth_queue_stat;
701 struct regpair reserved[2]; 772 struct regpair reserved[2];
702}; 773};
703 774
775/* Pstorm VF zone */
704struct pstorm_vf_zone { 776struct pstorm_vf_zone {
705 struct pstorm_non_trigger_vf_zone non_trigger; 777 struct pstorm_non_trigger_vf_zone non_trigger;
706 struct regpair reserved[7]; 778 struct regpair reserved[7];
@@ -708,56 +780,89 @@ struct pstorm_vf_zone {
708 780
709/* Ramrod Header of SPQE */ 781/* Ramrod Header of SPQE */
710struct ramrod_header { 782struct ramrod_header {
711 __le32 cid /* Slowpath Connection CID */; 783 __le32 cid;
712 u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */; 784 u8 cmd_id;
713 u8 protocol_id /* Ramrod Protocol ID */; 785 u8 protocol_id;
714 __le16 echo /* Ramrod echo */; 786 __le16 echo;
715}; 787};
716 788
717/* Slowpath Element (SPQE) */ 789/* Slowpath Element (SPQE) */
718struct slow_path_element { 790struct slow_path_element {
719 struct ramrod_header hdr /* Ramrod Header */; 791 struct ramrod_header hdr;
720 struct regpair data_ptr; 792 struct regpair data_ptr;
793};
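
A slow-path element is just the ramrod header plus a regpair pointing at the DMA-able command payload. A hedged sketch of filling one, assuming the kernel's cpu_to_leXX and upper_32_bits/lower_32_bits helpers (fill_spqe and its arguments are illustrative):

static void fill_spqe(struct slow_path_element *elem, u32 cid,
		      u8 cmd_id, u8 protocol_id, dma_addr_t data_phys)
{
	elem->hdr.cid = cpu_to_le32(cid);	/* slow-path connection CID */
	elem->hdr.cmd_id = cmd_id;		/* per-protocol ramrod command */
	elem->hdr.protocol_id = protocol_id;
	elem->hdr.echo = cpu_to_le16(0);	/* echoed back in the completion */
	elem->data_ptr.hi = cpu_to_le32(upper_32_bits(data_phys));
	elem->data_ptr.lo = cpu_to_le32(lower_32_bits(data_phys));
}
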
794
795/* Tstorm non-triggering VF zone */
796struct tstorm_non_trigger_vf_zone {
797 struct regpair reserved[2];
721}; 798};
722 799
723struct tstorm_per_port_stat { 800struct tstorm_per_port_stat {
724 struct regpair trunc_error_discard; 801 struct regpair trunc_error_discard;
725 struct regpair mac_error_discard; 802 struct regpair mac_error_discard;
726 struct regpair mftag_filter_discard; 803 struct regpair mftag_filter_discard;
727 struct regpair eth_mac_filter_discard; 804 struct regpair eth_mac_filter_discard;
728 struct regpair ll2_mac_filter_discard; 805 struct regpair reserved[5];
729 struct regpair ll2_conn_disabled_discard; 806 struct regpair eth_irregular_pkt;
730 struct regpair iscsi_irregular_pkt; 807 struct regpair reserved1[2];
731 struct regpair fcoe_irregular_pkt; 808 struct regpair eth_gre_tunn_filter_discard;
732 struct regpair roce_irregular_pkt; 809 struct regpair eth_vxlan_tunn_filter_discard;
733 struct regpair eth_irregular_pkt; 810 struct regpair eth_geneve_tunn_filter_discard;
734 struct regpair toe_irregular_pkt; 811};
735 struct regpair preroce_irregular_pkt; 812
813/* Tstorm VF zone */
814struct tstorm_vf_zone {
815 struct tstorm_non_trigger_vf_zone non_trigger;
816};
817
818/* Tunnel classification scheme */
819enum tunnel_clss {
820 TUNNEL_CLSS_MAC_VLAN = 0,
821 TUNNEL_CLSS_MAC_VNI,
822 TUNNEL_CLSS_INNER_MAC_VLAN,
823 TUNNEL_CLSS_INNER_MAC_VNI,
824 TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE,
825 MAX_TUNNEL_CLSS
736}; 826};
737 827
828/* Ustorm non-triggering VF zone */
738struct ustorm_non_trigger_vf_zone { 829struct ustorm_non_trigger_vf_zone {
739 struct eth_ustorm_per_queue_stat eth_queue_stat; 830 struct eth_ustorm_per_queue_stat eth_queue_stat;
740 struct regpair vf_pf_msg_addr; 831 struct regpair vf_pf_msg_addr;
741}; 832};
742 833
834/* Ustorm triggering VF zone */
743struct ustorm_trigger_vf_zone { 835struct ustorm_trigger_vf_zone {
744 u8 vf_pf_msg_valid; 836 u8 vf_pf_msg_valid;
745 u8 reserved[7]; 837 u8 reserved[7];
746}; 838};
747 839
840/* Ustorm VF zone */
748struct ustorm_vf_zone { 841struct ustorm_vf_zone {
749 struct ustorm_non_trigger_vf_zone non_trigger; 842 struct ustorm_non_trigger_vf_zone non_trigger;
750 struct ustorm_trigger_vf_zone trigger; 843 struct ustorm_trigger_vf_zone trigger;
751}; 844};
752 845
846/* VF-PF channel data */
847struct vf_pf_channel_data {
848 __le32 ready;
849 u8 valid;
850 u8 reserved0;
851 __le16 reserved1;
852};
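
The field names suggest the VF-PF mailbox flow: the request buffer's address sits in the non-triggering ustorm zone, and vf_pf_msg_valid in the trigger zone flags a pending message. A heavily hedged PF-side sketch under that reading (vf_msg_pending is illustrative; regpair_to_u64 as sketched earlier, and the real handshake involves more state and synchronization):

static bool vf_msg_pending(const struct ustorm_vf_zone *zone, u64 *msg_addr)
{
	if (!zone->trigger.vf_pf_msg_valid)
		return false;
	*msg_addr = regpair_to_u64(zone->non_trigger.vf_pf_msg_addr);
	return true;
}
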
853
854/* Ramrod data for VF start ramrod */
753struct vf_start_ramrod_data { 855struct vf_start_ramrod_data {
754 u8 vf_id; 856 u8 vf_id;
755 u8 enable_flr_ack; 857 u8 enable_flr_ack;
756 __le16 opaque_fid; 858 __le16 opaque_fid;
757 u8 personality; 859 u8 personality;
758 u8 reserved[3]; 860 u8 reserved[7];
861 struct hsi_fp_ver_struct hsi_fp_ver;
862
759}; 863};
760 864
 865/* Ramrod data for VF stop ramrod */
761struct vf_stop_ramrod_data { 866struct vf_stop_ramrod_data {
762 u8 vf_id; 867 u8 vf_id;
763 u8 reserved0; 868 u8 reserved0;
@@ -765,94 +870,474 @@ struct vf_stop_ramrod_data {
765 __le32 reserved2; 870 __le32 reserved2;
766}; 871};
767 872
873/* Attentions status block */
768struct atten_status_block { 874struct atten_status_block {
769 __le32 atten_bits; 875 __le32 atten_bits;
770 __le32 atten_ack; 876 __le32 atten_ack;
771 __le16 reserved0; 877 __le16 reserved0;
772 __le16 sb_index /* status block running index */; 878 __le16 sb_index;
773 __le32 reserved1; 879 __le32 reserved1;
880};
881
882enum command_type_bit {
883 IGU_COMMAND_TYPE_NOP = 0,
884 IGU_COMMAND_TYPE_SET = 1,
885 MAX_COMMAND_TYPE_BIT
886};
887
888/* DMAE command */
889struct dmae_cmd {
890 __le32 opcode;
891#define DMAE_CMD_SRC_MASK 0x1
892#define DMAE_CMD_SRC_SHIFT 0
893#define DMAE_CMD_DST_MASK 0x3
894#define DMAE_CMD_DST_SHIFT 1
895#define DMAE_CMD_C_DST_MASK 0x1
896#define DMAE_CMD_C_DST_SHIFT 3
897#define DMAE_CMD_CRC_RESET_MASK 0x1
898#define DMAE_CMD_CRC_RESET_SHIFT 4
899#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
900#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
901#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
902#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
903#define DMAE_CMD_COMP_FUNC_MASK 0x1
904#define DMAE_CMD_COMP_FUNC_SHIFT 7
905#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
906#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
907#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
908#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
909#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
910#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
911#define DMAE_CMD_RESERVED1_MASK 0x1
912#define DMAE_CMD_RESERVED1_SHIFT 13
913#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
914#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
915#define DMAE_CMD_ERR_HANDLING_MASK 0x3
916#define DMAE_CMD_ERR_HANDLING_SHIFT 16
917#define DMAE_CMD_PORT_ID_MASK 0x3
918#define DMAE_CMD_PORT_ID_SHIFT 18
919#define DMAE_CMD_SRC_PF_ID_MASK 0xF
920#define DMAE_CMD_SRC_PF_ID_SHIFT 20
921#define DMAE_CMD_DST_PF_ID_MASK 0xF
922#define DMAE_CMD_DST_PF_ID_SHIFT 24
923#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1
924#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
925#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1
926#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
927#define DMAE_CMD_RESERVED2_MASK 0x3
928#define DMAE_CMD_RESERVED2_SHIFT 30
929 __le32 src_addr_lo;
930 __le32 src_addr_hi;
931 __le32 dst_addr_lo;
932 __le32 dst_addr_hi;
933 __le16 length_dw;
934 __le16 opcode_b;
935#define DMAE_CMD_SRC_VF_ID_MASK 0xFF
936#define DMAE_CMD_SRC_VF_ID_SHIFT 0
937#define DMAE_CMD_DST_VF_ID_MASK 0xFF
938#define DMAE_CMD_DST_VF_ID_SHIFT 8
939 __le32 comp_addr_lo;
940 __le32 comp_addr_hi;
941 __le32 comp_val;
942 __le32 crc32;
943 __le32 crc_32_c;
944 __le16 crc16;
945 __le16 crc16_c;
946 __le16 crc10;
947 __le16 reserved;
948 __le16 xsum16;
949 __le16 xsum8;
950};
951
952enum dmae_cmd_comp_crc_en_enum {
953 dmae_cmd_comp_crc_disabled,
954 dmae_cmd_comp_crc_enabled,
955 MAX_DMAE_CMD_COMP_CRC_EN_ENUM
956};
957
958enum dmae_cmd_comp_func_enum {
959 dmae_cmd_comp_func_to_src,
960 dmae_cmd_comp_func_to_dst,
961 MAX_DMAE_CMD_COMP_FUNC_ENUM
962};
963
964enum dmae_cmd_comp_word_en_enum {
965 dmae_cmd_comp_word_disabled,
966 dmae_cmd_comp_word_enabled,
967 MAX_DMAE_CMD_COMP_WORD_EN_ENUM
968};
969
970enum dmae_cmd_c_dst_enum {
971 dmae_cmd_c_dst_pcie,
972 dmae_cmd_c_dst_grc,
973 MAX_DMAE_CMD_C_DST_ENUM
974};
975
976enum dmae_cmd_dst_enum {
977 dmae_cmd_dst_none_0,
978 dmae_cmd_dst_pcie,
979 dmae_cmd_dst_grc,
980 dmae_cmd_dst_none_3,
981 MAX_DMAE_CMD_DST_ENUM
982};
983
984enum dmae_cmd_error_handling_enum {
985 dmae_cmd_error_handling_send_regular_comp,
986 dmae_cmd_error_handling_send_comp_with_err,
987 dmae_cmd_error_handling_dont_send_comp,
988 MAX_DMAE_CMD_ERROR_HANDLING_ENUM
989};
990
991enum dmae_cmd_src_enum {
992 dmae_cmd_src_pcie,
993 dmae_cmd_src_grc,
994 MAX_DMAE_CMD_SRC_ENUM
995};
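
The dmae_cmd opcode dword is composed from the MASK/SHIFT pairs above, with the enums giving the legal values for each field. A sketch of a PCIe-to-GRC copy opcode using the SET_FIELD helper sketched earlier (the particular field choices are illustrative):

static u32 dmae_opcode_pcie_to_grc(u8 port_id, u8 pf_id)
{
	u32 opcode = 0;

	SET_FIELD(opcode, DMAE_CMD_SRC, dmae_cmd_src_pcie);
	SET_FIELD(opcode, DMAE_CMD_DST, dmae_cmd_dst_grc);
	SET_FIELD(opcode, DMAE_CMD_COMP_FUNC, dmae_cmd_comp_func_to_dst);
	SET_FIELD(opcode, DMAE_CMD_ERR_HANDLING,
		  dmae_cmd_error_handling_send_comp_with_err);
	SET_FIELD(opcode, DMAE_CMD_PORT_ID, port_id);
	SET_FIELD(opcode, DMAE_CMD_SRC_PF_ID, pf_id);
	SET_FIELD(opcode, DMAE_CMD_DST_PF_ID, pf_id);
	return opcode;
}
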
996
997/* IGU cleanup command */
998struct igu_cleanup {
999 __le32 sb_id_and_flags;
1000#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
1001#define IGU_CLEANUP_RESERVED0_SHIFT 0
1002#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1
1003#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
1004#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
1005#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
1006#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
1007#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
1008 __le32 reserved1;
1009};
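
An IGU cleanup command packs everything into sb_id_and_flags, including a command type taken from enum command_type_bit above. A sketch of composing one (igu_build_cleanup and its cleanup_type argument are illustrative):

static void igu_build_cleanup(struct igu_cleanup *cmd, u8 cleanup_type)
{
	u32 data = 0;

	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, 1);	/* perform cleanup */
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, cleanup_type);
	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
	cmd->sb_id_and_flags = cpu_to_le32(data);
	cmd->reserved1 = 0;
}
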
1010
1011/* IGU firmware driver command */
1012union igu_command {
1013 struct igu_prod_cons_update prod_cons_update;
1014 struct igu_cleanup cleanup;
1015};
1016
1017/* IGU firmware driver command */
1018struct igu_command_reg_ctrl {
1019 __le16 opaque_fid;
1020 __le16 igu_command_reg_ctrl_fields;
1021#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
1022#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
1023#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
1024#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
1025#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
1026#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
774}; 1027};
775 1028
1029/* IGU mapping line structure */
1030struct igu_mapping_line {
1031 __le32 igu_mapping_line_fields;
1032#define IGU_MAPPING_LINE_VALID_MASK 0x1
1033#define IGU_MAPPING_LINE_VALID_SHIFT 0
1034#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
1035#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
1036#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
1037#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
1038#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1
1039#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
1040#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
1041#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
1042#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
1043#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
1044};
1045
1046/* IGU MSIX line structure */
1047struct igu_msix_vector {
1048 struct regpair address;
1049 __le32 data;
1050 __le32 msix_vector_fields;
1051#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
1052#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
1053#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
1054#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
1055#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
1056#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
1057#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
1058#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
1059};
1060
1061struct mstorm_core_conn_ag_ctx {
1062 u8 byte0;
1063 u8 byte1;
1064 u8 flags0;
1065#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
1066#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
1067#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
1068#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
1069#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
1070#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
1071#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
1072#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
1073#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
1074#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
1075 u8 flags1;
1076#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
1077#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
1078#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
1079#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
1080#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
1081#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
1082#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
1083#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
1084#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
1085#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
1086#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
1087#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
1088#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
1089#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
1090#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
1091#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
1092 __le16 word0;
1093 __le16 word1;
1094 __le32 reg0;
1095 __le32 reg1;
1096};
1097
1098/* per encapsulation type enabling flags */
1099struct prs_reg_encapsulation_type_en {
1100 u8 flags;
1101#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
1102#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
1103#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
1104#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
1105#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
1106#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
1107#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
1108#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
1109#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
1110#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
1111#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
1112#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
1113#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
1114#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
1115};
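
This structure mirrors a parser register with one enable bit per encapsulation. A sketch of enabling VXLAN and both GENEVE flavours before the byte is written to hardware (illustrative, using the SET_FIELD helper sketched earlier):

static u8 prs_encap_en_vxlan_geneve(void)
{
	u8 flags = 0;

	SET_FIELD(flags, PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE, 1);
	SET_FIELD(flags,
		  PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE, 1);
	SET_FIELD(flags,
		  PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE, 1);
	return flags;	/* value for the parser register */
}
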
1116
1117enum pxp_tph_st_hint {
1118 TPH_ST_HINT_BIDIR,
1119 TPH_ST_HINT_REQUESTER,
1120 TPH_ST_HINT_TARGET,
1121 TPH_ST_HINT_TARGET_PRIO,
1122 MAX_PXP_TPH_ST_HINT
1123};
1124
1125/* QM hardware structure of enable bypass credit mask */
1126struct qm_rf_bypass_mask {
1127 u8 flags;
1128#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
1129#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
1130#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
1131#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
1132#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
1133#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
1134#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
1135#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
1136#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
1137#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
1138#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
1139#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
1140#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
1141#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
1142#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
1143#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
1144};
1145
1146/* QM hardware structure of opportunistic credit mask */
1147struct qm_rf_opportunistic_mask {
1148 __le16 flags;
1149#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
1150#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
1151#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
1152#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
1153#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
1154#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
1155#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
1156#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
1157#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
1158#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
1159#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
1160#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
1161#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
1162#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
1163#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
1164#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
1165#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
1166#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
1167#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
1168#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
1169};
1170
1171/* QM hardware structure of QM map memory */
1172struct qm_rf_pq_map {
1173 __le32 reg;
1174#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1
1175#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
1176#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF
1177#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
1178#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
1179#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
1180#define QM_RF_PQ_MAP_VOQ_MASK 0x1F
1181#define QM_RF_PQ_MAP_VOQ_SHIFT 18
1182#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3
1183#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
1184#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1
1185#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
1186#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
1187#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
1188};
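A PQ map entry packs all of its fields into the single 32-bit reg word. A minimal sketch of composing one entry with the same SET_FIELD() helper as above (the field values are illustrative, not taken from the driver):

/* Build a qm_rf_pq_map value for an active PQ; inputs are illustrative. */
static u32 example_pq_map(u16 vp_pq_id, u8 voq, u8 wrr_group)
{
	u32 reg = 0;

	SET_FIELD(reg, QM_RF_PQ_MAP_PQ_VALID, 1);
	SET_FIELD(reg, QM_RF_PQ_MAP_VP_PQ_ID, vp_pq_id);
	SET_FIELD(reg, QM_RF_PQ_MAP_VOQ, voq);
	SET_FIELD(reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, wrr_group);
	return reg;	/* stored into struct qm_rf_pq_map.reg as __le32 */
}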
+
+/* Completion params for aggregated interrupt completion */
+struct sdm_agg_int_comp_params {
+	__le16 params;
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
+};
+
+/* SDM operation gen command (generate aggregative interrupt) */
+struct sdm_op_gen {
+	__le32 command;
+#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE_MASK 0xF
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
+#define SDM_OP_GEN_RESERVED_MASK 0xFFF
+#define SDM_OP_GEN_RESERVED_SHIFT 20
+};
+
+struct ystorm_core_conn_ag_ctx {
+	u8 byte0;
+	u8 byte1;
+	u8 flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+	u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+	u8 byte2;
+	u8 byte3;
+	__le16 word0;
+	__le32 reg0;
+	__le32 reg1;
+	__le16 word1;
+	__le16 word2;
+	__le16 word3;
+	__le16 word4;
+	__le32 reg2;
+	__le32 reg3;
+};
+
+/****************************************/
+/* Debug Tools HSI constants and macros */
+/****************************************/
+
 enum block_addr {
 	GRCBASE_GRC = 0x50000,
 	GRCBASE_MISCS = 0x9000,
 	GRCBASE_MISC = 0x8000,
 	GRCBASE_DBU = 0xa000,
 	GRCBASE_PGLUE_B = 0x2a8000,
 	GRCBASE_CNIG = 0x218000,
 	GRCBASE_CPMU = 0x30000,
 	GRCBASE_NCSI = 0x40000,
 	GRCBASE_OPTE = 0x53000,
 	GRCBASE_BMB = 0x540000,
 	GRCBASE_PCIE = 0x54000,
 	GRCBASE_MCP = 0xe00000,
 	GRCBASE_MCP2 = 0x52000,
 	GRCBASE_PSWHST = 0x2a0000,
 	GRCBASE_PSWHST2 = 0x29e000,
 	GRCBASE_PSWRD = 0x29c000,
 	GRCBASE_PSWRD2 = 0x29d000,
 	GRCBASE_PSWWR = 0x29a000,
 	GRCBASE_PSWWR2 = 0x29b000,
 	GRCBASE_PSWRQ = 0x280000,
 	GRCBASE_PSWRQ2 = 0x240000,
 	GRCBASE_PGLCS = 0x0,
-	GRCBASE_PTU = 0x560000,
 	GRCBASE_DMAE = 0xc000,
+	GRCBASE_PTU = 0x560000,
 	GRCBASE_TCM = 0x1180000,
 	GRCBASE_MCM = 0x1200000,
 	GRCBASE_UCM = 0x1280000,
 	GRCBASE_XCM = 0x1000000,
 	GRCBASE_YCM = 0x1080000,
 	GRCBASE_PCM = 0x1100000,
 	GRCBASE_QM = 0x2f0000,
 	GRCBASE_TM = 0x2c0000,
 	GRCBASE_DORQ = 0x100000,
 	GRCBASE_BRB = 0x340000,
 	GRCBASE_SRC = 0x238000,
 	GRCBASE_PRS = 0x1f0000,
 	GRCBASE_TSDM = 0xfb0000,
 	GRCBASE_MSDM = 0xfc0000,
 	GRCBASE_USDM = 0xfd0000,
 	GRCBASE_XSDM = 0xf80000,
 	GRCBASE_YSDM = 0xf90000,
 	GRCBASE_PSDM = 0xfa0000,
 	GRCBASE_TSEM = 0x1700000,
 	GRCBASE_MSEM = 0x1800000,
 	GRCBASE_USEM = 0x1900000,
 	GRCBASE_XSEM = 0x1400000,
 	GRCBASE_YSEM = 0x1500000,
 	GRCBASE_PSEM = 0x1600000,
 	GRCBASE_RSS = 0x238800,
 	GRCBASE_TMLD = 0x4d0000,
 	GRCBASE_MULD = 0x4e0000,
 	GRCBASE_YULD = 0x4c8000,
 	GRCBASE_XYLD = 0x4c0000,
 	GRCBASE_PRM = 0x230000,
 	GRCBASE_PBF_PB1 = 0xda0000,
 	GRCBASE_PBF_PB2 = 0xda4000,
 	GRCBASE_RPB = 0x23c000,
 	GRCBASE_BTB = 0xdb0000,
 	GRCBASE_PBF = 0xd80000,
 	GRCBASE_RDIF = 0x300000,
 	GRCBASE_TDIF = 0x310000,
 	GRCBASE_CDU = 0x580000,
 	GRCBASE_CCFC = 0x2e0000,
 	GRCBASE_TCFC = 0x2d0000,
 	GRCBASE_IGU = 0x180000,
 	GRCBASE_CAU = 0x1c0000,
 	GRCBASE_UMAC = 0x51000,
 	GRCBASE_XMAC = 0x210000,
 	GRCBASE_DBG = 0x10000,
 	GRCBASE_NIG = 0x500000,
 	GRCBASE_WOL = 0x600000,
 	GRCBASE_BMBN = 0x610000,
 	GRCBASE_IPC = 0x20000,
 	GRCBASE_NWM = 0x800000,
 	GRCBASE_NWS = 0x700000,
 	GRCBASE_MS = 0x6a0000,
 	GRCBASE_PHY_PCIE = 0x620000,
+	GRCBASE_LED = 0x6b8000,
 	GRCBASE_MISC_AEU = 0x8000,
 	GRCBASE_BAR0_MAP = 0x1c00000,
 	MAX_BLOCK_ADDR
 };
 
@@ -879,8 +1364,8 @@ enum block_id {
 	BLOCK_PSWRQ,
 	BLOCK_PSWRQ2,
 	BLOCK_PGLCS,
-	BLOCK_PTU,
 	BLOCK_DMAE,
+	BLOCK_PTU,
 	BLOCK_TCM,
 	BLOCK_MCM,
 	BLOCK_UCM,
@@ -934,141 +1419,216 @@ enum block_id {
 	BLOCK_NWS,
 	BLOCK_MS,
 	BLOCK_PHY_PCIE,
+	BLOCK_LED,
 	BLOCK_MISC_AEU,
 	BLOCK_BAR0_MAP,
 	MAX_BLOCK_ID
 };
 
-enum command_type_bit {
-	IGU_COMMAND_TYPE_NOP = 0,
-	IGU_COMMAND_TYPE_SET = 1,
-	MAX_COMMAND_TYPE_BIT
+/* binary debug buffer types */
+enum bin_dbg_buffer_type {
+	BIN_BUF_DBG_MODE_TREE,
+	BIN_BUF_DBG_DUMP_REG,
+	BIN_BUF_DBG_DUMP_MEM,
+	BIN_BUF_DBG_IDLE_CHK_REGS,
+	BIN_BUF_DBG_IDLE_CHK_IMMS,
+	BIN_BUF_DBG_IDLE_CHK_RULES,
+	BIN_BUF_DBG_IDLE_CHK_PARSING_DATA,
+	BIN_BUF_DBG_ATTN_BLOCKS,
+	BIN_BUF_DBG_ATTN_REGS,
+	BIN_BUF_DBG_ATTN_INDEXES,
+	BIN_BUF_DBG_ATTN_NAME_OFFSETS,
+	BIN_BUF_DBG_PARSING_STRINGS,
+	MAX_BIN_DBG_BUFFER_TYPE
 };
 
-struct dmae_cmd {
-	__le32 opcode;
-#define DMAE_CMD_SRC_MASK 0x1
-#define DMAE_CMD_SRC_SHIFT 0
-#define DMAE_CMD_DST_MASK 0x3
-#define DMAE_CMD_DST_SHIFT 1
-#define DMAE_CMD_C_DST_MASK 0x1
-#define DMAE_CMD_C_DST_SHIFT 3
-#define DMAE_CMD_CRC_RESET_MASK 0x1
-#define DMAE_CMD_CRC_RESET_SHIFT 4
-#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
-#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
-#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
-#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
-#define DMAE_CMD_COMP_FUNC_MASK 0x1
-#define DMAE_CMD_COMP_FUNC_SHIFT 7
-#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
-#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
-#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
-#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
-#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
-#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
-#define DMAE_CMD_RESERVED1_MASK 0x1
-#define DMAE_CMD_RESERVED1_SHIFT 13
-#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
-#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
-#define DMAE_CMD_ERR_HANDLING_MASK 0x3
-#define DMAE_CMD_ERR_HANDLING_SHIFT 16
-#define DMAE_CMD_PORT_ID_MASK 0x3
-#define DMAE_CMD_PORT_ID_SHIFT 18
-#define DMAE_CMD_SRC_PF_ID_MASK 0xF
-#define DMAE_CMD_SRC_PF_ID_SHIFT 20
-#define DMAE_CMD_DST_PF_ID_MASK 0xF
-#define DMAE_CMD_DST_PF_ID_SHIFT 24
-#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1
-#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
-#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1
-#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
-#define DMAE_CMD_RESERVED2_MASK 0x3
-#define DMAE_CMD_RESERVED2_SHIFT 30
-	__le32 src_addr_lo;
-	__le32 src_addr_hi;
-	__le32 dst_addr_lo;
-	__le32 dst_addr_hi;
-	__le16 length /* Length in DW */;
-	__le16 opcode_b;
-#define DMAE_CMD_SRC_VF_ID_MASK 0xFF /* Source VF id */
-#define DMAE_CMD_SRC_VF_ID_SHIFT 0
-#define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */
-#define DMAE_CMD_DST_VF_ID_SHIFT 8
-	__le32 comp_addr_lo /* PCIe completion address low or grc address */;
-	__le32 comp_addr_hi;
-	__le32 comp_val /* Value to write to completion address */;
-	__le32 crc32 /* crc32 result */;
-	__le32 crc_32_c /* crc32_c result */;
-	__le16 crc16 /* crc16 result */;
-	__le16 crc16_c /* crc16_c result */;
-	__le16 crc10 /* crc_t10 result */;
-	__le16 reserved;
-	__le16 xsum16 /* checksum16 result */;
-	__le16 xsum8 /* checksum8 result */;
+/* Chip IDs */
+enum chip_ids {
+	CHIP_RESERVED,
+	CHIP_BB_B0,
+	CHIP_RESERVED2,
+	MAX_CHIP_IDS
 };
 
-struct igu_cleanup {
-	__le32 sb_id_and_flags;
-#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
-#define IGU_CLEANUP_RESERVED0_SHIFT 0
-#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1 /* cleanup clear - 0, set - 1 */
-#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
-#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
-#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
-#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
-#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
-	__le32 reserved1;
+/* Attention bit mapping */
+struct dbg_attn_bit_mapping {
+	__le16 data;
+#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
+#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
 };
 
-union igu_command {
-	struct igu_prod_cons_update prod_cons_update;
-	struct igu_cleanup cleanup;
+/* Attention block per-type data */
+struct dbg_attn_block_type_data {
+	__le16 names_offset;
+	__le16 reserved1;
+	u8 num_regs;
+	u8 reserved2;
+	__le16 regs_offset;
 };
 
-struct igu_command_reg_ctrl {
-	__le16 opaque_fid;
-	__le16 igu_command_reg_ctrl_fields;
-#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
-#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
-#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
-#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
-#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
-#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
+/* Block attentions */
+struct dbg_attn_block {
+	struct dbg_attn_block_type_data per_type_data[2];
 };
 
-struct igu_mapping_line {
-	__le32 igu_mapping_line_fields;
-#define IGU_MAPPING_LINE_VALID_MASK 0x1
-#define IGU_MAPPING_LINE_VALID_SHIFT 0
-#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
-#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
-#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
-#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
-#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 /* PF-1, VF-0 */
-#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
-#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
-#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
-#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
-#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
+/* Attention register result */
+struct dbg_attn_reg_result {
+	__le32 data;
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT 24
+	__le16 attn_idx_offset;
+	__le16 reserved;
+	__le32 sts_val;
+	__le32 mask_val;
+};
+
+/* Attention block result */
+struct dbg_attn_block_result {
+	u8 block_id;
+	u8 data;
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
+	__le16 names_offset;
+	struct dbg_attn_reg_result reg_results[15];
+};
+
+/* mode header */
+struct dbg_mode_hdr {
+	__le16 data;
+#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
+#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1
+};
+
+/* Attention register */
+struct dbg_attn_reg {
+	struct dbg_mode_hdr mode;
+	__le16 attn_idx_offset;
+	__le32 data;
+#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK 0xFF
+#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT 24
+	__le32 sts_clr_address;
+	__le32 mask_address;
+};
+
+/* attention types */
+enum dbg_attn_type {
+	ATTN_TYPE_INTERRUPT,
+	ATTN_TYPE_PARITY,
+	MAX_DBG_ATTN_TYPE
+};
+
+/* Debug status codes */
+enum dbg_status {
+	DBG_STATUS_OK,
+	DBG_STATUS_APP_VERSION_NOT_SET,
+	DBG_STATUS_UNSUPPORTED_APP_VERSION,
+	DBG_STATUS_DBG_BLOCK_NOT_RESET,
+	DBG_STATUS_INVALID_ARGS,
+	DBG_STATUS_OUTPUT_ALREADY_SET,
+	DBG_STATUS_INVALID_PCI_BUF_SIZE,
+	DBG_STATUS_PCI_BUF_ALLOC_FAILED,
+	DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
+	DBG_STATUS_TOO_MANY_INPUTS,
+	DBG_STATUS_INPUT_OVERLAP,
+	DBG_STATUS_HW_ONLY_RECORDING,
+	DBG_STATUS_STORM_ALREADY_ENABLED,
+	DBG_STATUS_STORM_NOT_ENABLED,
+	DBG_STATUS_BLOCK_ALREADY_ENABLED,
+	DBG_STATUS_BLOCK_NOT_ENABLED,
+	DBG_STATUS_NO_INPUT_ENABLED,
+	DBG_STATUS_NO_FILTER_TRIGGER_64B,
+	DBG_STATUS_FILTER_ALREADY_ENABLED,
+	DBG_STATUS_TRIGGER_ALREADY_ENABLED,
+	DBG_STATUS_TRIGGER_NOT_ENABLED,
+	DBG_STATUS_CANT_ADD_CONSTRAINT,
+	DBG_STATUS_TOO_MANY_TRIGGER_STATES,
+	DBG_STATUS_TOO_MANY_CONSTRAINTS,
+	DBG_STATUS_RECORDING_NOT_STARTED,
+	DBG_STATUS_DATA_DIDNT_TRIGGER,
+	DBG_STATUS_NO_DATA_RECORDED,
+	DBG_STATUS_DUMP_BUF_TOO_SMALL,
+	DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
+	DBG_STATUS_UNKNOWN_CHIP,
+	DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
+	DBG_STATUS_BLOCK_IN_RESET,
+	DBG_STATUS_INVALID_TRACE_SIGNATURE,
+	DBG_STATUS_INVALID_NVRAM_BUNDLE,
+	DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
+	DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
+	DBG_STATUS_NVRAM_READ_FAILED,
+	DBG_STATUS_IDLE_CHK_PARSE_FAILED,
+	DBG_STATUS_MCP_TRACE_BAD_DATA,
+	DBG_STATUS_MCP_TRACE_NO_META,
+	DBG_STATUS_MCP_COULD_NOT_HALT,
+	DBG_STATUS_MCP_COULD_NOT_RESUME,
+	DBG_STATUS_DMAE_FAILED,
+	DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
+	DBG_STATUS_IGU_FIFO_BAD_DATA,
+	DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
+	DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
+	DBG_STATUS_REG_FIFO_BAD_DATA,
+	DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
+	DBG_STATUS_DBG_ARRAY_NOT_SET,
+	MAX_DBG_STATUS
 };
 
-struct igu_msix_vector {
-	struct regpair address;
-	__le32 data;
-	__le32 msix_vector_fields;
-#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
-#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
-#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
-#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
-#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
-#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
-#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
-#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
-};
+/********************************/
+/* HSI Init Functions constants */
+/********************************/
+
+/* Number of VLAN priorities */
+#define NUM_OF_VLAN_PRIORITIES 8
+
+/* QM per-port init parameters */
+struct init_qm_port_params {
+	u8 active;
+	u8 active_phys_tcs;
+	__le16 num_pbf_cmd_lines;
+	__le16 num_btb_blocks;
+	__le16 reserved;
+};
 
+/* QM per-PQ init parameters */
+struct init_qm_pq_params {
+	u8 vport_id;
+	u8 tc_id;
+	u8 wrr_group;
+	u8 rl_valid;
+};
+
+/* QM per-vport init parameters */
+struct init_qm_vport_params {
+	__le32 vport_rl;
+	__le16 vport_wfq;
+	__le16 first_tx_pq_id[NUM_OF_TCS];
+};
+
+/**************************************/
+/* Init Tool HSI constants and macros */
+/**************************************/
+
+/* Width of GRC address in bits (addresses are specified in dwords) */
+#define GRC_ADDR_BITS 23
+#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
+
+/* indicates an init that should be applied to any phase ID */
+#define ANY_PHASE_ID 0xffff
+
+/* Max size in dwords of a zipped array */
+#define MAX_ZIPPED_SIZE 8192
+
 enum init_modes {
-	MODE_BB_A0,
+	MODE_RESERVED,
 	MODE_BB_B0,
 	MODE_RESERVED2,
 	MODE_ASIC,
@@ -1083,7 +1643,8 @@ enum init_modes {
 	MODE_PORTS_PER_ENG_2,
 	MODE_PORTS_PER_ENG_4,
 	MODE_100G,
-	MODE_EAGLE_ENG1_WORKAROUND,
+	MODE_40G,
+	MODE_RESERVED7,
 	MAX_INIT_MODES
 };
 
@@ -1096,484 +1657,302 @@ enum init_phases {
 	MAX_INIT_PHASES
 };
 
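GRC_ADDR_BITS and MAX_GRC_ADDR above define a 23-bit, dword-granular register address space, while the GRCBASE_* constants earlier are byte offsets. A small sketch of the relationship, inferred from the definitions rather than taken from driver code:

/* GRC addresses are expressed in dwords; a BAR offset is in bytes. */
static u32 grc_dword_addr_to_byte_offset(u32 dword_addr)
{
	/* MAX_GRC_ADDR == (1 << 23) - 1 == 0x7fffff */
	return (dword_addr & MAX_GRC_ADDR) * 4;
}

For example, a register at byte address GRCBASE_IGU + 0x10 sits at dword address (GRCBASE_IGU + 0x10) / 4.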
-/* per encapsulation type enabling flags */
-struct prs_reg_encapsulation_type_en {
-	u8 flags;
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
-#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
-#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
-#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
-};
-
-enum pxp_tph_st_hint {
-	TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
-	TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
-	TPH_ST_HINT_TARGET,
-	TPH_ST_HINT_TARGET_PRIO,
-	MAX_PXP_TPH_ST_HINT
-};
-
-/* QM hardware structure of enable bypass credit mask */
-struct qm_rf_bypass_mask {
-	u8 flags;
-#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
-#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
-#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
-#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
-#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
-#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
-#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
-#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
-#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
-#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
-#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
-#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
-#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
-#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
-#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
-#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
-};
-
-/* QM hardware structure of opportunistic credit mask */
-struct qm_rf_opportunistic_mask {
-	__le16 flags;
-#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
-#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
-#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
-#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
-#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
-#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
-#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
-#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
-};
-
-/* QM hardware structure of QM map memory */
-struct qm_rf_pq_map {
-	u32 reg;
-#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */
-#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
-#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */
-#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
-#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
-#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
-#define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */
-#define QM_RF_PQ_MAP_VOQ_SHIFT 18
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
-#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */
-#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
-#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
-#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
-};
-
-/* Completion params for aggregated interrupt completion */
-struct sdm_agg_int_comp_params {
-	__le16 params;
-#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
-#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
+enum init_split_types {
+	SPLIT_TYPE_NONE,
+	SPLIT_TYPE_PORT,
+	SPLIT_TYPE_PF,
+	SPLIT_TYPE_PORT_PF,
+	SPLIT_TYPE_VF,
+	MAX_INIT_SPLIT_TYPES
 };
 
-/* SDM operation gen command (generate aggregative interrupt) */
-struct sdm_op_gen {
-	__le32 command;
-#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF /* completion parameters 0-15 */
-#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
-#define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */
-#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
-#define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */
-#define SDM_OP_GEN_RESERVED_SHIFT 20
-};
-
-/*********************************** Init ************************************/
-
-/* Width of GRC address in bits (addresses are specified in dwords) */
-#define GRC_ADDR_BITS 23
-#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
-
-/* indicates an init that should be applied to any phase ID */
-#define ANY_PHASE_ID 0xffff
-
-/* init pattern size in bytes */
-#define INIT_PATTERN_SIZE_BITS 4
-#define MAX_INIT_PATTERN_SIZE BIT(INIT_PATTERN_SIZE_BITS)
-
-/* Max size in dwords of a zipped array */
-#define MAX_ZIPPED_SIZE 8192
-
-/* Global PXP window */
-#define NUM_OF_PXP_WIN 19
-#define PXP_WIN_DWORD_SIZE_BITS 10
-#define PXP_WIN_DWORD_SIZE BIT(PXP_WIN_DWORD_SIZE_BITS)
-#define PXP_WIN_BYTE_SIZE_BITS (PXP_WIN_DWORD_SIZE_BITS + 2)
-#define PXP_WIN_BYTE_SIZE (PXP_WIN_DWORD_SIZE * 4)
-
-/********************************* GRC Dump **********************************/
-
-/* width of GRC dump register sequence length in bits */
-#define DUMP_SEQ_LEN_BITS 8
-#define DUMP_SEQ_LEN_MAX_VAL ((1 << DUMP_SEQ_LEN_BITS) - 1)
-
-/* width of GRC dump memory length in bits */
-#define DUMP_MEM_LEN_BITS 18
-#define DUMP_MEM_LEN_MAX_VAL ((1 << DUMP_MEM_LEN_BITS) - 1)
-
-/* width of register type ID in bits */
-#define REG_TYPE_ID_BITS 6
-#define REG_TYPE_ID_MAX_VAL ((1 << REG_TYPE_ID_BITS) - 1)
-
-/* width of block ID in bits */
-#define BLOCK_ID_BITS 8
-#define BLOCK_ID_MAX_VAL ((1 << BLOCK_ID_BITS) - 1)
-
-/******************************** Idle Check *********************************/
-
-/* max number of idle check predicate immediates */
-#define MAX_IDLE_CHK_PRED_IMM 3
-
-/* max number of idle check argument registers */
-#define MAX_IDLE_CHK_READ_REGS 3
-
-/* max number of idle check loops */
-#define MAX_IDLE_CHK_LOOPS 0x10000
-
-/* max idle check address increment */
-#define MAX_IDLE_CHK_INCREMENT 0x10000
-
-/* indicates an undefined idle check line index */
-#define IDLE_CHK_UNDEFINED_LINE_IDX 0xffffff
-
-/* max number of register values following the idle check header */
-#define IDLE_CHK_MAX_DUMP_REGS 2
-
-/* arguments for IDLE_CHK_MACRO_TYPE_QM_RD_WR */
-#define IDLE_CHK_QM_RD_WR_PTR 0
-#define IDLE_CHK_QM_RD_WR_BANK 1
-
-/**************************************/
-/* HSI Functions constants and macros */
-/**************************************/
-
-/* Number of VLAN priorities */
-#define NUM_OF_VLAN_PRIORITIES 8
-
-/* the MCP Trace meta data signature is duplicated in the perl script that
- * generates the NVRAM images.
- */
-#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
-
 /* Binary buffer header */
 struct bin_buffer_hdr {
-	u32 offset;
-	u32 length /* buffer length in bytes */;
-};
-
-/* binary buffer types */
-enum bin_buffer_type {
-	BIN_BUF_FW_VER_INFO /* fw_ver_info struct */,
-	BIN_BUF_INIT_CMD /* init commands */,
-	BIN_BUF_INIT_VAL /* init data */,
-	BIN_BUF_INIT_MODE_TREE /* init modes tree */,
-	BIN_BUF_IRO /* internal RAM offsets array */,
-	MAX_BIN_BUFFER_TYPE
+	__le32 offset;
+	__le32 length;
 };
 
-/* Chip IDs */
-enum chip_ids {
-	CHIP_BB_A0 /* BB A0 chip ID */,
-	CHIP_BB_B0 /* BB B0 chip ID */,
-	CHIP_K2 /* AH chip ID */,
-	MAX_CHIP_IDS
+/* binary init buffer types */
+enum bin_init_buffer_type {
+	BIN_BUF_FW_VER_INFO,
+	BIN_BUF_INIT_CMD,
+	BIN_BUF_INIT_VAL,
+	BIN_BUF_INIT_MODE_TREE,
+	BIN_BUF_IRO,
+	MAX_BIN_INIT_BUFFER_TYPE
 };
 
+/* init array header: raw */
 struct init_array_raw_hdr {
 	__le32 data;
 #define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
 #define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF /* init array params */
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF
 #define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
 };
 
+/* init array header: standard */
 struct init_array_standard_hdr {
 	__le32 data;
 #define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
 #define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
 #define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
 #define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
 };
 
+/* init array header: zipped */
 struct init_array_zipped_hdr {
 	__le32 data;
 #define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
 #define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
 #define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
 #define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
 };
 
+/* init array header: pattern */
 struct init_array_pattern_hdr {
 	__le32 data;
 #define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
 #define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
 #define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
 #define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
 #define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
 #define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
 };
 
+/* init array header union */
 union init_array_hdr {
-	struct init_array_raw_hdr raw /* raw init array header */;
+	struct init_array_raw_hdr raw;
 	struct init_array_standard_hdr standard;
-	struct init_array_zipped_hdr zipped /* zipped init array header */;
-	struct init_array_pattern_hdr pattern /* pattern init array header */;
+	struct init_array_zipped_hdr zipped;
+	struct init_array_pattern_hdr pattern;
 };
 
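All four init array headers overlay the same __le32, with the type in the low nibble deciding how the remaining 28 bits are interpreted, so the raw view is always a safe starting point. A minimal decode sketch using the same GET_FIELD() helper as above:

/* Decode an init array header; the 'raw' view is valid for every type. */
static enum init_array_types init_array_type(union init_array_hdr *hdr)
{
	u32 data = le32_to_cpu(hdr->raw.data);

	return (enum init_array_types)GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE);
}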
+/* init array types */
 enum init_array_types {
-	INIT_ARR_STANDARD /* standard init array */,
-	INIT_ARR_ZIPPED /* zipped init array */,
-	INIT_ARR_PATTERN /* a repeated pattern */,
+	INIT_ARR_STANDARD,
+	INIT_ARR_ZIPPED,
+	INIT_ARR_PATTERN,
 	MAX_INIT_ARRAY_TYPES
 };
 
 /* init operation: callback */
 struct init_callback_op {
 	__le32 op_data;
 #define INIT_CALLBACK_OP_OP_MASK 0xF
 #define INIT_CALLBACK_OP_OP_SHIFT 0
 #define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
 #define INIT_CALLBACK_OP_RESERVED_SHIFT 4
-	__le16 callback_id /* Callback ID */;
-	__le16 block_id /* Blocks ID */;
+	__le16 callback_id;
+	__le16 block_id;
 };
 
 /* init operation: delay */
 struct init_delay_op {
 	__le32 op_data;
 #define INIT_DELAY_OP_OP_MASK 0xF
 #define INIT_DELAY_OP_OP_SHIFT 0
 #define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
 #define INIT_DELAY_OP_RESERVED_SHIFT 4
-	__le32 delay /* delay in us */;
+	__le32 delay;
 };
 
 /* init operation: if_mode */
 struct init_if_mode_op {
 	__le32 op_data;
 #define INIT_IF_MODE_OP_OP_MASK 0xF
 #define INIT_IF_MODE_OP_OP_SHIFT 0
 #define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
 #define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
 #define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
 #define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
 	__le16 reserved2;
 	__le16 modes_buf_offset;
 };
 
 /* init operation: if_phase */
 struct init_if_phase_op {
 	__le32 op_data;
 #define INIT_IF_PHASE_OP_OP_MASK 0xF
 #define INIT_IF_PHASE_OP_OP_SHIFT 0
 #define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
 #define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
 #define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
 #define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
 #define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
 #define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
 	__le32 phase_data;
-#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */
+#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF
 #define INIT_IF_PHASE_OP_PHASE_SHIFT 0
 #define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
 #define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
-#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF /* Init phase ID */
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF
 #define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
 };
 
 /* init mode operators */
 enum init_mode_ops {
-	INIT_MODE_OP_NOT /* init mode not operator */,
-	INIT_MODE_OP_OR /* init mode or operator */,
-	INIT_MODE_OP_AND /* init mode and operator */,
+	INIT_MODE_OP_NOT,
+	INIT_MODE_OP_OR,
+	INIT_MODE_OP_AND,
 	MAX_INIT_MODE_OPS
 };
 
 /* init operation: raw */
 struct init_raw_op {
 	__le32 op_data;
 #define INIT_RAW_OP_OP_MASK 0xF
 #define INIT_RAW_OP_OP_SHIFT 0
-#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */
+#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF
 #define INIT_RAW_OP_PARAM1_SHIFT 4
-	__le32 param2 /* Init param 2 */;
+	__le32 param2;
 };
 
 /* init array params */
 struct init_op_array_params {
-	__le16 size /* array size in dwords */;
-	__le16 offset /* array start offset in dwords */;
+	__le16 size;
+	__le16 offset;
 };
 
 /* Write init operation arguments */
 union init_write_args {
 	__le32 inline_val;
 	__le32 zeros_count;
 	__le32 array_offset;
 	struct init_op_array_params runtime;
 };
 
 /* init operation: write */
 struct init_write_op {
 	__le32 data;
 #define INIT_WRITE_OP_OP_MASK 0xF
 #define INIT_WRITE_OP_OP_SHIFT 0
 #define INIT_WRITE_OP_SOURCE_MASK 0x7
 #define INIT_WRITE_OP_SOURCE_SHIFT 4
 #define INIT_WRITE_OP_RESERVED_MASK 0x1
 #define INIT_WRITE_OP_RESERVED_SHIFT 7
 #define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
 #define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
 #define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
 #define INIT_WRITE_OP_ADDRESS_SHIFT 9
-	union init_write_args args /* Write init operation arguments */;
+	union init_write_args args;
 };
 
 /* init operation: read */
 struct init_read_op {
 	__le32 op_data;
 #define INIT_READ_OP_OP_MASK 0xF
 #define INIT_READ_OP_OP_SHIFT 0
 #define INIT_READ_OP_POLL_TYPE_MASK 0xF
 #define INIT_READ_OP_POLL_TYPE_SHIFT 4
 #define INIT_READ_OP_RESERVED_MASK 0x1
 #define INIT_READ_OP_RESERVED_SHIFT 8
 #define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
 #define INIT_READ_OP_ADDRESS_SHIFT 9
 	__le32 expected_val;
+
 };
 
 /* Init operations union */
 union init_op {
-	struct init_raw_op raw /* raw init operation */;
-	struct init_write_op write /* write init operation */;
-	struct init_read_op read /* read init operation */;
-	struct init_if_mode_op if_mode /* if_mode init operation */;
-	struct init_if_phase_op if_phase /* if_phase init operation */;
-	struct init_callback_op callback /* callback init operation */;
-	struct init_delay_op delay /* delay init operation */;
+	struct init_raw_op raw;
+	struct init_write_op write;
+	struct init_read_op read;
+	struct init_if_mode_op if_mode;
+	struct init_if_phase_op if_phase;
+	struct init_callback_op callback;
+	struct init_delay_op delay;
 };
 
 /* Init command operation types */
 enum init_op_types {
-	INIT_OP_READ /* GRC read init command */,
-	INIT_OP_WRITE /* GRC write init command */,
+	INIT_OP_READ,
+	INIT_OP_WRITE,
 	INIT_OP_IF_MODE,
 	INIT_OP_IF_PHASE,
-	INIT_OP_DELAY /* delay init command */,
-	INIT_OP_CALLBACK /* callback init command */,
+	INIT_OP_DELAY,
+	INIT_OP_CALLBACK,
 	MAX_INIT_OP_TYPES
 };
 
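An init command stream is an array of union init_op entries, and the opcode occupies the same 4-bit slot of op_data in every operation, so a consumer can dispatch on the raw view. A minimal sketch of that dispatch (the loop structure and handlers are assumed, not the driver's actual init engine):

/* Dispatch one init operation by its type; handler bodies are omitted. */
static void run_init_op(union init_op *op)
{
	u32 op_data = le32_to_cpu(op->raw.op_data);

	switch (GET_FIELD(op_data, INIT_RAW_OP_OP)) {
	case INIT_OP_WRITE:
		/* would write op->write.args according to its SOURCE field */
		break;
	case INIT_OP_READ:
		/* would poll per the POLL_TYPE field until expected_val */
		break;
	case INIT_OP_DELAY:
		/* would wait le32_to_cpu(op->delay.delay) microseconds */
		break;
	default:
		break;
	}
}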
+/* init polling types */
 enum init_poll_types {
-	INIT_POLL_NONE /* No polling */,
-	INIT_POLL_EQ,
-	INIT_POLL_OR,
-	INIT_POLL_AND,
+	INIT_POLL_NONE,
+	INIT_POLL_EQ,
+	INIT_POLL_OR,
+	INIT_POLL_AND,
 	MAX_INIT_POLL_TYPES
 };
 
 /* init source types */
 enum init_source_types {
-	INIT_SRC_INLINE /* init value is included in the init command */,
-	INIT_SRC_ZEROS /* init value is all zeros */,
-	INIT_SRC_ARRAY /* init value is an array of values */,
-	INIT_SRC_RUNTIME /* init value is provided during runtime */,
+	INIT_SRC_INLINE,
+	INIT_SRC_ZEROS,
+	INIT_SRC_ARRAY,
+	INIT_SRC_RUNTIME,
 	MAX_INIT_SOURCE_TYPES
 };
 
 /* Internal RAM Offsets macro data */
 struct iro {
-	u32 base /* RAM field offset */;
-	u16 m1 /* multiplier 1 */;
-	u16 m2 /* multiplier 2 */;
-	u16 m3 /* multiplier 3 */;
-	u16 size /* RAM field size */;
+	__le32 base;
+	__le16 m1;
+	__le16 m2;
+	__le16 m3;
+	__le16 size;
 };
 
-/* QM per-port init parameters */
-struct init_qm_port_params {
-	u8 active /* Indicates if this port is active */;
-	u8 num_active_phys_tcs;
-	u16 num_pbf_cmd_lines;
-	u16 num_btb_blocks;
-	__le16 reserved;
-};
-
-/* QM per-PQ init parameters */
-struct init_qm_pq_params {
-	u8 vport_id /* VPORT ID */;
-	u8 tc_id /* TC ID */;
-	u8 wrr_group /* WRR group */;
-	u8 reserved;
-};
+/**
+ * @brief qed_dbg_print_attn - Prints attention registers values in the specified results struct.
+ *
+ * @param p_hwfn
+ * @param results - Pointer to the attention read results
+ *
+ * @return error if one of the following holds:
+ *	- the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
+				   struct dbg_attn_block_result *results);
 
-/* QM per-vport init parameters */
-struct init_qm_vport_params {
-	u32 vport_rl;
-	u16 vport_wfq;
-	u16 first_tx_pq_id[NUM_OF_TCS];
-};
+#define MAX_NAME_LEN 16
 
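A caller of qed_dbg_print_attn() is expected to test the returned dbg_status before trusting the printed results; a minimal sketch (the wrapper and error mapping are hypothetical, only the prototype above is from the header):

/* Hypothetical wrapper: print attention results, report failure. */
static int print_attn(struct qed_hwfn *p_hwfn,
		      struct dbg_attn_block_result *results)
{
	enum dbg_status rc = qed_dbg_print_attn(p_hwfn, results);

	return (rc == DBG_STATUS_OK) ? 0 : -EINVAL;
}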
 /* Win 2 */
 #define GTT_BAR0_MAP_REG_IGU_CMD \
 	0x00f000UL
+
 /* Win 3 */
 #define GTT_BAR0_MAP_REG_TSDM_RAM \
 	0x010000UL
+
 /* Win 4 */
 #define GTT_BAR0_MAP_REG_MSDM_RAM \
 	0x011000UL
+
 /* Win 5 */
 #define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \
 	0x012000UL
+
 /* Win 6 */
 #define GTT_BAR0_MAP_REG_USDM_RAM \
 	0x013000UL
+
 /* Win 7 */
 #define GTT_BAR0_MAP_REG_USDM_RAM_1024 \
 	0x014000UL
+
 /* Win 8 */
 #define GTT_BAR0_MAP_REG_USDM_RAM_2048 \
 	0x015000UL
+
 /* Win 9 */
 #define GTT_BAR0_MAP_REG_XSDM_RAM \
 	0x016000UL
+
 /* Win 10 */
 #define GTT_BAR0_MAP_REG_YSDM_RAM \
 	0x017000UL
+
 /* Win 11 */
 #define GTT_BAR0_MAP_REG_PSDM_RAM \
 	0x018000UL
@@ -1584,785 +1963,718 @@ struct init_qm_vport_params {
  * Returns the required host memory size in 4KB units.
  * Must be called before all QM init HSI functions.
  *
  * @param pf_id - physical function ID
  * @param num_pf_cids - number of connections used by this PF
  * @param num_vf_cids - number of connections used by VFs of this PF
  * @param num_tids - number of tasks used by this PF
  * @param num_pf_pqs - number of PQs used by this PF
  * @param num_vf_pqs - number of PQs used by VFs of this PF
  *
  * @return The required host memory size in 4KB units.
  */
 u32 qed_qm_pf_mem_size(u8 pf_id,
 		       u32 num_pf_cids,
 		       u32 num_vf_cids,
-		       u32 num_tids,
-		       u16 num_pf_pqs,
-		       u16 num_vf_pqs);
+		       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
 
 struct qed_qm_common_rt_init_params {
 	u8 max_ports_per_engine;
 	u8 max_phys_tcs_per_port;
 	bool pf_rl_en;
 	bool pf_wfq_en;
 	bool vport_rl_en;
 	bool vport_wfq_en;
 	struct init_qm_port_params *port_params;
 };
 
+int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
+			  struct qed_qm_common_rt_init_params *p_params);
+
+struct qed_qm_pf_rt_init_params {
+	u8 port_id;
+	u8 pf_id;
+	u8 max_phys_tcs_per_port;
+	bool is_first_pf;
+	u32 num_pf_cids;
+	u32 num_vf_cids;
+	u32 num_tids;
+	u16 start_pq;
+	u16 num_pf_pqs;
+	u16 num_vf_pqs;
+	u8 start_vport;
+	u8 num_vports;
+	u8 pf_wfq;
+	u32 pf_rl;
+	struct init_qm_pq_params *pq_params;
+	struct init_qm_vport_params *vport_params;
+};
+
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+		      struct qed_ptt *p_ptt,
+		      struct qed_qm_pf_rt_init_params *p_params);
+
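qed_qm_pf_mem_size() reports its result in 4KB units, so a caller converts to bytes before allocating; a minimal sketch with made-up counts (only the function and its unit convention come from the header above):

/* Illustrative sizing call; the cid/tid/pq counts are invented. */
static u32 example_qm_mem_bytes(void)
{
	u32 pages = qed_qm_pf_mem_size(0 /* pf_id */,
				       64 /* num_pf_cids */,
				       0 /* num_vf_cids */,
				       32 /* num_tids */,
				       8 /* num_pf_pqs */,
				       0 /* num_vf_pqs */);

	return pages * 4096;	/* 4KB units -> bytes */
}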
 /**
- * @brief qed_qm_common_rt_init - Prepare QM runtime init values for the
- *	engine phase.
+ * @brief qed_init_pf_wfq - Initializes the WFQ weight of the specified PF
  *
  * @param p_hwfn
- * @param max_ports_per_engine - max number of ports per engine in HW
- * @param max_phys_tcs_per_port - max number of physical TCs per port in HW
- * @param pf_rl_en - enable per-PF rate limiters
- * @param pf_wfq_en - enable per-PF WFQ
- * @param vport_rl_en - enable per-VPORT rate limiters
- * @param vport_wfq_en - enable per-VPORT WFQ
- * @param port_params - array of size MAX_NUM_PORTS with
- *	parameters for each port
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_wfq - WFQ weight. Must be non-zero.
  *
  * @return 0 on success, -1 on error.
  */
-int qed_qm_common_rt_init(
-	struct qed_hwfn *p_hwfn,
-	struct qed_qm_common_rt_init_params *p_params);
-
-struct qed_qm_pf_rt_init_params {
-	u8 port_id;
-	u8 pf_id;
-	u8 max_phys_tcs_per_port;
-	bool is_first_pf;
-	u32 num_pf_cids;
-	u32 num_vf_cids;
-	u32 num_tids;
-	u16 start_pq;
-	u16 num_pf_pqs;
-	u16 num_vf_pqs;
-	u8 start_vport;
-	u8 num_vports;
-	u8 pf_wfq;
-	u32 pf_rl;
-	struct init_qm_pq_params *pq_params;
-	struct init_qm_vport_params *vport_params;
-};
-
-int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
-		      struct qed_ptt *p_ptt,
-		      struct qed_qm_pf_rt_init_params *p_params);
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq);
 
 /**
- * @brief qed_init_pf_rl Initializes the rate limit of the specified PF
+ * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF
  *
  * @param p_hwfn
  * @param p_ptt - ptt window used for writing the registers
  * @param pf_id - PF ID
  * @param pf_rl - rate limit in Mb/sec units
  *
  * @return 0 on success, -1 on error.
  */
 int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
-		   struct qed_ptt *p_ptt,
-		   u8 pf_id,
-		   u32 pf_rl);
+		   struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl);
 
 /**
- * @brief qed_init_vport_rl Initializes the rate limit of the specified VPORT
+ * @brief qed_init_vport_wfq Initializes the WFQ weight of the specified VPORT
  *
  * @param p_hwfn
  * @param p_ptt - ptt window used for writing the registers
- * @param vport_id - VPORT ID
- * @param vport_rl - rate limit in Mb/sec units
+ * @param first_tx_pq_id - An array containing the first Tx PQ ID associated
+ *	with the VPORT for each TC. This array is filled by qed_qm_pf_rt_init
+ * @param vport_wfq - WFQ weight. Must be non-zero.
  *
  * @return 0 on success, -1 on error.
  */
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
+		       struct qed_ptt *p_ptt,
+		       u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
 
-int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
-		      struct qed_ptt *p_ptt,
-		      u8 vport_id,
-		      u32 vport_rl);
+/**
+ * @brief qed_init_vport_rl - Initializes the rate limit of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param vport_id - VPORT ID
+ * @param vport_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
+		      struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl);
 /**
  * @brief qed_send_qm_stop_cmd Sends a stop command to the QM
  *
  * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
+ * @param p_ptt
  * @param is_release_cmd - true for release, false for stop.
  * @param is_tx_pq - true for Tx PQs, false for Other PQs.
  * @param start_pq - first PQ ID to stop
  * @param num_pqs - Number of PQs to stop, starting from start_pq.
  *
- * @return bool, true if successful, false if timeout occurred while waiting
- *	for QM command done.
+ * @return bool, true if successful, false if timeout occurred while waiting for QM command done.
  */
-
-bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
-			  struct qed_ptt *p_ptt,
-			  bool is_release_cmd,
-			  bool is_tx_pq,
-			  u16 start_pq,
-			  u16 num_pqs);
-
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+			  struct qed_ptt *p_ptt,
+			  bool is_release_cmd,
+			  bool is_tx_pq, u16 start_pq, u16 num_pqs);
+
+/**
+ * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - vxlan destination udp port.
+ */
 void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
 			     struct qed_ptt *p_ptt, u16 dest_port);
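Configuring a tunnel type is a two-step affair: program the destination UDP port, then flip the enable flag (the enable counterpart is declared just below). A minimal VXLAN sketch; acquiring p_hwfn/p_ptt is not shown:

/* Enable VXLAN recognition on the standard IANA port (4789). */
static void example_enable_vxlan(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	qed_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
	qed_set_vxlan_enable(p_hwfn, p_ptt, true);
}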
+
+/**
+ * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param vxlan_enable - vxlan enable flag.
+ */
 void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
 			  struct qed_ptt *p_ptt, bool vxlan_enable);
+
+/**
+ * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_gre_enable - eth GRE enable flag.
+ * @param ip_gre_enable - IP GRE enable flag.
+ */
 void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
-			struct qed_ptt *p_ptt, bool eth_gre_enable,
-			bool ip_gre_enable);
+			struct qed_ptt *p_ptt,
+			bool eth_gre_enable, bool ip_gre_enable);
+
+/**
+ * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - geneve destination udp port.
+ */
 void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
 			      struct qed_ptt *p_ptt, u16 dest_port);
+
+/**
+ * @brief qed_set_geneve_enable - enable or disable GENEVE tunnels in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_geneve_enable - eth GENEVE enable flag.
+ * @param ip_geneve_enable - IP GENEVE enable flag.
+ */
 void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
-			   struct qed_ptt *p_ptt, bool eth_geneve_enable,
-			   bool ip_geneve_enable);
+			   struct qed_ptt *p_ptt,
+			   bool eth_geneve_enable, bool ip_geneve_enable);
 
-/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
-#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
-#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
-/* Tstorm port statistics */
-#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + ((port_id) * IRO[1].m1))
-#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
-/* Tstorm ll2 port statistics */
-#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
-	(IRO[2].base + ((port_id) * IRO[2].m1))
-#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
-/* Ustorm VF-PF Channel ready flag */
-#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
-	(IRO[3].base + ((vf_id) * IRO[3].m1))
-#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
-/* Ustorm Final flr cleanup ack */
-#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) (IRO[4].base + ((pf_id) * IRO[4].m1))
-#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
-/* Ustorm Event ring consumer */
-#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[5].base + ((pf_id) * IRO[5].m1))
-#define USTORM_EQE_CONS_SIZE (IRO[5].size)
-/* Ustorm Common Queue ring consumer */
-#define USTORM_COMMON_QUEUE_CONS_OFFSET(global_queue_id) \
-	(IRO[6].base + ((global_queue_id) * IRO[6].m1))
-#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[6].size)
-/* Xstorm Integration Test Data */
-#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base)
-#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size)
-/* Ystorm Integration Test Data */
-#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
-#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
-/* Pstorm Integration Test Data */
-#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
-#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
-/* Tstorm Integration Test Data */
-#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
-#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
+	(IRO[1].base + ((port_id) * IRO[1].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
+	(IRO[3].base + ((vf_id) * IRO[3].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
+	(IRO[4].base + (pf_id) * IRO[4].m1)
+#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
+#define USTORM_EQE_CONS_OFFSET(pf_id) \
+	(IRO[5].base + ((pf_id) * IRO[5].m1))
+#define USTORM_EQE_CONS_SIZE (IRO[5].size)
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
+	(IRO[6].base + ((queue_zone_id) * IRO[6].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
+	(IRO[7].base + ((queue_zone_id) * IRO[7].m1))
+#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+	(IRO[18].base + ((stat_counter_id) * IRO[18].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
+#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
+	(IRO[19].base + ((queue_id) * IRO[19].m1))
+#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[20].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[20].size)
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+	(IRO[21].base + ((pf_id) * IRO[21].m1))
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[21].size)
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+	(IRO[22].base + ((stat_counter_id) * IRO[22].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[22].size)
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+	(IRO[23].base + ((pf_id) * IRO[23].m1))
1756/* Mstorm Integration Test Data */ 2172#define USTORM_ETH_PF_STAT_SIZE (IRO[23].size)
1757#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base) 2173#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
1758#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size) 2174 (IRO[24].base + ((stat_counter_id) * IRO[24].m1))
1759/* Ustorm Integration Test Data */ 2175#define PSTORM_QUEUE_STAT_SIZE (IRO[24].size)
1760#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base) 2176#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
1761#define USTORM_INTEG_TEST_DATA_SIZE (IRO[12].size) 2177 (IRO[25].base + ((pf_id) * IRO[25].m1))
1762/* Tstorm producers */ 2178#define PSTORM_ETH_PF_STAT_SIZE (IRO[25].size)
1763#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \ 2179#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \
1764 (IRO[13].base + ((core_rx_queue_id) * IRO[13].m1)) 2180 (IRO[26].base + ((ethtype) * IRO[26].m1))
1765#define TSTORM_LL2_RX_PRODS_SIZE (IRO[13].size) 2181#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[26].size)
1766/* Tstorm LightL2 queue statistics */ 2182#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[27].base)
1767#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ 2183#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[27].size)
1768 (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1)) 2184#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
1769#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[14].size) 2185 (IRO[28].base + ((pf_id) * IRO[28].m1))
1770/* Ustorm LightL2 queue statistics */ 2186#define ETH_RX_RATE_LIMIT_SIZE (IRO[28].size)
1771#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ 2187#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
1772 (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1)) 2188 (IRO[29].base + ((queue_id) * IRO[29].m1))
1773#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[15].size) 2189#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[29].size)
1774/* Pstorm LightL2 queue statistics */ 2190
1775#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \ 2191static const struct iro iro_arr[46] = {
1776 (IRO[16].base + ((core_tx_stats_id) * IRO[16].m1)) 2192 {0x0, 0x0, 0x0, 0x0, 0x8},
1777#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[16].size) 2193 {0x4cb0, 0x78, 0x0, 0x0, 0x78},
1778/* Mstorm queue statistics */ 2194 {0x6318, 0x20, 0x0, 0x0, 0x20},
1779#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ 2195 {0xb00, 0x8, 0x0, 0x0, 0x4},
1780 (IRO[17].base + ((stat_counter_id) * IRO[17].m1)) 2196 {0xa80, 0x8, 0x0, 0x0, 0x4},
1781#define MSTORM_QUEUE_STAT_SIZE (IRO[17].size) 2197 {0x0, 0x8, 0x0, 0x0, 0x2},
1782/* Mstorm producers */ 2198 {0x80, 0x8, 0x0, 0x0, 0x4},
1783#define MSTORM_PRODS_OFFSET(queue_id) (IRO[18].base + ((queue_id) * IRO[18].m1)) 2199 {0x84, 0x8, 0x0, 0x0, 0x2},
1784#define MSTORM_PRODS_SIZE (IRO[18].size) 2200 {0x4bc0, 0x0, 0x0, 0x0, 0x78},
1785/* TPA aggregation timeout in us resolution (on ASIC) */ 2201 {0x3df0, 0x0, 0x0, 0x0, 0x78},
1786#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[19].base) 2202 {0x29b0, 0x0, 0x0, 0x0, 0x78},
1787#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[19].size) 2203 {0x4c38, 0x0, 0x0, 0x0, 0x78},
1788/* Ustorm queue statistics */ 2204 {0x4a48, 0x0, 0x0, 0x0, 0x78},
1789#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ 2205 {0x7e48, 0x0, 0x0, 0x0, 0x78},
1790 (IRO[20].base + ((stat_counter_id) * IRO[20].m1)) 2206 {0xa28, 0x8, 0x0, 0x0, 0x8},
1791#define USTORM_QUEUE_STAT_SIZE (IRO[20].size) 2207 {0x60f8, 0x10, 0x0, 0x0, 0x10},
1792/* Ustorm queue zone */ 2208 {0xb820, 0x30, 0x0, 0x0, 0x30},
1793#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ 2209 {0x95b8, 0x30, 0x0, 0x0, 0x30},
1794 (IRO[21].base + ((queue_id) * IRO[21].m1)) 2210 {0x4c18, 0x80, 0x0, 0x0, 0x40},
1795#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[21].size) 2211 {0x1f8, 0x4, 0x0, 0x0, 0x4},
1796/* Pstorm queue statistics */ 2212 {0xc9a8, 0x0, 0x0, 0x0, 0x4},
1797#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ 2213 {0x4c58, 0x80, 0x0, 0x0, 0x20},
1798 (IRO[22].base + ((stat_counter_id) * IRO[22].m1)) 2214 {0x8050, 0x40, 0x0, 0x0, 0x30},
1799#define PSTORM_QUEUE_STAT_SIZE (IRO[22].size) 2215 {0xe770, 0x60, 0x0, 0x0, 0x60},
1800/* Tstorm last parser message */ 2216 {0x2b48, 0x80, 0x0, 0x0, 0x38},
1801#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[23].base) 2217 {0xdf88, 0x78, 0x0, 0x0, 0x78},
1802#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[23].size) 2218 {0x1f8, 0x4, 0x0, 0x0, 0x4},
1803/* Tstorm Eth limit Rx rate */ 2219 {0xacf0, 0x0, 0x0, 0x0, 0xf0},
1804#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[24].base + ((pf_id) * IRO[24].m1)) 2220 {0xade0, 0x8, 0x0, 0x0, 0x8},
1805#define ETH_RX_RATE_LIMIT_SIZE (IRO[24].size) 2221 {0x1f8, 0x8, 0x0, 0x0, 0x8},
1806/* Ystorm queue zone */ 2222 {0xac0, 0x8, 0x0, 0x0, 0x8},
1807#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ 2223 {0x2578, 0x8, 0x0, 0x0, 0x8},
1808 (IRO[25].base + ((queue_id) * IRO[25].m1)) 2224 {0x24f8, 0x8, 0x0, 0x0, 0x8},
1809#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[25].size) 2225 {0x0, 0x8, 0x0, 0x0, 0x8},
1810/* Ystorm cqe producer */ 2226 {0x200, 0x10, 0x8, 0x0, 0x8},
1811#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ 2227 {0xb78, 0x10, 0x8, 0x0, 0x2},
1812 (IRO[26].base + ((rss_id) * IRO[26].m1)) 2228 {0xd888, 0x38, 0x0, 0x0, 0x24},
1813#define YSTORM_TOE_CQ_PROD_SIZE (IRO[26].size) 2229 {0x12120, 0x10, 0x0, 0x0, 0x8},
1814/* Ustorm cqe producer */ 2230 {0x11b20, 0x38, 0x0, 0x0, 0x18},
1815#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ 2231 {0xa8c0, 0x30, 0x0, 0x0, 0x10},
1816 (IRO[27].base + ((rss_id) * IRO[27].m1)) 2232 {0x86f8, 0x28, 0x0, 0x0, 0x18},
1817#define USTORM_TOE_CQ_PROD_SIZE (IRO[27].size) 2233 {0xeff8, 0x10, 0x0, 0x0, 0x10},
1818/* Ustorm grq producer */ 2234 {0xdd08, 0x48, 0x0, 0x0, 0x38},
1819#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ 2235 {0xf460, 0x20, 0x0, 0x0, 0x20},
1820 (IRO[28].base + ((pf_id) * IRO[28].m1)) 2236 {0x2b80, 0x80, 0x0, 0x0, 0x10},
1821#define USTORM_TOE_GRQ_PROD_SIZE (IRO[28].size) 2237 {0x5000, 0x10, 0x0, 0x0, 0x10},
1822/* Tstorm cmdq-cons of given command queue-id */
1823#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
1824 (IRO[29].base + ((cmdq_queue_id) * IRO[29].m1))
1825#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[29].size)
1826/* Mstorm rq-cons of given queue-id */
1827#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) \
1828 (IRO[30].base + ((rq_queue_id) * IRO[30].m1))
1829#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[30].size)
1830/* Mstorm bdq-external-producer of given BDQ function ID, BDqueue-id */
1831#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
1832 (IRO[31].base + ((func_id) * IRO[31].m1) + ((bdq_id) * IRO[31].m2))
1833#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[31].size)
1834/* Tstorm (reflects M-Storm) bdq-external-producer of given fn ID, BDqueue-id */
1835#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
1836 (IRO[32].base + ((func_id) * IRO[32].m1) + ((bdq_id) * IRO[32].m2))
1837#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[32].size)
1838/* Tstorm iSCSI RX stats */
1839#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
1840 (IRO[33].base + ((pf_id) * IRO[33].m1))
1841#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[33].size)
1842/* Mstorm iSCSI RX stats */
1843#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
1844 (IRO[34].base + ((pf_id) * IRO[34].m1))
1845#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[34].size)
1846/* Ustorm iSCSI RX stats */
1847#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
1848 (IRO[35].base + ((pf_id) * IRO[35].m1))
1849#define USTORM_ISCSI_RX_STATS_SIZE (IRO[35].size)
1850/* Xstorm iSCSI TX stats */
1851#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
1852 (IRO[36].base + ((pf_id) * IRO[36].m1))
1853#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[36].size)
1854/* Ystorm iSCSI TX stats */
1855#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
1856 (IRO[37].base + ((pf_id) * IRO[37].m1))
1857#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[37].size)
1858/* Pstorm iSCSI TX stats */
1859#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
1860 (IRO[38].base + ((pf_id) * IRO[38].m1))
1861#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[38].size)
1862/* Tstorm FCoE RX stats */
1863#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
1864 (IRO[39].base + ((pf_id) * IRO[39].m1))
1865#define TSTORM_FCOE_RX_STATS_SIZE (IRO[39].size)
1866/* Mstorm FCoE RX stats */
1867#define MSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
1868 (IRO[40].base + ((pf_id) * IRO[40].m1))
1869#define MSTORM_FCOE_RX_STATS_SIZE (IRO[40].size)
1870/* Pstorm FCoE TX stats */
1871#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
1872 (IRO[41].base + ((pf_id) * IRO[41].m1))
1873#define PSTORM_FCOE_TX_STATS_SIZE (IRO[41].size)
1874/* Pstorm RoCE statistics */
1875#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) \
1876 (IRO[42].base + ((stat_counter_id) * IRO[42].m1))
1877#define PSTORM_ROCE_STAT_SIZE (IRO[42].size)
1878/* Tstorm RoCE statistics */
1879#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) \
1880 (IRO[43].base + ((stat_counter_id) * IRO[43].m1))
1881#define TSTORM_ROCE_STAT_SIZE (IRO[43].size)
1882
1883static const struct iro iro_arr[44] = {
1884 { 0x10, 0x0, 0x0, 0x0, 0x8 },
1885 { 0x47c8, 0x60, 0x0, 0x0, 0x60 },
1886 { 0x5e30, 0x20, 0x0, 0x0, 0x20 },
1887 { 0x510, 0x8, 0x0, 0x0, 0x4 },
1888 { 0x490, 0x8, 0x0, 0x0, 0x4 },
1889 { 0x10, 0x8, 0x0, 0x0, 0x2 },
1890 { 0x90, 0x8, 0x0, 0x0, 0x2 },
1891 { 0x4940, 0x0, 0x0, 0x0, 0x78 },
1892 { 0x3de0, 0x0, 0x0, 0x0, 0x78 },
1893 { 0x2998, 0x0, 0x0, 0x0, 0x78 },
1894 { 0x4750, 0x0, 0x0, 0x0, 0x78 },
1895 { 0x56d0, 0x0, 0x0, 0x0, 0x78 },
1896 { 0x7e50, 0x0, 0x0, 0x0, 0x78 },
1897 { 0x100, 0x8, 0x0, 0x0, 0x8 },
1898 { 0x5c10, 0x10, 0x0, 0x0, 0x10 },
1899 { 0xb508, 0x30, 0x0, 0x0, 0x30 },
1900 { 0x95c0, 0x30, 0x0, 0x0, 0x30 },
1901 { 0x58a0, 0x40, 0x0, 0x0, 0x40 },
1902 { 0x200, 0x10, 0x0, 0x0, 0x8 },
1903 { 0xa230, 0x0, 0x0, 0x0, 0x4 },
1904 { 0x8058, 0x40, 0x0, 0x0, 0x30 },
1905 { 0xd00, 0x8, 0x0, 0x0, 0x8 },
1906 { 0x2b30, 0x80, 0x0, 0x0, 0x38 },
1907 { 0xa808, 0x0, 0x0, 0x0, 0xf0 },
1908 { 0xa8f8, 0x8, 0x0, 0x0, 0x8 },
1909 { 0x80, 0x8, 0x0, 0x0, 0x8 },
1910 { 0xac0, 0x8, 0x0, 0x0, 0x8 },
1911 { 0x2580, 0x8, 0x0, 0x0, 0x8 },
1912 { 0x2500, 0x8, 0x0, 0x0, 0x8 },
1913 { 0x440, 0x8, 0x0, 0x0, 0x2 },
1914 { 0x1800, 0x8, 0x0, 0x0, 0x2 },
1915 { 0x1a00, 0x10, 0x8, 0x0, 0x2 },
1916 { 0x640, 0x10, 0x8, 0x0, 0x2 },
1917 { 0xd9b8, 0x38, 0x0, 0x0, 0x24 },
1918 { 0x11048, 0x10, 0x0, 0x0, 0x8 },
1919 { 0x11678, 0x38, 0x0, 0x0, 0x18 },
1920 { 0xaec0, 0x30, 0x0, 0x0, 0x10 },
1921 { 0x8700, 0x28, 0x0, 0x0, 0x18 },
1922 { 0xec00, 0x10, 0x0, 0x0, 0x10 },
1923 { 0xde38, 0x40, 0x0, 0x0, 0x30 },
1924 { 0x121a8, 0x38, 0x0, 0x0, 0x8 },
1925 { 0xf068, 0x20, 0x0, 0x0, 0x20 },
1926 { 0x2b68, 0x80, 0x0, 0x0, 0x10 },
1927 { 0x4ab8, 0x10, 0x0, 0x0, 0x10 },
1928}; 2238};
1929 2239
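
/* For readers decoding the IRO macros above: each iro_arr entry supplies
 * a base offset, per-index multipliers and an element size, and every
 * *_OFFSET macro is a plain affine address computation over the table.
 * The field names below are inferred from the macro bodies (base, m1, m2
 * and size appear there; m3 is an assumption for the unused fourth
 * initializer column).
 *
 *	struct iro {
 *		u32 base;	region base offset
 *		u16 m1, m2, m3;	multipliers for up to three indices
 *		u16 size;	element size in bytes
 *	};
 *
 * For example, with the new table:
 *
 *	USTORM_EQE_CONS_OFFSET(pf_id)
 *		= IRO[5].base + (pf_id) * IRO[5].m1
 *		= 0x0 + (pf_id) * 0x8
 */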
1930/* Runtime array offsets */ 2240/* Runtime array offsets */
1931#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 2241#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
1932#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1 2242#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
1933#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2 2243#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
1934#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3 2244#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
1935#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4 2245#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
1936#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5 2246#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
1937#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6 2247#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
1938#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7 2248#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
1939#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8 2249#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
1940#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9 2250#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
1941#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10 2251#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
1942#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11 2252#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
1943#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12 2253#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
1944#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13 2254#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
1945#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 2255#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
1946#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 2256#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
1947#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16 2257#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
1948#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17 2258#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
1949#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18 2259#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
1950#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19 2260#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
1951#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20 2261#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
1952#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21 2262#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
1953#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22 2263#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
1954#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23 2264#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
1955#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24 2265#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
1956#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 2266#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
1957#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 2267#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
1960#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497 2270#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
1961#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736 2271#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
1962#define CAU_REG_PI_MEMORY_RT_OFFSET 2233 2272#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
1963#define CAU_REG_PI_MEMORY_RT_SIZE 4416 2273#define CAU_REG_PI_MEMORY_RT_SIZE 4416
1964#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649 2274#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
1965#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650 2275#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
1966#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651 2276#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
1967#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652 2277#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
1968#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653 2278#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
1969#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654 2279#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
1970#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655 2280#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
1971#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656 2281#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
1972#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657 2282#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
1973#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658 2283#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
1974#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659 2284#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
1975#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660 2285#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
1976#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661 2286#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
1977#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662 2287#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
1978#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663 2288#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
1979#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664 2289#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
1980#define SRC_REG_FIRSTFREE_RT_OFFSET 6665 2290#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
1981#define SRC_REG_FIRSTFREE_RT_SIZE 2 2291#define SRC_REG_FIRSTFREE_RT_SIZE 2
1982#define SRC_REG_LASTFREE_RT_OFFSET 6667 2292#define SRC_REG_LASTFREE_RT_OFFSET 6667
1983#define SRC_REG_LASTFREE_RT_SIZE 2 2293#define SRC_REG_LASTFREE_RT_SIZE 2
1984#define SRC_REG_COUNTFREE_RT_OFFSET 6669 2294#define SRC_REG_COUNTFREE_RT_OFFSET 6669
1985#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670 2295#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
1986#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671 2296#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
1987#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672 2297#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
1988#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673 2298#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
1989#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674 2299#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
1990#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675 2300#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
1991#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6676 2301#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
1992#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6677 2302#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
1993#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6678 2303#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
1994#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6679 2304#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
1995#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6680 2305#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
1996#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6681 2306#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
1997#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6682 2307#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
1998#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6683 2308#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
1999#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6684 2309#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
2000#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6685 2310#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
2001#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6686 2311#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
2002#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6687 2312#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
2003#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688 2313#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
2004#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689 2314#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
2005#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690 2315#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
2006#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6691 2316#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
2007#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6692 2317#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
2008#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6693 2318#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
2009#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6694 2319#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
2010#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6695 2320#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
2011#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6696 2321#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
2012#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6697 2322#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
2013#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6698 2323#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
2014#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6699 2324#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
2015#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6700 2325#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
2016#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6701 2326#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
2017#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6702 2327#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
2018#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6703 2328#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
2019#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000 2329#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
2020#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28703 2330#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
2021#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28704 2331#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
2022#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28705 2332#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
2023#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28706 2333#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
2024#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28707 2334#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
2025#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28708 2335#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
2026#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28709 2336#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
2027#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28710 2337#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
2028#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28711 2338#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
2029#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28712 2339#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
2030#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28713 2340#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
2031#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 2341#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
2032#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29129 2342#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
2033#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512 2343#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
2034#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29641 2344#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
2035#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29642 2345#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29642
2036#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29643 2346#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29643
2037#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29644 2347#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29644
2038#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29645 2348#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29645
2039#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29646 2349#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29646
2040#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29647 2350#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29647
2041#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29648 2351#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29648
2042#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29649 2352#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29649
2043#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29650 2353#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29650
2044#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29651 2354#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29651
2045#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29652 2355#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29652
2046#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29653 2356#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29653
2047#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29654 2357#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29654
2048#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29655 2358#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29655
2049#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29656 2359#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29656
2050#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29657 2360#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29657
2051#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29658 2361#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29658
2052#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29659 2362#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29659
2053#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29660 2363#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29660
2054#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29661 2364#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29661
2055#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29662 2365#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29662
2056#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29663 2366#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29663
2057#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29664 2367#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29664
2058#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29665 2368#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29665
2059#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29666 2369#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29666
2060#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29667 2370#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29667
2061#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29668 2371#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29668
2062#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29669 2372#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29669
2063#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29670 2373#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29670
2064#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29671 2374#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29671
2065#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29672 2375#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29672
2066#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29673 2376#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29673
2067#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29674 2377#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29674
2068#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29675 2378#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29675
2069#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29676 2379#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29676
2070#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29677 2380#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29677
2071#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29678 2381#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29678
2072#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29679 2382#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29679
2073#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29680 2383#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29680
2074#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29681 2384#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29681
2075#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29682 2385#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29682
2076#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29683 2386#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29683
2077#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29684 2387#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29684
2078#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29685 2388#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29685
2079#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29686 2389#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29686
2080#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29687 2390#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29687
2081#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29688 2391#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29688
2082#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29689 2392#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29689
2083#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29690 2393#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29690
2084#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29691 2394#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29691
2085#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29692 2395#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29692
2086#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29693 2396#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29693
2087#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29694 2397#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29694
2088#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29695 2398#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29695
2089#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29696 2399#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29696
2090#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29697 2400#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29697
2091#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29698 2401#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29698
2092#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29699 2402#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29699
2093#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29700 2403#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29700
2094#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29701 2404#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29701
2095#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29702 2405#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29702
2096#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29703 2406#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29703
2097#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29704 2407#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29704
2098#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29705 2408#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29705
2099#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29706 2409#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29706
2100#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29707 2410#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29707
2101#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29708 2411#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29708
2102#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 2412#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29709
2103#define QM_REG_VOQCRDLINE_RT_OFFSET 29836 2413#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
2104#define QM_REG_VOQCRDLINE_RT_SIZE 20 2414#define QM_REG_VOQCRDLINE_RT_OFFSET 29837
2105#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29856 2415#define QM_REG_VOQCRDLINE_RT_SIZE 20
2106#define QM_REG_VOQINITCRDLINE_RT_SIZE 20 2416#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29857
2107#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29876 2417#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
2108#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29877 2418#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29877
2109#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29878 2419#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29878
2110#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29879 2420#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29879
2111#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29880 2421#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29880
2112#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29881 2422#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29881
2113#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29882 2423#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29882
2114#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29883 2424#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29883
2115#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29884 2425#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29884
2116#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29885 2426#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29885
2117#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29886 2427#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29886
2118#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29887 2428#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29887
2119#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29888 2429#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29888
2120#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29889 2430#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29889
2121#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29890 2431#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29890
2122#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29891 2432#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29891
2123#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29892 2433#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29892
2124#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29893 2434#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29893
2125#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29894 2435#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29894
2126#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29895 2436#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29895
2127#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29896 2437#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29896
2128#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29897 2438#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29897
2129#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29898 2439#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29898
2130#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29899 2440#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29899
2131#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29900 2441#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29900
2132#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29901 2442#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29901
2133#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29902 2443#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29902
2134#define QM_REG_PQTX2PF_0_RT_OFFSET 29903 2444#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29903
2135#define QM_REG_PQTX2PF_1_RT_OFFSET 29904 2445#define QM_REG_PQTX2PF_0_RT_OFFSET 29904
2136#define QM_REG_PQTX2PF_2_RT_OFFSET 29905 2446#define QM_REG_PQTX2PF_1_RT_OFFSET 29905
2137#define QM_REG_PQTX2PF_3_RT_OFFSET 29906 2447#define QM_REG_PQTX2PF_2_RT_OFFSET 29906
2138#define QM_REG_PQTX2PF_4_RT_OFFSET 29907 2448#define QM_REG_PQTX2PF_3_RT_OFFSET 29907
2139#define QM_REG_PQTX2PF_5_RT_OFFSET 29908 2449#define QM_REG_PQTX2PF_4_RT_OFFSET 29908
2140#define QM_REG_PQTX2PF_6_RT_OFFSET 29909 2450#define QM_REG_PQTX2PF_5_RT_OFFSET 29909
2141#define QM_REG_PQTX2PF_7_RT_OFFSET 29910 2451#define QM_REG_PQTX2PF_6_RT_OFFSET 29910
2142#define QM_REG_PQTX2PF_8_RT_OFFSET 29911 2452#define QM_REG_PQTX2PF_7_RT_OFFSET 29911
2143#define QM_REG_PQTX2PF_9_RT_OFFSET 29912 2453#define QM_REG_PQTX2PF_8_RT_OFFSET 29912
2144#define QM_REG_PQTX2PF_10_RT_OFFSET 29913 2454#define QM_REG_PQTX2PF_9_RT_OFFSET 29913
2145#define QM_REG_PQTX2PF_11_RT_OFFSET 29914 2455#define QM_REG_PQTX2PF_10_RT_OFFSET 29914
2146#define QM_REG_PQTX2PF_12_RT_OFFSET 29915 2456#define QM_REG_PQTX2PF_11_RT_OFFSET 29915
2147#define QM_REG_PQTX2PF_13_RT_OFFSET 29916 2457#define QM_REG_PQTX2PF_12_RT_OFFSET 29916
2148#define QM_REG_PQTX2PF_14_RT_OFFSET 29917 2458#define QM_REG_PQTX2PF_13_RT_OFFSET 29917
2149#define QM_REG_PQTX2PF_15_RT_OFFSET 29918 2459#define QM_REG_PQTX2PF_14_RT_OFFSET 29918
2150#define QM_REG_PQTX2PF_16_RT_OFFSET 29919 2460#define QM_REG_PQTX2PF_15_RT_OFFSET 29919
2151#define QM_REG_PQTX2PF_17_RT_OFFSET 29920 2461#define QM_REG_PQTX2PF_16_RT_OFFSET 29920
2152#define QM_REG_PQTX2PF_18_RT_OFFSET 29921 2462#define QM_REG_PQTX2PF_17_RT_OFFSET 29921
2153#define QM_REG_PQTX2PF_19_RT_OFFSET 29922 2463#define QM_REG_PQTX2PF_18_RT_OFFSET 29922
2154#define QM_REG_PQTX2PF_20_RT_OFFSET 29923 2464#define QM_REG_PQTX2PF_19_RT_OFFSET 29923
2155#define QM_REG_PQTX2PF_21_RT_OFFSET 29924 2465#define QM_REG_PQTX2PF_20_RT_OFFSET 29924
2156#define QM_REG_PQTX2PF_22_RT_OFFSET 29925 2466#define QM_REG_PQTX2PF_21_RT_OFFSET 29925
2157#define QM_REG_PQTX2PF_23_RT_OFFSET 29926 2467#define QM_REG_PQTX2PF_22_RT_OFFSET 29926
2158#define QM_REG_PQTX2PF_24_RT_OFFSET 29927 2468#define QM_REG_PQTX2PF_23_RT_OFFSET 29927
2159#define QM_REG_PQTX2PF_25_RT_OFFSET 29928 2469#define QM_REG_PQTX2PF_24_RT_OFFSET 29928
2160#define QM_REG_PQTX2PF_26_RT_OFFSET 29929 2470#define QM_REG_PQTX2PF_25_RT_OFFSET 29929
2161#define QM_REG_PQTX2PF_27_RT_OFFSET 29930 2471#define QM_REG_PQTX2PF_26_RT_OFFSET 29930
2162#define QM_REG_PQTX2PF_28_RT_OFFSET 29931 2472#define QM_REG_PQTX2PF_27_RT_OFFSET 29931
2163#define QM_REG_PQTX2PF_29_RT_OFFSET 29932 2473#define QM_REG_PQTX2PF_28_RT_OFFSET 29932
2164#define QM_REG_PQTX2PF_30_RT_OFFSET 29933 2474#define QM_REG_PQTX2PF_29_RT_OFFSET 29933
2165#define QM_REG_PQTX2PF_31_RT_OFFSET 29934 2475#define QM_REG_PQTX2PF_30_RT_OFFSET 29934
2166#define QM_REG_PQTX2PF_32_RT_OFFSET 29935 2476#define QM_REG_PQTX2PF_31_RT_OFFSET 29935
2167#define QM_REG_PQTX2PF_33_RT_OFFSET 29936 2477#define QM_REG_PQTX2PF_32_RT_OFFSET 29936
2168#define QM_REG_PQTX2PF_34_RT_OFFSET 29937 2478#define QM_REG_PQTX2PF_33_RT_OFFSET 29937
2169#define QM_REG_PQTX2PF_35_RT_OFFSET 29938 2479#define QM_REG_PQTX2PF_34_RT_OFFSET 29938
2170#define QM_REG_PQTX2PF_36_RT_OFFSET 29939 2480#define QM_REG_PQTX2PF_35_RT_OFFSET 29939
2171#define QM_REG_PQTX2PF_37_RT_OFFSET 29940 2481#define QM_REG_PQTX2PF_36_RT_OFFSET 29940
2172#define QM_REG_PQTX2PF_38_RT_OFFSET 29941 2482#define QM_REG_PQTX2PF_37_RT_OFFSET 29941
2173#define QM_REG_PQTX2PF_39_RT_OFFSET 29942 2483#define QM_REG_PQTX2PF_38_RT_OFFSET 29942
2174#define QM_REG_PQTX2PF_40_RT_OFFSET 29943 2484#define QM_REG_PQTX2PF_39_RT_OFFSET 29943
2175#define QM_REG_PQTX2PF_41_RT_OFFSET 29944 2485#define QM_REG_PQTX2PF_40_RT_OFFSET 29944
2176#define QM_REG_PQTX2PF_42_RT_OFFSET 29945 2486#define QM_REG_PQTX2PF_41_RT_OFFSET 29945
2177#define QM_REG_PQTX2PF_43_RT_OFFSET 29946 2487#define QM_REG_PQTX2PF_42_RT_OFFSET 29946
2178#define QM_REG_PQTX2PF_44_RT_OFFSET 29947 2488#define QM_REG_PQTX2PF_43_RT_OFFSET 29947
2179#define QM_REG_PQTX2PF_45_RT_OFFSET 29948 2489#define QM_REG_PQTX2PF_44_RT_OFFSET 29948
2180#define QM_REG_PQTX2PF_46_RT_OFFSET 29949 2490#define QM_REG_PQTX2PF_45_RT_OFFSET 29949
2181#define QM_REG_PQTX2PF_47_RT_OFFSET 29950 2491#define QM_REG_PQTX2PF_46_RT_OFFSET 29950
2182#define QM_REG_PQTX2PF_48_RT_OFFSET 29951 2492#define QM_REG_PQTX2PF_47_RT_OFFSET 29951
2183#define QM_REG_PQTX2PF_49_RT_OFFSET 29952 2493#define QM_REG_PQTX2PF_48_RT_OFFSET 29952
2184#define QM_REG_PQTX2PF_50_RT_OFFSET 29953 2494#define QM_REG_PQTX2PF_49_RT_OFFSET 29953
2185#define QM_REG_PQTX2PF_51_RT_OFFSET 29954 2495#define QM_REG_PQTX2PF_50_RT_OFFSET 29954
2186#define QM_REG_PQTX2PF_52_RT_OFFSET 29955 2496#define QM_REG_PQTX2PF_51_RT_OFFSET 29955
2187#define QM_REG_PQTX2PF_53_RT_OFFSET 29956 2497#define QM_REG_PQTX2PF_52_RT_OFFSET 29956
2188#define QM_REG_PQTX2PF_54_RT_OFFSET 29957 2498#define QM_REG_PQTX2PF_53_RT_OFFSET 29957
2189#define QM_REG_PQTX2PF_55_RT_OFFSET 29958 2499#define QM_REG_PQTX2PF_54_RT_OFFSET 29958
2190#define QM_REG_PQTX2PF_56_RT_OFFSET 29959 2500#define QM_REG_PQTX2PF_55_RT_OFFSET 29959
2191#define QM_REG_PQTX2PF_57_RT_OFFSET 29960 2501#define QM_REG_PQTX2PF_56_RT_OFFSET 29960
2192#define QM_REG_PQTX2PF_58_RT_OFFSET 29961 2502#define QM_REG_PQTX2PF_57_RT_OFFSET 29961
2193#define QM_REG_PQTX2PF_59_RT_OFFSET 29962 2503#define QM_REG_PQTX2PF_58_RT_OFFSET 29962
2194#define QM_REG_PQTX2PF_60_RT_OFFSET 29963 2504#define QM_REG_PQTX2PF_59_RT_OFFSET 29963
2195#define QM_REG_PQTX2PF_61_RT_OFFSET 29964 2505#define QM_REG_PQTX2PF_60_RT_OFFSET 29964
2196#define QM_REG_PQTX2PF_62_RT_OFFSET 29965 2506#define QM_REG_PQTX2PF_61_RT_OFFSET 29965
2197#define QM_REG_PQTX2PF_63_RT_OFFSET 29966 2507#define QM_REG_PQTX2PF_62_RT_OFFSET 29966
2198#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29967 2508#define QM_REG_PQTX2PF_63_RT_OFFSET 29967
2199#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29968 2509#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29968
2200#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29969 2510#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29969
2201#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29970 2511#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29970
2202#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29971 2512#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29971
2203#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29972 2513#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29972
2204#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29973 2514#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29973
2205#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29974 2515#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29974
2206#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29975 2516#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29975
2207#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29976 2517#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29976
2208#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29977 2518#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29977
2209#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29978 2519#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29978
2210#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29979 2520#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29979
2211#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29980 2521#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29980
2212#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29981 2522#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29981
2213#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29982 2523#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29982
2214#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29983 2524#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29983
2215#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29984 2525#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29984
2216#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29985 2526#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29985
2217#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29986 2527#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29986
2218#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29987 2528#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29987
2219#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29988 2529#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29988
2220#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29989 2530#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29989
2221#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29990 2531#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29990
2222#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29991 2532#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29991
2223#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29992 2533#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29992
2224#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29993 2534#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29993
2225#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29994 2535#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29994
2226#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29995 2536#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29995
2227#define QM_REG_RLGLBLINCVAL_RT_SIZE 256 2537#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29996
2228#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30251 2538#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
2229#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 2539#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30252
2230#define QM_REG_RLGLBLCRD_RT_OFFSET 30507 2540#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
2231#define QM_REG_RLGLBLCRD_RT_SIZE 256 2541#define QM_REG_RLGLBLCRD_RT_OFFSET 30508
2232#define QM_REG_RLGLBLENABLE_RT_OFFSET 30763 2542#define QM_REG_RLGLBLCRD_RT_SIZE 256
2233#define QM_REG_RLPFPERIOD_RT_OFFSET 30764 2543#define QM_REG_RLGLBLENABLE_RT_OFFSET 30764
2234#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30765 2544#define QM_REG_RLPFPERIOD_RT_OFFSET 30765
2235#define QM_REG_RLPFINCVAL_RT_OFFSET 30766 2545#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30766
2236#define QM_REG_RLPFINCVAL_RT_SIZE 16 2546#define QM_REG_RLPFINCVAL_RT_OFFSET 30767
2237#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30782 2547#define QM_REG_RLPFINCVAL_RT_SIZE 16
2238#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 2548#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30783
2239#define QM_REG_RLPFCRD_RT_OFFSET 30798 2549#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
2240#define QM_REG_RLPFCRD_RT_SIZE 16 2550#define QM_REG_RLPFCRD_RT_OFFSET 30799
2241#define QM_REG_RLPFENABLE_RT_OFFSET 30814 2551#define QM_REG_RLPFCRD_RT_SIZE 16
2242#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30815 2552#define QM_REG_RLPFENABLE_RT_OFFSET 30815
2243#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30816 2553#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30816
2244#define QM_REG_WFQPFWEIGHT_RT_SIZE 16 2554#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30817
2245#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30832 2555#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
2246#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 2556#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30833
2247#define QM_REG_WFQPFCRD_RT_OFFSET 30848 2557#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
2248#define QM_REG_WFQPFCRD_RT_SIZE 160 2558#define QM_REG_WFQPFCRD_RT_OFFSET 30849
2249#define QM_REG_WFQPFENABLE_RT_OFFSET 31008 2559#define QM_REG_WFQPFCRD_RT_SIZE 160
2250#define QM_REG_WFQVPENABLE_RT_OFFSET 31009 2560#define QM_REG_WFQPFENABLE_RT_OFFSET 31009
2251#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31010 2561#define QM_REG_WFQVPENABLE_RT_OFFSET 31010
2252#define QM_REG_BASEADDRTXPQ_RT_SIZE 512 2562#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31011
2253#define QM_REG_TXPQMAP_RT_OFFSET 31522 2563#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
2254#define QM_REG_TXPQMAP_RT_SIZE 512 2564#define QM_REG_TXPQMAP_RT_OFFSET 31523
2255#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32034 2565#define QM_REG_TXPQMAP_RT_SIZE 512
2256#define QM_REG_WFQVPWEIGHT_RT_SIZE 512 2566#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32035
2257#define QM_REG_WFQVPCRD_RT_OFFSET 32546 2567#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
2258#define QM_REG_WFQVPCRD_RT_SIZE 512 2568#define QM_REG_WFQVPCRD_RT_OFFSET 32547
2259#define QM_REG_WFQVPMAP_RT_OFFSET 33058 2569#define QM_REG_WFQVPCRD_RT_SIZE 512
2260#define QM_REG_WFQVPMAP_RT_SIZE 512 2570#define QM_REG_WFQVPMAP_RT_OFFSET 33059
2261#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33570 2571#define QM_REG_WFQVPMAP_RT_SIZE 512
2262#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160 2572#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33571
2263#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33730 2573#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
2264#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33731 2574#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33731
2265#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33732 2575#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33732
2266#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33733 2576#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33733
2267#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33734 2577#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33734
2268#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33735 2578#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33735
2269#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33736 2579#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33736
2270#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33737 2580#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33737
2271#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 2581#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33738
2272#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33741 2582#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
2273#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4 2583#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33742
2274#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33745 2584#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
2275#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 2585#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33746
2276#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33749 2586#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
2277#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33750 2587#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33750
2278#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 2588#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33751
2279#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33782 2589#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
2280#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 2590#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33783
2281#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33798 2591#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
2282#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 2592#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33799
2283#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33814 2593#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
2284#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 2594#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33815
2285#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33830 2595#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
2286#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 2596#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33831
2287#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33846 2597#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
2288#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33847 2598#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33847
2289#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33848 2599#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33848
2290#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33849 2600#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33849
2291#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33850 2601#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33850
2292#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33851 2602#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33851
2293#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33852 2603#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33852
2294#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33853 2604#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33853
2295#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33854 2605#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33854
2296#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33855 2606#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33855
2297#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33856 2607#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33856
2298#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33857 2608#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33857
2299#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33858 2609#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33858
2300#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33859 2610#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33859
2301#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33860 2611#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33860
2302#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33861 2612#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33861
2303#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33862 2613#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33862
2304#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33863 2614#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33863
2305#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33864 2615#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33864
2306#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33865 2616#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33865
2307#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33866 2617#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33866
2308#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33867 2618#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33867
2309#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33868 2619#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33868
2310#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33869 2620#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33869
2311#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33870 2621#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33870
2312#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33871 2622#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33871
2313#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33872 2623#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33872
2314#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33873 2624#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33873
2315#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33874 2625#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33874
2316#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33875 2626#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33875
2317#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33876 2627#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33876
2318#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33877 2628#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33877
2319#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33878 2629#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33878
2320#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33879 2630#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33879
2321#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33880 2631#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33880
2322#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33881 2632#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33881
2323#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33882 2633#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33882
2324#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33883 2634#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33883
2325#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33884 2635#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33884
2326#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33885 2636#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33885
2327#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33886 2637#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33886
2328#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33887 2638#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33887
2329#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33888 2639#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33888
2330#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33889 2640#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33889
2331#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33890 2641#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33890
2332#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33891 2642#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33891
2333#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33892 2643#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33892
2334#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33893 2644#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33893
2335#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33894 2645#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33894
2336#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33895 2646#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33895
2337#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33896 2647#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33896
2338#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33897 2648#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33897
2339#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33898 2649#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33898
2340#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33899 2650#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33899
2341#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33900 2651#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33900
2342#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33901 2652#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33901
2343#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33902 2653#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33902
2344#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33903 2654#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33903
2345#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33904 2655#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33904
2346#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33905 2656#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33905
2347#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33906 2657#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33906
2348#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33907 2658#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33907
2349#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33908 2659#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33908
2350#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33909 2660#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33909
2351#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33910 2661#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33910
2352#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33911 2662#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33911
2353#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33912 2663#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33912
2354#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33913 2664#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33913
2355#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33914 2665#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33914
2356#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33915 2666#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33915
2357#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33916 2667#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33916
2358#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33917 2668#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33917
2359#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33918 2669#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33918
2360#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33919 2670#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33919
2361#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33920 2671#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33920
2362#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33921 2672#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33921
2363#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33922 2673#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33922
2364 2674#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33923
2365#define RUNTIME_ARRAY_SIZE 33923 2675#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33924
2676
2677#define RUNTIME_ARRAY_SIZE 33925
2366 2678
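Context for the block above: each *_RT_OFFSET macro is an index into a single flat array of runtime-init values, and RUNTIME_ARRAY_SIZE (raised by this hunk from 33923 to 33925 by the two added VOQ19 entries) gives that array's length. A minimal, self-contained sketch of the idea; store_rt_reg() and the staged value are illustrative stand-ins, not the driver's actual helpers:

        #include <linux/types.h>

        static u32 rt_data[RUNTIME_ARRAY_SIZE];   /* one slot per *_RT_OFFSET */

        /* Illustrative helper: stage a runtime value; the whole array is
         * later written to the device during hardware init.
         */
        static void store_rt_reg(u32 rt_offset, u32 val)
        {
                rt_data[rt_offset] = val;
        }

        static void btb_voq19_example(void)
        {
                /* the 0x40 quota is a made-up value */
                store_rt_reg(PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET, 0x40);
                store_rt_reg(PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET, 0x40);
        }
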
2367/* The eth storm context for the Tstorm */ 2679/* The eth storm context for the Tstorm */
2368struct tstorm_eth_conn_st_ctx { 2680struct tstorm_eth_conn_st_ctx {
@@ -2380,266 +2692,266 @@ struct xstorm_eth_conn_st_ctx {
2380}; 2692};
2381 2693
2382struct xstorm_eth_conn_ag_ctx { 2694struct xstorm_eth_conn_ag_ctx {
2383 u8 reserved0 /* cdu_validation */; 2695 u8 reserved0;
2384 u8 eth_state /* state */; 2696 u8 eth_state;
2385 u8 flags0; 2697 u8 flags0;
2386#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 2698#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
2387#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 2699#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
2388#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1 2700#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
2389#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1 2701#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
2390#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1 2702#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
2391#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2 2703#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
2392#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 2704#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
2393#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 2705#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
2394#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 /* bit4 */ 2706#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
2395#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4 2707#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
2396#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1 2708#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
2397#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5 2709#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
2398#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */ 2710#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
2399#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6 2711#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
2400#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */ 2712#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
2401#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7 2713#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
2402 u8 flags1; 2714 u8 flags1;
2403#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */ 2715#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
2404#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0 2716#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
2405#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */ 2717#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
2406#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1 2718#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
2407#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */ 2719#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
2408#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2 2720#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
2409#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */ 2721#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
2410#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3 2722#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
2411#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */ 2723#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1
2412#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4 2724#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
2413#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */ 2725#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1
2414#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5 2726#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
2415#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */ 2727#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
2416#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 2728#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
2417#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */ 2729#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
2418#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 2730#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
2419 u8 flags2; 2731 u8 flags2;
2420#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ 2732#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
2421#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0 2733#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
2422#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ 2734#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
2423#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2 2735#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
2424#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ 2736#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
2425#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4 2737#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
2426#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 2738#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
2427#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6 2739#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
2428 u8 flags3; 2740 u8 flags3;
2429#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ 2741#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
2430#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0 2742#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
2431#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ 2743#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
2432#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2 2744#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
2433#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ 2745#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
2434#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4 2746#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
2435#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ 2747#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
2436#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6 2748#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
2437 u8 flags4; 2749 u8 flags4;
2438#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ 2750#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
2439#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0 2751#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
2440#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ 2752#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
2441#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2 2753#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
2442#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ 2754#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
2443#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4 2755#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
2444#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */ 2756#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
2445#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6 2757#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
2446 u8 flags5; 2758 u8 flags5;
2447#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */ 2759#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
2448#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0 2760#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
2449#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */ 2761#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
2450#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2 2762#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
2451#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */ 2763#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
2452#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4 2764#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
2453#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */ 2765#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
2454#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6 2766#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
2455 u8 flags6; 2767 u8 flags6;
2456#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 /* cf16 */ 2768#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
2457#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 2769#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
2458#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 2770#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
2459#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 2771#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
2460#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */ 2772#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
2461#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4 2773#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
2462#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */ 2774#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
2463#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 2775#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
2464 u8 flags7; 2776 u8 flags7;
2465#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */ 2777#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
2466#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 2778#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
2467#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */ 2779#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
2468#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2 2780#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
2469#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */ 2781#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
2470#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4 2782#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
2471#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ 2783#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
2472#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6 2784#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
2473#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ 2785#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
2474#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7 2786#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
2475 u8 flags8; 2787 u8 flags8;
2476#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ 2788#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
2477#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0 2789#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
2478#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ 2790#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
2479#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1 2791#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
2480#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ 2792#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
2481#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2 2793#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
2482#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ 2794#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
2483#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3 2795#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
2484#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ 2796#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
2485#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4 2797#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
2486#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ 2798#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
2487#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5 2799#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
2488#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ 2800#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
2489#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6 2801#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
2490#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ 2802#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
2491#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7 2803#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
2492 u8 flags9; 2804 u8 flags9;
2493#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ 2805#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
2494#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0 2806#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
2495#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */ 2807#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
2496#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1 2808#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
2497#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */ 2809#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
2498#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2 2810#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
2499#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */ 2811#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
2500#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3 2812#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
2501#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */ 2813#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
2502#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4 2814#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
2503#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */ 2815#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
2504#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5 2816#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
2505#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 /* cf16en */ 2817#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
2506#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 2818#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
2507#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 2819#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
2508#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 2820#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
2509 u8 flags10; 2821 u8 flags10;
2510#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */ 2822#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
2511#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 2823#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
2512#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */ 2824#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
2513#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 2825#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
2514#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */ 2826#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
2515#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 2827#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
2516#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */ 2828#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
2517#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3 2829#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
2518#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */ 2830#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
2519#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 2831#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
2520#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 /* cf23en */ 2832#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
2521#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 2833#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
2522#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */ 2834#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
2523#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6 2835#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
2524#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */ 2836#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
2525#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7 2837#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
2526 u8 flags11; 2838 u8 flags11;
2527#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */ 2839#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
2528#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0 2840#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
2529#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */ 2841#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
2530#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1 2842#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
2531#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */ 2843#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
2532#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 2844#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
2533#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ 2845#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
2534#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3 2846#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
2535#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ 2847#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
2536#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4 2848#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
2537#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ 2849#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
2538#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5 2850#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
2539#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */ 2851#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
2540#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 2852#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
2541#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */ 2853#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
2542#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7 2854#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
2543 u8 flags12; 2855 u8 flags12;
2544#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */ 2856#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
2545#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0 2857#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
2546#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */ 2858#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
2547#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1 2859#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
2548#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */ 2860#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
2549#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 2861#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
2550#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */ 2862#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
2551#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 2863#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
2552#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */ 2864#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
2553#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4 2865#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
2554#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */ 2866#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
2555#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5 2867#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
2556#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */ 2868#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
2557#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6 2869#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
2558#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */ 2870#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
2559#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7 2871#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
2560 u8 flags13; 2872 u8 flags13;
2561#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */ 2873#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
2562#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0 2874#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
2563#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */ 2875#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
2564#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1 2876#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
2565#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */ 2877#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
2566#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 2878#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
2567#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */ 2879#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
2568#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 2880#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
2569#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */ 2881#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
2570#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 2882#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
2571#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */ 2883#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
2572#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 2884#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
2573#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */ 2885#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
2574#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 2886#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
2575#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */ 2887#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
2576#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 2888#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
2577 u8 flags14; 2889 u8 flags14;
2578#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 /* bit16 */ 2890#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
2579#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 2891#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
2580#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 /* bit17 */ 2892#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
2581#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 2893#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
2582#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 /* bit18 */ 2894#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
2583#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 2895#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
2584#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 /* bit19 */ 2896#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
2585#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 2897#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
2586#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 /* bit20 */ 2898#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
2587#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 2899#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
2588#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 /* bit21 */ 2900#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
2589#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 2901#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
2590#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 /* cf23 */ 2902#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
2591#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 2903#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
2592 u8 edpm_event_id /* byte2 */; 2904 u8 edpm_event_id;
2593 __le16 physical_q0 /* physical_q0 */; 2905 __le16 physical_q0;
2594 __le16 word1 /* physical_q1 */; 2906 __le16 quota;
2595 __le16 edpm_num_bds /* physical_q2 */; 2907 __le16 edpm_num_bds;
2596 __le16 tx_bd_cons /* word3 */; 2908 __le16 tx_bd_cons;
2597 __le16 tx_bd_prod /* word4 */; 2909 __le16 tx_bd_prod;
2598 __le16 go_to_bd_cons /* word5 */; 2910 __le16 tx_class;
2599 __le16 conn_dpi /* conn_dpi */; 2911 __le16 conn_dpi;
2600 u8 byte3 /* byte3 */; 2912 u8 byte3;
2601 u8 byte4 /* byte4 */; 2913 u8 byte4;
2602 u8 byte5 /* byte5 */; 2914 u8 byte5;
2603 u8 byte6 /* byte6 */; 2915 u8 byte6;
2604 __le32 reg0 /* reg0 */; 2916 __le32 reg0;
2605 __le32 reg1 /* reg1 */; 2917 __le32 reg1;
2606 __le32 reg2 /* reg2 */; 2918 __le32 reg2;
2607 __le32 reg3 /* reg3 */; 2919 __le32 reg3;
2608 __le32 reg4 /* reg4 */; 2920 __le32 reg4;
2609 __le32 reg5 /* cf_array0 */; 2921 __le32 reg5;
2610 __le32 reg6 /* cf_array1 */; 2922 __le32 reg6;
2611 __le16 word7 /* word7 */; 2923 __le16 word7;
2612 __le16 word8 /* word8 */; 2924 __le16 word8;
2613 __le16 word9 /* word9 */; 2925 __le16 word9;
2614 __le16 word10 /* word10 */; 2926 __le16 word10;
2615 __le32 reg7 /* reg7 */; 2927 __le32 reg7;
2616 __le32 reg8 /* reg8 */; 2928 __le32 reg8;
2617 __le32 reg9 /* reg9 */; 2929 __le32 reg9;
2618 u8 byte7 /* byte7 */; 2930 u8 byte7;
2619 u8 byte8 /* byte8 */; 2931 u8 byte8;
2620 u8 byte9 /* byte9 */; 2932 u8 byte9;
2621 u8 byte10 /* byte10 */; 2933 u8 byte10;
2622 u8 byte11 /* byte11 */; 2934 u8 byte11;
2623 u8 byte12 /* byte12 */; 2935 u8 byte12;
2624 u8 byte13 /* byte13 */; 2936 u8 byte13;
2625 u8 byte14 /* byte14 */; 2937 u8 byte14;
2626 u8 byte15 /* byte15 */; 2938 u8 byte15;
2627 u8 byte16 /* byte16 */; 2939 u8 byte16;
2628 __le16 word11 /* word11 */; 2940 __le16 word11;
2629 __le32 reg10 /* reg10 */; 2941 __le32 reg10;
2630 __le32 reg11 /* reg11 */; 2942 __le32 reg11;
2631 __le32 reg12 /* reg12 */; 2943 __le32 reg12;
2632 __le32 reg13 /* reg13 */; 2944 __le32 reg13;
2633 __le32 reg14 /* reg14 */; 2945 __le32 reg14;
2634 __le32 reg15 /* reg15 */; 2946 __le32 reg15;
2635 __le32 reg16 /* reg16 */; 2947 __le32 reg16;
2636 __le32 reg17 /* reg17 */; 2948 __le32 reg17;
2637 __le32 reg18 /* reg18 */; 2949 __le32 reg18;
2638 __le32 reg19 /* reg19 */; 2950 __le32 reg19;
2639 __le16 word12 /* word12 */; 2951 __le16 word12;
2640 __le16 word13 /* word13 */; 2952 __le16 word13;
2641 __le16 word14 /* word14 */; 2953 __le16 word14;
2642 __le16 word15 /* word15 */; 2954 __le16 word15;
2643}; 2955};
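
The _MASK/_SHIFT pairs above describe sub-fields packed into the flagsN bytes of the aggregation context. A sketch of how such pairs are typically consumed, using token-pasting field helpers defined locally here (equivalents exist in the driver, but these are the sketch's own):

        #include <linux/types.h>

        /* Local stand-ins for field-access helpers (sketch only). */
        #define GET_FIELD(value, name) \
                (((value) >> name##_SHIFT) & name##_MASK)
        #define SET_FIELD(value, name, flag)                               \
                do {                                                       \
                        (value) &= ~(name##_MASK << name##_SHIFT);         \
                        (value) |= ((flag) & name##_MASK) << name##_SHIFT; \
                } while (0)

        static void xstorm_flags_example(struct xstorm_eth_conn_ag_ctx *ctx)
        {
                u8 cf0;

                /* CF0 is the 2-bit field at bits 1:0 of flags2 (see above) */
                SET_FIELD(ctx->flags2, XSTORM_ETH_CONN_AG_CTX_CF0, 2);
                cf0 = GET_FIELD(ctx->flags2, XSTORM_ETH_CONN_AG_CTX_CF0);
                (void)cf0;
        }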
2644 2956
2645/* The eth storm context for the Ystorm */ 2957/* The eth storm context for the Ystorm */
@@ -2648,220 +2960,220 @@ struct ystorm_eth_conn_st_ctx {
2648}; 2960};
2649 2961
2650struct ystorm_eth_conn_ag_ctx { 2962struct ystorm_eth_conn_ag_ctx {
2651 u8 byte0 /* cdu_validation */; 2963 u8 byte0;
2652 u8 byte1 /* state */; 2964 u8 state;
2653 u8 flags0; 2965 u8 flags0;
2654#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ 2966#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
2655#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 2967#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
2656#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ 2968#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
2657#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 2969#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
2658#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */ 2970#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
2659#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2 2971#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
2660#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */ 2972#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
2661#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4 2973#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
2662#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */ 2974#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
2663#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 2975#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
2664 u8 flags1; 2976 u8 flags1;
2665#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf0en */ 2977#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
2666#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0 2978#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
2667#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */ 2979#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
2668#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1 2980#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
2669#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ 2981#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
2670#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 2982#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
2671#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ 2983#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
2672#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 2984#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
2673#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ 2985#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
2674#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 2986#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
2675#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ 2987#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
2676#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 2988#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
2677#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ 2989#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
2678#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 2990#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
2679#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ 2991#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
2680#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 2992#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
2681 u8 byte2 /* byte2 */; 2993 u8 tx_q0_int_coallecing_timeset;
2682 u8 byte3 /* byte3 */; 2994 u8 byte3;
2683 __le16 word0 /* word0 */; 2995 __le16 word0;
2684 __le32 terminate_spqe /* reg0 */; 2996 __le32 terminate_spqe;
2685 __le32 reg1 /* reg1 */; 2997 __le32 reg1;
2686 __le16 tx_bd_cons_upd /* word1 */; 2998 __le16 tx_bd_cons_upd;
2687 __le16 word2 /* word2 */; 2999 __le16 word2;
2688 __le16 word3 /* word3 */; 3000 __le16 word3;
2689 __le16 word4 /* word4 */; 3001 __le16 word4;
2690 __le32 reg2 /* reg2 */; 3002 __le32 reg2;
2691 __le32 reg3 /* reg3 */; 3003 __le32 reg3;
2692}; 3004};
2693 3005
2694struct tstorm_eth_conn_ag_ctx { 3006struct tstorm_eth_conn_ag_ctx {
2695 u8 byte0 /* cdu_validation */; 3007 u8 byte0;
2696 u8 byte1 /* state */; 3008 u8 byte1;
2697 u8 flags0; 3009 u8 flags0;
2698#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ 3010#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
2699#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 3011#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
2700#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ 3012#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
2701#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 3013#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
2702#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ 3014#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
2703#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 3015#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
2704#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ 3016#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
2705#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 3017#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
2706#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ 3018#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
2707#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 3019#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
2708#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ 3020#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
2709#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 3021#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
2710#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ 3022#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
2711#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 3023#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
2712 u8 flags1; 3024 u8 flags1;
2713#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ 3025#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
2714#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 3026#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
2715#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ 3027#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
2716#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 3028#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
2717#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ 3029#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
2718#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 3030#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
2719#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ 3031#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
2720#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 3032#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
2721 u8 flags2; 3033 u8 flags2;
2722#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ 3034#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
2723#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 3035#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
2724#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ 3036#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
2725#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 3037#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
2726#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ 3038#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
2727#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 3039#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
2728#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ 3040#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
2729#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 3041#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
2730 u8 flags3; 3042 u8 flags3;
2731#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ 3043#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
2732#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 3044#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
2733#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ 3045#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
2734#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 3046#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
2735#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ 3047#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
2736#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 3048#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
2737#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ 3049#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
2738#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 3050#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
2739#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ 3051#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
2740#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 3052#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
2741#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ 3053#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
2742#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 3054#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
2743 u8 flags4; 3055 u8 flags4;
2744#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ 3056#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
2745#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 3057#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
2746#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ 3058#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
2747#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 3059#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
2748#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ 3060#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
2749#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 3061#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
2750#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ 3062#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
2751#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 3063#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
2752#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ 3064#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
2753#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 3065#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
2754#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ 3066#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
2755#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 3067#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
2756#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ 3068#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
2757#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 3069#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
2758#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ 3070#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
2759#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 3071#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
2760 u8 flags5; 3072 u8 flags5;
2761#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ 3073#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
2762#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 3074#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
2763#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ 3075#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
2764#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 3076#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
2765#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ 3077#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
2766#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 3078#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
2767#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ 3079#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
2768#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 3080#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
2769#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ 3081#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
2770#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 3082#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
2771#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */ 3083#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
2772#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 3084#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
2773#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ 3085#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
2774#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 3086#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
2775#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ 3087#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
2776#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 3088#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
2777 __le32 reg0 /* reg0 */; 3089 __le32 reg0;
2778 __le32 reg1 /* reg1 */; 3090 __le32 reg1;
2779 __le32 reg2 /* reg2 */; 3091 __le32 reg2;
2780 __le32 reg3 /* reg3 */; 3092 __le32 reg3;
2781 __le32 reg4 /* reg4 */; 3093 __le32 reg4;
2782 __le32 reg5 /* reg5 */; 3094 __le32 reg5;
2783 __le32 reg6 /* reg6 */; 3095 __le32 reg6;
2784 __le32 reg7 /* reg7 */; 3096 __le32 reg7;
2785 __le32 reg8 /* reg8 */; 3097 __le32 reg8;
2786 u8 byte2 /* byte2 */; 3098 u8 byte2;
2787 u8 byte3 /* byte3 */; 3099 u8 byte3;
2788 __le16 rx_bd_cons /* word0 */; 3100 __le16 rx_bd_cons;
2789 u8 byte4 /* byte4 */; 3101 u8 byte4;
2790 u8 byte5 /* byte5 */; 3102 u8 byte5;
2791 __le16 rx_bd_prod /* word1 */; 3103 __le16 rx_bd_prod;
2792 __le16 word2 /* conn_dpi */; 3104 __le16 word2;
2793 __le16 word3 /* word3 */; 3105 __le16 word3;
2794 __le32 reg9 /* reg9 */; 3106 __le32 reg9;
2795 __le32 reg10 /* reg10 */; 3107 __le32 reg10;
2796}; 3108};
2797 3109
2798struct ustorm_eth_conn_ag_ctx { 3110struct ustorm_eth_conn_ag_ctx {
2799 u8 byte0 /* cdu_validation */; 3111 u8 byte0;
2800 u8 byte1 /* state */; 3112 u8 byte1;
2801 u8 flags0; 3113 u8 flags0;
2802#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ 3114#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
2803#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 3115#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
2804#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ 3116#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
2805#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 3117#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
2806#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 /* timer0cf */ 3118#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
2807#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 3119#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
2808#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 /* timer1cf */ 3120#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
2809#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 3121#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
2810#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ 3122#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
2811#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 3123#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
2812 u8 flags1; 3124 u8 flags1;
2813#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ 3125#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
2814#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 3126#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
2815#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */ 3127#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
2816#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 3128#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
2817#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */ 3129#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
2818#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 3130#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
2819#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */ 3131#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
2820#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 3132#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
2821 u8 flags2; 3133 u8 flags2;
2822#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf0en */ 3134#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
2823#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 3135#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
2824#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */ 3136#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
2825#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 3137#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
2826#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ 3138#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
2827#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 3139#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
2828#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ 3140#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
2829#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 3141#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
2830#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */ 3142#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
2831#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 3143#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
2832#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */ 3144#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
2833#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 3145#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
2834#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */ 3146#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
2835#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 3147#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
2836#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ 3148#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
2837#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 3149#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
2838 u8 flags3; 3150 u8 flags3;
2839#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ 3151#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
2840#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 3152#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
2841#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ 3153#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
2842#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 3154#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
2843#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ 3155#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
2844#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 3156#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
2845#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ 3157#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
2846#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 3158#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
2847#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ 3159#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
2848#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 3160#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
2849#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ 3161#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
2850#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 3162#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
2851#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ 3163#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
2852#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 3164#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
2853#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ 3165#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
2854#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 3166#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
2855 u8 byte2 /* byte2 */; 3167 u8 byte2;
2856 u8 byte3 /* byte3 */; 3168 u8 byte3;
2857 __le16 word0 /* conn_dpi */; 3169 __le16 word0;
2858 __le16 tx_bd_cons /* word1 */; 3170 __le16 tx_bd_cons;
2859 __le32 reg0 /* reg0 */; 3171 __le32 reg0;
2860 __le32 reg1 /* reg1 */; 3172 __le32 reg1;
2861 __le32 reg2 /* reg2 */; 3173 __le32 reg2;
2862 __le32 tx_int_coallecing_timeset /* reg3 */; 3174 __le32 tx_int_coallecing_timeset;
2863 __le16 tx_drv_bd_cons /* word2 */; 3175 __le16 tx_drv_bd_cons;
2864 __le16 rx_drv_cqe_cons /* word3 */; 3176 __le16 rx_drv_cqe_cons;
2865}; 3177};
2866 3178
2867/* The eth storm context for the Ustorm */ 3179/* The eth storm context for the Ustorm */
@@ -2876,47 +3188,75 @@ struct mstorm_eth_conn_st_ctx {
2876 3188
2877/* eth connection context */ 3189/* eth connection context */
2878struct eth_conn_context { 3190struct eth_conn_context {
2879 struct tstorm_eth_conn_st_ctx tstorm_st_context; 3191 struct tstorm_eth_conn_st_ctx tstorm_st_context;
2880 struct regpair tstorm_st_padding[2]; 3192 struct regpair tstorm_st_padding[2];
2881 struct pstorm_eth_conn_st_ctx pstorm_st_context; 3193 struct pstorm_eth_conn_st_ctx pstorm_st_context;
2882 struct xstorm_eth_conn_st_ctx xstorm_st_context; 3194 struct xstorm_eth_conn_st_ctx xstorm_st_context;
2883 struct xstorm_eth_conn_ag_ctx xstorm_ag_context; 3195 struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
2884 struct ystorm_eth_conn_st_ctx ystorm_st_context; 3196 struct ystorm_eth_conn_st_ctx ystorm_st_context;
2885 struct ystorm_eth_conn_ag_ctx ystorm_ag_context; 3197 struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
2886 struct tstorm_eth_conn_ag_ctx tstorm_ag_context; 3198 struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
2887 struct ustorm_eth_conn_ag_ctx ustorm_ag_context; 3199 struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
2888 struct ustorm_eth_conn_st_ctx ustorm_st_context; 3200 struct ustorm_eth_conn_st_ctx ustorm_st_context;
2889 struct mstorm_eth_conn_st_ctx mstorm_st_context; 3201 struct mstorm_eth_conn_st_ctx mstorm_st_context;
2890}; 3202};
2891 3203
3204/* opcodes for the event ring */
3205enum eth_event_opcode {
3206 ETH_EVENT_UNUSED,
3207 ETH_EVENT_VPORT_START,
3208 ETH_EVENT_VPORT_UPDATE,
3209 ETH_EVENT_VPORT_STOP,
3210 ETH_EVENT_TX_QUEUE_START,
3211 ETH_EVENT_TX_QUEUE_STOP,
3212 ETH_EVENT_RX_QUEUE_START,
3213 ETH_EVENT_RX_QUEUE_UPDATE,
3214 ETH_EVENT_RX_QUEUE_STOP,
3215 ETH_EVENT_FILTERS_UPDATE,
3216 ETH_EVENT_RESERVED,
3217 ETH_EVENT_RESERVED2,
3218 ETH_EVENT_RESERVED3,
3219 ETH_EVENT_RX_ADD_UDP_FILTER,
3220 ETH_EVENT_RX_DELETE_UDP_FILTER,
3221 ETH_EVENT_RESERVED4,
3222 ETH_EVENT_RESERVED5,
3223 MAX_ETH_EVENT_OPCODE
3224};
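
The opcodes in this new enum arrive in slow-path event-ring completions. A hypothetical dispatch sketch for the two UDP-filter additions; the handler functions are placeholders, not driver code:

        #include <linux/types.h>
        #include <linux/errno.h>

        /* Placeholder handlers, named for illustration only. */
        static int handle_udp_filter_added(void)   { return 0; }
        static int handle_udp_filter_removed(void) { return 0; }

        static int eth_async_event(u8 opcode)
        {
                switch (opcode) {
                case ETH_EVENT_RX_ADD_UDP_FILTER:
                        return handle_udp_filter_added();
                case ETH_EVENT_RX_DELETE_UDP_FILTER:
                        return handle_udp_filter_removed();
                default:
                        return -EINVAL;
                }
        }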
3225
3226/* Classify rule types in E2/E3 */
2892enum eth_filter_action { 3227enum eth_filter_action {
3228 ETH_FILTER_ACTION_UNUSED,
2893 ETH_FILTER_ACTION_REMOVE, 3229 ETH_FILTER_ACTION_REMOVE,
2894 ETH_FILTER_ACTION_ADD, 3230 ETH_FILTER_ACTION_ADD,
2895 ETH_FILTER_ACTION_REMOVE_ALL, 3231 ETH_FILTER_ACTION_REMOVE_ALL,
2896 MAX_ETH_FILTER_ACTION 3232 MAX_ETH_FILTER_ACTION
2897}; 3233};
2898 3234
3235/* Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$ */
2899struct eth_filter_cmd { 3236struct eth_filter_cmd {
2900 u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */; 3237 u8 type;
2901 u8 vport_id /* the vport id */; 3238 u8 vport_id;
2902 u8 action /* filter command action: add/remove/replace */; 3239 u8 action;
2903 u8 reserved0; 3240 u8 reserved0;
2904 __le32 vni; 3241 __le32 vni;
2905 __le16 mac_lsb; 3242 __le16 mac_lsb;
2906 __le16 mac_mid; 3243 __le16 mac_mid;
2907 __le16 mac_msb; 3244 __le16 mac_msb;
2908 __le16 vlan_id; 3245 __le16 vlan_id;
2909}; 3246};
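
A sketch of filling the command above for an "add unicast MAC" rule. The 16-bit word order of the MAC halves is firmware-defined (hence the $$KEEP_ENDIANNESS$$ marker), so the byte packing shown is illustrative only:

        #include <linux/types.h>
        #include <linux/string.h>

        static void fill_mac_filter_cmd(struct eth_filter_cmd *cmd, u8 vport,
                                        const u8 mac[6])
        {
                memset(cmd, 0, sizeof(*cmd));
                cmd->type     = ETH_FILTER_TYPE_MAC;
                cmd->vport_id = vport;
                cmd->action   = ETH_FILTER_ACTION_ADD;
                /* MAC split into three 16-bit words; order is illustrative */
                cmd->mac_msb  = cpu_to_le16((mac[0] << 8) | mac[1]);
                cmd->mac_mid  = cpu_to_le16((mac[2] << 8) | mac[3]);
                cmd->mac_lsb  = cpu_to_le16((mac[4] << 8) | mac[5]);
        }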
2910 3247
3248/* $$KEEP_ENDIANNESS$$ */
2911struct eth_filter_cmd_header { 3249struct eth_filter_cmd_header {
2912 u8 rx; 3250 u8 rx;
2913 u8 tx; 3251 u8 tx;
2914 u8 cmd_cnt; 3252 u8 cmd_cnt;
2915 u8 assert_on_error; 3253 u8 assert_on_error;
2916 u8 reserved1[4]; 3254 u8 reserved1[4];
2917}; 3255};
2918 3256
3257/* Ethernet filter types: mac/vlan/pair */
2919enum eth_filter_type { 3258enum eth_filter_type {
3259 ETH_FILTER_TYPE_UNUSED,
2920 ETH_FILTER_TYPE_MAC, 3260 ETH_FILTER_TYPE_MAC,
2921 ETH_FILTER_TYPE_VLAN, 3261 ETH_FILTER_TYPE_VLAN,
2922 ETH_FILTER_TYPE_PAIR, 3262 ETH_FILTER_TYPE_PAIR,
@@ -2929,463 +3269,3515 @@ enum eth_filter_type {
2929 MAX_ETH_FILTER_TYPE 3269 MAX_ETH_FILTER_TYPE
2930}; 3270};
2931 3271
3272/* Ethernet Ramrod Command IDs */
2932enum eth_ramrod_cmd_id { 3273enum eth_ramrod_cmd_id {
2933 ETH_RAMROD_UNUSED, 3274 ETH_RAMROD_UNUSED,
2934 ETH_RAMROD_VPORT_START /* VPort Start Ramrod */, 3275 ETH_RAMROD_VPORT_START,
2935 ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */, 3276 ETH_RAMROD_VPORT_UPDATE,
2936 ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */, 3277 ETH_RAMROD_VPORT_STOP,
2937 ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */, 3278 ETH_RAMROD_RX_QUEUE_START,
2938 ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */, 3279 ETH_RAMROD_RX_QUEUE_STOP,
2939 ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */, 3280 ETH_RAMROD_TX_QUEUE_START,
2940 ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */, 3281 ETH_RAMROD_TX_QUEUE_STOP,
2941 ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */, 3282 ETH_RAMROD_FILTERS_UPDATE,
2942 ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */, 3283 ETH_RAMROD_RX_QUEUE_UPDATE,
2943 ETH_RAMROD_RESERVED, 3284 ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION,
2944 ETH_RAMROD_RESERVED2, 3285 ETH_RAMROD_RX_ADD_OPENFLOW_FILTER,
2945 ETH_RAMROD_RESERVED3, 3286 ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER,
2946 ETH_RAMROD_RESERVED4, 3287 ETH_RAMROD_RX_ADD_UDP_FILTER,
2947 ETH_RAMROD_RESERVED5, 3288 ETH_RAMROD_RX_DELETE_UDP_FILTER,
2948 ETH_RAMROD_RESERVED6, 3289 ETH_RAMROD_RX_CREATE_GFT_ACTION,
2949 ETH_RAMROD_RESERVED7, 3290 ETH_RAMROD_GFT_UPDATE_FILTER,
2950 ETH_RAMROD_RESERVED8,
2951 MAX_ETH_RAMROD_CMD_ID 3291 MAX_ETH_RAMROD_CMD_ID
2952}; 3292};
2953 3293
3294/* return code from eth sp ramrods */
3295struct eth_return_code {
3296 u8 value;
3297#define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F
3298#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
3299#define ETH_RETURN_CODE_RESERVED_MASK 0x3
3300#define ETH_RETURN_CODE_RESERVED_SHIFT 5
3301#define ETH_RETURN_CODE_RX_TX_MASK 0x1
3302#define ETH_RETURN_CODE_RX_TX_SHIFT 7
3303};
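
The new return-code byte packs a 5-bit error code plus an Rx/Tx flag. Decoding it with the GET_FIELD helper from the earlier sketch:

        static u8 eth_sp_err_code(struct eth_return_code rc)
        {
                /* 5-bit status in bits 4:0; bit 7 distinguishes Rx vs. Tx */
                return GET_FIELD(rc.value, ETH_RETURN_CODE_ERR_CODE);
        }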
3304
3305/* What to do in case an error occurs */
2954enum eth_tx_err { 3306enum eth_tx_err {
2955 ETH_TX_ERR_DROP /* Drop erronous packet. */, 3307 ETH_TX_ERR_DROP,
2956 ETH_TX_ERR_ASSERT_MALICIOUS, 3308 ETH_TX_ERR_ASSERT_MALICIOUS,
2957 MAX_ETH_TX_ERR 3309 MAX_ETH_TX_ERR
2958}; 3310};
2959 3311
3312/* Array of the different error type behaviors */
2960struct eth_tx_err_vals { 3313struct eth_tx_err_vals {
2961 __le16 values; 3314 __le16 values;
2962#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1 3315#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1
 #define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0
 #define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1
 #define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1
 #define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1
 #define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2
 #define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1
 #define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3
 #define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1
 #define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
 #define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1
 #define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
 #define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
 #define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
 #define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF
 #define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
 };

+/* vport rss configuration data */
 struct eth_vport_rss_config {
 	__le16 capabilities;
 #define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
 #define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0
 #define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1
 #define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1
 #define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1
 #define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2
 #define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1
 #define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3
 #define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1
 #define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4
 #define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1
 #define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
 #define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
 #define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
 #define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF
 #define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7
 	u8 rss_id;
 	u8 rss_mode;
 	u8 update_rss_key;
 	u8 update_rss_ind_table;
 	u8 update_rss_capabilities;
 	u8 tbl_size;
 	__le32 reserved2[2];
 	__le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+
 	__le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
 	__le32 reserved3[2];
 };

+/* eth vport RSS mode */
 enum eth_vport_rss_mode {
 	ETH_VPORT_RSS_MODE_DISABLED,
 	ETH_VPORT_RSS_MODE_REGULAR,
 	MAX_ETH_VPORT_RSS_MODE
 };

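/*
 * Illustrative sketch, not part of this patch: each *_MASK/*_SHIFT pair
 * above packs a bit-field into a little-endian word, so a field is masked
 * and shifted into a host-order value before the whole word is converted
 * with cpu_to_le16()/cpu_to_le32(). The helper and function names here are
 * assumptions for illustration only.
 */
#define HSI_SET_FIELD(value, name, flag)				\
	do {								\
		(value) &= ~((name##_MASK) << (name##_SHIFT));		\
		(value) |= ((flag) & (name##_MASK)) << (name##_SHIFT);	\
	} while (0)

/* Example: advertise IPv4 and IPv4/TCP RSS capabilities. */
static void example_rss_config(struct eth_vport_rss_config *rss_cfg)
{
	u16 caps = 0;

	HSI_SET_FIELD(caps, ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY, 1);
	HSI_SET_FIELD(caps, ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY, 1);
	rss_cfg->capabilities = cpu_to_le16(caps);
	rss_cfg->rss_mode = ETH_VPORT_RSS_MODE_REGULAR;
}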
+/* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ */
 struct eth_vport_rx_mode {
 	__le16 state;
 #define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1
 #define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0
 #define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
 #define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
 #define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1
 #define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
 #define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1
 #define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3
 #define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
 #define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
 #define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
 #define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
 #define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
 #define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
 	__le16 reserved2[3];
 };

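/*
 * Sketch reusing HSI_SET_FIELD from above (function name is an assumption):
 * a vport that keeps its unicast filters but accepts all multicast and
 * broadcast traffic.
 */
static void example_rx_mode(struct eth_vport_rx_mode *rx_mode)
{
	u16 state = 0;

	HSI_SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL, 1);
	HSI_SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL, 1);
	rx_mode->state = cpu_to_le16(state);
}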
+/* Command for setting tpa parameters */
 struct eth_vport_tpa_param {
 	u8 tpa_ipv4_en_flg;
 	u8 tpa_ipv6_en_flg;
 	u8 tpa_ipv4_tunn_en_flg;
 	u8 tpa_ipv6_tunn_en_flg;
 	u8 tpa_pkt_split_flg;
 	u8 tpa_hdr_data_split_flg;
 	u8 tpa_gro_consistent_flg;
+
 	u8 tpa_max_aggs_num;
+
-	u16 tpa_max_size;
-	u16 tpa_min_size_to_start;
-	u16 tpa_min_size_to_cont;
+	__le16 tpa_max_size;
+	__le16 tpa_min_size_to_start;
+
+	__le16 tpa_min_size_to_cont;
 	u8 max_buff_num;
 	u8 reserved;
 };

+/* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ */
 struct eth_vport_tx_mode {
 	__le16 state;
 #define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
 #define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
 #define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
 #define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
 #define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
 #define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
 #define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
 #define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
 #define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
 #define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
 #define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
 #define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
 	__le16 reserved2[3];
 };

+/* Ramrod data for rx queue start ramrod */
 struct rx_queue_start_ramrod_data {
 	__le16 rx_queue_id;
 	__le16 num_of_pbl_pages;
 	__le16 bd_max_bytes;
 	__le16 sb_id;
 	u8 sb_index;
 	u8 vport_id;
 	u8 default_rss_queue_flg;
 	u8 complete_cqe_flg;
 	u8 complete_event_flg;
 	u8 stats_counter_id;
 	u8 pin_context;
 	u8 pxp_tph_valid_bd;
 	u8 pxp_tph_valid_pkt;
 	u8 pxp_st_hint;
+
 	__le16 pxp_st_index;
 	u8 pmd_mode;
+
 	u8 notify_en;
 	u8 toggle_val;
+
+	u8 vf_rx_prod_index;
+
-	u8 reserved[7];
+	u8 reserved[6];
 	__le16 reserved1;
 	struct regpair cqe_pbl_addr;
 	struct regpair bd_base;
 	struct regpair reserved2;
 };

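/*
 * Sketch, not from this patch (function and parameter names are assumed):
 * cqe_pbl_addr and bd_base are regpair DMA addresses, i.e. a 64-bit bus
 * address split into two little-endian 32-bit halves.
 */
static void example_set_regpair(struct regpair *rp, u64 dma_addr)
{
	rp->hi = cpu_to_le32(upper_32_bits(dma_addr));
	rp->lo = cpu_to_le32(lower_32_bits(dma_addr));
}
/* e.g. example_set_regpair(&ramrod->cqe_pbl_addr, cqe_pbl_dma); */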
+/* Ramrod data for rx queue start ramrod */
 struct rx_queue_stop_ramrod_data {
 	__le16 rx_queue_id;
 	u8 complete_cqe_flg;
 	u8 complete_event_flg;
 	u8 vport_id;
 	u8 reserved[3];
 };

+/* Ramrod data for rx queue update ramrod */
 struct rx_queue_update_ramrod_data {
 	__le16 rx_queue_id;
 	u8 complete_cqe_flg;
 	u8 complete_event_flg;
 	u8 vport_id;
 	u8 reserved[4];
 	u8 reserved1;
 	u8 reserved2;
 	u8 reserved3;
 	__le16 reserved4;
 	__le16 reserved5;
 	struct regpair reserved6;
 };

-struct tx_queue_start_ramrod_data {
-	__le16 sb_id;
-	u8 sb_index;
-	u8 vport_id;
-	u8 reserved0;
-	u8 stats_counter_id;
-	__le16 qm_pq_id;
-	u8 flags;
-#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
-#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
-#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
-#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
-	u8 pxp_st_hint;
-	u8 pxp_tph_valid_bd;
-	u8 pxp_tph_valid_pkt;
-	__le16 pxp_st_index;
-	__le16 comp_agg_size;
-	__le16 queue_zone_id;
-	__le16 test_dup_count;
-	__le16 pbl_size;
-	__le16 tx_queue_id;
-	struct regpair pbl_base_addr;
-	struct regpair bd_cons_address;
-};
+/* Ramrod data for rx Add UDP Filter */
+struct rx_udp_filter_data {
+	__le16 action_icid;
+	__le16 vlan_id;
+	u8 ip_type;
+	u8 tenant_id_exists;
+	__le16 reserved1;
+	__le32 ip_dst_addr[4];
+	__le32 ip_src_addr[4];
+	__le16 udp_dst_port;
+	__le16 udp_src_port;
+	__le32 tenant_id;
+};

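/*
 * Hedged example, not from this patch: field semantics are assumed from the
 * names, and the ip_type encoding is not defined in this hunk. An IPv4 UDP
 * filter keyed on destination address and port might be filled like this.
 */
static void example_udp_filter(struct rx_udp_filter_data *filter)
{
	memset(filter, 0, sizeof(*filter));
	filter->ip_type = 0;	/* assumed: 0 selects IPv4 */
	filter->ip_dst_addr[0] = cpu_to_le32(0xc0a80001);	/* 192.168.0.1 */
	filter->udp_dst_port = cpu_to_le16(4789);
}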
+/* Ramrod data for rx queue start ramrod */
+struct tx_queue_start_ramrod_data {
+	__le16 sb_id;
+	u8 sb_index;
+	u8 vport_id;
+	u8 reserved0;
+	u8 stats_counter_id;
+	__le16 qm_pq_id;
+	u8 flags;
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
+	u8 pxp_st_hint;
+	u8 pxp_tph_valid_bd;
+	u8 pxp_tph_valid_pkt;
+	__le16 pxp_st_index;
+	__le16 comp_agg_size;
+	__le16 queue_zone_id;
+	__le16 test_dup_count;
+	__le16 pbl_size;
+	__le16 tx_queue_id;
+
+	struct regpair pbl_base_addr;
+	struct regpair bd_cons_address;
+};
+
+/* Ramrod data for tx queue stop ramrod */
 struct tx_queue_stop_ramrod_data {
 	__le16 reserved[4];
 };

+/* Ramrod data for vport update ramrod */
 struct vport_filter_update_ramrod_data {
 	struct eth_filter_cmd_header filter_cmd_hdr;
 	struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
 };

+/* Ramrod data for vport start ramrod */
 struct vport_start_ramrod_data {
 	u8 vport_id;
 	u8 sw_fid;
 	__le16 mtu;
 	u8 drop_ttl0_en;
 	u8 inner_vlan_removal_en;
 	struct eth_vport_rx_mode rx_mode;
 	struct eth_vport_tx_mode tx_mode;
 	struct eth_vport_tpa_param tpa_param;
 	__le16 default_vlan;
 	u8 tx_switching_en;
 	u8 anti_spoofing_en;
+
 	u8 default_vlan_en;
+
 	u8 handle_ptp_pkts;
 	u8 silent_vlan_removal_en;
 	u8 untagged;
 	struct eth_tx_err_vals tx_err_behav;
+
 	u8 zero_placement_offset;
-	u8 reserved[7];
+	u8 ctl_frame_mac_check_en;
+	u8 ctl_frame_ethtype_check_en;
+	u8 reserved[5];
 };

+/* Ramrod data for vport stop ramrod */
 struct vport_stop_ramrod_data {
 	u8 vport_id;
 	u8 reserved[7];
 };

+/* Ramrod data for vport update ramrod */
 struct vport_update_ramrod_data_cmn {
 	u8 vport_id;
 	u8 update_rx_active_flg;
 	u8 rx_active_flg;
 	u8 update_tx_active_flg;
 	u8 tx_active_flg;
 	u8 update_rx_mode_flg;
 	u8 update_tx_mode_flg;
 	u8 update_approx_mcast_flg;
+
 	u8 update_rss_flg;
 	u8 update_inner_vlan_removal_en_flg;
+
 	u8 inner_vlan_removal_en;
 	u8 update_tpa_param_flg;
 	u8 update_tpa_en_flg;
 	u8 update_tx_switching_en_flg;
+
 	u8 tx_switching_en;
 	u8 update_anti_spoofing_en_flg;
+
 	u8 anti_spoofing_en;
 	u8 update_handle_ptp_pkts;
+
 	u8 handle_ptp_pkts;
 	u8 update_default_vlan_en_flg;
+
 	u8 default_vlan_en;
+
 	u8 update_default_vlan_flg;
+
 	__le16 default_vlan;
 	u8 update_accept_any_vlan_flg;
+
 	u8 accept_any_vlan;
 	u8 silent_vlan_removal_en;
 	u8 update_mtu_flg;
+
 	__le16 mtu;
 	u8 reserved[2];
 };

 struct vport_update_ramrod_mcast {
 	__le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
 };

+/* Ramrod data for vport update ramrod */
 struct vport_update_ramrod_data {
 	struct vport_update_ramrod_data_cmn common;
+
 	struct eth_vport_rx_mode rx_mode;
 	struct eth_vport_tx_mode tx_mode;
 	struct eth_vport_tpa_param tpa_param;
 	struct vport_update_ramrod_mcast approx_mcast;
 	struct eth_vport_rss_config rss_config;
 };
+
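/*
 * Sketch of the update_*_flg convention used throughout the common block
 * above: a value is only applied by the firmware when its companion flag is
 * set. Function and variable names are illustrative, not from this patch.
 */
static void example_update_mtu(struct vport_update_ramrod_data *data)
{
	memset(data, 0, sizeof(*data));
	data->common.vport_id = 0;
	data->common.update_mtu_flg = 1;	/* only the MTU changes */
	data->common.mtu = cpu_to_le16(1500);
}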
+struct mstorm_rdma_task_st_ctx {
+	struct regpair temp[4];
+};
+
+struct rdma_close_func_ramrod_data {
+	u8 cnq_start_offset;
+	u8 num_cnqs;
+	u8 vf_id;
+	u8 vf_valid;
+	u8 reserved[4];
+};
+
+struct rdma_cnq_params {
+	__le16 sb_num;
+	u8 sb_index;
+	u8 num_pbl_pages;
+	__le32 reserved;
+	struct regpair pbl_base_addr;
+	__le16 queue_zone_num;
+	u8 reserved1[6];
+};
+
+struct rdma_create_cq_ramrod_data {
+	struct regpair cq_handle;
+	struct regpair pbl_addr;
+	__le32 max_cqes;
+	__le16 pbl_num_pages;
+	__le16 dpi;
+	u8 is_two_level_pbl;
+	u8 cnq_id;
+	u8 pbl_log_page_size;
+	u8 toggle_bit;
+	__le16 int_timeout;
+	__le16 reserved1;
+};

-#define VF_MAX_STATIC 192 /* In case of K2 */
-
-#define MCP_GLOB_PATH_MAX 2
-#define MCP_PORT_MAX 2 /* Global */
-#define MCP_GLOB_PORT_MAX 4 /* Global */
-#define MCP_GLOB_FUNC_MAX 16 /* Global */
+struct rdma_deregister_tid_ramrod_data {
+	__le32 itid;
+	__le32 reserved;
+};
+
+struct rdma_destroy_cq_output_params {
+	__le16 cnq_num;
+	__le16 reserved0;
+	__le32 reserved1;
+};
+
+struct rdma_destroy_cq_ramrod_data {
+	struct regpair output_params_addr;
+};
+
+enum rdma_event_opcode {
+	RDMA_EVENT_UNUSED,
+	RDMA_EVENT_FUNC_INIT,
+	RDMA_EVENT_FUNC_CLOSE,
+	RDMA_EVENT_REGISTER_MR,
+	RDMA_EVENT_DEREGISTER_MR,
+	RDMA_EVENT_CREATE_CQ,
+	RDMA_EVENT_RESIZE_CQ,
+	RDMA_EVENT_DESTROY_CQ,
+	RDMA_EVENT_CREATE_SRQ,
+	RDMA_EVENT_MODIFY_SRQ,
+	RDMA_EVENT_DESTROY_SRQ,
+	MAX_RDMA_EVENT_OPCODE
+};
+
+enum rdma_fw_return_code {
+	RDMA_RETURN_OK = 0,
+	RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR,
+	RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR,
+	RDMA_RETURN_RESIZE_CQ_ERR,
+	RDMA_RETURN_NIG_DRAIN_REQ,
+	MAX_RDMA_FW_RETURN_CODE
+};
+
+struct rdma_init_func_hdr {
+	u8 cnq_start_offset;
+	u8 num_cnqs;
+	u8 cq_ring_mode;
+	u8 cnp_vlan_priority;
+	__le32 cnp_send_timeout;
+	u8 cnp_dscp;
+	u8 vf_id;
+	u8 vf_valid;
+	u8 reserved[5];
+};
+
+struct rdma_init_func_ramrod_data {
+	struct rdma_init_func_hdr params_header;
+	struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES];
+};
+
+enum rdma_ramrod_cmd_id {
+	RDMA_RAMROD_UNUSED,
+	RDMA_RAMROD_FUNC_INIT,
+	RDMA_RAMROD_FUNC_CLOSE,
+	RDMA_RAMROD_REGISTER_MR,
+	RDMA_RAMROD_DEREGISTER_MR,
+	RDMA_RAMROD_CREATE_CQ,
+	RDMA_RAMROD_RESIZE_CQ,
+	RDMA_RAMROD_DESTROY_CQ,
+	RDMA_RAMROD_CREATE_SRQ,
+	RDMA_RAMROD_MODIFY_SRQ,
+	RDMA_RAMROD_DESTROY_SRQ,
+	MAX_RDMA_RAMROD_CMD_ID
+};
+
+struct rdma_register_tid_ramrod_data {
+	__le32 flags;
+#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_MASK 0x3FFFF
+#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 18
+#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 23
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 24
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 25
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 26
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 27
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 28
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 29
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 30
+#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 31
+	u8 flags1;
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5
+	u8 flags2;
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2
+	u8 key;
+	u8 length_hi;
+	u8 vf_id;
+	u8 vf_valid;
+	__le16 pd;
+	__le32 length_lo;
+	__le32 itid;
+	__le32 reserved2;
+	struct regpair va;
+	struct regpair pbl_base;
+	struct regpair dif_error_addr;
+	struct regpair dif_runt_addr;
+	__le32 reserved3[2];
+};
+
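/*
 * Sketch reusing HSI_SET_FIELD (names are assumptions, not from this
 * patch): a memory region registered with 4 KB pages (PAGE_SIZE_LOG = 12),
 * remote read and local write enabled.
 */
static void example_register_tid(struct rdma_register_tid_ramrod_data *tid)
{
	u32 flags = 0;

	memset(tid, 0, sizeof(*tid));
	HSI_SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG, 12);
	HSI_SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ, 1);
	HSI_SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE, 1);
	tid->flags = cpu_to_le32(flags);
}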
+struct rdma_resize_cq_output_params {
+	__le32 old_cq_cons;
+	__le32 old_cq_prod;
+};
+
+struct rdma_resize_cq_ramrod_data {
+	u8 flags;
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2
+	u8 pbl_log_page_size;
+	__le16 pbl_num_pages;
+	__le32 max_cqes;
+	struct regpair pbl_addr;
+	struct regpair output_params_addr;
+};
+
+struct rdma_srq_context {
+	struct regpair temp[8];
+};
+
+struct rdma_srq_create_ramrod_data {
+	struct regpair pbl_base_addr;
+	__le16 pages_in_srq_pbl;
+	__le16 pd_id;
+	struct rdma_srq_id srq_id;
+	__le16 page_size;
+	__le16 reserved1;
+	__le32 reserved2;
+	struct regpair producers_addr;
+};
+
+struct rdma_srq_destroy_ramrod_data {
+	struct rdma_srq_id srq_id;
+	__le32 reserved;
+};
+
+struct rdma_srq_modify_ramrod_data {
+	struct rdma_srq_id srq_id;
+	__le32 wqe_limit;
+};
+
+struct ystorm_rdma_task_st_ctx {
+	struct regpair temp[4];
+};
+
+struct ystorm_rdma_task_ag_ctx {
+	u8 reserved;
+	u8 byte1;
+	__le16 msem_ctx_upd_seq;
+	u8 flags0;
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+	u8 flags1;
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+	u8 flags2;
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+	u8 key;
+	__le32 mw_cnt;
+	u8 ref_cnt_seq;
+	u8 ctx_upd_seq;
+	__le16 dif_flags;
+	__le16 tx_ref_count;
+	__le16 last_used_ltid;
+	__le16 parent_mr_lo;
+	__le16 parent_mr_hi;
+	__le32 fbo_lo;
+	__le32 fbo_hi;
+};
+
+struct mstorm_rdma_task_ag_ctx {
+	u8 reserved;
+	u8 byte1;
+	__le16 icid;
+	u8 flags0;
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+	u8 flags1;
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+	u8 flags2;
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+	u8 key;
+	__le32 mw_cnt;
+	u8 ref_cnt_seq;
+	u8 ctx_upd_seq;
+	__le16 dif_flags;
+	__le16 tx_ref_count;
+	__le16 last_used_ltid;
+	__le16 parent_mr_lo;
+	__le16 parent_mr_hi;
+	__le32 fbo_lo;
+	__le32 fbo_hi;
+};
+
+struct ustorm_rdma_task_st_ctx {
+	struct regpair temp[2];
+};
+
+struct ustorm_rdma_task_ag_ctx {
+	u8 reserved;
+	u8 byte1;
+	__le16 icid;
+	u8 flags0;
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
+	u8 flags1;
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+	u8 flags2;
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
+	u8 flags3;
+#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+	__le32 dif_err_intervals;
+	__le32 dif_error_1st_interval;
+	__le32 reg2;
+	__le32 dif_runt_value;
+	__le32 reg4;
+	__le32 reg5;
+};
+
+struct rdma_task_context {
+	struct ystorm_rdma_task_st_ctx ystorm_st_context;
+	struct ystorm_rdma_task_ag_ctx ystorm_ag_context;
+	struct tdif_task_context tdif_context;
+	struct mstorm_rdma_task_ag_ctx mstorm_ag_context;
+	struct mstorm_rdma_task_st_ctx mstorm_st_context;
+	struct rdif_task_context rdif_context;
+	struct ustorm_rdma_task_st_ctx ustorm_st_context;
+	struct regpair ustorm_st_padding[2];
+	struct ustorm_rdma_task_ag_ctx ustorm_ag_context;
+};
+
+enum rdma_tid_type {
+	RDMA_TID_REGISTERED_MR,
+	RDMA_TID_FMR,
+	RDMA_TID_MW_TYPE1,
+	RDMA_TID_MW_TYPE2A,
+	MAX_RDMA_TID_TYPE
+};
+
+struct mstorm_rdma_conn_ag_ctx {
+	u8 byte0;
+	u8 byte1;
+	u8 flags0;
+#define MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+	u8 flags1;
+#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+	__le16 word0;
+	__le16 word1;
+	__le32 reg0;
+	__le32 reg1;
+};
+
+struct tstorm_rdma_conn_ag_ctx {
+	u8 reserved0;
+	u8 byte1;
+	u8 flags0;
+#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6
+	u8 flags1;
+#define TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+	u8 flags2;
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6
+	u8 flags3;
+#define TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+	u8 flags4;
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7
+	u8 flags5;
+#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+	__le32 reg0;
+	__le32 reg1;
+	__le32 reg2;
+	__le32 reg3;
+	__le32 reg4;
+	__le32 reg5;
+	__le32 reg6;
+	__le32 reg7;
+	__le32 reg8;
+	u8 byte2;
+	u8 byte3;
+	__le16 word0;
+	u8 byte4;
+	u8 byte5;
+	__le16 word1;
+	__le16 word2;
+	__le16 word3;
+	__le32 reg9;
+	__le32 reg10;
+};
+
+struct tstorm_rdma_task_ag_ctx {
+	u8 byte0;
+	u8 byte1;
+	__le16 word0;
+	u8 flags0;
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+	u8 flags1;
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
+	u8 flags2;
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
+	u8 flags3;
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
+	u8 flags4;
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
+	u8 byte2;
+	__le16 word1;
+	__le32 reg0;
+	u8 byte3;
+	u8 byte4;
+	__le16 word2;
+	__le16 word3;
+	__le16 word4;
+	__le32 reg1;
+	__le32 reg2;
+};
+
+struct ustorm_rdma_conn_ag_ctx {
+	u8 reserved;
+	u8 byte1;
+	u8 flags0;
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+	u8 flags1;
+#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
+	u8 flags2;
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+	u8 flags3;
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+	u8 byte2;
+	u8 byte3;
+	__le16 conn_dpi;
+	__le16 word1;
+	__le32 cq_cons;
+	__le32 cq_se_prod;
+	__le32 cq_prod;
+	__le32 reg3;
+	__le16 int_timeout;
+	__le16 word3;
+};
+
+struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+	u8 reserved0;
+	u8 state;
+	u8 flags0;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
+	u8 flags1;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
+	u8 flags2;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+	u8 flags3;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
+	u8 flags4;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+	u8 flags5;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+	u8 flags6;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
+	u8 flags7;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+	u8 flags8;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+	u8 flags9;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
+	u8 flags10;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
+	u8 flags11;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+	u8 flags12;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+	u8 flags13;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+	u8 flags14;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
+	u8 byte2;
+	__le16 physical_q0;
+	__le16 word1;
+	__le16 word2;
+	__le16 word3;
+	__le16 word4;
+	__le16 word5;
+	__le16 conn_dpi;
+	u8 byte3;
+	u8 byte4;
+	u8 byte5;
+	u8 byte6;
+	__le32 reg0;
+	__le32 reg1;
+	__le32 reg2;
+	__le32 snd_nxt_psn;
+	__le32 reg4;
+};
+
+struct xstorm_rdma_conn_ag_ctx {
+	u8 reserved0;
+	u8 state;
+	u8 flags0;
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT 7
+	u8 flags1;
+#define XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+	u8 flags2;
+#define XSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 6
+	u8 flags3;
+#define XSTORM_RDMA_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+	u8 flags4;
+#define XSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT 6
+	u8 flags5;
+#define XSTORM_RDMA_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT 6
+	u8 flags6;
+#define XSTORM_RDMA_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT 6
+	u8 flags7;
+#define XSTORM_RDMA_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 7
+	u8 flags8;
+#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 7
+	u8 flags9;
+#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT 7
+	u8 flags10;
+#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK 0x1
4695#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT 5
4696#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
4697#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 6
4698#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
4699#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 7
4700 u8 flags11;
4701#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
4702#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 0
4703#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
4704#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 1
4705#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
4706#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 2
4707#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
4708#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 3
4709#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
4710#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 4
4711#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
4712#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 5
4713#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
4714#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
4715#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK 0x1
4716#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT 7
4717 u8 flags12;
4718#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK 0x1
4719#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT 0
4720#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK 0x1
4721#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT 1
4722#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
4723#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
4724#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
4725#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
4726#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK 0x1
4727#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT 4
4728#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK 0x1
4729#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT 5
4730#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK 0x1
4731#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT 6
4732#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK 0x1
4733#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT 7
4734 u8 flags13;
4735#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK 0x1
4736#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT 0
4737#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK 0x1
4738#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT 1
4739#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
4740#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
4741#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
4742#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
4743#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
4744#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
4745#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
4746#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
4747#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
4748#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
4749#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
4750#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
4751 u8 flags14;
4752#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK 0x1
4753#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT 0
4754#define XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK 0x1
4755#define XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT 1
4756#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
4757#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
4758#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK 0x1
4759#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT 4
4760#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
4761#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
4762#define XSTORM_RDMA_CONN_AG_CTX_CF23_MASK 0x3
4763#define XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT 6
4764 u8 byte2;
4765 __le16 physical_q0;
4766 __le16 word1;
4767 __le16 word2;
4768 __le16 word3;
4769 __le16 word4;
4770 __le16 word5;
4771 __le16 conn_dpi;
4772 u8 byte3;
4773 u8 byte4;
4774 u8 byte5;
4775 u8 byte6;
4776 __le32 reg0;
4777 __le32 reg1;
4778 __le32 reg2;
4779 __le32 snd_nxt_psn;
4780 __le32 reg4;
4781 __le32 reg5;
4782 __le32 reg6;
4783};
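
Every flags byte in these aggregation contexts is carved into sub-fields by the _MASK/_SHIFT pairs that follow it. Below is a minimal, self-contained sketch of how such a field is read and written; the GET_FIELD/SET_FIELD names are illustrative stand-ins for the token-pasting helpers the driver's common HSI provides, not definitions taken from this hunk:

/* Illustrative accessors for the _MASK/_SHIFT pairs above. */
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, field)					\
	do {								\
		(value) &= ~(name##_MASK << name##_SHIFT);		\
		(value) |= ((field) & name##_MASK) << name##_SHIFT;	\
	} while (0)

/* Example: flag the connection as present in QM0, then read it back. */
static inline int xstorm_rdma_qm0_example(struct xstorm_rdma_conn_ag_ctx *ctx)
{
	SET_FIELD(ctx->flags0, XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0, 1);
	return GET_FIELD(ctx->flags0, XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0);
}
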
4784
4785struct ystorm_rdma_conn_ag_ctx {
4786 u8 byte0;
4787 u8 byte1;
4788 u8 flags0;
4789#define YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
4790#define YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
4791#define YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
4792#define YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
4793#define YSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
4794#define YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
4795#define YSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
4796#define YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
4797#define YSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
4798#define YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
4799 u8 flags1;
4800#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
4801#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
4802#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
4803#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
4804#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
4805#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
4806#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
4807#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
4808#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
4809#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
4810#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
4811#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
4812#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
4813#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
4814#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
4815#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
4816 u8 byte2;
4817 u8 byte3;
4818 __le16 word0;
4819 __le32 reg0;
4820 __le32 reg1;
4821 __le16 word1;
4822 __le16 word2;
4823 __le16 word3;
4824 __le16 word4;
4825 __le32 reg2;
4826 __le32 reg3;
4827};
4828
4829struct mstorm_roce_conn_st_ctx {
4830 struct regpair temp[6];
4831};
4832
4833struct pstorm_roce_conn_st_ctx {
4834 struct regpair temp[16];
4835};
4836
4837struct ystorm_roce_conn_st_ctx {
4838 struct regpair temp[2];
4839};
4840
4841struct xstorm_roce_conn_st_ctx {
4842 struct regpair temp[22];
4843};
4844
4845struct tstorm_roce_conn_st_ctx {
4846 struct regpair temp[30];
4847};
4848
4849struct ustorm_roce_conn_st_ctx {
4850 struct regpair temp[12];
4851};
4852
4853struct roce_conn_context {
4854 struct ystorm_roce_conn_st_ctx ystorm_st_context;
4855 struct regpair ystorm_st_padding[2];
4856 struct pstorm_roce_conn_st_ctx pstorm_st_context;
4857 struct xstorm_roce_conn_st_ctx xstorm_st_context;
4858 struct regpair xstorm_st_padding[2];
4859 struct xstorm_rdma_conn_ag_ctx xstorm_ag_context;
4860 struct tstorm_rdma_conn_ag_ctx tstorm_ag_context;
4861 struct timers_context timer_context;
4862 struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
4863 struct tstorm_roce_conn_st_ctx tstorm_st_context;
4864 struct mstorm_roce_conn_st_ctx mstorm_st_context;
4865 struct ustorm_roce_conn_st_ctx ustorm_st_context;
4866 struct regpair ustorm_st_padding[2];
4867};
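
The per-storm sections of roce_conn_context sit back to back, with explicit regpair padding keeping each section at the offset the firmware expects. Assuming the usual 8-byte regpair (two __le32 halves), those offsets are fixed and can be pinned at compile time — a sketch, not the driver's actual assertion:

#include <stddef.h>

/* Sketch: ystorm section (2 regpairs) + padding (2 regpairs) = 32 bytes,
 * so the pstorm section must start at byte 32.
 */
_Static_assert(offsetof(struct roce_conn_context, pstorm_st_context) == 32,
	       "ystorm section + padding must end at byte 32");
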
4868
4869struct roce_create_qp_req_ramrod_data {
4870 __le16 flags;
4871#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
4872#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
4873#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
4874#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2
4875#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
4876#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3
4877#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
4878#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4
4879#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1
4880#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 7
4881#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
4882#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8
4883#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
4884#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 12
4885 u8 max_ord;
4886 u8 traffic_class;
4887 u8 hop_limit;
4888 u8 orq_num_pages;
4889 __le16 p_key;
4890 __le32 flow_label;
4891 __le32 dst_qp_id;
4892 __le32 ack_timeout_val;
4893 __le32 initial_psn;
4894 __le16 mtu;
4895 __le16 pd;
4896 __le16 sq_num_pages;
4897 __le16 reserved2;
4898 struct regpair sq_pbl_addr;
4899 struct regpair orq_pbl_addr;
4900 __le16 local_mac_addr[3];
4901 __le16 remote_mac_addr[3];
4902 __le16 vlan_id;
4903 __le16 udp_src_port;
4904 __le32 src_gid[4];
4905 __le32 dst_gid[4];
4906 struct regpair qp_handle_for_cqe;
4907 struct regpair qp_handle_for_async;
4908 u8 stats_counter_id;
4909 u8 reserved3[7];
4910 __le32 cq_cid;
4911 __le16 physical_queue0;
4912 __le16 dpi;
4913};
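
The 16-bit flags word of the requester create-QP ramrod packs the RoCE flavor, completion mode, and retry counters, so it is assembled in CPU byte order and only then converted to little-endian for the firmware. A sketch with hypothetical values, reusing the illustrative SET_FIELD helper from the earlier sketch (kernel integer types and cpu_to_le16 assumed):

/* Sketch: assemble a requester create-QP flags word (values are
 * illustrative, not taken from the driver).
 */
static inline __le16 roce_create_qp_req_flags_example(void)
{
	u16 flags = 0;

	/* 1 = RROCE_IPV4; see enum roce_flavor further below. */
	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, 1);
	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, 1);
	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, 7);
	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, 7);
	return cpu_to_le16(flags);
}
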
4914
4915struct roce_create_qp_resp_ramrod_data {
4916 __le16 flags;
4917#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
4918#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
4919#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
4920#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
4921#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
4922#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
4923#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
4924#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
4925#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK 0x1
4926#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5
4927#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1
4928#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
4929#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_MASK 0x1
4930#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_SHIFT 7
4931#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
4932#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8
4933#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
4934#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11
4935 u8 max_ird;
4936 u8 traffic_class;
4937 u8 hop_limit;
4938 u8 irq_num_pages;
4939 __le16 p_key;
4940 __le32 flow_label;
4941 __le32 dst_qp_id;
4942 u8 stats_counter_id;
4943 u8 reserved1;
4944 __le16 mtu;
4945 __le32 initial_psn;
4946 __le16 pd;
4947 __le16 rq_num_pages;
4948 struct rdma_srq_id srq_id;
4949 struct regpair rq_pbl_addr;
4950 struct regpair irq_pbl_addr;
4951 __le16 local_mac_addr[3];
4952 __le16 remote_mac_addr[3];
4953 __le16 vlan_id;
4954 __le16 udp_src_port;
4955 __le32 src_gid[4];
4956 __le32 dst_gid[4];
4957 struct regpair qp_handle_for_cqe;
4958 struct regpair qp_handle_for_async;
4959 __le32 reserved2[2];
4960 __le32 cq_cid;
4961 __le16 physical_queue0;
4962 __le16 dpi;
4963};
4964
4965struct roce_destroy_qp_req_output_params {
4966 __le32 num_bound_mw;
4967 __le32 reserved;
4968};
4969
4970struct roce_destroy_qp_req_ramrod_data {
4971 struct regpair output_params_addr;
4972};
4973
4974struct roce_destroy_qp_resp_output_params {
4975 __le32 num_invalidated_mw;
4976 __le32 reserved;
4977};
4978
4979struct roce_destroy_qp_resp_ramrod_data {
4980 struct regpair output_params_addr;
4981};
4982
4983enum roce_event_opcode {
4984 ROCE_EVENT_CREATE_QP = 11,
4985 ROCE_EVENT_MODIFY_QP,
4986 ROCE_EVENT_QUERY_QP,
4987 ROCE_EVENT_DESTROY_QP,
4988 MAX_ROCE_EVENT_OPCODE
4989};
4990
4991struct roce_modify_qp_req_ramrod_data {
4992 __le16 flags;
4993#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
4994#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
4995#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK 0x1
4996#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT 1
4997#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK 0x1
4998#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2
4999#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK 0x1
5000#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT 3
5001#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
5002#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 4
5003#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK 0x1
5004#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT 5
5005#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK 0x1
5006#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT 6
5007#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK 0x1
5008#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT 7
5009#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK 0x1
5010#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT 8
5011#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK 0x1
5012#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9
5013#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
5014#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
5015#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x7
5016#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 13
5017 u8 fields;
5018#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
5019#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0
5020#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
5021#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 4
5022 u8 max_ord;
5023 u8 traffic_class;
5024 u8 hop_limit;
5025 __le16 p_key;
5026 __le32 flow_label;
5027 __le32 ack_timeout_val;
5028 __le16 mtu;
5029 __le16 reserved2;
5030 __le32 reserved3[3];
5031 __le32 src_gid[4];
5032 __le32 dst_gid[4];
5033};
5034
5035struct roce_modify_qp_resp_ramrod_data {
5036 __le16 flags;
5037#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
5038#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
5039#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
5040#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 1
5041#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
5042#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 2
5043#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
5044#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 3
5045#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK 0x1
5046#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT 4
5047#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
5048#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 5
5049#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK 0x1
5050#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT 6
5051#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK 0x1
5052#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT 7
5053#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK 0x1
5054#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
5055#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
5056#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
5057#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x3F
5058#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 10
5059 u8 fields;
5060#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
5061#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0
5062#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
5063#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 3
5064 u8 max_ird;
5065 u8 traffic_class;
5066 u8 hop_limit;
5067 __le16 p_key;
5068 __le32 flow_label;
5069 __le16 mtu;
5070 __le16 reserved2;
5071 __le32 src_gid[4];
5072 __le32 dst_gid[4];
5073};
5074
5075struct roce_query_qp_req_output_params {
5076 __le32 psn;
5077 __le32 flags;
5078#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1
5079#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0
5080#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK 0x1
5081#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1
5082#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x3FFFFFFF
5083#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2
5084};
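
The query output params are written back by the firmware in little-endian, so the flags dword is converted before individual bits are extracted. A sketch (the helper name is illustrative), again using the GET_FIELD macro sketched earlier:

/* Sketch: decode the SQ-draining bit from the requester query output. */
static inline int roce_req_sq_draining_example(
		const struct roce_query_qp_req_output_params *p)
{
	u32 flags = le32_to_cpu(p->flags);

	return GET_FIELD(flags, ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
}
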
5085
5086struct roce_query_qp_req_ramrod_data {
5087 struct regpair output_params_addr;
5088};
5089
5090struct roce_query_qp_resp_output_params {
5091 __le32 psn;
5092 __le32 err_flag;
5093#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1
5094#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
5095#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
5096#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
5097};
5098
5099struct roce_query_qp_resp_ramrod_data {
5100 struct regpair output_params_addr;
5101};
5102
5103enum roce_ramrod_cmd_id {
5104 ROCE_RAMROD_CREATE_QP = 11,
5105 ROCE_RAMROD_MODIFY_QP,
5106 ROCE_RAMROD_QUERY_QP,
5107 ROCE_RAMROD_DESTROY_QP,
5108 MAX_ROCE_RAMROD_CMD_ID
5109};
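
The ramrod command IDs appear to mirror the completion event opcodes above — both start at 11 and enumerate the same four QP operations in the same order — which a compile-time check can document (a sketch, not the driver's own assertion):

_Static_assert((int)ROCE_RAMROD_CREATE_QP == (int)ROCE_EVENT_CREATE_QP &&
	       (int)ROCE_RAMROD_DESTROY_QP == (int)ROCE_EVENT_DESTROY_QP,
	       "ramrod command IDs and event opcodes must stay in lockstep");
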
5110
5111struct mstorm_roce_req_conn_ag_ctx {
5112 u8 byte0;
5113 u8 byte1;
5114 u8 flags0;
5115#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
5116#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
5117#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
5118#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
5119#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
5120#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
5121#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
5122#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
5123#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
5124#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
5125 u8 flags1;
5126#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
5127#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
5128#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
5129#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
5130#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
5131#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
5132#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
5133#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
5134#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
5135#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
5136#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
5137#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
5138#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
5139#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
5140#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
5141#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
5142 __le16 word0;
5143 __le16 word1;
5144 __le32 reg0;
5145 __le32 reg1;
5146};
5147
5148struct mstorm_roce_resp_conn_ag_ctx {
5149 u8 byte0;
5150 u8 byte1;
5151 u8 flags0;
5152#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
5153#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
5154#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
5155#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
5156#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
5157#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
5158#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
5159#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
5160#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
5161#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
5162 u8 flags1;
5163#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
5164#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
5165#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
5166#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
5167#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
5168#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
5169#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
5170#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
5171#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
5172#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
5173#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
5174#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
5175#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
5176#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
5177#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
5178#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
5179 __le16 word0;
5180 __le16 word1;
5181 __le32 reg0;
5182 __le32 reg1;
5183};
5184
5185enum roce_flavor {
5186 PLAIN_ROCE, /* RoCE v1 */
5187 RROCE_IPV4, /* RoCE v2 (Routable RoCE) over IPv4 */
5188 RROCE_IPV6, /* RoCE v2 (Routable RoCE) over IPv6 */
5189 MAX_ROCE_FLAVOR
5190};
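
The flavor selects the wire encapsulation: RoCE v1 versus routable RoCE v2 over IPv4 or IPv6. A small, purely illustrative debug helper mapping the enum to printable names (not part of the driver):

/* Illustrative only: printable names for enum roce_flavor. */
static inline const char *roce_flavor_str_example(enum roce_flavor flavor)
{
	switch (flavor) {
	case PLAIN_ROCE:
		return "RoCE v1";
	case RROCE_IPV4:
		return "RoCE v2 over IPv4";
	case RROCE_IPV6:
		return "RoCE v2 over IPv6";
	default:
		return "unknown";
	}
}
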
5191
5192struct tstorm_roce_req_conn_ag_ctx {
5193 u8 reserved0;
5194 u8 state;
5195 u8 flags0;
5196#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
5197#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
5198#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1
5199#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1
5200#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1
5201#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2
5202#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
5203#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
5204#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
5205#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
5206#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
5207#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
5208#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
5209#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
5210 u8 flags1;
5211#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
5212#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 0
5213#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
5214#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
5215#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
5216#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
5217#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
5218#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
5219 u8 flags2;
5220#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
5221#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
5222#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
5223#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
5224#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
5225#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
5226#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
5227#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
5228 u8 flags3;
5229#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
5230#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
5231#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
5232#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
5233#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
5234#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
5235#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
5236#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 5
5237#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
5238#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
5239#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
5240#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
5241 u8 flags4;
5242#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
5243#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
5244#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
5245#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
5246#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
5247#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
5248#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
5249#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
5250#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
5251#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
5252#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
5253#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
5254#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
5255#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
5256#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
5257#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
5258 u8 flags5;
5259#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
5260#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
5261#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
5262#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
5263#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
5264#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
5265#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
5266#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
5267#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
5268#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
5269#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
5270#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
5271#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
5272#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
5273#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
5274#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
5275 __le32 reg0;
5276 __le32 snd_nxt_psn;
5277 __le32 snd_max_psn;
5278 __le32 orq_prod;
5279 __le32 reg4;
5280 __le32 reg5;
5281 __le32 reg6;
5282 __le32 reg7;
5283 __le32 reg8;
5284 u8 tx_cqe_error_type;
5285 u8 orq_cache_idx;
5286 __le16 snd_sq_cons_th;
5287 u8 byte4;
5288 u8 byte5;
5289 __le16 snd_sq_cons;
5290 __le16 word2;
5291 __le16 word3;
5292 __le32 reg9;
5293 __le32 reg10;
5294};
5295
5296struct tstorm_roce_resp_conn_ag_ctx {
5297 u8 byte0;
5298 u8 state;
5299 u8 flags0;
5300#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
5301#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
5302#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
5303#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
5304#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
5305#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
5306#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
5307#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
5308#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
5309#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
5310#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
5311#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
5312#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
5313#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
5314 u8 flags1;
5315#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
5316#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
5317#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
5318#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
5319#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
5320#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
5321#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
5322#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
5323 u8 flags2;
5324#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
5325#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
5326#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
5327#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
5328#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
5329#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
5330#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
5331#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
5332 u8 flags3;
5333#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
5334#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
5335#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
5336#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
5337#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
5338#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
5339#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
5340#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5
5341#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
5342#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
5343#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
5344#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
5345 u8 flags4;
5346#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
5347#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
5348#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
5349#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
5350#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
5351#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
5352#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
5353#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
5354#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
5355#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
5356#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
5357#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
5358#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
5359#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
5360#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
5361#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
5362 u8 flags5;
5363#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
5364#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
5365#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
5366#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
5367#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
5368#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
5369#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
5370#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
5371#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
5372#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
5373#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
5374#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
5375#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
5376#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
5377#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
5378#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
5379 __le32 psn_and_rxmit_id_echo;
5380 __le32 reg1;
5381 __le32 reg2;
5382 __le32 reg3;
5383 __le32 reg4;
5384 __le32 reg5;
5385 __le32 reg6;
5386 __le32 reg7;
5387 __le32 reg8;
5388 u8 tx_async_error_type;
5389 u8 byte3;
5390 __le16 rq_cons;
5391 u8 byte4;
5392 u8 byte5;
5393 __le16 rq_prod;
5394 __le16 conn_dpi;
5395 __le16 irq_cons;
5396 __le32 num_invalidated_mw;
5397 __le32 reg10;
5398};
5399
5400struct ustorm_roce_req_conn_ag_ctx {
5401 u8 byte0;
5402 u8 byte1;
5403 u8 flags0;
5404#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
5405#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
5406#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
5407#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
5408#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
5409#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
5410#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
5411#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
5412#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
5413#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
5414 u8 flags1;
5415#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
5416#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
5417#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
5418#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
5419#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
5420#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
5421#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
5422#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
5423 u8 flags2;
5424#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
5425#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
5426#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
5427#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
5428#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
5429#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
5430#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
5431#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
5432#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
5433#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
5434#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
5435#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
5436#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
5437#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
5438#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
5439#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
5440 u8 flags3;
5441#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
5442#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
5443#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
5444#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
5445#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
5446#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
5447#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
5448#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
5449#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
5450#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
5451#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
5452#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
5453#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
5454#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
5455#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
5456#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
5457 u8 byte2;
5458 u8 byte3;
5459 __le16 word0;
5460 __le16 word1;
5461 __le32 reg0;
5462 __le32 reg1;
5463 __le32 reg2;
5464 __le32 reg3;
5465 __le16 word2;
5466 __le16 word3;
5467};
5468
5469struct ustorm_roce_resp_conn_ag_ctx {
5470 u8 byte0;
5471 u8 byte1;
5472 u8 flags0;
5473#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
5474#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
5475#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
5476#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
5477#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
5478#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
5479#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
5480#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
5481#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
5482#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
5483 u8 flags1;
5484#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
5485#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
5486#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
5487#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
5488#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
5489#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
5490#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
5491#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
5492 u8 flags2;
5493#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
5494#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
5495#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
5496#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
5497#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
5498#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
5499#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
5500#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
5501#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
5502#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
5503#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
5504#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
5505#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
5506#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
5507#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
5508#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
5509 u8 flags3;
5510#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
5511#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
5512#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
5513#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
5514#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
5515#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
5516#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
5517#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
5518#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
5519#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
5520#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
5521#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
5522#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
5523#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
5524#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
5525#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
5526 u8 byte2;
5527 u8 byte3;
5528 __le16 word0;
5529 __le16 word1;
5530 __le32 reg0;
5531 __le32 reg1;
5532 __le32 reg2;
5533 __le32 reg3;
5534 __le16 word2;
5535 __le16 word3;
5536};
5537
5538struct xstorm_roce_req_conn_ag_ctx {
5539 u8 reserved0;
5540 u8 state;
5541 u8 flags0;
5542#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
5543#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
5544#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
5545#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
5546#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
5547#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2
5548#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
5549#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
5550#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1
5551#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4
5552#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1
5553#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5
5554#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1
5555#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6
5556#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1
5557#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7
5558 u8 flags1;
5559#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1
5560#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0
5561#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1
5562#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1
5563#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1
5564#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
5565#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
5566#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
5567#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1
5568#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4
5569#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1
5570#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5
5571#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
5572#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
5573#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
5574#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
5575 u8 flags2;
5576#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
5577#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0
5578#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
5579#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2
5580#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
5581#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4
5582#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
5583#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6
5584 u8 flags3;
5585#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
5586#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0
5587#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
5588#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
5589#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3
5590#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4
5591#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
5592#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
5593 u8 flags4;
5594#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK 0x3
5595#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT 0
5596#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK 0x3
5597#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT 2
5598#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
5599#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
5600#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
5601#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6
5602 u8 flags5;
5603#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3
5604#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0
5605#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3
5606#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2
5607#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3
5608#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4
5609#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3
5610#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6
5611 u8 flags6;
5612#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3
5613#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0
5614#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3
5615#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2
5616#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3
5617#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4
5618#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3
5619#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6
5620 u8 flags7;
5621#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3
5622#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0
5623#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3
5624#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2
5625#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3
5626#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
5627#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
5628#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6
5629#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
5630#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7
5631 u8 flags8;
5632#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
5633#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0
5634#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
5635#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1
5636#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
5637#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2
5638#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
5639#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
5640#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1
5641#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
5642#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
5643#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
5644#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK 0x1
5645#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT 6
5646#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK 0x1
5647#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT 7
5648 u8 flags9;
5649#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
5650#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
5651#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1
5652#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1
5653#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1
5654#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2
5655#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1
5656#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3
5657#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_EN_MASK 0x1
5658#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_EN_SHIFT 4
5659#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1
5660#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5
5661#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1
5662#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6
5663#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1
5664#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7
5665 u8 flags10;
5666#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1
5667#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0
5668#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1
5669#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1
5670#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1
5671#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2
5672#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1
5673#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3
5674#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
5675#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
5676#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1
5677#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5
5678#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
5679#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6
5680#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
5681#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7
5682 u8 flags11;
5683#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
5684#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0
5685#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
5686#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1
5687#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
5688#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2
5689#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
5690#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3
5691#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
5692#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4
5693#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
5694#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
5695#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
5696#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
5697#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1
5698#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7
5699 u8 flags12;
5700#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1
5701#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0
5702#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1
5703#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1
5704#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
5705#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
5706#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
5707#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
5708#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1
5709#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
5710#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1
5711#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5
5712#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1
5713#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
5714#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1
5715#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7
5716 u8 flags13;
5717#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1
5718#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0
5719#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1
5720#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1
5721#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
5722#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
5723#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
5724#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
5725#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
5726#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
5727#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
5728#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
5729#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
5730#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
5731#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
5732#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
5733 u8 flags14;
5734#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1
5735#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0
5736#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1
5737#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1
5738#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
5739#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
5740#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1
5741#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4
5742#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
5743#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
5744#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3
5745#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6
5746 u8 byte2;
5747 __le16 physical_q0;
5748 __le16 word1;
5749 __le16 sq_cmp_cons;
5750 __le16 sq_cons;
5751 __le16 sq_prod;
5752 __le16 word5;
5753 __le16 conn_dpi;
5754 u8 byte3;
5755 u8 byte4;
5756 u8 byte5;
5757 u8 byte6;
5758 __le32 lsn;
5759 __le32 ssn;
5760 __le32 snd_una_psn;
5761 __le32 snd_nxt_psn;
5762 __le32 reg4;
5763 __le32 orq_cons_th;
5764 __le32 orq_cons;
5765};
5766
5767struct xstorm_roce_resp_conn_ag_ctx {
5768 u8 reserved0;
5769 u8 state;
5770 u8 flags0;
5771#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
5772#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
5773#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1
5774#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1
5775#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1
5776#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2
5777#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
5778#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
5779#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1
5780#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4
5781#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1
5782#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5
5783#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1
5784#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6
5785#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1
5786#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7
5787 u8 flags1;
5788#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1
5789#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0
5790#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1
5791#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1
5792#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1
5793#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
5794#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
5795#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
5796#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1
5797#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4
5798#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1
5799#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5
5800#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
5801#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
5802#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
5803#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
5804 u8 flags2;
5805#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
5806#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6
	u8 flags3;
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4
#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
	u8 flags4;
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6
	u8 flags5;
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6
	u8 flags6;
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6
	u8 flags7;
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2
#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7
	u8 flags8;
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4
#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7
	u8 flags9;
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7
	u8 flags10;
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7
	u8 flags11;
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
	u8 flags12;
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_SHIFT 0
#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7
	u8 flags13;
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
	u8 flags14;
#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0
#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2
#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4
#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3
#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
	u8 byte2;
	__le16 physical_q0;
	__le16 word1;
	__le16 irq_prod;
	__le16 word3;
	__le16 word4;
	__le16 word5;
	__le16 irq_cons;
	u8 rxmit_opcode;
	u8 byte4;
	u8 byte5;
	u8 byte6;
	__le32 rxmit_psn_and_id;
	__le32 rxmit_bytes_length;
	__le32 psn;
	__le32 reg3;
	__le32 reg4;
	__le32 reg5;
	__le32 msn_and_syndrome;
};

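Each flagsN byte above packs several sub-fields, one _MASK/_SHIFT pair per field. A minimal user-space sketch of the access idiom, assuming nothing beyond the defines themselves (GET_FIELD/SET_FIELD are local stand-ins modeled on the token-pasting helpers the qed driver keeps elsewhere, and the two field names are shortened copies of defines above):

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;

/* Local stand-ins, not the driver's exact macros. */
#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & (name ## _MASK))
#define SET_FIELD(value, name, v) \
	((value) = ((value) & ~((name ## _MASK) << (name ## _SHIFT))) | \
		   (((v) & (name ## _MASK)) << (name ## _SHIFT)))

/* Shortened copies of two fields defined above so the sketch compiles. */
#define FLUSH_Q0_CF_EN_MASK 0x1
#define FLUSH_Q0_CF_EN_SHIFT 5
#define RXMIT_CF_MASK 0x3
#define RXMIT_CF_SHIFT 0

int main(void)
{
	u8 flags8 = 0, flags3 = 0;

	SET_FIELD(flags8, FLUSH_Q0_CF_EN, 1);	/* 1-bit enable at bit 5 */
	SET_FIELD(flags3, RXMIT_CF, 2);		/* 2-bit CF at bits 1:0 */
	printf("flags8=0x%02x flags3=0x%02x en=%u\n", flags8, flags3,
	       (unsigned int)GET_FIELD(flags8, FLUSH_Q0_CF_EN));
	return 0;	/* prints flags8=0x20 flags3=0x02 en=1 */
}
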
struct ystorm_roce_req_conn_ag_ctx {
	u8 byte0;
	u8 byte1;
	u8 flags0;
#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
	u8 flags1;
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
	u8 byte2;
	u8 byte3;
	__le16 word0;
	__le32 reg0;
	__le32 reg1;
	__le16 word1;
	__le16 word2;
	__le16 word3;
	__le16 word4;
	__le32 reg2;
	__le32 reg3;
};

struct ystorm_roce_resp_conn_ag_ctx {
	u8 byte0;
	u8 byte1;
	u8 flags0;
#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
	u8 flags1;
#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
	u8 byte2;
	u8 byte3;
	__le16 word0;
	__le32 reg0;
	__le32 reg1;
	__le16 word1;
	__le16 word2;
	__le16 word3;
	__le16 word4;
	__le32 reg2;
	__le32 reg3;
};

struct ystorm_iscsi_conn_st_ctx {
	__le32 reserved[4];
};

struct pstorm_iscsi_tcp_conn_st_ctx {
	__le32 tcp[32];
	__le32 iscsi[4];
};

struct xstorm_iscsi_tcp_conn_st_ctx {
	__le32 reserved_iscsi[40];
	__le32 reserved_tcp[4];
};

struct xstorm_iscsi_conn_ag_ctx {
	u8 cdu_validation;
	u8 state;
	u8 flags0;
#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2
#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5
#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6
#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7
	u8 flags1;
#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1
#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2
#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3
#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5
#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6
#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7
	u8 flags2;
#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2
#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
	u8 flags3;
#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2
#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6
	u8 flags4;
#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2
#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
	u8 flags5;
#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2
#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6
	u8 flags6;
#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2
#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
	u8 flags7;
#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6
#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7
	u8 flags8;
#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2
#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3
#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5
#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6
#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7
	u8 flags9;
#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1
#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2
#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3
#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6
#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7
	u8 flags10;
#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5
#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6
#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
	u8 flags11;
#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2
#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3
#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5
#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7
	u8 flags12;
#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1
#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5
#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6
#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7
	u8 flags13;
#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1
#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
	u8 flags14;
#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1
#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2
#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3
#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5
#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6
	u8 byte2;
	__le16 physical_q0;
	__le16 physical_q1;
	__le16 dummy_dorq_var;
	__le16 sq_cons;
	__le16 sq_prod;
	__le16 word5;
	__le16 slow_io_total_data_tx_update;
	u8 byte3;
	u8 byte4;
	u8 byte5;
	u8 byte6;
	__le32 reg0;
	__le32 reg1;
	__le32 reg2;
	__le32 more_to_send_seq;
	__le32 reg4;
	__le32 reg5;
	__le32 hq_scan_next_relevant_ack;
	__le16 r2tq_prod;
	__le16 r2tq_cons;
	__le16 hq_prod;
	__le16 hq_cons;
	__le32 remain_seq;
	__le32 bytes_to_next_pdu;
	__le32 hq_tcp_seq;
	u8 byte7;
	u8 byte8;
	u8 byte9;
	u8 byte10;
	u8 byte11;
	u8 byte12;
	u8 byte13;
	u8 byte14;
	u8 byte15;
	u8 byte16;
	__le16 word11;
	__le32 reg10;
	__le32 reg11;
	__le32 exp_stat_sn;
	__le32 reg13;
	__le32 reg14;
	__le32 reg15;
	__le32 reg16;
	__le32 reg17;
};

struct tstorm_iscsi_conn_ag_ctx {
	u8 reserved0;
	u8 state;
	u8 flags0;
#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
	u8 flags1;
#define TSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
#define TSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 0
#define TSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
#define TSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 2
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
	u8 flags2;
#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
	u8 flags3;
#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
#define TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 5
#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 6
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
	u8 flags4;
#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6
#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
	u8 flags5;
#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
	__le32 reg0;
	__le32 reg1;
	__le32 reg2;
	__le32 reg3;
	__le32 reg4;
	__le32 reg5;
	__le32 reg6;
	__le32 reg7;
	__le32 reg8;
	u8 byte2;
	u8 byte3;
	__le16 word0;
};

struct ustorm_iscsi_conn_ag_ctx {
	u8 byte0;
	u8 byte1;
	u8 flags0;
#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
	u8 flags1;
#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
	u8 flags2;
#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
	u8 flags3;
#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
	u8 byte2;
	u8 byte3;
	__le16 word0;
	__le16 word1;
	__le32 reg0;
	__le32 reg1;
	__le32 reg2;
	__le32 reg3;
	__le16 word2;
	__le16 word3;
};

struct tstorm_iscsi_conn_st_ctx {
	__le32 reserved[40];
};

struct mstorm_iscsi_conn_ag_ctx {
	u8 reserved;
	u8 state;
	u8 flags0;
#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
	u8 flags1;
#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
	__le16 word0;
	__le16 word1;
	__le32 reg0;
	__le32 reg1;
};

struct mstorm_iscsi_tcp_conn_st_ctx {
	__le32 reserved_tcp[20];
	__le32 reserved_iscsi[8];
};

struct ustorm_iscsi_conn_st_ctx {
	__le32 reserved[52];
};

struct iscsi_conn_context {
	struct ystorm_iscsi_conn_st_ctx ystorm_st_context;
	struct regpair ystorm_st_padding[2];
	struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context;
	struct regpair pstorm_st_padding[2];
	struct pb_context xpb2_context;
	struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context;
	struct regpair xstorm_st_padding[2];
	struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
	struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
	struct regpair tstorm_ag_padding[2];
	struct timers_context timer_context;
	struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
	struct pb_context upb_context;
	struct tstorm_iscsi_conn_st_ctx tstorm_st_context;
	struct regpair tstorm_st_padding[2];
	struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
	struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context;
	struct ustorm_iscsi_conn_st_ctx ustorm_st_context;
};

struct iscsi_init_ramrod_params {
	struct iscsi_spe_func_init iscsi_init_spe;
	struct tcp_init_params tcp_init;
};

struct ystorm_iscsi_conn_ag_ctx {
	u8 byte0;
	u8 byte1;
	u8 flags0;
#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
	u8 flags1;
#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
	u8 byte2;
	u8 byte3;
	__le16 word0;
	__le32 reg0;
	__le32 reg1;
	__le16 word1;
	__le16 word2;
	__le16 word3;
	__le16 word4;
	__le32 reg2;
	__le32 reg3;
};
#define VF_MAX_STATIC 192

#define MCP_GLOB_PATH_MAX 2
#define MCP_PORT_MAX 2
#define MCP_GLOB_PORT_MAX 4
#define MCP_GLOB_FUNC_MAX 16

/* Offset from the beginning of the MCP scratchpad */
#define OFFSIZE_OFFSET_SHIFT 0
#define OFFSIZE_OFFSET_MASK 0x0000ffff
/* Size of specific element (not the whole array if any) */
#define OFFSIZE_SIZE_SHIFT 16
#define OFFSIZE_SIZE_MASK 0xffff0000

#define SECTION_OFFSET(_offsize) ((((_offsize & \
				     OFFSIZE_OFFSET_MASK) >> \
				     OFFSIZE_OFFSET_SHIFT) << 2))

#define QED_SECTION_SIZE(_offsize) (((_offsize & \
				      OFFSIZE_SIZE_MASK) >> \
				      OFFSIZE_SIZE_SHIFT) << 2)

#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
				     SECTION_OFFSET(_offsize) + \
				     (QED_SECTION_SIZE(_offsize) * idx))

#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
	(_pub_base + offsetof(struct mcp_public_data, sections[_section]))

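SECTION_OFFSET and QED_SECTION_SIZE above unpack the two halves of an offsize word (low 16 bits: offset, high 16 bits: per-element size, both counted in dwords, hence the "<< 2" to bytes), and SECTION_ADDR adds an element index on top. A small sketch of that arithmetic, assuming a placeholder scratchpad base rather than the device's real MCP_REG_SCRATCH value:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

#define OFFSIZE_OFFSET_SHIFT 0
#define OFFSIZE_OFFSET_MASK 0x0000ffff
#define OFFSIZE_SIZE_SHIFT 16
#define OFFSIZE_SIZE_MASK 0xffff0000

/* Same arithmetic as the macros above; both halves are dword counts. */
static u32 section_offset(u32 offsize)
{
	return ((offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT) << 2;
}

static u32 section_size(u32 offsize)
{
	return ((offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_SHIFT) << 2;
}

int main(void)
{
	u32 mcp_reg_scratch = 0xe20000;	/* placeholder GRC base, not the real register */
	u32 offsize = (0x0004 << 16) | 0x0100; /* 4-dword elements at dword offset 0x100 */
	u32 idx = 2;

	printf("offset=0x%x size=0x%x addr=0x%x\n",
	       section_offset(offsize), section_size(offsize),
	       mcp_reg_scratch + section_offset(offsize) +
	       section_size(offsize) * idx);	/* addr=0xe20420 */
	return 0;
}
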
/* PHY configuration */
struct eth_phy_cfg {
	u32 speed;
#define ETH_SPEED_AUTONEG 0
#define ETH_SPEED_SMARTLINQ 0x8

	u32 pause;
#define ETH_PAUSE_NONE 0x0
#define ETH_PAUSE_AUTONEG 0x1
#define ETH_PAUSE_RX 0x2
#define ETH_PAUSE_TX 0x4

	u32 adv_speed;
	u32 loopback_mode;
#define ETH_LOOPBACK_NONE (0)
#define ETH_LOOPBACK_INT_PHY (1)
#define ETH_LOOPBACK_EXT_PHY (2)
#define ETH_LOOPBACK_EXT (3)
#define ETH_LOOPBACK_MAC (4)

	u32 feature_config_flags;
#define ETH_EEE_MODE_ADV_LPI (1 << 0)
};

struct port_mf_cfg {
	u32 dynamic_cfg;
#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
#define PORT_MF_CFG_OV_TAG_SHIFT 0
#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK

	u32 reserved[1];
};

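In eth_phy_cfg, speed selects a fixed rate or autonegotiation, pause is a bitmask of the ETH_PAUSE_* bits, and adv_speed carries the speeds to advertise. A hedged sketch of filling a request (the struct copy is trimmed and phy_cfg_autoneg is a hypothetical helper, not driver code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t u32;

/* Trimmed copy of struct eth_phy_cfg, enough for the sketch. */
struct eth_phy_cfg {
	u32 speed;
	u32 pause;
	u32 adv_speed;
	u32 loopback_mode;
	u32 feature_config_flags;
};

#define ETH_SPEED_AUTONEG 0
#define ETH_PAUSE_RX 0x2
#define ETH_PAUSE_TX 0x4
#define ETH_LOOPBACK_NONE (0)

/* Hypothetical helper: autonegotiate, advertise a given speed mask,
 * and request symmetric (RX+TX) pause.
 */
static void phy_cfg_autoneg(struct eth_phy_cfg *cfg, u32 adv_speed_mask)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->speed = ETH_SPEED_AUTONEG;		/* 0 means "negotiate" */
	cfg->pause = ETH_PAUSE_RX | ETH_PAUSE_TX; /* pause is a bitmask */
	cfg->adv_speed = adv_speed_mask;
	cfg->loopback_mode = ETH_LOOPBACK_NONE;
}

int main(void)
{
	struct eth_phy_cfg cfg;

	phy_cfg_autoneg(&cfg, 0x8);	/* placeholder advertised-speed mask */
	printf("speed=%u pause=0x%x\n", cfg.speed, cfg.pause);
	return 0;
}
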
struct eth_stats {
	u64 r64;
	u64 r127;
	u64 r255;
	u64 r511;
	u64 r1023;
	u64 r1518;
	u64 r1522;
	u64 r2047;
	u64 r4095;
	u64 r9216;
	u64 r16383;
	u64 rfcs;
	u64 rxcf;
	u64 rxpf;
	u64 rxpp;
	u64 raln;
	u64 rfcr;
	u64 rovr;
	u64 rjbr;
	u64 rund;
	u64 rfrg;
	u64 t64;
	u64 t127;
	u64 t255;
	u64 t511;
	u64 t1023;
	u64 t1518;
	u64 t2047;
	u64 t4095;
	u64 t9216;
	u64 t16383;
	u64 txpf;
	u64 txpp;
	u64 tlpiec;
	u64 tncl;
	u64 rbyte;
	u64 rxuca;
	u64 rxmca;
	u64 rxbca;
	u64 rxpok;
	u64 tbyte;
	u64 txuca;
	u64 txmca;
	u64 txbca;
	u64 txcf;
};

struct brb_stats {
	u64 brb_truncate[8];
	u64 brb_discard[8];
};

struct port_stats {
	struct brb_stats brb;
	struct eth_stats eth;
};

struct couple_mode_teaming {
	u8 port_cmt[MCP_GLOB_PORT_MAX];
#define PORT_CMT_IN_TEAM (1 << 0)

#define PORT_CMT_PORT_ROLE (1 << 1)
#define PORT_CMT_PORT_INACTIVE (0 << 1)
#define PORT_CMT_PORT_ACTIVE (1 << 1)

#define PORT_CMT_TEAM_MASK (1 << 2)
#define PORT_CMT_TEAM0 (0 << 2)
#define PORT_CMT_TEAM1 (1 << 2)
};

#define LLDP_CHASSIS_ID_STAT_LEN 4
#define LLDP_PORT_ID_STAT_LEN 4
#define DCBX_MAX_APP_PROTOCOL 32
#define MAX_SYSTEM_LLDP_TLV_DATA 32

enum _lldp_agent {
	LLDP_NEAREST_BRIDGE = 0,
	LLDP_NEAREST_NON_TPMR_BRIDGE,
	LLDP_NEAREST_CUSTOMER_BRIDGE,
@@ -3394,690 +6786,517 @@ enum lldp_agent_e {

struct lldp_config_params_s {
	u32 config;
#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
#define LLDP_CONFIG_HOLD_MASK 0x00000f00
#define LLDP_CONFIG_HOLD_SHIFT 8
#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
	u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
	u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
};

struct lldp_status_params_s {
	u32 prefix_seq_num;
	u32 status;
	u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
	u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
	u32 suffix_seq_num;
};

struct dcbx_ets_feature {
	u32 flags;
#define DCBX_ETS_ENABLED_MASK 0x00000001
#define DCBX_ETS_ENABLED_SHIFT 0
#define DCBX_ETS_WILLING_MASK 0x00000002
#define DCBX_ETS_WILLING_SHIFT 1
#define DCBX_ETS_ERROR_MASK 0x00000004
#define DCBX_ETS_ERROR_SHIFT 2
#define DCBX_ETS_CBS_MASK 0x00000008
#define DCBX_ETS_CBS_SHIFT 3
#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
#define DCBX_ETS_MAX_TCS_SHIFT 4
#define DCBX_ISCSI_OOO_TC_MASK 0x00000f00
#define DCBX_ISCSI_OOO_TC_SHIFT 8
	u32 pri_tc_tbl[1];
#define DCBX_ISCSI_OOO_TC (4)

#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
#define DCBX_CEE_STRICT_PRIORITY 0xf
	u32 tc_bw_tbl[2];
	u32 tc_tsa_tbl[2];
#define DCBX_ETS_TSA_STRICT 0
#define DCBX_ETS_TSA_CBS 1
#define DCBX_ETS_TSA_ETS 2
};

struct dcbx_app_priority_entry {
	u32 entry;
#define DCBX_APP_PRI_MAP_MASK 0x000000ff
#define DCBX_APP_PRI_MAP_SHIFT 0
#define DCBX_APP_PRI_0 0x01
#define DCBX_APP_PRI_1 0x02
#define DCBX_APP_PRI_2 0x04
#define DCBX_APP_PRI_3 0x08
#define DCBX_APP_PRI_4 0x10
#define DCBX_APP_PRI_5 0x20
#define DCBX_APP_PRI_6 0x40
#define DCBX_APP_PRI_7 0x80
#define DCBX_APP_SF_MASK 0x00000300
#define DCBX_APP_SF_SHIFT 8
#define DCBX_APP_SF_ETHTYPE 0
#define DCBX_APP_SF_PORT 1
#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
#define DCBX_APP_PROTOCOL_ID_SHIFT 16
};
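
Each dcbx_app_priority_entry packs a priority bitmap, a selector field saying whether the protocol ID is an ethtype or a TCP/UDP port, and the 16-bit ID itself. A decode sketch using only the defines above (the sample entry is hypothetical):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

#define DCBX_APP_PRI_MAP_MASK 0x000000ff
#define DCBX_APP_PRI_MAP_SHIFT 0
#define DCBX_APP_SF_MASK 0x00000300
#define DCBX_APP_SF_SHIFT 8
#define DCBX_APP_SF_ETHTYPE 0
#define DCBX_APP_SF_PORT 1
#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
#define DCBX_APP_PROTOCOL_ID_SHIFT 16

/* Decode one app-table entry: protocol ID, ID flavor, priority bitmap. */
static void decode_app_entry(u32 entry)
{
	u32 id = (entry & DCBX_APP_PROTOCOL_ID_MASK) >> DCBX_APP_PROTOCOL_ID_SHIFT;
	u32 sf = (entry & DCBX_APP_SF_MASK) >> DCBX_APP_SF_SHIFT;
	u32 pri_map = (entry & DCBX_APP_PRI_MAP_MASK) >> DCBX_APP_PRI_MAP_SHIFT;

	printf("%s %u -> pri bitmap 0x%02x\n",
	       sf == DCBX_APP_SF_PORT ? "port" : "ethtype", id, pri_map);
}

int main(void)
{
	/* Hypothetical entry: iSCSI (TCP port 3260) mapped to priority 4. */
	decode_app_entry((3260u << DCBX_APP_PROTOCOL_ID_SHIFT) |
			 (DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT) |
			 0x10);	/* 0x10 == DCBX_APP_PRI_4 */
	return 0;
}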

struct dcbx_app_priority_feature {
	u32 flags;
#define DCBX_APP_ENABLED_MASK 0x00000001
#define DCBX_APP_ENABLED_SHIFT 0
#define DCBX_APP_WILLING_MASK 0x00000002
#define DCBX_APP_WILLING_SHIFT 1
#define DCBX_APP_ERROR_MASK 0x00000004
#define DCBX_APP_ERROR_SHIFT 2
#define DCBX_APP_MAX_TCS_MASK 0x0000f000
#define DCBX_APP_MAX_TCS_SHIFT 12
#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
#define DCBX_APP_NUM_ENTRIES_SHIFT 16
	struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
};

struct dcbx_features {
	struct dcbx_ets_feature ets;
	u32 pfc;
#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80

#define DCBX_PFC_FLAGS_MASK 0x0000ff00
#define DCBX_PFC_FLAGS_SHIFT 8
#define DCBX_PFC_CAPS_MASK 0x00000f00
#define DCBX_PFC_CAPS_SHIFT 8
#define DCBX_PFC_MBC_MASK 0x00004000
#define DCBX_PFC_MBC_SHIFT 14
#define DCBX_PFC_WILLING_MASK 0x00008000
#define DCBX_PFC_WILLING_SHIFT 15
#define DCBX_PFC_ENABLED_MASK 0x00010000
#define DCBX_PFC_ENABLED_SHIFT 16
#define DCBX_PFC_ERROR_MASK 0x00020000
#define DCBX_PFC_ERROR_SHIFT 17

	struct dcbx_app_priority_feature app;
};

struct dcbx_local_params {
	u32 config;
#define DCBX_CONFIG_VERSION_MASK 0x00000007
#define DCBX_CONFIG_VERSION_SHIFT 0
#define DCBX_CONFIG_VERSION_DISABLED 0
#define DCBX_CONFIG_VERSION_IEEE 1
#define DCBX_CONFIG_VERSION_CEE 2
#define DCBX_CONFIG_VERSION_STATIC 4

	u32 flags;
	struct dcbx_features features;
};

struct dcbx_mib {
	u32 prefix_seq_num;
	u32 flags;
	struct dcbx_features features;
	u32 suffix_seq_num;
};

struct lldp_system_tlvs_buffer_s {
	u16 valid;
	u16 length;
	u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
};
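
Note that the version field in dcbx_local_params.config widened from a 2-bit mask (0x3) to three bits (0x7) to make room for the new DCBX_CONFIG_VERSION_STATIC flavor; the flavors are distinct bit values, so a config word names exactly one of them. A small decode sketch:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

#define DCBX_CONFIG_VERSION_MASK 0x00000007
#define DCBX_CONFIG_VERSION_SHIFT 0
#define DCBX_CONFIG_VERSION_DISABLED 0
#define DCBX_CONFIG_VERSION_IEEE 1
#define DCBX_CONFIG_VERSION_CEE 2
#define DCBX_CONFIG_VERSION_STATIC 4

/* Map the 3-bit version field to a printable name. */
static const char *dcbx_version_str(u32 config)
{
	switch ((config & DCBX_CONFIG_VERSION_MASK) >> DCBX_CONFIG_VERSION_SHIFT) {
	case DCBX_CONFIG_VERSION_IEEE:
		return "ieee";
	case DCBX_CONFIG_VERSION_CEE:
		return "cee";
	case DCBX_CONFIG_VERSION_STATIC:
		return "static";
	default:
		return "disabled";
	}
}

int main(void)
{
	printf("%s\n", dcbx_version_str(DCBX_CONFIG_VERSION_CEE)); /* "cee" */
	return 0;
}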

struct dcb_dscp_map {
	u32 flags;
#define DCB_DSCP_ENABLE_MASK 0x1
#define DCB_DSCP_ENABLE_SHIFT 0
#define DCB_DSCP_ENABLE 1
	u32 dscp_pri_map[8];
};

struct public_global {
	u32 max_path;
	u32 max_ports;
	u32 debug_mb_offset;
	u32 phymod_dbg_mb_offset;
	struct couple_mode_teaming cmt;
	s32 internal_temperature;
	u32 mfw_ver;
	u32 running_bundle_id;
	s32 external_temperature;
	u32 mdump_reason;
};

3572 6948
3573/****************************************************************************
3574* Shared Memory 2 Region *
3575****************************************************************************/
3576/* The fw_flr_ack is actually built in the following way: */
3577/* 8 bit: PF ack */
3578/* 128 bit: VF ack */
3579/* 8 bit: ios_dis_ack */
3580/* In order to maintain endianity in the mailbox hsi, we want to keep using */
3581/* u32. The fw must have the VF right after the PF since this is how it */
3582/* access arrays(it expects always the VF to reside after the PF, and that */
3583/* makes the calculation much easier for it. ) */
3584/* In order to answer both limitations, and keep the struct small, the code */
3585/* will abuse the structure defined here to achieve the actual partition */
3586/* above */
3587/****************************************************************************/
3588struct fw_flr_mb { 6949struct fw_flr_mb {
3589 u32 aggint; 6950 u32 aggint;
3590 u32 opgen_addr; 6951 u32 opgen_addr;
3591 u32 accum_ack; /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */ 6952 u32 accum_ack;
3592#define ACCUM_ACK_PF_BASE 0
3593#define ACCUM_ACK_PF_SHIFT 0
3594
3595#define ACCUM_ACK_VF_BASE 8
3596#define ACCUM_ACK_VF_SHIFT 3
3597
3598#define ACCUM_ACK_IOV_DIS_BASE 256
3599#define ACCUM_ACK_IOV_DIS_SHIFT 8
3600}; 6953};
3601 6954
3602struct public_path { 6955struct public_path {
3603 struct fw_flr_mb flr_mb; 6956 struct fw_flr_mb flr_mb;
3604 u32 mcp_vf_disabled[VF_MAX_STATIC / 32]; 6957 u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
3605 6958
3606 u32 process_kill; 6959 u32 process_kill;
3607#define PROCESS_KILL_COUNTER_MASK 0x0000ffff 6960#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
3608#define PROCESS_KILL_COUNTER_SHIFT 0 6961#define PROCESS_KILL_COUNTER_SHIFT 0
3609#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000 6962#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
3610#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16 6963#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
3611#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit) 6964#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
3612}; 6965};
3613 6966
-/**************************************/
-/*                                    */
-/*      P U B L I C      P O R T      */
-/*                                    */
-/**************************************/
-
-/****************************************************************************
-* Driver <-> FW Mailbox                                                     *
-****************************************************************************/
-
 struct public_port {
-	u32 validity_map;	/* 0x0 (4*2 = 0x8) */
-
-	/* validity bits */
-#define MCP_VALIDITY_PCI_CFG	0x00100000
-#define MCP_VALIDITY_MB	0x00200000
-#define MCP_VALIDITY_DEV_INFO	0x00400000
-#define MCP_VALIDITY_RESERVED	0x00000007
-
-	/* One licensing bit should be set */
-#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK	0x00000038
-#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT	0x00000008
-#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT	0x00000010
-#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT	0x00000020
-
-	/* Active MFW */
-#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN	0x00000000
-#define MCP_VALIDITY_ACTIVE_MFW_MASK	0x000001c0
-#define MCP_VALIDITY_ACTIVE_MFW_NCSI	0x00000040
-#define MCP_VALIDITY_ACTIVE_MFW_NONE	0x000001c0
+	u32 validity_map;
 
 	u32 link_status;
-#define LINK_STATUS_LINK_UP \
-	0x00000001
+#define LINK_STATUS_LINK_UP	0x00000001
 #define LINK_STATUS_SPEED_AND_DUPLEX_MASK	0x0000001e
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD	BIT(1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD	(1 << 1)
 #define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD	(2 << 1)
 #define LINK_STATUS_SPEED_AND_DUPLEX_10G	(3 << 1)
 #define LINK_STATUS_SPEED_AND_DUPLEX_20G	(4 << 1)
 #define LINK_STATUS_SPEED_AND_DUPLEX_40G	(5 << 1)
 #define LINK_STATUS_SPEED_AND_DUPLEX_50G	(6 << 1)
 #define LINK_STATUS_SPEED_AND_DUPLEX_100G	(7 << 1)
 #define LINK_STATUS_SPEED_AND_DUPLEX_25G	(8 << 1)
 
 #define LINK_STATUS_AUTO_NEGOTIATE_ENABLED	0x00000020
 
 #define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE	0x00000040
 #define LINK_STATUS_PARALLEL_DETECTION_USED	0x00000080
 
-#define LINK_STATUS_PFC_ENABLED	\
-	0x00000100
+#define LINK_STATUS_PFC_ENABLED	0x00000100
 #define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE	0x00000200
 #define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE	0x00000400
 #define LINK_STATUS_LINK_PARTNER_10G_CAPABLE	0x00000800
 #define LINK_STATUS_LINK_PARTNER_20G_CAPABLE	0x00001000
 #define LINK_STATUS_LINK_PARTNER_40G_CAPABLE	0x00002000
 #define LINK_STATUS_LINK_PARTNER_50G_CAPABLE	0x00004000
 #define LINK_STATUS_LINK_PARTNER_100G_CAPABLE	0x00008000
 #define LINK_STATUS_LINK_PARTNER_25G_CAPABLE	0x00010000
 
 #define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK	0x000C0000
 #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE	(0 << 18)
-#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE	BIT(18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE	(1 << 18)
 #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE	(2 << 18)
 #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE	(3 << 18)
 
-#define LINK_STATUS_SFP_TX_FAULT \
-	0x00100000
+#define LINK_STATUS_SFP_TX_FAULT	0x00100000
 #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED	0x00200000
 #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED	0x00400000
+#define LINK_STATUS_RX_SIGNAL_PRESENT	0x00800000
+#define LINK_STATUS_MAC_LOCAL_FAULT	0x01000000
+#define LINK_STATUS_MAC_REMOTE_FAULT	0x02000000
+#define LINK_STATUS_UNSUPPORTED_SPD_REQ	0x04000000
 
 	u32 link_status1;
 	u32 ext_phy_fw_version;
 	u32 drv_phy_cfg_addr;
 
 	u32 port_stx;
 
 	u32 stat_nig_timer;
 
 	struct port_mf_cfg port_mf_config;
 	struct port_stats stats;
 
 	u32 media_type;
 #define MEDIA_UNSPECIFIED	0x0
 #define MEDIA_SFPP_10G_FIBER	0x1
 #define MEDIA_XFP_FIBER	0x2
 #define MEDIA_DA_TWINAX	0x3
 #define MEDIA_BASE_T	0x4
 #define MEDIA_SFP_1G_FIBER	0x5
 #define MEDIA_MODULE_FIBER	0x6
 #define MEDIA_KR	0xf0
 #define MEDIA_NOT_PRESENT	0xff
 
 	u32 lfa_status;
-#define LFA_LINK_FLAP_REASON_OFFSET	0
-#define LFA_LINK_FLAP_REASON_MASK	0x000000ff
-#define LFA_NO_REASON	(0 << 0)
-#define LFA_LINK_DOWN	BIT(0)
-#define LFA_FORCE_INIT	BIT(1)
-#define LFA_LOOPBACK_MISMATCH	BIT(2)
-#define LFA_SPEED_MISMATCH	BIT(3)
-#define LFA_FLOW_CTRL_MISMATCH	BIT(4)
-#define LFA_ADV_SPEED_MISMATCH	BIT(5)
-#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET	8
-#define LINK_FLAP_AVOIDANCE_COUNT_MASK	0x0000ff00
-#define LINK_FLAP_COUNT_OFFSET	16
-#define LINK_FLAP_COUNT_MASK	0x00ff0000
-
 	u32 link_change_count;
 
-	/* LLDP params */
-	struct lldp_config_params_s lldp_config_params[
-		LLDP_MAX_LLDP_AGENTS];
-	struct lldp_status_params_s lldp_status_params[
-		LLDP_MAX_LLDP_AGENTS];
+	struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
+	struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
 	struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
 
 	/* DCBX related MIB */
 	struct dcbx_local_params local_admin_dcbx_mib;
 	struct dcbx_mib remote_dcbx_mib;
 	struct dcbx_mib operational_dcbx_mib;
 
-	u32 fc_npiv_nvram_tbl_addr;
-	u32 fc_npiv_nvram_tbl_size;
-	u32 transceiver_data;
-#define PMM_TRANSCEIVER_STATE_MASK	0x000000FF
-#define PMM_TRANSCEIVER_STATE_SHIFT	0x00000000
-#define PMM_TRANSCEIVER_STATE_PRESENT	0x00000001
-};
+	u32 reserved[2];
+	u32 transceiver_data;
+#define ETH_TRANSCEIVER_STATE_MASK	0x000000FF
+#define ETH_TRANSCEIVER_STATE_SHIFT	0x00000000
+#define ETH_TRANSCEIVER_STATE_UNPLUGGED	0x00000000
+#define ETH_TRANSCEIVER_STATE_PRESENT	0x00000001
+#define ETH_TRANSCEIVER_STATE_VALID	0x00000003
+#define ETH_TRANSCEIVER_STATE_UPDATING	0x00000008
 
-/**************************************/
-/*                                    */
-/*      P U B L I C      F U N C      */
-/*                                    */
-/**************************************/
+	u32 wol_info;
+	u32 wol_pkt_len;
+	u32 wol_pkt_details;
+	struct dcb_dscp_map dcb_dscp_map;
+};
 
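A note on decoding link_status: the speed/duplex field in bits 4..1 is an enumerated value, not a set of independent flags, so it must be compared after masking, while the single-bit flags are tested directly. A minimal sketch, assuming a caller that already holds the link_status dword (the function name is illustrative only):

	/* Sketch only: true when the link is up at 40G. */
	static bool link_is_40g(u32 link_status)
	{
		if (!(link_status & LINK_STATUS_LINK_UP))
			return false;

		return (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) ==
		       LINK_STATUS_SPEED_AND_DUPLEX_40G;
	}
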
 struct public_func {
-	u32 iscsi_boot_signature;
-	u32 iscsi_boot_block_offset;
-
-	u32 mtu_size;
-	u32 c2s_pcp_map_lower;
-	u32 c2s_pcp_map_upper;
-	u32 c2s_pcp_map_default;
-	u32 reserved[4];
-
-	u32 config;
-
-	/* E/R/I/D */
-	/* function 0 of each port cannot be hidden */
-#define FUNC_MF_CFG_FUNC_HIDE	0x00000001
-#define FUNC_MF_CFG_PAUSE_ON_HOST_RING	0x00000002
-#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT	0x00000001
-
-#define FUNC_MF_CFG_PROTOCOL_MASK	0x000000f0
-#define FUNC_MF_CFG_PROTOCOL_SHIFT	4
-#define FUNC_MF_CFG_PROTOCOL_ETHERNET	0x00000000
+	u32 reserved0[2];
+
+	u32 mtu_size;
+
+	u32 reserved[7];
+
+	u32 config;
+#define FUNC_MF_CFG_FUNC_HIDE	0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING	0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT	0x00000001
+
+#define FUNC_MF_CFG_PROTOCOL_MASK	0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_SHIFT	4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET	0x00000000
 #define FUNC_MF_CFG_PROTOCOL_ISCSI	0x00000010
-#define FUNC_MF_CFG_PROTOCOL_FCOE	0x00000020
 #define FUNC_MF_CFG_PROTOCOL_ROCE	0x00000030
 #define FUNC_MF_CFG_PROTOCOL_MAX	0x00000030
 
-	/* MINBW, MAXBW */
-	/* value range - 0..100, increments in 1 % */
 #define FUNC_MF_CFG_MIN_BW_MASK	0x0000ff00
 #define FUNC_MF_CFG_MIN_BW_SHIFT	8
 #define FUNC_MF_CFG_MIN_BW_DEFAULT	0x00000000
 #define FUNC_MF_CFG_MAX_BW_MASK	0x00ff0000
 #define FUNC_MF_CFG_MAX_BW_SHIFT	16
 #define FUNC_MF_CFG_MAX_BW_DEFAULT	0x00640000
 
 	u32 status;
 #define FUNC_STATUS_VLINK_DOWN	0x00000001
 
-	u32 mac_upper;	/* MAC */
+	u32 mac_upper;
 #define FUNC_MF_CFG_UPPERMAC_MASK	0x0000ffff
 #define FUNC_MF_CFG_UPPERMAC_SHIFT	0
 #define FUNC_MF_CFG_UPPERMAC_DEFAULT	FUNC_MF_CFG_UPPERMAC_MASK
 	u32 mac_lower;
 #define FUNC_MF_CFG_LOWERMAC_DEFAULT	0xffffffff
 
 	u32 fcoe_wwn_port_name_upper;
 	u32 fcoe_wwn_port_name_lower;
 
 	u32 fcoe_wwn_node_name_upper;
 	u32 fcoe_wwn_node_name_lower;
 
-	u32 ovlan_stag;	/* tags */
+	u32 ovlan_stag;
 #define FUNC_MF_CFG_OV_STAG_MASK	0x0000ffff
 #define FUNC_MF_CFG_OV_STAG_SHIFT	0
 #define FUNC_MF_CFG_OV_STAG_DEFAULT	FUNC_MF_CFG_OV_STAG_MASK
 
-	u32 pf_allocation;	/* vf per pf */
+	u32 pf_allocation;
 
-	u32 preserve_data;	/* Will be used bt CCM */
+	u32 preserve_data;
 
 	u32 driver_last_activity_ts;
 
-	u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32];	/* 0x0044 */
+	u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32];
 
 	u32 drv_id;
 #define DRV_ID_PDA_COMP_VER_MASK	0x0000ffff
 #define DRV_ID_PDA_COMP_VER_SHIFT	0
 
 #define DRV_ID_MCP_HSI_VER_MASK	0x00ff0000
 #define DRV_ID_MCP_HSI_VER_SHIFT	16
-#define DRV_ID_MCP_HSI_VER_CURRENT	BIT(DRV_ID_MCP_HSI_VER_SHIFT)
+#define DRV_ID_MCP_HSI_VER_CURRENT	(1 << DRV_ID_MCP_HSI_VER_SHIFT)
 
 #define DRV_ID_DRV_TYPE_MASK	0x7f000000
 #define DRV_ID_DRV_TYPE_SHIFT	24
 #define DRV_ID_DRV_TYPE_UNKNOWN	(0 << DRV_ID_DRV_TYPE_SHIFT)
 #define DRV_ID_DRV_TYPE_LINUX	(1 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_WINDOWS	(2 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_DIAG	(3 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_PREBOOT	(4 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_SOLARIS	(5 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_VMWARE	(6 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_FREEBSD	(7 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_AIX	(8 << DRV_ID_DRV_TYPE_SHIFT)
 
 #define DRV_ID_DRV_INIT_HW_MASK	0x80000000
 #define DRV_ID_DRV_INIT_HW_SHIFT	31
-#define DRV_ID_DRV_INIT_HW_FLAG	BIT(DRV_ID_DRV_INIT_HW_SHIFT)
+#define DRV_ID_DRV_INIT_HW_FLAG	(1 << DRV_ID_DRV_INIT_HW_SHIFT)
 };
 
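The drv_id word packs a PDA compatibility version, the MCP HSI version and a driver type into one register. A sketch of how a Linux driver might compose it; pda_comp_ver is a made-up input value, not something defined by this patch:

	/* Sketch only: build drv_id for a Linux driver that inits HW. */
	u32 drv_id = DRV_ID_DRV_INIT_HW_FLAG | DRV_ID_DRV_TYPE_LINUX |
		     DRV_ID_MCP_HSI_VER_CURRENT |
		     (pda_comp_ver & DRV_ID_PDA_COMP_VER_MASK);
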
-/**************************************/
-/*                                    */
-/*      P U B L I C      M B          */
-/*                                    */
-/**************************************/
-/* This is the only section that the driver can write to, and each */
-/* Basically each driver request to set feature parameters,
- * will be done using a different command, which will be linked
- * to a specific data structure from the union below.
- * For huge strucuture, the common blank structure should be used.
- */
-
 struct mcp_mac {
-	u32 mac_upper;	/* Upper 16 bits are always zeroes */
+	u32 mac_upper;
 	u32 mac_lower;
 };
 
 struct mcp_val64 {
 	u32 lo;
 	u32 hi;
 };
 
 struct mcp_file_att {
 	u32 nvm_start_addr;
 	u32 len;
+};
+
+struct bist_nvm_image_att {
+	u32 return_code;
+	u32 image_type;
+	u32 nvm_start_addr;
+	u32 len;
 };
 
 #define MCP_DRV_VER_STR_SIZE	16
 #define MCP_DRV_VER_STR_SIZE_DWORD	(MCP_DRV_VER_STR_SIZE / sizeof(u32))
 #define MCP_DRV_NVM_BUF_LEN	32
 struct drv_version_stc {
 	u32 version;
 	u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+struct lan_stats_stc {
+	u64 ucast_rx_pkts;
+	u64 ucast_tx_pkts;
+	u32 fcs_err;
+	u32 rserved;
+};
+
+struct ocbb_data_stc {
+	u32 ocbb_host_addr;
+	u32 ocsd_host_addr;
+	u32 ocsd_req_update_interval;
+};
+
+#define MAX_NUM_OF_SENSORS	7
+struct temperature_status_stc {
+	u32 num_of_sensors;
+	u32 sensor[MAX_NUM_OF_SENSORS];
+};
+
+/* crash dump configuration header */
+struct mdump_config_stc {
+	u32 version;
+	u32 config;
+	u32 epoc;
+	u32 num_of_logs;
+	u32 valid_logs;
 };
 
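The newly added temperature_status_stc reports a variable number of live readings; num_of_sensors bounds how much of sensor[] is valid, so a consumer should clamp before iterating. A hedged sketch (temp is an assumed pointer to a populated structure, not code from this patch):

	/* Sketch only: walk the valid sensor readings. */
	u32 i, n = min_t(u32, temp->num_of_sensors, MAX_NUM_OF_SENSORS);

	for (i = 0; i < n; i++)
		pr_info("sensor %u reading: %u\n", i, temp->sensor[i]);
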
 union drv_union_data {
 	u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
 	struct mcp_mac wol_mac;
 
-	struct pmm_phy_cfg drv_phy_cfg;
+	struct eth_phy_cfg drv_phy_cfg;
 
-	struct mcp_val64 val64;	/* For PHY / AVS commands */
+	struct mcp_val64 val64;
 
 	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
 
 	struct mcp_file_att file_att;
 
 	u32 ack_vf_disabled[VF_MAX_STATIC / 32];
 
 	struct drv_version_stc drv_version;
+
+	struct lan_stats_stc lan_stats;
+	u64 reserved_stats[11];
+	struct ocbb_data_stc ocbb_info;
+	struct temperature_status_stc temp_info;
+	struct bist_nvm_image_att nvm_image_att;
+	struct mdump_config_stc mdump_config;
 };
 
 struct public_drv_mb {
 	u32 drv_mb_header;
 #define DRV_MSG_CODE_MASK	0xffff0000
 #define DRV_MSG_CODE_LOAD_REQ	0x10000000
 #define DRV_MSG_CODE_LOAD_DONE	0x11000000
 #define DRV_MSG_CODE_INIT_HW	0x12000000
 #define DRV_MSG_CODE_UNLOAD_REQ	0x20000000
 #define DRV_MSG_CODE_UNLOAD_DONE	0x21000000
 #define DRV_MSG_CODE_INIT_PHY	0x22000000
-	/* Params - FORCE - Reinitialize the link regardless of LFA */
-	/*	  - DONT_CARE - Don't flap the link if up */
 #define DRV_MSG_CODE_LINK_RESET	0x23000000
-
-#define DRV_MSG_CODE_SET_LLDP	0x24000000
 #define DRV_MSG_CODE_SET_DCBX	0x25000000
+
 #define DRV_MSG_CODE_BW_UPDATE_ACK	0x32000000
 #define DRV_MSG_CODE_NIG_DRAIN	0x30000000
-
-#define DRV_MSG_CODE_INITIATE_FLR	0x02000000
 #define DRV_MSG_CODE_VF_DISABLED_DONE	0xc0000000
 #define DRV_MSG_CODE_CFG_VF_MSIX	0xc0010000
-#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN	0x00010000
-#define DRV_MSG_CODE_NVM_PUT_FILE_DATA	0x00020000
-#define DRV_MSG_CODE_NVM_GET_FILE_ATT	0x00030000
-#define DRV_MSG_CODE_NVM_READ_NVRAM	0x00050000
-#define DRV_MSG_CODE_NVM_WRITE_NVRAM	0x00060000
-#define DRV_MSG_CODE_NVM_DEL_FILE	0x00080000
 #define DRV_MSG_CODE_MCP_RESET	0x00090000
-#define DRV_MSG_CODE_SET_SECURE_MODE	0x000a0000
-#define DRV_MSG_CODE_PHY_RAW_READ	0x000b0000
-#define DRV_MSG_CODE_PHY_RAW_WRITE	0x000c0000
-#define DRV_MSG_CODE_PHY_CORE_READ	0x000d0000
-#define DRV_MSG_CODE_PHY_CORE_WRITE	0x000e0000
 #define DRV_MSG_CODE_SET_VERSION	0x000f0000
 
 #define DRV_MSG_CODE_BIST_TEST	0x001e0000
 #define DRV_MSG_CODE_SET_LED_MODE	0x00200000
 
 #define DRV_MSG_SEQ_NUMBER_MASK	0x0000ffff
 
 	u32 drv_mb_param;
-
-	/* UNLOAD_REQ params */
-#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN	0x00000000
-#define DRV_MB_PARAM_UNLOAD_WOL_MCP	0x00000001
-#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED	0x00000002
-#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED	0x00000003
-
-	/* UNLOAD_DONE_params */
-#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER	0x00000001
-
-	/* INIT_PHY params */
-#define DRV_MB_PARAM_INIT_PHY_FORCE	0x00000001
-#define DRV_MB_PARAM_INIT_PHY_DONT_CARE	0x00000002
-
-	/* LLDP / DCBX params*/
-#define DRV_MB_PARAM_LLDP_SEND_MASK	0x00000001
-#define DRV_MB_PARAM_LLDP_SEND_SHIFT	0
-#define DRV_MB_PARAM_LLDP_AGENT_MASK	0x00000006
-#define DRV_MB_PARAM_LLDP_AGENT_SHIFT	1
-#define DRV_MB_PARAM_DCBX_NOTIFY_MASK	0x00000008
-#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT	3
-
-#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK	0x000000FF
-#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT	0
-
-#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW	0x1
-#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE	0x2
-
-#define DRV_MB_PARAM_NVM_OFFSET_SHIFT	0
-#define DRV_MB_PARAM_NVM_OFFSET_MASK	0x00FFFFFF
-#define DRV_MB_PARAM_NVM_LEN_SHIFT	24
-#define DRV_MB_PARAM_NVM_LEN_MASK	0xFF000000
-
-#define DRV_MB_PARAM_PHY_ADDR_SHIFT	0
-#define DRV_MB_PARAM_PHY_ADDR_MASK	0x1FF0FFFF
-#define DRV_MB_PARAM_PHY_LANE_SHIFT	16
-#define DRV_MB_PARAM_PHY_LANE_MASK	0x000F0000
-#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT	29
-#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK	0x20000000
-#define DRV_MB_PARAM_PHY_PORT_SHIFT	30
-#define DRV_MB_PARAM_PHY_PORT_MASK	0xc0000000
-
-/* configure vf MSIX params*/
-#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT	0
-#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK	0x000000FF
-#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT	8
-#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK	0x0000FF00
-
-#define DRV_MB_PARAM_SET_LED_MODE_OPER	0x0
-#define DRV_MB_PARAM_SET_LED_MODE_ON	0x1
-#define DRV_MB_PARAM_SET_LED_MODE_OFF	0x2
-
-#define DRV_MB_PARAM_BIST_UNKNOWN_TEST	0
-#define DRV_MB_PARAM_BIST_REGISTER_TEST	1
-#define DRV_MB_PARAM_BIST_CLOCK_TEST	2
-
-#define DRV_MB_PARAM_BIST_RC_UNKNOWN	0
-#define DRV_MB_PARAM_BIST_RC_PASSED	1
-#define DRV_MB_PARAM_BIST_RC_FAILED	2
-#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER	3
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP	0x00000001
+#define DRV_MB_PARAM_DCBX_NOTIFY_MASK	0x000000FF
+#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT	3
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT	0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK	0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT	8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK	0x0000FF00
+#define DRV_MB_PARAM_LLDP_SEND_MASK	0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_SHIFT	0
+
+
+#define DRV_MB_PARAM_SET_LED_MODE_OPER	0x0
+#define DRV_MB_PARAM_SET_LED_MODE_ON	0x1
+#define DRV_MB_PARAM_SET_LED_MODE_OFF	0x2
+
+#define DRV_MB_PARAM_BIST_REGISTER_TEST	1
+#define DRV_MB_PARAM_BIST_CLOCK_TEST	2
+
+#define DRV_MB_PARAM_BIST_RC_UNKNOWN	0
+#define DRV_MB_PARAM_BIST_RC_PASSED	1
+#define DRV_MB_PARAM_BIST_RC_FAILED	2
+#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER	3
 
 #define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT	0
 #define DRV_MB_PARAM_BIST_TEST_INDEX_MASK	0x000000FF
 
 	u32 fw_mb_header;
 #define FW_MSG_CODE_MASK	0xffff0000
 #define FW_MSG_CODE_DRV_LOAD_ENGINE	0x10100000
 #define FW_MSG_CODE_DRV_LOAD_PORT	0x10110000
 #define FW_MSG_CODE_DRV_LOAD_FUNCTION	0x10120000
 #define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA	0x10200000
 #define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI	0x10210000
 #define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG	0x10220000
 #define FW_MSG_CODE_DRV_LOAD_DONE	0x11100000
 #define FW_MSG_CODE_DRV_UNLOAD_ENGINE	0x20110000
 #define FW_MSG_CODE_DRV_UNLOAD_PORT	0x20120000
 #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION	0x20130000
 #define FW_MSG_CODE_DRV_UNLOAD_DONE	0x21100000
-#define FW_MSG_CODE_INIT_PHY_DONE	0x21200000
-#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS	0x21300000
-#define FW_MSG_CODE_LINK_RESET_DONE	0x23000000
-#define FW_MSG_CODE_SET_LLDP_DONE	0x24000000
-#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT	0x24010000
-#define FW_MSG_CODE_SET_DCBX_DONE	0x25000000
-#define FW_MSG_CODE_NIG_DRAIN_DONE	0x30000000
-#define FW_MSG_CODE_VF_DISABLED_DONE	0xb0000000
 #define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE	0xb0010000
-#define FW_MSG_CODE_FLR_ACK	0x02000000
-#define FW_MSG_CODE_FLR_NACK	0x02100000
-
-#define FW_MSG_CODE_NVM_OK	0x00010000
-#define FW_MSG_CODE_NVM_INVALID_MODE	0x00020000
-#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED	0x00030000
-#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE	0x00040000
-#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND	0x00050000
-#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND	0x00060000
-#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER	0x00070000
-#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER	0x00080000
-#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC	0x00090000
-#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR	0x000a0000
-#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE	0x000b0000
-#define FW_MSG_CODE_NVM_FILE_NOT_FOUND	0x000c0000
-#define FW_MSG_CODE_NVM_OPERATION_FAILED	0x000d0000
-#define FW_MSG_CODE_NVM_FAILED_UNALIGNED	0x000e0000
-#define FW_MSG_CODE_NVM_BAD_OFFSET	0x000f0000
-#define FW_MSG_CODE_NVM_BAD_SIGNATURE	0x00100000
-#define FW_MSG_CODE_NVM_FILE_READ_ONLY	0x00200000
-#define FW_MSG_CODE_NVM_UNKNOWN_FILE	0x00300000
-#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK	0x00400000
-#define FW_MSG_CODE_MCP_RESET_REJECT	0x00600000
-#define FW_MSG_CODE_PHY_OK	0x00110000
-#define FW_MSG_CODE_PHY_ERROR	0x00120000
-#define FW_MSG_CODE_SET_SECURE_MODE_ERROR	0x00130000
-#define FW_MSG_CODE_SET_SECURE_MODE_OK	0x00140000
-#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR	0x00150000
 #define FW_MSG_CODE_OK	0x00160000
 
 #define FW_MSG_SEQ_NUMBER_MASK	0x0000ffff
 
 	u32 fw_mb_param;
 
 	u32 drv_pulse_mb;
 #define DRV_PULSE_SEQ_MASK	0x00007fff
 #define DRV_PULSE_SYSTEM_TIME_MASK	0xffff0000
 #define DRV_PULSE_ALWAYS_ALIVE	0x00008000
+
 	u32 mcp_pulse_mb;
 #define MCP_PULSE_SEQ_MASK	0x00007fff
 #define MCP_PULSE_ALWAYS_ALIVE	0x00008000
 #define MCP_EVENT_MASK	0xffff0000
 #define MCP_EVENT_OTHER_DRIVER_RESET_REQ	0x00010000
 
 	union drv_union_data union_data;
 };
 
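Taken together, these fields implement a simple sequenced request/response: the driver fills union_data, bumps the low 16-bit sequence number, writes a DRV_MSG_CODE_* into the high half of drv_mb_header, and waits for the MFW to echo the sequence in fw_mb_header. A sketch only, assuming a mapped struct public_drv_mb *mb; the real driver performs these accesses through register reads/writes in its MCP layer, and the version value here is made up:

	u16 seq = (mb->drv_mb_header + 1) & DRV_MSG_SEQ_NUMBER_MASK;

	/* Payload first: SET_VERSION consumes union_data.drv_version. */
	mb->union_data.drv_version.version = 0x01000000;	/* hypothetical */
	memcpy(mb->union_data.drv_version.name, "qed", 4);

	/* Ring the doorbell: command in the high half, sequence low. */
	mb->drv_mb_param = 0;
	mb->drv_mb_header = DRV_MSG_CODE_SET_VERSION | seq;

	/* The MFW echoes the sequence in fw_mb_header when it is done. */
	while ((mb->fw_mb_header & FW_MSG_SEQ_NUMBER_MASK) != seq)
		cpu_relax();
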
-/* MFW - DRV MB */
-/**********************************************************************
-* Description
-*   Incremental Aggregative
-*   8-bit MFW counter per message
-*   8-bit ack-counter per message
-* Capabilities
-*   Provides up to 256 aggregative message per type
-*   Provides 4 message types in dword
-*   Message type pointers to byte offset
-*   Backward Compatibility by using sizeof for the counters.
-*   No lock requires for 32bit messages
-* Limitations:
-* In case of messages greater than 32bit, a dedicated mechanism(e.g lock)
-* is required to prevent data corruption.
-**********************************************************************/
 enum MFW_DRV_MSG_TYPE {
 	MFW_DRV_MSG_LINK_CHANGE,
 	MFW_DRV_MSG_FLR_FW_ACK_FAILED,
@@ -4085,37 +7304,33 @@ enum MFW_DRV_MSG_TYPE {
 	MFW_DRV_MSG_LLDP_DATA_UPDATED,
 	MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
 	MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
-	MFW_DRV_MSG_ERROR_RECOVERY,
+	MFW_DRV_MSG_RESERVED4,
 	MFW_DRV_MSG_BW_UPDATE,
-	MFW_DRV_MSG_S_TAG_UPDATE,
-	MFW_DRV_MSG_GET_LAN_STATS,
-	MFW_DRV_MSG_GET_FCOE_STATS,
-	MFW_DRV_MSG_GET_ISCSI_STATS,
-	MFW_DRV_MSG_GET_RDMA_STATS,
-	MFW_DRV_MSG_FAILURE_DETECTED,
+	MFW_DRV_MSG_BW_UPDATE5,
+	MFW_DRV_MSG_BW_UPDATE6,
+	MFW_DRV_MSG_BW_UPDATE7,
+	MFW_DRV_MSG_BW_UPDATE8,
+	MFW_DRV_MSG_BW_UPDATE9,
+	MFW_DRV_MSG_BW_UPDATE10,
 	MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
+	MFW_DRV_MSG_BW_UPDATE11,
 	MFW_DRV_MSG_MAX
 };
 
 #define MFW_DRV_MSG_MAX_DWORDS(msgs)	(((msgs - 1) >> 2) + 1)
 #define MFW_DRV_MSG_DWORD(msg_id)	(msg_id >> 2)
 #define MFW_DRV_MSG_OFFSET(msg_id)	((msg_id & 0x3) << 3)
 #define MFW_DRV_MSG_MASK(msg_id)	(0xff << MFW_DRV_MSG_OFFSET(msg_id))
 
 struct public_mfw_mb {
 	u32 sup_msgs;
 	u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
 	u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
 };
 
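The comment block removed above still describes the mechanism accurately: each message type owns one byte in msg[] that the MFW increments, mirrored by a driver-owned byte in ack[], and the DWORD/OFFSET/MASK macros locate that byte. An event is pending while the two bytes differ. A minimal sketch of that comparison (the helper name is hypothetical):

	/* Sketch only: is there an unacknowledged event for msg_id? */
	static bool mfw_msg_pending(struct public_mfw_mb *mb, u16 msg_id)
	{
		u32 msg = mb->msg[MFW_DRV_MSG_DWORD(msg_id)];
		u32 ack = mb->ack[MFW_DRV_MSG_DWORD(msg_id)];

		return (msg & MFW_DRV_MSG_MASK(msg_id)) !=
		       (ack & MFW_DRV_MSG_MASK(msg_id));
	}
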
-/**************************************/
-/*                                    */
-/*      P U B L I C      D A T A      */
-/*                                    */
-/**************************************/
 enum public_sections {
-	PUBLIC_DRV_MB,		/* Points to the first drv_mb of path0 */
-	PUBLIC_MFW_MB,		/* Points to the first mfw_mb of path0 */
+	PUBLIC_DRV_MB,
+	PUBLIC_MFW_MB,
 	PUBLIC_GLOBAL,
 	PUBLIC_PATH,
 	PUBLIC_PORT,
@@ -4123,1080 +7338,179 @@ enum public_sections {
 	PUBLIC_MAX_SECTIONS
 };
 
-struct drv_ver_info_stc {
-	u32 ver;
-	u8 name[32];
-};
-
 struct mcp_public_data {
-	/* The sections fields is an array */
 	u32 num_sections;
-	offsize_t sections[PUBLIC_MAX_SECTIONS];
+	u32 sections[PUBLIC_MAX_SECTIONS];
 	struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
 	struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
 	struct public_global global;
 	struct public_path path[MCP_GLOB_PATH_MAX];
 	struct public_port port[MCP_GLOB_PORT_MAX];
 	struct public_func func[MCP_GLOB_FUNC_MAX];
-	struct drv_ver_info_stc drv_info;
 };
 
 struct nvm_cfg_mac_address {
 	u32 mac_addr_hi;
 #define NVM_CFG_MAC_ADDRESS_HI_MASK	0x0000FFFF
 #define NVM_CFG_MAC_ADDRESS_HI_OFFSET	0
-
 	u32 mac_addr_lo;
 };
 
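Per the HI mask above, mac_addr_hi carries the two most significant octets of the MAC in its low 16 bits, with the remaining four octets in mac_addr_lo. A sketch of flattening the pair into a conventional 6-byte array, assuming the usual big-endian packing; the helper is illustrative, not part of this patch:

	/* Sketch only: hi/lo registers to a 6-byte MAC. */
	static void nvm_cfg_mac_to_bytes(const struct nvm_cfg_mac_address *mac,
					 u8 buf[6])
	{
		buf[0] = mac->mac_addr_hi >> 8;		/* bits 15..8  */
		buf[1] = mac->mac_addr_hi;		/* bits 7..0   */
		buf[2] = mac->mac_addr_lo >> 24;
		buf[3] = mac->mac_addr_lo >> 16;
		buf[4] = mac->mac_addr_lo >> 8;
		buf[5] = mac->mac_addr_lo;
	}
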
4152/******************************************
4153* nvm_cfg1 structs
4154******************************************/
4155
4156struct nvm_cfg1_glob { 7359struct nvm_cfg1_glob {
4157 u32 generic_cont0; /* 0x0 */ 7360 u32 generic_cont0;
4158#define NVM_CFG1_GLOB_BOARD_SWAP_MASK 0x0000000F 7361#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
4159#define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET 0 7362#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
4160#define NVM_CFG1_GLOB_BOARD_SWAP_NONE 0x0 7363#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
4161#define NVM_CFG1_GLOB_BOARD_SWAP_PATH 0x1 7364#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
4162#define NVM_CFG1_GLOB_BOARD_SWAP_PORT 0x2 7365#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
4163#define NVM_CFG1_GLOB_BOARD_SWAP_BOTH 0x3 7366#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
4164#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0 7367#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
4165#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4 7368#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
4166#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0 7369#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
4167#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1 7370#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
4168#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2 7371 u32 engineering_change[3];
4169#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3 7372 u32 manufacturing_id;
4170#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4 7373 u32 serial_number[4];
4171#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5 7374 u32 pcie_cfg;
4172#define NVM_CFG1_GLOB_MF_MODE_BD 0x6 7375 u32 mgmt_traffic;
4173#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7 7376 u32 core_cfg;
4174#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK 0x00001000 7377#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF
4175#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET 12 7378#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
4176#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED 0x0 7379#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0
4177#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED 0x1 7380#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1
4178#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK 0x001FE000 7381#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2
4179#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET 13 7382#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3
4180#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK 0x1FE00000 7383#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4
4181#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET 21 7384#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5
4182#define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK 0x20000000 7385#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xB
4183#define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET 29 7386#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC
4184#define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED 0x0 7387#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD
4185#define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED 0x1 7388#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE
4186#define NVM_CFG1_GLOB_ENABLE_ATC_MASK 0x40000000 7389 u32 e_lane_cfg1;
4187#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30 7390 u32 e_lane_cfg2;
4188#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0 7391 u32 f_lane_cfg1;
4189#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1 7392 u32 f_lane_cfg2;
4190#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK 0x80000000 7393 u32 mps10_preemphasis;
4191#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET 31 7394 u32 mps10_driver_current;
4192#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED 0x0 7395 u32 mps25_preemphasis;
4193#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED 0x1 7396 u32 mps25_driver_current;
4194 7397 u32 pci_id;
4195 u32 engineering_change[3]; /* 0x4 */ 7398 u32 pci_subsys_id;
4196 7399 u32 bar;
4197 u32 manufacturing_id; /* 0x10 */ 7400 u32 mps10_txfir_main;
4198 7401 u32 mps10_txfir_post;
4199 u32 serial_number[4]; /* 0x14 */ 7402 u32 mps25_txfir_main;
4200 7403 u32 mps25_txfir_post;
4201 u32 pcie_cfg; /* 0x24 */ 7404 u32 manufacture_ver;
4202#define NVM_CFG1_GLOB_PCI_GEN_MASK 0x00000003 7405 u32 manufacture_time;
4203#define NVM_CFG1_GLOB_PCI_GEN_OFFSET 0 7406 u32 led_global_settings;
4204#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1 0x0 7407 u32 generic_cont1;
4205#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2 0x1 7408 u32 mbi_version;
4206#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3 0x2 7409 u32 mbi_date;
4207#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK 0x00000004 7410 u32 misc_sig;
4208#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET 2 7411 u32 device_capabilities;
4209#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED 0x0 7412#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
4210#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED 0x1 7413#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4
4211#define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK 0x00000018 7414#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8
4212#define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET 3 7415 u32 power_dissipated;
4213#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED 0x0 7416 u32 power_consumed;
4214#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_DISABLED 0x1 7417 u32 efi_version;
4215#define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED 0x2 7418 u32 multi_network_modes_capability;
4216#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_DISABLED 0x3 7419 u32 reserved[41];
4217#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_MASK 0x00000020
4218#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_OFFSET 5
4219#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_DISABLED 0x0
4220#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_ENABLED 0x1
4221#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK 0x000003C0
4222#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET 6
4223#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK 0x00001C00
4224#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET 10
4225#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW 0x0
4226#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB 0x1
4227#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB 0x2
4228#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB 0x3
4229#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK 0x001FE000
4230#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET 13
4231#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK 0x1FE00000
4232#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET 21
4233#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK 0x60000000
4234#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET 29
4235
4236 u32 mgmt_traffic; /* 0x28 */
4237#define NVM_CFG1_GLOB_RESERVED60_MASK 0x00000001
4238#define NVM_CFG1_GLOB_RESERVED60_OFFSET 0
4239#define NVM_CFG1_GLOB_RESERVED60_100KHZ 0x0
4240#define NVM_CFG1_GLOB_RESERVED60_400KHZ 0x1
4241#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK 0x000001FE
4242#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET 1
4243#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK 0x0001FE00
4244#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET 9
4245#define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK 0x01FE0000
4246#define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET 17
4247#define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK 0x06000000
4248#define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET 25
4249#define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED 0x0
4250#define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII 0x1
4251#define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII 0x2
4252
4253 u32 core_cfg; /* 0x2C */
4254#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF
4255#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
4256#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G 0x0
4257#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G 0x1
4258#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G 0x2
4259#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F 0x3
4260#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E 0x4
4261#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G 0x5
4262#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G 0xB
4263#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G 0xC
4264#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G 0xD
4265#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_MASK 0x00000100
4266#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_OFFSET 8
4267#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_DISABLED 0x0
4268#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_ENABLED 0x1
4269#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_MASK 0x00000200
4270#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_OFFSET 9
4271#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_DISABLED 0x0
4272#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_ENABLED 0x1
4273#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_MASK 0x0003FC00
4274#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_OFFSET 10
4275#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_MASK 0x03FC0000
4276#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_OFFSET 18
4277#define NVM_CFG1_GLOB_AVS_MODE_MASK 0x1C000000
4278#define NVM_CFG1_GLOB_AVS_MODE_OFFSET 26
4279#define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP 0x0
4280#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP 0x1
4281#define NVM_CFG1_GLOB_AVS_MODE_DISABLED 0x3
4282#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK 0x60000000
4283#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET 29
4284#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED 0x0
4285#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED 0x1
4286
4287 u32 e_lane_cfg1; /* 0x30 */
4288#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
4289#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
4290#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
4291#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
4292#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
4293#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
4294#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
4295#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
4296#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
4297#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
4298#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
4299#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
4300#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
4301#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
4302#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
4303#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
4304
4305 u32 e_lane_cfg2; /* 0x34 */
4306#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
4307#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
4308#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
4309#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
4310#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
4311#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
4312#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
4313#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
4314#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
4315#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
4316#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
4317#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
4318#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
4319#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
4320#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
4321#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
4322#define NVM_CFG1_GLOB_SMBUS_MODE_MASK 0x00000F00
4323#define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET 8
4324#define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED 0x0
4325#define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ 0x1
4326#define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ 0x2
4327#define NVM_CFG1_GLOB_NCSI_MASK 0x0000F000
4328#define NVM_CFG1_GLOB_NCSI_OFFSET 12
4329#define NVM_CFG1_GLOB_NCSI_DISABLED 0x0
4330#define NVM_CFG1_GLOB_NCSI_ENABLED 0x1
4331
4332 u32 f_lane_cfg1; /* 0x38 */
4333#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
4334#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
4335#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
4336#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
4337#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
4338#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
4339#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
4340#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
4341#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
4342#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
4343#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
4344#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
4345#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
4346#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
4347#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
4348#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
4349
4350 u32 f_lane_cfg2; /* 0x3C */
4351#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
4352#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
4353#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
4354#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
4355#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
4356#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
4357#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
4358#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
4359#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
4360#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
4361#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
4362#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
4363#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
4364#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
4365#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
4366#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
4367
4368 u32 eagle_preemphasis; /* 0x40 */
4369#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
4370#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
4371#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
4372#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
4373#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
4374#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
4375#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
4376#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
4377
4378 u32 eagle_driver_current; /* 0x44 */
4379#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
4380#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
4381#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
4382#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
4383#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
4384#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
4385#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
4386#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
4387
4388 u32 falcon_preemphasis; /* 0x48 */
4389#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
4390#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
4391#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
4392#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
4393#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
4394#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
4395#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
4396#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
4397
4398 u32 falcon_driver_current; /* 0x4C */
4399#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
4400#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
4401#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
4402#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
4403#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
4404#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
4405#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
4406#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
4407
4408 u32 pci_id; /* 0x50 */
4409#define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF
4410#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0
4411
4412 u32 pci_subsys_id; /* 0x54 */
4413#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF
4414#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET 0
4415#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK 0xFFFF0000
4416#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET 16
4417
4418 u32 bar; /* 0x58 */
4419#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK 0x0000000F
4420#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET 0
4421#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED 0x0
4422#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K 0x1
4423#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K 0x2
4424#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K 0x3
4425#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K 0x4
4426#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K 0x5
4427#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K 0x6
4428#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K 0x7
4429#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K 0x8
4430#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K 0x9
4431#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M 0xA
4432#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M 0xB
4433#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M 0xC
4434#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD
4435#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE
4436#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF
4437#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0
4438#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4
4439#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0
4440#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K 0x1
4441#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K 0x2
4442#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K 0x3
4443#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K 0x4
4444#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K 0x5
4445#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K 0x6
4446#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K 0x7
4447#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K 0x8
4448#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M 0x9
4449#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M 0xA
4450#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M 0xB
4451#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M 0xC
4452#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD
4453#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE
4454#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF
4455#define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00
4456#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8
4457#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0
4458#define NVM_CFG1_GLOB_BAR2_SIZE_64K 0x1
4459#define NVM_CFG1_GLOB_BAR2_SIZE_128K 0x2
4460#define NVM_CFG1_GLOB_BAR2_SIZE_256K 0x3
4461#define NVM_CFG1_GLOB_BAR2_SIZE_512K 0x4
4462#define NVM_CFG1_GLOB_BAR2_SIZE_1M 0x5
4463#define NVM_CFG1_GLOB_BAR2_SIZE_2M 0x6
4464#define NVM_CFG1_GLOB_BAR2_SIZE_4M 0x7
4465#define NVM_CFG1_GLOB_BAR2_SIZE_8M 0x8
4466#define NVM_CFG1_GLOB_BAR2_SIZE_16M 0x9
4467#define NVM_CFG1_GLOB_BAR2_SIZE_32M 0xA
4468#define NVM_CFG1_GLOB_BAR2_SIZE_64M 0xB
4469#define NVM_CFG1_GLOB_BAR2_SIZE_128M 0xC
4470#define NVM_CFG1_GLOB_BAR2_SIZE_256M 0xD
4471#define NVM_CFG1_GLOB_BAR2_SIZE_512M 0xE
4472#define NVM_CFG1_GLOB_BAR2_SIZE_1G 0xF
4473
4474 u32 eagle_txfir_main; /* 0x5C */
4475#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
4476#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
4477#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
4478#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
4479#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
4480#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
4481#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
4482#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
4483
4484 u32 eagle_txfir_post; /* 0x60 */
4485#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
4486#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
4487#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
4488#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
4489#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
4490#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
4491#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
4492#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
4493
4494 u32 falcon_txfir_main; /* 0x64 */
4495#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
4496#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
4497#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
4498#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
4499#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
4500#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
4501#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
4502#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
4503
4504 u32 falcon_txfir_post; /* 0x68 */
4505#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
4506#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
4507#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
4508#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
4509#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
4510#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
4511#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
4512#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
4513
4514 u32 manufacture_ver; /* 0x6C */
4515#define NVM_CFG1_GLOB_MANUF0_VER_MASK 0x0000003F
4516#define NVM_CFG1_GLOB_MANUF0_VER_OFFSET 0
4517#define NVM_CFG1_GLOB_MANUF1_VER_MASK 0x00000FC0
4518#define NVM_CFG1_GLOB_MANUF1_VER_OFFSET 6
4519#define NVM_CFG1_GLOB_MANUF2_VER_MASK 0x0003F000
4520#define NVM_CFG1_GLOB_MANUF2_VER_OFFSET 12
4521#define NVM_CFG1_GLOB_MANUF3_VER_MASK 0x00FC0000
4522#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18
4523#define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000
4524#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24
4525
4526 u32 manufacture_time; /* 0x70 */
4527#define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F
4528#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0
4529#define NVM_CFG1_GLOB_MANUF1_TIME_MASK 0x00000FC0
4530#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6
4531#define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000
4532#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12
4533
4534 u32 led_global_settings; /* 0x74 */
4535#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
4536#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
4537#define NVM_CFG1_GLOB_LED_SWAP_1_MASK 0x000000F0
4538#define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET 4
4539#define NVM_CFG1_GLOB_LED_SWAP_2_MASK 0x00000F00
4540#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8
4541#define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000
4542#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12
4543
4544 u32 generic_cont1; /* 0x78 */
4545#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF
4546#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0
4547
4548 u32 mbi_version; /* 0x7C */
4549#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
4550#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
4551#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00
4552#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
4553#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
4554#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
4555
4556 u32 mbi_date; /* 0x80 */
4557
4558 u32 misc_sig; /* 0x84 */
4559
4560 /* Define the GPIO mapping to switch i2c mux */
4561#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK 0x000000FF
4562#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET 0
4563#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK 0x0000FF00
4564#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET 8
4565#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA 0x0
4566#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0 0x1
4567#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1 0x2
4568#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2 0x3
4569#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3 0x4
4570#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4 0x5
4571#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5 0x6
4572#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6 0x7
4573#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7 0x8
4574#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8 0x9
4575#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9 0xA
4576#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10 0xB
4577#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11 0xC
4578#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12 0xD
4579#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13 0xE
4580#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14 0xF
4581#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15 0x10
4582#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16 0x11
4583#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17 0x12
4584#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18 0x13
4585#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19 0x14
4586#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20 0x15
4587#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21 0x16
4588#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22 0x17
4589#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23 0x18
4590#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24 0x19
4591#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25 0x1A
4592#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26 0x1B
4593#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27 0x1C
4594#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28 0x1D
4595#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
4596#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
4597#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
4598 u32 device_capabilities; /* 0x88 */
4599#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
4600 u32 power_dissipated; /* 0x8C */
4601 u32 power_consumed; /* 0x90 */
4602 u32 efi_version; /* 0x94 */
4603 u32 reserved[42]; /* 0x98 */
4604}; 7420};
4605 7421
4606struct nvm_cfg1_path { 7422struct nvm_cfg1_path {
4607 u32 reserved[30]; /* 0x0 */ 7423 u32 reserved[30];
4608}; 7424};
4609 7425
4610struct nvm_cfg1_port { 7426struct nvm_cfg1_port {
4611 u32 reserved__m_relocated_to_option_123; /* 0x0 */ 7427 u32 reserved__m_relocated_to_option_123;
4612 u32 reserved__m_relocated_to_option_124; /* 0x4 */ 7428 u32 reserved__m_relocated_to_option_124;
4613 u32 generic_cont0; /* 0x8 */ 7429 u32 generic_cont0;
4614#define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF 7430#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000
4615#define NVM_CFG1_PORT_LED_MODE_OFFSET 0 7431#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
4616#define NVM_CFG1_PORT_LED_MODE_MAC1 0x0 7432#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
4617#define NVM_CFG1_PORT_LED_MODE_PHY1 0x1 7433#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
4618#define NVM_CFG1_PORT_LED_MODE_PHY2 0x2 7434#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
4619#define NVM_CFG1_PORT_LED_MODE_PHY3 0x3 7435#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
4620#define NVM_CFG1_PORT_LED_MODE_MAC2 0x4 7436#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000
4621#define NVM_CFG1_PORT_LED_MODE_PHY4 0x5 7437#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
4622#define NVM_CFG1_PORT_LED_MODE_PHY5 0x6 7438#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
4623#define NVM_CFG1_PORT_LED_MODE_PHY6 0x7 7439#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2
4624#define NVM_CFG1_PORT_LED_MODE_MAC3 0x8 7440#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4
4625#define NVM_CFG1_PORT_LED_MODE_PHY7 0x9 7441 u32 pcie_cfg;
4626#define NVM_CFG1_PORT_LED_MODE_PHY8 0xA 7442 u32 features;
4627#define NVM_CFG1_PORT_LED_MODE_PHY9 0xB 7443 u32 speed_cap_mask;
4628#define NVM_CFG1_PORT_LED_MODE_MAC4 0xC 7444#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF
4629#define NVM_CFG1_PORT_LED_MODE_PHY10 0xD 7445#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
4630#define NVM_CFG1_PORT_LED_MODE_PHY11 0xE 7446#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
4631#define NVM_CFG1_PORT_LED_MODE_PHY12 0xF 7447#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
4632#define NVM_CFG1_PORT_ROCE_PRIORITY_MASK 0x0000FF00 7448#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
4633#define NVM_CFG1_PORT_ROCE_PRIORITY_OFFSET 8 7449#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
4634#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000 7450#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
4635#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16 7451#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
4636#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0 7452 u32 link_settings;
4637#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1 7453#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F
4638#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2 7454#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
4639#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3 7455#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
4640#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000 7456#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
4641#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20 7457#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
4642#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1 7458#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
4643 u32 pcie_cfg; /* 0xC */ 7459#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
4644#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007 7460#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
4645#define NVM_CFG1_PORT_RESERVED15_OFFSET 0 7461#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7
4646 7462#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8
4647 u32 features; /* 0x10 */ 7463#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
4648#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK 0x00000001 7464#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
4649#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET 0 7465#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
4650#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED 0x0 7466#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
4651#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED 0x1 7467#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
4652#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK 0x00000002 7468 u32 phy_cfg;
4653#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET 1 7469 u32 mgmt_traffic;
4654#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED 0x0 7470 u32 ext_phy;
4655#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED 0x1 7471 u32 mba_cfg1;
4656 7472 u32 mba_cfg2;
4657 u32 speed_cap_mask; /* 0x14 */ 7473 u32 vf_cfg;
4658#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF 7474 struct nvm_cfg_mac_address lldp_mac_address;
4659#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 7475 u32 led_port_settings;
4660#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 7476 u32 transceiver_00;
4661#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 7477 u32 device_ids;
4662#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8 7478 u32 board_cfg;
4663#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 7479 u32 mnm_10g_cap;
4664#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 7480 u32 mnm_10g_ctrl;
4665#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G 0x40 7481 u32 mnm_10g_misc;
4666#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK 0xFFFF0000 7482 u32 mnm_25g_cap;
4667#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16 7483 u32 mnm_25g_ctrl;
4668#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1 7484 u32 mnm_25g_misc;
4669#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2 7485 u32 mnm_40g_cap;
4670#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8 7486 u32 mnm_40g_ctrl;
4671#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10 7487 u32 mnm_40g_misc;
4672#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20 7488 u32 mnm_50g_cap;
4673#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_100G 0x40 7489 u32 mnm_50g_ctrl;
4674 7490 u32 mnm_50g_misc;
4675 u32 link_settings; /* 0x18 */ 7491 u32 mnm_100g_cap;
4676#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F 7492 u32 mnm_100g_ctrl;
4677#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0 7493 u32 mnm_100g_misc;
4678#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 7494 u32 reserved[116];
4679#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
4680#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
4681#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
4682#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
4683#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
4684#define NVM_CFG1_PORT_DRV_LINK_SPEED_100G 0x7
4685#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
4686#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
4687#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
4688#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
4689#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
4690#define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK 0x00000780
4691#define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET 7
4692#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0
4693#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1
4694#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2
4695#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4
4696#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5
4697#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6
4698#define NVM_CFG1_PORT_MFW_LINK_SPEED_100G 0x7
4699#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800
4700#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11
4701#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1
4702#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX 0x2
4703#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX 0x4
4704#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK 0x00004000
4705#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET 14
4706#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED 0x0
4707#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED 0x1
4708
4709 u32 phy_cfg; /* 0x1C */
4710#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
4711#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
4712#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG 0x1
4713#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER 0x2
4714#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER 0x4
4715#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN 0x8
4716#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN 0x10
4717#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK 0x00FF0000
4718#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET 16
4719#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS 0x0
4720#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR 0x2
4721#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2 0x3
4722#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4 0x4
4723#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI 0x8
4724#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9
4725#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB
4726#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC
4727#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0x11
4728#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0x12
4729#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0x21
4730#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x22
4731#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI 0x31
4732#define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000
4733#define NVM_CFG1_PORT_AN_MODE_OFFSET 24
4734#define NVM_CFG1_PORT_AN_MODE_NONE 0x0
4735#define NVM_CFG1_PORT_AN_MODE_CL73 0x1
4736#define NVM_CFG1_PORT_AN_MODE_CL37 0x2
4737#define NVM_CFG1_PORT_AN_MODE_CL73_BAM 0x3
4738#define NVM_CFG1_PORT_AN_MODE_CL37_BAM 0x4
4739#define NVM_CFG1_PORT_AN_MODE_HPAM 0x5
4740#define NVM_CFG1_PORT_AN_MODE_SGMII 0x6
4741
4742 u32 mgmt_traffic; /* 0x20 */
4743#define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F
4744#define NVM_CFG1_PORT_RESERVED61_OFFSET 0
4745
4746 u32 ext_phy; /* 0x24 */
4747#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
4748#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
4749#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
4750#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1
4751#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
4752#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
4753
4754 u32 mba_cfg1; /* 0x28 */
4755#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001
4756#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0
4757#define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED 0x0
4758#define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED 0x1
4759#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK 0x00000006
4760#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET 1
4761#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078
4762#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3
4763#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080
4764#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET 7
4765#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S 0x0
4766#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B 0x1
4767#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK 0x00000100
4768#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET 8
4769#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED 0x0
4770#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1
4771#define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00
4772#define NVM_CFG1_PORT_RESERVED5_OFFSET 9
4773#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK 0x001E0000
4774#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET 17
4775#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0
4776#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1
4777#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2
4778#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4
4779#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5
4780#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6
4781#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_100G 0x7
4782#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ 0x8
4783#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000
4784#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21
4785
4786 u32 mba_cfg2; /* 0x2C */
4787#define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF
4788#define NVM_CFG1_PORT_RESERVED65_OFFSET 0
4789#define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000
4790#define NVM_CFG1_PORT_RESERVED66_OFFSET 16
4791
4792 u32 vf_cfg; /* 0x30 */
4793#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF
4794#define NVM_CFG1_PORT_RESERVED8_OFFSET 0
4795#define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000
4796#define NVM_CFG1_PORT_RESERVED6_OFFSET 16
4797
4798 struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */
4799
4800 u32 led_port_settings; /* 0x3C */
4801#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK 0x000000FF
4802#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET 0
4803#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK 0x0000FF00
4804#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET 8
4805#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK 0x00FF0000
4806#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16
4807#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1
4808#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2
4809#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G 0x8
4810#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G 0x10
4811#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G 0x20
4812#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_100G 0x40
4813
4814 u32 transceiver_00; /* 0x40 */
4815
4816	/* Defines the mapping of the transceiver's module-absent signal to a GPIO */
4817#define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF
4818#define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET 0
4819#define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA 0x0
4820#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0 0x1
4821#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1 0x2
4822#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2 0x3
4823#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3 0x4
4824#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4 0x5
4825#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5 0x6
4826#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6 0x7
4827#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7 0x8
4828#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8 0x9
4829#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9 0xA
4830#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10 0xB
4831#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11 0xC
4832#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12 0xD
4833#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13 0xE
4834#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14 0xF
4835#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15 0x10
4836#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16 0x11
4837#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17 0x12
4838#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18 0x13
4839#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19 0x14
4840#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20 0x15
4841#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21 0x16
4842#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22 0x17
4843#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23 0x18
4844#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24 0x19
4845#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25 0x1A
4846#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26 0x1B
4847#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27 0x1C
4848#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28 0x1D
4849#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29 0x1E
4850#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30 0x1F
4851#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31 0x20
4852 /* Define the GPIO mux settings to switch i2c mux to this port */
4853#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK 0x00000F00
4854#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET 8
4855#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK 0x0000F000
4856#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET 12
4857
4858 u32 reserved[133]; /* 0x44 */
4859}; 7495};
4860 7496
4861struct nvm_cfg1_func { 7497struct nvm_cfg1_func {
4862 struct nvm_cfg_mac_address mac_address; /* 0x0 */ 7498 struct nvm_cfg_mac_address mac_address;
4863 7499 u32 rsrv1;
4864 u32 rsrv1; /* 0x8 */ 7500 u32 rsrv2;
4865#define NVM_CFG1_FUNC_RESERVED1_MASK 0x0000FFFF 7501 u32 device_id;
4866#define NVM_CFG1_FUNC_RESERVED1_OFFSET 0 7502 u32 cmn_cfg;
4867#define NVM_CFG1_FUNC_RESERVED2_MASK 0xFFFF0000 7503 u32 pci_cfg;
4868#define NVM_CFG1_FUNC_RESERVED2_OFFSET 16 7504 struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr;
4869 7505 struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr;
4870 u32 rsrv2; /* 0xC */ 7506 u32 preboot_generic_cfg;
4871#define NVM_CFG1_FUNC_RESERVED3_MASK 0x0000FFFF 7507 u32 reserved[8];
4872#define NVM_CFG1_FUNC_RESERVED3_OFFSET 0
4873#define NVM_CFG1_FUNC_RESERVED4_MASK 0xFFFF0000
4874#define NVM_CFG1_FUNC_RESERVED4_OFFSET 16
4875
4876 u32 device_id; /* 0x10 */
4877#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF
4878#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0
4879#define NVM_CFG1_FUNC_RESERVED77_MASK 0xFFFF0000
4880#define NVM_CFG1_FUNC_RESERVED77_OFFSET 16
4881
4882 u32 cmn_cfg; /* 0x14 */
4883#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK 0x00000007
4884#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET 0
4885#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE 0x0
4886#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_ISCSI_BOOT 0x3
4887#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_FCOE_BOOT 0x4
4888#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE 0x7
4889#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8
4890#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3
4891#define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000
4892#define NVM_CFG1_FUNC_PERSONALITY_OFFSET 19
4893#define NVM_CFG1_FUNC_PERSONALITY_ETHERNET 0x0
4894#define NVM_CFG1_FUNC_PERSONALITY_ISCSI 0x1
4895#define NVM_CFG1_FUNC_PERSONALITY_FCOE 0x2
4896#define NVM_CFG1_FUNC_PERSONALITY_ROCE 0x3
4897#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK 0x7F800000
4898#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET 23
4899#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK 0x80000000
4900#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET 31
4901#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED 0x0
4902#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED 0x1
4903
4904 u32 pci_cfg; /* 0x18 */
4905#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK 0x0000007F
4906#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET 0
4907#define NVM_CFG1_FUNC_RESERVESD12_MASK 0x00003F80
4908#define NVM_CFG1_FUNC_RESERVESD12_OFFSET 7
4909#define NVM_CFG1_FUNC_BAR1_SIZE_MASK 0x0003C000
4910#define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET 14
4911#define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED 0x0
4912#define NVM_CFG1_FUNC_BAR1_SIZE_64K 0x1
4913#define NVM_CFG1_FUNC_BAR1_SIZE_128K 0x2
4914#define NVM_CFG1_FUNC_BAR1_SIZE_256K 0x3
4915#define NVM_CFG1_FUNC_BAR1_SIZE_512K 0x4
4916#define NVM_CFG1_FUNC_BAR1_SIZE_1M 0x5
4917#define NVM_CFG1_FUNC_BAR1_SIZE_2M 0x6
4918#define NVM_CFG1_FUNC_BAR1_SIZE_4M 0x7
4919#define NVM_CFG1_FUNC_BAR1_SIZE_8M 0x8
4920#define NVM_CFG1_FUNC_BAR1_SIZE_16M 0x9
4921#define NVM_CFG1_FUNC_BAR1_SIZE_32M 0xA
4922#define NVM_CFG1_FUNC_BAR1_SIZE_64M 0xB
4923#define NVM_CFG1_FUNC_BAR1_SIZE_128M 0xC
4924#define NVM_CFG1_FUNC_BAR1_SIZE_256M 0xD
4925#define NVM_CFG1_FUNC_BAR1_SIZE_512M 0xE
4926#define NVM_CFG1_FUNC_BAR1_SIZE_1G 0xF
4927#define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK 0x03FC0000
4928#define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET 18
4929
4930 struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */
4931
4932 struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */
4933 u32 preboot_generic_cfg; /* 0x2C */
4934 u32 reserved[8]; /* 0x30 */
4935}; 7508};
4936 7509
4937struct nvm_cfg1 { 7510struct nvm_cfg1 {
4938 struct nvm_cfg1_glob glob; /* 0x0 */ 7511 struct nvm_cfg1_glob glob;
4939 7512 struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX];
4940 struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x140 */ 7513 struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];
4941 7514 struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];
4942 struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */
4943
4944 struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */
4945};
4946
4947/******************************************
4948* nvm_cfg structs
4949******************************************/
4950
4951enum nvm_cfg_sections {
4952 NVM_CFG_SECTION_NVM_CFG1,
4953 NVM_CFG_SECTION_MAX
4954};
4955
4956struct nvm_cfg {
4957 u32 num_sections;
4958 u32 sections_offset[NVM_CFG_SECTION_MAX];
4959 struct nvm_cfg1 cfg1;
4960};
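/* A minimal sketch, not part of the driver, of how the _MASK/_OFFSET
 * pairs above are consumed: a field is isolated with its mask and
 * shifted down by its offset. The helper and variable names here are
 * illustrative only.
 */
static inline u32 nvm_cfg_get_field(u32 val, u32 mask, u32 offset)
{
	return (val & mask) >> offset;
}

/* e.g. reading the DCBX mode out of nvm_cfg1_port.generic_cont0 */
u32 dcbx_mode = nvm_cfg_get_field(port->generic_cont0,
				  NVM_CFG1_PORT_DCBX_MODE_MASK,
				  NVM_CFG1_PORT_DCBX_MODE_OFFSET);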
4961
4962#define PORT_0 0
4963#define PORT_1 1
4964#define PORT_2 2
4965#define PORT_3 3
4966
4967extern struct spad_layout g_spad;
4968
4969#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */
4970
4971#define SPAD_OFFSET(addr) (((u32)addr - (u32)CPU_SPAD_BASE))
4972
4973#define TO_OFFSIZE(_offset, _size) \
4974 (u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_SHIFT) | \
4975 (((u32)(_size) >> 2) << OFFSIZE_SIZE_SHIFT))
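/* Worked example of TO_OFFSIZE, assuming OFFSIZE_OFFSET_SHIFT == 0 and
 * OFFSIZE_SIZE_SHIFT == 16 (both defined elsewhere in the driver): a
 * byte offset of 0x100 and a byte size of 0x40 are stored as dword
 * counts, giving
 *   ((0x100 >> 2) << 0) | ((0x40 >> 2) << 16) == 0x00100040
 */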
4976
4977enum spad_sections {
4978 SPAD_SECTION_TRACE,
4979 SPAD_SECTION_NVM_CFG,
4980 SPAD_SECTION_PUBLIC,
4981 SPAD_SECTION_PRIVATE,
4982 SPAD_SECTION_MAX
4983};
4984
4985struct spad_layout {
4986 struct nvm_cfg nvm_cfg;
4987 struct mcp_public_data public_data;
4988}; 7515};
4989
4990#define CRC_MAGIC_VALUE 0xDEBB20E3
4991#define CRC32_POLYNOMIAL 0xEDB88320
4992#define NVM_CRC_SIZE (sizeof(u32))
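/* Minimal sketch of the reflected CRC-32 these constants describe;
 * 0xEDB88320 is the bit-reversed IEEE 802.3 polynomial, and running the
 * CRC over an image plus its stored CRC conventionally leaves the
 * register at CRC_MAGIC_VALUE before the final complement. The helper
 * name is illustrative, not from the driver.
 */
static u32 nvm_crc32(const u8 *buf, u32 len)
{
	u32 crc = ~0U;
	int i;

	while (len--) {
		crc ^= *buf++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? CRC32_POLYNOMIAL : 0);
	}
	return ~crc;
}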
4993
4994enum nvm_sw_arbitrator {
4995 NVM_SW_ARB_HOST,
4996 NVM_SW_ARB_MCP,
4997 NVM_SW_ARB_UART,
4998 NVM_SW_ARB_RESERVED
4999};
5000
5001/****************************************************************************
5002* Boot Strap Region *
5003****************************************************************************/
5004struct legacy_bootstrap_region {
5005 u32 magic_value;
5006#define NVM_MAGIC_VALUE 0x669955aa
5007 u32 sram_start_addr;
5008 u32 code_len; /* boot code length (in dwords) */
5009 u32 code_start_addr;
5010 u32 crc; /* 32-bit CRC */
5011};
5012
5013/****************************************************************************
5014* Directories Region *
5015****************************************************************************/
5016struct nvm_code_entry {
5017 u32 image_type; /* Image type */
5018 u32 nvm_start_addr; /* NVM address of the image */
5019 u32 len; /* Include CRC */
5020 u32 sram_start_addr;
5021 u32 sram_run_addr; /* Relevant in case of MIM only */
5022};
5023
5024enum nvm_image_type {
5025 NVM_TYPE_TIM1 = 0x01,
5026 NVM_TYPE_TIM2 = 0x02,
5027 NVM_TYPE_MIM1 = 0x03,
5028 NVM_TYPE_MIM2 = 0x04,
5029 NVM_TYPE_MBA = 0x05,
5030 NVM_TYPE_MODULES_PN = 0x06,
5031 NVM_TYPE_VPD = 0x07,
5032 NVM_TYPE_MFW_TRACE1 = 0x08,
5033 NVM_TYPE_MFW_TRACE2 = 0x09,
5034 NVM_TYPE_NVM_CFG1 = 0x0a,
5035 NVM_TYPE_L2B = 0x0b,
5036 NVM_TYPE_DIR1 = 0x0c,
5037 NVM_TYPE_EAGLE_FW1 = 0x0d,
5038 NVM_TYPE_FALCON_FW1 = 0x0e,
5039 NVM_TYPE_PCIE_FW1 = 0x0f,
5040 NVM_TYPE_HW_SET = 0x10,
5041 NVM_TYPE_LIM = 0x11,
5042 NVM_TYPE_AVS_FW1 = 0x12,
5043 NVM_TYPE_DIR2 = 0x13,
5044 NVM_TYPE_CCM = 0x14,
5045 NVM_TYPE_EAGLE_FW2 = 0x15,
5046 NVM_TYPE_FALCON_FW2 = 0x16,
5047 NVM_TYPE_PCIE_FW2 = 0x17,
5048 NVM_TYPE_AVS_FW2 = 0x18,
5049
5050 NVM_TYPE_MAX,
5051};
5052
5053#define MAX_NVM_DIR_ENTRIES 200
5054
5055struct nvm_dir {
5056 s32 seq;
5057#define NVM_DIR_NEXT_MFW_MASK 0x00000001
5058#define NVM_DIR_SEQ_MASK 0xfffffffe
5059#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK)
5060
5061#define IS_DIR_SEQ_VALID(seq) ((seq & NVM_DIR_SEQ_MASK) != NVM_DIR_SEQ_MASK)
5062
5063 u32 num_images;
5064 u32 rsrv;
5065 struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */
5066};
5067
5068#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + \
5069 (_num_images - \
5070 1) * sizeof(struct nvm_code_entry) + \
5071 NVM_CRC_SIZE)
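/* A worked instance of NVM_DIR_SIZE: struct nvm_dir already embeds one
 * nvm_code_entry, so a directory holding n images only adds (n - 1)
 * further entries plus the trailing CRC dword. For three images:
 *   sizeof(struct nvm_dir) + 2 * sizeof(struct nvm_code_entry) + NVM_CRC_SIZE
 */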
5072
5073struct nvm_vpd_image {
5074 u32 format_revision;
5075#define VPD_IMAGE_VERSION 1
5076
5077	/* The length of this array depends on the number of VPD fields */
5078 u8 vpd_data[1];
5079};
5080
5081/****************************************************************************
5082* NVRAM FULL MAP *
5083****************************************************************************/
5084#define DIR_ID_1 (0)
5085#define DIR_ID_2 (1)
5086#define MAX_DIR_IDS (2)
5087
5088#define MFW_BUNDLE_1 (0)
5089#define MFW_BUNDLE_2 (1)
5090#define MAX_MFW_BUNDLES (2)
5091
5092#define FLASH_PAGE_SIZE 0x1000
5093#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE) /* 4KB */
5094#define ASIC_MIM_MAX_SIZE (300 * FLASH_PAGE_SIZE) /* 1.2MB */
5095#define FPGA_MIM_MAX_SIZE (25 * FLASH_PAGE_SIZE) /* 100KB */
5096
5097#define LIM_MAX_SIZE ((2 * \
5098 FLASH_PAGE_SIZE) - \
5099 sizeof(struct legacy_bootstrap_region) - \
5100 NVM_RSV_SIZE)
5101#define LIM_OFFSET (NVM_OFFSET(lim_image))
5102#define NVM_RSV_SIZE (44)
5103#define MIM_MAX_SIZE(is_asic) ((is_asic) ? ASIC_MIM_MAX_SIZE : \
5104 FPGA_MIM_MAX_SIZE)
5105#define MIM_OFFSET(idx, is_asic) (NVM_OFFSET(dir[MAX_MFW_BUNDLES]) + \
5106 ((idx == \
5107 NVM_TYPE_MIM2) ? MIM_MAX_SIZE(is_asic) : 0))
5108#define NVM_FIXED_AREA_SIZE(is_asic) (sizeof(struct nvm_image) + \
5109 MIM_MAX_SIZE(is_asic) * 2)
5110
5111union nvm_dir_union {
5112 struct nvm_dir dir;
5113 u8 page[FLASH_PAGE_SIZE];
5114};
5115
5116/* Address
5117 * +-------------------+ 0x000000
5118 * | Bootstrap: |
5119 * | magic_number |
5120 * | sram_start_addr |
5121 * | code_len |
5122 * | code_start_addr |
5123 * | crc |
5124 * +-------------------+ 0x000014
5125 * | rsrv |
5126 * +-------------------+ 0x000040
5127 * | LIM |
5128 * +-------------------+ 0x002000
5129 * | Dir1 |
5130 * +-------------------+ 0x003000
5131 * | Dir2 |
5132 * +-------------------+ 0x004000
5133 * | MIM1 |
5134 * +-------------------+ 0x130000
5135 * | MIM2 |
5136 * +-------------------+ 0x25C000
5137 * | Rest Images: |
5138 * | TIM1/2 |
5139 * | MFW_TRACE1/2 |
5140 * | Eagle/Falcon FW |
5141 * | PCIE/AVS FW |
5142 * | MBA/CCM/L2B |
5143 * | VPD |
5144 * | optic_modules |
5145 * | ... |
5146 * +-------------------+ 0x400000
5147 */
5148struct nvm_image {
5149/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/
5150 /* NVM Offset (size) */
5151 struct legacy_bootstrap_region bootstrap;
5152 u8 rsrv[NVM_RSV_SIZE];
5153 u8 lim_image[LIM_MAX_SIZE];
5154 union nvm_dir_union dir[MAX_MFW_BUNDLES];
5155
5156 /* MIM1_IMAGE 0x004000 (0x12c000) */
5157 /* MIM2_IMAGE 0x130000 (0x12c000) */
5158/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/
5159}; /* 0x134 */
5160
5161#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->f))))
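/* NVM_OFFSET() is the classic offsetof() idiom against the fixed map
 * above. As a sanity check: the bootstrap region is 5 dwords (0x14),
 * NVM_RSV_SIZE is 44, and LIM_MAX_SIZE is (2 * 0x1000 - 0x14 - 44), so
 *   NVM_OFFSET(dir) == 0x14 + 44 + LIM_MAX_SIZE == 0x2000
 * which matches the Dir1 address in the layout comment.
 */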
5162
5163struct hw_set_info {
5164 u32 reg_type;
5165#define GRC_REG_TYPE 1
5166#define PHY_REG_TYPE 2
5167#define PCI_REG_TYPE 4
5168
5169 u32 bank_num;
5170 u32 pf_num;
5171 u32 operation;
5172#define READ_OP 1
5173#define WRITE_OP 2
5174#define RMW_SET_OP 3
5175#define RMW_CLR_OP 4
5176
5177 u32 reg_addr;
5178 u32 reg_data;
5179
5180 u32 reset_type;
5181#define POR_RESET_TYPE BIT(0)
5182#define HARD_RESET_TYPE BIT(1)
5183#define CORE_RESET_TYPE BIT(2)
5184#define MCP_RESET_TYPE BIT(3)
5185#define PERSET_ASSERT BIT(4)
5186#define PERSET_DEASSERT BIT(5)
5187};
5188
5189struct hw_set_image {
5190 u32 format_version;
5191#define HW_SET_IMAGE_VERSION 1
5192 u32 no_hw_sets;
5193
5194	/* The length of this array depends on no_hw_sets */
5195 struct hw_set_info hw_sets[1];
5196};
5197
5198int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
5199 u8 pf_id, u16 pf_wfq);
5200int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
5201 u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
5202#endif 7516#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 0ada7fdb91bc..e17885321faf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -446,7 +446,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
446 idx_cmd, 446 idx_cmd,
447 le32_to_cpu(command->opcode), 447 le32_to_cpu(command->opcode),
448 le16_to_cpu(command->opcode_b), 448 le16_to_cpu(command->opcode_b),
449 le16_to_cpu(command->length), 449 le16_to_cpu(command->length_dw),
450 le32_to_cpu(command->src_addr_hi), 450 le32_to_cpu(command->src_addr_hi),
451 le32_to_cpu(command->src_addr_lo), 451 le32_to_cpu(command->src_addr_lo),
452 le32_to_cpu(command->dst_addr_hi), 452 le32_to_cpu(command->dst_addr_hi),
@@ -461,7 +461,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
461 idx_cmd, 461 idx_cmd,
462 le32_to_cpu(command->opcode), 462 le32_to_cpu(command->opcode),
463 le16_to_cpu(command->opcode_b), 463 le16_to_cpu(command->opcode_b),
464 le16_to_cpu(command->length), 464 le16_to_cpu(command->length_dw),
465 le32_to_cpu(command->src_addr_hi), 465 le32_to_cpu(command->src_addr_hi),
466 le32_to_cpu(command->src_addr_lo), 466 le32_to_cpu(command->src_addr_lo),
467 le32_to_cpu(command->dst_addr_hi), 467 le32_to_cpu(command->dst_addr_hi),
@@ -645,7 +645,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
645 return -EINVAL; 645 return -EINVAL;
646 } 646 }
647 647
648 cmd->length = cpu_to_le16((u16)length); 648 cmd->length_dw = cpu_to_le16((u16)length);
649 649
650 qed_dmae_post_command(p_hwfn, p_ptt); 650 qed_dmae_post_command(p_hwfn, p_ptt);
651 651
@@ -769,6 +769,29 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
769} 769}
770 770
771int 771int
772qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr,
773 dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
774{
775 u32 grc_addr_in_dw = grc_addr / sizeof(u32);
776 struct qed_dmae_params params;
777 int rc;
778
779 memset(&params, 0, sizeof(struct qed_dmae_params));
780 params.flags = flags;
781
782 mutex_lock(&p_hwfn->dmae_info.mutex);
783
784 rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
785 dest_addr, QED_DMAE_ADDRESS_GRC,
786 QED_DMAE_ADDRESS_HOST_VIRT,
787 size_in_dwords, &params);
788
789 mutex_unlock(&p_hwfn->dmae_info.mutex);
790
791 return rc;
792}
793
794int
772qed_dmae_host2host(struct qed_hwfn *p_hwfn, 795qed_dmae_host2host(struct qed_hwfn *p_hwfn,
773 struct qed_ptt *p_ptt, 796 struct qed_ptt *p_ptt,
774 dma_addr_t source_addr, 797 dma_addr_t source_addr,
@@ -791,16 +814,16 @@ qed_dmae_host2host(struct qed_hwfn *p_hwfn,
791} 814}
792 815
793u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, 816u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
794 enum protocol_type proto, 817 enum protocol_type proto, union qed_qm_pq_params *p_params)
795 union qed_qm_pq_params *p_params)
796{ 818{
797 u16 pq_id = 0; 819 u16 pq_id = 0;
798 820
799 if ((proto == PROTOCOLID_CORE || proto == PROTOCOLID_ETH) && 821 if ((proto == PROTOCOLID_CORE ||
800 !p_params) { 822 proto == PROTOCOLID_ETH ||
823 proto == PROTOCOLID_ISCSI ||
824 proto == PROTOCOLID_ROCE) && !p_params) {
801 DP_NOTICE(p_hwfn, 825 DP_NOTICE(p_hwfn,
802 "Protocol %d received NULL PQ params\n", 826 "Protocol %d received NULL PQ params\n", proto);
803 proto);
804 return 0; 827 return 0;
805 } 828 }
806 829
@@ -808,6 +831,8 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
808 case PROTOCOLID_CORE: 831 case PROTOCOLID_CORE:
809 if (p_params->core.tc == LB_TC) 832 if (p_params->core.tc == LB_TC)
810 pq_id = p_hwfn->qm_info.pure_lb_pq; 833 pq_id = p_hwfn->qm_info.pure_lb_pq;
834 else if (p_params->core.tc == OOO_LB_TC)
835 pq_id = p_hwfn->qm_info.ooo_pq;
811 else 836 else
812 pq_id = p_hwfn->qm_info.offload_pq; 837 pq_id = p_hwfn->qm_info.offload_pq;
813 break; 838 break;
@@ -817,6 +842,18 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
817 pq_id += p_hwfn->qm_info.vf_queues_offset + 842 pq_id += p_hwfn->qm_info.vf_queues_offset +
818 p_params->eth.vf_id; 843 p_params->eth.vf_id;
819 break; 844 break;
845 case PROTOCOLID_ISCSI:
846 if (p_params->iscsi.q_idx == 1)
847 pq_id = p_hwfn->qm_info.pure_ack_pq;
848 break;
849 case PROTOCOLID_ROCE:
850 if (p_params->roce.dcqcn)
851 pq_id = p_params->roce.qpid;
852 else
853 pq_id = p_hwfn->qm_info.offload_pq;
854 if (pq_id > p_hwfn->qm_info.num_pf_rls)
855 pq_id = p_hwfn->qm_info.offload_pq;
856 break;
820 default: 857 default:
821 pq_id = 0; 858 pq_id = 0;
822 } 859 }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index 4367363ade40..d01557092868 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -254,6 +254,10 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn);
254 254
255union qed_qm_pq_params { 255union qed_qm_pq_params {
256 struct { 256 struct {
257 u8 q_idx;
258 } iscsi;
259
260 struct {
257 u8 tc; 261 u8 tc;
258 } core; 262 } core;
259 263
@@ -262,11 +266,15 @@ union qed_qm_pq_params {
262 u8 vf_id; 266 u8 vf_id;
263 u8 tc; 267 u8 tc;
264 } eth; 268 } eth;
269
270 struct {
271 u8 dcqcn;
272 u8 qpid; /* roce relative */
273 } roce;
265}; 274};
266 275
267u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, 276u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
268 enum protocol_type proto, 277 enum protocol_type proto, union qed_qm_pq_params *params);
269 union qed_qm_pq_params *params);
270 278
271int qed_init_fw_data(struct qed_dev *cdev, 279int qed_init_fw_data(struct qed_dev *cdev,
272 const u8 *fw_data); 280 const u8 *fw_data);
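/* Hypothetical caller of the widened union (local names are
 * illustrative): an iSCSI lookup selects the pure-ACK PQ via
 * q_idx == 1, while a RoCE lookup would pass the DCQCN flag and a
 * relative QP id instead.
 */
union qed_qm_pq_params pq_params = { .iscsi = { .q_idx = 1 } };
u16 pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params);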
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index e8a3b9da59b5..23e455f22adc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -31,7 +31,6 @@ enum cminterface {
31}; 31};
32 32
33/* general constants */ 33/* general constants */
34#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
35#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \ 34#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
36 QM_PQ_ELEMENT_SIZE, \ 35 QM_PQ_ELEMENT_SIZE, \
37 0x1000) : 0) 36 0x1000) : 0)
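/* Tracing QM_PQ_MEM_4KB with example numbers (QM_PQ_ELEMENT_SIZE is 4
 * bytes, per the define deleted just above): for pq_size == 1000 CIDs,
 *   DIV_ROUND_UP((1000 + 1) * 4, 0x1000) == 1
 * i.e. one 4KB unit, while pq_size == 0 maps to 0 units.
 */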
@@ -44,28 +43,28 @@ enum cminterface {
44/* other PQ constants */ 43/* other PQ constants */
45#define QM_OTHER_PQS_PER_PF 4 44#define QM_OTHER_PQS_PER_PF 4
46/* WFQ constants */ 45/* WFQ constants */
47#define QM_WFQ_UPPER_BOUND 6250000 46#define QM_WFQ_UPPER_BOUND 62500000
48#define QM_WFQ_VP_PQ_VOQ_SHIFT 0 47#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
49#define QM_WFQ_VP_PQ_PF_SHIFT 5 48#define QM_WFQ_VP_PQ_PF_SHIFT 5
50#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000) 49#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
51#define QM_WFQ_MAX_INC_VAL 4375000 50#define QM_WFQ_MAX_INC_VAL 43750000
52#define QM_WFQ_INIT_CRD(inc_val) (2 * (inc_val)) 51
53/* RL constants */ 52/* RL constants */
54#define QM_RL_UPPER_BOUND 6250000 53#define QM_RL_UPPER_BOUND 62500000
55#define QM_RL_PERIOD 5 /* in us */ 54#define QM_RL_PERIOD 5 /* in us */
56#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD) 55#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
56#define QM_RL_MAX_INC_VAL 43750000
57#define QM_RL_INC_VAL(rate) max_t(u32, \ 57#define QM_RL_INC_VAL(rate) max_t(u32, \
58 (((rate ? rate : 1000000) \ 58 (u32)(((rate ? rate : \
59 * QM_RL_PERIOD) / 8), 1) 59 1000000) * \
60#define QM_RL_MAX_INC_VAL 4375000 60 QM_RL_PERIOD * \
61 101) / (8 * 100)), 1)
61/* AFullOprtnstcCrdMask constants */ 62/* AFullOprtnstcCrdMask constants */
62#define QM_OPPOR_LINE_VOQ_DEF 1 63#define QM_OPPOR_LINE_VOQ_DEF 1
63#define QM_OPPOR_FW_STOP_DEF 0 64#define QM_OPPOR_FW_STOP_DEF 0
64#define QM_OPPOR_PQ_EMPTY_DEF 1 65#define QM_OPPOR_PQ_EMPTY_DEF 1
65#define EAGLE_WORKAROUND_TC 7
66/* Command Queue constants */ 66/* Command Queue constants */
67#define PBF_CMDQ_PURE_LB_LINES 150 67#define PBF_CMDQ_PURE_LB_LINES 150
68#define PBF_CMDQ_EAGLE_WORKAROUND_LINES 8
69#define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \ 68#define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \
70 PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \ 69 PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
71 (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \ 70 (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
@@ -80,7 +79,6 @@ enum cminterface {
80/* BTB: blocks constants (block size = 256B) */ 79/* BTB: blocks constants (block size = 256B) */
81#define BTB_JUMBO_PKT_BLOCKS 38 80#define BTB_JUMBO_PKT_BLOCKS 38
82#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS 81#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
83#define BTB_EAGLE_WORKAROUND_BLOCKS 4
84#define BTB_PURE_LB_FACTOR 10 82#define BTB_PURE_LB_FACTOR 10
85#define BTB_PURE_LB_RATIO 7 83#define BTB_PURE_LB_RATIO 7
86/* QM stop command constants */ 84/* QM stop command constants */
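/* The reworked QM_RL_INC_VAL above is rate * QM_RL_PERIOD / 8 with a
 * 101/100 (1%) headroom factor folded in. For the default rate of
 * 1000000:
 *   (1000000 * 5 * 101) / (8 * 100) == 631250
 * clamped below by 1 via max_t().
 */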
@@ -107,9 +105,9 @@ enum cminterface {
107 cmd ## _ ## field, \ 105 cmd ## _ ## field, \
108 value) 106 value)
109/* QM: VOQ macros */ 107/* QM: VOQ macros */
110#define PHYS_VOQ(port, tc, max_phy_tcs_pr_port) ((port) * \ 108#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) ((port) * \
111 (max_phy_tcs_pr_port) \ 109 (max_phys_tcs_per_port) + \
112 + (tc)) 110 (tc))
113#define LB_VOQ(port) ( \ 111#define LB_VOQ(port) ( \
114 MAX_PHYS_VOQS + (port)) 112 MAX_PHYS_VOQS + (port))
115#define VOQ(port, tc, max_phy_tcs_pr_port) \ 113#define VOQ(port, tc, max_phy_tcs_pr_port) \
@@ -120,8 +118,7 @@ enum cminterface {
120 : LB_VOQ(port)) 118 : LB_VOQ(port))
121/******************** INTERNAL IMPLEMENTATION *********************/ 119/******************** INTERNAL IMPLEMENTATION *********************/
122/* Prepare PF RL enable/disable runtime init values */ 120/* Prepare PF RL enable/disable runtime init values */
123static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, 121static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
124 bool pf_rl_en)
125{ 122{
126 STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0); 123 STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
127 if (pf_rl_en) { 124 if (pf_rl_en) {
@@ -130,8 +127,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
130 (1 << MAX_NUM_VOQS) - 1); 127 (1 << MAX_NUM_VOQS) - 1);
131 /* write RL period */ 128 /* write RL period */
132 STORE_RT_REG(p_hwfn, 129 STORE_RT_REG(p_hwfn,
133 QM_REG_RLPFPERIOD_RT_OFFSET, 130 QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
134 QM_RL_PERIOD_CLK_25M);
135 STORE_RT_REG(p_hwfn, 131 STORE_RT_REG(p_hwfn,
136 QM_REG_RLPFPERIODTIMER_RT_OFFSET, 132 QM_REG_RLPFPERIODTIMER_RT_OFFSET,
137 QM_RL_PERIOD_CLK_25M); 133 QM_RL_PERIOD_CLK_25M);
@@ -144,8 +140,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
144} 140}
145 141
146/* Prepare PF WFQ enable/disable runtime init values */ 142/* Prepare PF WFQ enable/disable runtime init values */
147static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, 143static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
148 bool pf_wfq_en)
149{ 144{
150 STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0); 145 STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
151 /* set credit threshold for QM bypass flow */ 146 /* set credit threshold for QM bypass flow */
@@ -156,8 +151,7 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn,
156} 151}
157 152
158/* Prepare VPORT RL enable/disable runtime init values */ 153/* Prepare VPORT RL enable/disable runtime init values */
159static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, 154static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
160 bool vport_rl_en)
161{ 155{
162 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, 156 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
163 vport_rl_en ? 1 : 0); 157 vport_rl_en ? 1 : 0);
@@ -178,8 +172,7 @@ static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn,
178} 172}
179 173
180/* Prepare VPORT WFQ enable/disable runtime init values */ 174/* Prepare VPORT WFQ enable/disable runtime init values */
181static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, 175static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
182 bool vport_wfq_en)
183{ 176{
184 STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, 177 STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
185 vport_wfq_en ? 1 : 0); 178 vport_wfq_en ? 1 : 0);
@@ -194,8 +187,7 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn,
194 * the specified VOQ 187 * the specified VOQ
195 */ 188 */
196static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn, 189static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
197 u8 voq, 190 u8 voq, u16 cmdq_lines)
198 u16 cmdq_lines)
199{ 191{
200 u32 qm_line_crd; 192 u32 qm_line_crd;
201 193
@@ -221,7 +213,7 @@ static void qed_cmdq_lines_rt_init(
221 u8 max_phys_tcs_per_port, 213 u8 max_phys_tcs_per_port,
222 struct init_qm_port_params port_params[MAX_NUM_PORTS]) 214 struct init_qm_port_params port_params[MAX_NUM_PORTS])
223{ 215{
224 u8 tc, voq, port_id; 216 u8 tc, voq, port_id, num_tcs_in_port;
225 217
226 /* clear PBF lines for all VOQs */ 218 /* clear PBF lines for all VOQs */
227 for (voq = 0; voq < MAX_NUM_VOQS; voq++) 219 for (voq = 0; voq < MAX_NUM_VOQS; voq++)
@@ -229,22 +221,31 @@ static void qed_cmdq_lines_rt_init(
229 for (port_id = 0; port_id < max_ports_per_engine; port_id++) { 221 for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
230 if (port_params[port_id].active) { 222 if (port_params[port_id].active) {
231 u16 phys_lines, phys_lines_per_tc; 223 u16 phys_lines, phys_lines_per_tc;
232 u8 phys_tcs = port_params[port_id].num_active_phys_tcs;
233 224
234 /* find #lines to divide between the active 225 /* find #lines to divide between active phys TCs */
235 * physical TCs.
236 */
237 phys_lines = port_params[port_id].num_pbf_cmd_lines - 226 phys_lines = port_params[port_id].num_pbf_cmd_lines -
238 PBF_CMDQ_PURE_LB_LINES; 227 PBF_CMDQ_PURE_LB_LINES;
239 /* find #lines per active physical TC */ 228 /* find #lines per active physical TC */
240 phys_lines_per_tc = phys_lines / phys_tcs; 229 num_tcs_in_port = 0;
230 for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
231 if (((port_params[port_id].active_phys_tcs >>
232 tc) & 0x1) == 1)
233 num_tcs_in_port++;
234 }
235
236 phys_lines_per_tc = phys_lines / num_tcs_in_port;
241 /* init registers per active TC */ 237 /* init registers per active TC */
242 for (tc = 0; tc < phys_tcs; tc++) { 238 for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
239 if (((port_params[port_id].active_phys_tcs >>
240 tc) & 0x1) != 1)
241 continue;
242
243 voq = PHYS_VOQ(port_id, tc, 243 voq = PHYS_VOQ(port_id, tc,
244 max_phys_tcs_per_port); 244 max_phys_tcs_per_port);
245 qed_cmdq_lines_voq_rt_init(p_hwfn, voq, 245 qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
246 phys_lines_per_tc); 246 phys_lines_per_tc);
247 } 247 }
248
248 /* init registers for pure LB TC */ 249 /* init registers for pure LB TC */
249 qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id), 250 qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
250 PBF_CMDQ_PURE_LB_LINES); 251 PBF_CMDQ_PURE_LB_LINES);
@@ -259,34 +260,42 @@ static void qed_btb_blocks_rt_init(
259 struct init_qm_port_params port_params[MAX_NUM_PORTS]) 260 struct init_qm_port_params port_params[MAX_NUM_PORTS])
260{ 261{
261 u32 usable_blocks, pure_lb_blocks, phys_blocks; 262 u32 usable_blocks, pure_lb_blocks, phys_blocks;
262 u8 tc, voq, port_id; 263 u8 tc, voq, port_id, num_tcs_in_port;
263 264
264 for (port_id = 0; port_id < max_ports_per_engine; port_id++) { 265 for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
265 u32 temp; 266 u32 temp;
266 u8 phys_tcs;
267 267
268 if (!port_params[port_id].active) 268 if (!port_params[port_id].active)
269 continue; 269 continue;
270 270
271 phys_tcs = port_params[port_id].num_active_phys_tcs;
272
273 /* subtract headroom blocks */ 271 /* subtract headroom blocks */
274 usable_blocks = port_params[port_id].num_btb_blocks - 272 usable_blocks = port_params[port_id].num_btb_blocks -
275 BTB_HEADROOM_BLOCKS; 273 BTB_HEADROOM_BLOCKS;
276 274
277 /* find blocks per physical TC. use factor to avoid 275 /* find blocks per physical TC */
278	 * floating arithmetic.	276	num_tcs_in_port = 0;
279 */ 277 for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
278 if (((port_params[port_id].active_phys_tcs >>
279 tc) & 0x1) == 1)
280 num_tcs_in_port++;
281 }
282
280 pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) / 283 pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
281 (phys_tcs * BTB_PURE_LB_FACTOR + 284 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
282 BTB_PURE_LB_RATIO); 285 BTB_PURE_LB_RATIO);
283 pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS, 286 pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
284 pure_lb_blocks / BTB_PURE_LB_FACTOR); 287 pure_lb_blocks / BTB_PURE_LB_FACTOR);
285 phys_blocks = (usable_blocks - pure_lb_blocks) / phys_tcs; 288 phys_blocks = (usable_blocks - pure_lb_blocks) /
289 num_tcs_in_port;
286 290
287 /* init physical TCs */ 291 /* init physical TCs */
288 for (tc = 0; tc < phys_tcs; tc++) { 292 for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
289 voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port); 293 if (((port_params[port_id].active_phys_tcs >>
294 tc) & 0x1) != 1)
295 continue;
296
297 voq = PHYS_VOQ(port_id, tc,
298 max_phys_tcs_per_port);
290 STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq), 299 STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
291 phys_blocks); 300 phys_blocks);
292 } 301 }
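/* Tracing the BTB split above with made-up numbers: usable_blocks ==
 * 1000 and two active phys TCs give
 *   pure_lb_blocks = (1000 * 10) / (2 * 10 + 7) == 370
 *   pure_lb_blocks = max(38, 370 / 10) == 38
 *   phys_blocks = (1000 - 38) / 2 == 481
 * so each active TC VOQ is guaranteed 481 blocks.
 */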
@@ -360,10 +369,11 @@ static void qed_tx_pq_map_rt_init(
360 memset(&tx_pq_map, 0, sizeof(tx_pq_map)); 369 memset(&tx_pq_map, 0, sizeof(tx_pq_map));
361 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1); 370 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
362 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID, 371 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
363 is_vf_pq ? 1 : 0); 372 p_params->pq_params[i].rl_valid ? 1 : 0);
364 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id); 373 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
365 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID, 374 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
366 is_vf_pq ? p_params->pq_params[i].vport_id : 0); 375 p_params->pq_params[i].rl_valid ?
376 p_params->pq_params[i].vport_id : 0);
367 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq); 377 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
368 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, 378 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
369 p_params->pq_params[i].wrr_group); 379 p_params->pq_params[i].wrr_group);
@@ -390,25 +400,11 @@ static void qed_tx_pq_map_rt_init(
390 /* store Tx PQ VF mask to size select register */ 400 /* store Tx PQ VF mask to size select register */
391 for (i = 0; i < num_tx_pq_vf_masks; i++) { 401 for (i = 0; i < num_tx_pq_vf_masks; i++) {
392 if (tx_pq_vf_mask[i]) { 402 if (tx_pq_vf_mask[i]) {
393 if (is_bb_a0) { 403 u32 addr;
394 u32 curr_mask = 0, addr;
395
396 addr = QM_REG_MAXPQSIZETXSEL_0 + (i * 4);
397 if (!p_params->is_first_pf)
398 curr_mask = qed_rd(p_hwfn, p_ptt,
399 addr);
400
401 addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
402
403 STORE_RT_REG(p_hwfn, addr,
404 curr_mask | tx_pq_vf_mask[i]);
405 } else {
406 u32 addr;
407 404
408 addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i; 405 addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
409 STORE_RT_REG(p_hwfn, addr, 406 STORE_RT_REG(p_hwfn, addr,
410 tx_pq_vf_mask[i]); 407 tx_pq_vf_mask[i]);
411 }
412 } 408 }
413 } 409 }
414} 410}
@@ -418,8 +414,7 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
418 u8 port_id, 414 u8 port_id,
419 u8 pf_id, 415 u8 pf_id,
420 u32 num_pf_cids, 416 u32 num_pf_cids,
421 u32 num_tids, 417 u32 num_tids, u32 base_mem_addr_4kb)
422 u32 base_mem_addr_4kb)
423{ 418{
424 u16 i, pq_id; 419 u16 i, pq_id;
425 420
@@ -465,15 +460,10 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
465 (p_params->pf_id % MAX_NUM_PFS_BB); 460 (p_params->pf_id % MAX_NUM_PFS_BB);
466 461
467 inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq); 462 inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
468 if (inc_val > QM_WFQ_MAX_INC_VAL) { 463 if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
469 DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration"); 464 DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
470 return -1; 465 return -1;
471 } 466 }
472 STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
473 inc_val);
474 STORE_RT_REG(p_hwfn,
475 QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
476 QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
477 467
478 for (i = 0; i < num_tx_pqs; i++) { 468 for (i = 0; i < num_tx_pqs; i++) {
479 u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id, 469 u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
@@ -481,19 +471,21 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
481 471
482 OVERWRITE_RT_REG(p_hwfn, 472 OVERWRITE_RT_REG(p_hwfn,
483 crd_reg_offset + voq * MAX_NUM_PFS_BB, 473 crd_reg_offset + voq * MAX_NUM_PFS_BB,
484 QM_WFQ_INIT_CRD(inc_val) |
485 QM_WFQ_CRD_REG_SIGN_BIT); 474 QM_WFQ_CRD_REG_SIGN_BIT);
486 } 475 }
487 476
477 STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
478 inc_val);
479 STORE_RT_REG(p_hwfn,
480 QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
481 QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
488 return 0; 482 return 0;
489} 483}
490 484
491/* Prepare PF RL runtime init values for the specified PF. 485/* Prepare PF RL runtime init values for the specified PF.
492 * Return -1 on error. 486 * Return -1 on error.
493 */ 487 */
494static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, 488static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
495 u8 pf_id,
496 u32 pf_rl)
497{ 489{
498 u32 inc_val = QM_RL_INC_VAL(pf_rl); 490 u32 inc_val = QM_RL_INC_VAL(pf_rl);
499 491
@@ -607,9 +599,7 @@ static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
607 599
608static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn, 600static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
609 struct qed_ptt *p_ptt, 601 struct qed_ptt *p_ptt,
610 u32 cmd_addr, 602 u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
611 u32 cmd_data_lsb,
612 u32 cmd_data_msb)
613{ 603{
614 if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt)) 604 if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
615 return false; 605 return false;
@@ -627,9 +617,7 @@ static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
627u32 qed_qm_pf_mem_size(u8 pf_id, 617u32 qed_qm_pf_mem_size(u8 pf_id,
628 u32 num_pf_cids, 618 u32 num_pf_cids,
629 u32 num_vf_cids, 619 u32 num_vf_cids,
630 u32 num_tids, 620 u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
631 u16 num_pf_pqs,
632 u16 num_vf_pqs)
633{ 621{
634 return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs + 622 return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
635 QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs + 623 QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
@@ -713,8 +701,7 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
713} 701}
714 702
715int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, 703int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
716 struct qed_ptt *p_ptt, 704 struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
717 u8 pf_id, u16 pf_wfq)
718{ 705{
719 u32 inc_val = QM_WFQ_INC_VAL(pf_wfq); 706 u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
720 707
@@ -728,9 +715,7 @@ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
728} 715}
729 716
730int qed_init_pf_rl(struct qed_hwfn *p_hwfn, 717int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
731 struct qed_ptt *p_ptt, 718 struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
732 u8 pf_id,
733 u32 pf_rl)
734{ 719{
735 u32 inc_val = QM_RL_INC_VAL(pf_rl); 720 u32 inc_val = QM_RL_INC_VAL(pf_rl);
736 721
@@ -749,8 +734,7 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
749 734
750int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, 735int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
751 struct qed_ptt *p_ptt, 736 struct qed_ptt *p_ptt,
752 u16 first_tx_pq_id[NUM_OF_TCS], 737 u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
753 u16 vport_wfq)
754{ 738{
755 u32 inc_val = QM_WFQ_INC_VAL(vport_wfq); 739 u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
756 u8 tc; 740 u8 tc;
@@ -773,9 +757,7 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
773} 757}
774 758
775int qed_init_vport_rl(struct qed_hwfn *p_hwfn, 759int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
776 struct qed_ptt *p_ptt, 760 struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
777 u8 vport_id,
778 u32 vport_rl)
779{ 761{
780 u32 inc_val = QM_RL_INC_VAL(vport_rl); 762 u32 inc_val = QM_RL_INC_VAL(vport_rl);
781 763
@@ -795,9 +777,7 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
795bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, 777bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
796 struct qed_ptt *p_ptt, 778 struct qed_ptt *p_ptt,
797 bool is_release_cmd, 779 bool is_release_cmd,
798 bool is_tx_pq, 780 bool is_tx_pq, u16 start_pq, u16 num_pqs)
799 u16 start_pq,
800 u16 num_pqs)
801{ 781{
802 u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 }; 782 u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
803 u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id; 783 u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
@@ -841,17 +821,15 @@ qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
841#define PRS_ETH_TUNN_FIC_FORMAT -188897008 821#define PRS_ETH_TUNN_FIC_FORMAT -188897008
842 822
843void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, 823void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
844 struct qed_ptt *p_ptt, 824 struct qed_ptt *p_ptt, u16 dest_port)
845 u16 dest_port)
846{ 825{
847 qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port); 826 qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
848 qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port); 827 qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
849 qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port); 828 qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
850} 829}
851 830
852void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, 831void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
853 struct qed_ptt *p_ptt, 832 struct qed_ptt *p_ptt, bool vxlan_enable)
854 bool vxlan_enable)
855{ 833{
856 unsigned long reg_val = 0; 834 unsigned long reg_val = 0;
857 u8 shift; 835 u8 shift;
@@ -908,8 +886,7 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
908} 886}
909 887
910void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, 888void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
911 struct qed_ptt *p_ptt, 889 struct qed_ptt *p_ptt, u16 dest_port)
912 u16 dest_port)
913{ 890{
914 qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port); 891 qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
915 qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port); 892 qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
@@ -918,8 +895,7 @@ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
918 895
919void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, 896void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
920 struct qed_ptt *p_ptt, 897 struct qed_ptt *p_ptt,
921 bool eth_geneve_enable, 898 bool eth_geneve_enable, bool ip_geneve_enable)
922 bool ip_geneve_enable)
923{ 899{
924 unsigned long reg_val = 0; 900 unsigned long reg_val = 0;
925 u8 shift; 901 u8 shift;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index d358c3bb1308..9866a20d2128 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -543,8 +543,7 @@ void qed_gtt_init(struct qed_hwfn *p_hwfn)
543 pxp_global_win[i]); 543 pxp_global_win[i]);
544} 544}
545 545
546int qed_init_fw_data(struct qed_dev *cdev, 546int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
547 const u8 *data)
548{ 547{
549 struct qed_fw_data *fw = cdev->fw_data; 548 struct qed_fw_data *fw = cdev->fw_data;
550 struct bin_buffer_hdr *buf_hdr; 549 struct bin_buffer_hdr *buf_hdr;
@@ -555,7 +554,11 @@ int qed_init_fw_data(struct qed_dev *cdev,
555 return -EINVAL; 554 return -EINVAL;
556 } 555 }
557 556
558 buf_hdr = (struct bin_buffer_hdr *)data; 557 /* First Dword contains metadata and should be skipped */
558 buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
559
560 offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset;
561 fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
559 562
560 offset = buf_hdr[BIN_BUF_INIT_CMD].offset; 563 offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
561 fw->init_ops = (union init_op *)(data + offset); 564 fw->init_ops = (union init_op *)(data + offset);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 09a6ad3d22dd..8fa50fa23c8d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -2418,6 +2418,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
2418{ 2418{
2419 struct qed_dev *cdev = p_hwfn->cdev; 2419 struct qed_dev *cdev = p_hwfn->cdev;
2420 u32 cau_state; 2420 u32 cau_state;
2421 u8 timer_res;
2421 2422
2422 memset(p_sb_entry, 0, sizeof(*p_sb_entry)); 2423 memset(p_sb_entry, 0, sizeof(*p_sb_entry));
2423 2424
@@ -2443,6 +2444,23 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
2443 cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS; 2444 cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
2444 } 2445 }
2445 2446
2447 /* Coalesce = (timeset << timer-res), timeset is 7bit wide */
2448 if (cdev->rx_coalesce_usecs <= 0x7F)
2449 timer_res = 0;
2450 else if (cdev->rx_coalesce_usecs <= 0xFF)
2451 timer_res = 1;
2452 else
2453 timer_res = 2;
2454 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
2455
2456 if (cdev->tx_coalesce_usecs <= 0x7F)
2457 timer_res = 0;
2458 else if (cdev->tx_coalesce_usecs <= 0xFF)
2459 timer_res = 1;
2460 else
2461 timer_res = 2;
2462 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
2463
2446 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); 2464 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
2447 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); 2465 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
2448} 2466}
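/* The resolution pick above can be read as a small helper (a sketch
 * only, not in the patch): the CAU timeset field is 7 bits wide, so the
 * coalescing time in usecs is right-shifted by timer_res until it fits.
 * E.g. 200 usecs selects timer_res == 1 and a timeset of 100.
 */
static u8 qed_coalesce_timer_res(u32 usecs)
{
	if (usecs <= 0x7F)
		return 0;
	if (usecs <= 0xFF)
		return 1;
	return 2;
}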
@@ -2484,17 +2502,28 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
2484 2502
2485 /* Configure pi coalescing if set */ 2503 /* Configure pi coalescing if set */
2486 if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { 2504 if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
2487 u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >> 2505 u8 timeset, timer_res;
2488 (QED_CAU_DEF_RX_TIMER_RES + 1);
2489 u8 num_tc = 1, i; 2506 u8 num_tc = 1, i;
2490 2507
2508 /* timeset = (coalesce >> timer-res), timeset is 7bit wide */
2509 if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
2510 timer_res = 0;
2511 else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
2512 timer_res = 1;
2513 else
2514 timer_res = 2;
2515 timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
2491 qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, 2516 qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
2492 QED_COAL_RX_STATE_MACHINE, 2517 QED_COAL_RX_STATE_MACHINE,
2493 timeset); 2518 timeset);
2494 2519
2495 timeset = p_hwfn->cdev->tx_coalesce_usecs >> 2520 if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
2496 (QED_CAU_DEF_TX_TIMER_RES + 1); 2521 timer_res = 0;
2497 2522 else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
2523 timer_res = 1;
2524 else
2525 timer_res = 2;
2526 timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
2498 for (i = 0; i < num_tc; i++) { 2527 for (i = 0; i < num_tc; i++) {
2499 qed_int_cau_conf_pi(p_hwfn, p_ptt, 2528 qed_int_cau_conf_pi(p_hwfn, p_ptt,
2500 igu_sb_id, TX_PI(i), 2529 igu_sb_id, TX_PI(i),
@@ -3199,3 +3228,39 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev)
3199 for_each_hwfn(cdev, i) 3228 for_each_hwfn(cdev, i)
3200 cdev->hwfns[i].b_int_requested = false; 3229 cdev->hwfns[i].b_int_requested = false;
3201} 3230}
3231
3232int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3233 u8 timer_res, u16 sb_id, bool tx)
3234{
3235 struct cau_sb_entry sb_entry;
3236 int rc;
3237
3238 if (!p_hwfn->hw_init_done) {
3239 DP_ERR(p_hwfn, "hardware not initialized yet\n");
3240 return -EINVAL;
3241 }
3242
3243 rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
3244 sb_id * sizeof(u64),
3245 (u64)(uintptr_t)&sb_entry, 2, 0);
3246 if (rc) {
3247 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
3248 return rc;
3249 }
3250
3251 if (tx)
3252 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
3253 else
3254 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
3255
3256 rc = qed_dmae_host2grc(p_hwfn, p_ptt,
3257 (u64)(uintptr_t)&sb_entry,
3258 CAU_REG_SB_VAR_MEMORY +
3259 sb_id * sizeof(u64), 2, 0);
3260 if (rc) {
3261 DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
3262 return rc;
3263 }
3264
3265 return rc;
3266}
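/* Hedged usage sketch for the new helper (p_ptt acquisition and error
 * handling omitted): move status block 0 to the coarsest Tx timer
 * resolution at runtime.
 */
rc = qed_int_set_timer_res(p_hwfn, p_ptt, 2 /* timer_res */,
			   0 /* sb_id */, true /* tx */);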
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 20b468637504..0948be64dc78 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -389,6 +389,9 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
389 u16 vf_number, 389 u16 vf_number,
390 u8 vf_valid); 390 u8 vf_valid);
391 391
392int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
393 u8 timer_res, u16 sb_id, bool tx);
394
392#define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev)) 395#define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev))
393 396
394#endif 397#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index aada4c7e095f..a12c6caa6c66 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -572,9 +572,12 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
572 p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); 572 p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
573 DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr); 573 DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
574 574
575 rc = qed_spq_post(p_hwfn, p_ent, NULL); 575 p_ramrod->vf_rx_prod_index = params->vf_qid;
576 if (params->vf_qid)
577 DP_VERBOSE(p_hwfn, QED_MSG_SP,
578 "Queue is meant for VF rxq[%04x]\n", params->vf_qid);
576 579
577 return rc; 580 return qed_spq_post(p_hwfn, p_ent, NULL);
578} 581}
579 582
580static int 583static int
@@ -612,7 +615,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
612 615
613 *pp_prod = (u8 __iomem *)p_hwfn->regview + 616 *pp_prod = (u8 __iomem *)p_hwfn->regview +
614 GTT_BAR0_MAP_REG_MSDM_RAM + 617 GTT_BAR0_MAP_REG_MSDM_RAM +
615 MSTORM_PRODS_OFFSET(abs_l2_queue); 618 MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);
616 619
617 /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ 620 /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
618 __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64), 621 __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
@@ -756,9 +759,9 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
756 struct qed_spq_entry *p_ent = NULL; 759 struct qed_spq_entry *p_ent = NULL;
757 struct qed_sp_init_data init_data; 760 struct qed_sp_init_data init_data;
758 struct qed_hw_cid_data *p_tx_cid; 761 struct qed_hw_cid_data *p_tx_cid;
759 u8 abs_vport_id; 762 u16 pq_id, abs_tx_q_id = 0;
760 int rc = -EINVAL; 763 int rc = -EINVAL;
761 u16 pq_id; 764 u8 abs_vport_id;
762 765
763 /* Store information for the stop */ 766 /* Store information for the stop */
764 p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id]; 767 p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
@@ -769,6 +772,10 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
769 if (rc) 772 if (rc)
770 return rc; 773 return rc;
771 774
775 rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id);
776 if (rc)
777 return rc;
778
772 /* Get SPQ entry */ 779 /* Get SPQ entry */
773 memset(&init_data, 0, sizeof(init_data)); 780 memset(&init_data, 0, sizeof(init_data));
774 init_data.cid = cid; 781 init_data.cid = cid;
@@ -788,6 +795,7 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
788 p_ramrod->sb_index = p_params->sb_idx; 795 p_ramrod->sb_index = p_params->sb_idx;
789 p_ramrod->stats_counter_id = stats_id; 796 p_ramrod->stats_counter_id = stats_id;
790 797
798 p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id);
791 p_ramrod->pbl_size = cpu_to_le16(pbl_size); 799 p_ramrod->pbl_size = cpu_to_le16(pbl_size);
792 DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr); 800 DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
793 801
@@ -1482,51 +1490,51 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
1482 offsetof(struct public_port, stats), 1490 offsetof(struct public_port, stats),
1483 sizeof(port_stats)); 1491 sizeof(port_stats));
1484 1492
1485 p_stats->rx_64_byte_packets += port_stats.pmm.r64; 1493 p_stats->rx_64_byte_packets += port_stats.eth.r64;
1486 p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127; 1494 p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
1487 p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255; 1495 p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
1488 p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511; 1496 p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
1489 p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023; 1497 p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
1490 p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518; 1498 p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
1491 p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522; 1499 p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
1492 p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047; 1500 p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
1493 p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095; 1501 p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
1494 p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216; 1502 p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
1495 p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383; 1503 p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
1496 p_stats->rx_crc_errors += port_stats.pmm.rfcs; 1504 p_stats->rx_crc_errors += port_stats.eth.rfcs;
1497 p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf; 1505 p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
1498 p_stats->rx_pause_frames += port_stats.pmm.rxpf; 1506 p_stats->rx_pause_frames += port_stats.eth.rxpf;
1499 p_stats->rx_pfc_frames += port_stats.pmm.rxpp; 1507 p_stats->rx_pfc_frames += port_stats.eth.rxpp;
1500 p_stats->rx_align_errors += port_stats.pmm.raln; 1508 p_stats->rx_align_errors += port_stats.eth.raln;
1501 p_stats->rx_carrier_errors += port_stats.pmm.rfcr; 1509 p_stats->rx_carrier_errors += port_stats.eth.rfcr;
1502 p_stats->rx_oversize_packets += port_stats.pmm.rovr; 1510 p_stats->rx_oversize_packets += port_stats.eth.rovr;
1503 p_stats->rx_jabbers += port_stats.pmm.rjbr; 1511 p_stats->rx_jabbers += port_stats.eth.rjbr;
1504 p_stats->rx_undersize_packets += port_stats.pmm.rund; 1512 p_stats->rx_undersize_packets += port_stats.eth.rund;
1505 p_stats->rx_fragments += port_stats.pmm.rfrg; 1513 p_stats->rx_fragments += port_stats.eth.rfrg;
1506 p_stats->tx_64_byte_packets += port_stats.pmm.t64; 1514 p_stats->tx_64_byte_packets += port_stats.eth.t64;
1507 p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127; 1515 p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
1508 p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255; 1516 p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
1509 p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511; 1517 p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
1510 p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023; 1518 p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
1511 p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518; 1519 p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
1512 p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047; 1520 p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
1513 p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095; 1521 p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
1514 p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216; 1522 p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
1515 p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383; 1523 p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
1516 p_stats->tx_pause_frames += port_stats.pmm.txpf; 1524 p_stats->tx_pause_frames += port_stats.eth.txpf;
1517 p_stats->tx_pfc_frames += port_stats.pmm.txpp; 1525 p_stats->tx_pfc_frames += port_stats.eth.txpp;
1518 p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec; 1526 p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
1519 p_stats->tx_total_collisions += port_stats.pmm.tncl; 1527 p_stats->tx_total_collisions += port_stats.eth.tncl;
1520 p_stats->rx_mac_bytes += port_stats.pmm.rbyte; 1528 p_stats->rx_mac_bytes += port_stats.eth.rbyte;
1521 p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca; 1529 p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
1522 p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca; 1530 p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
1523 p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca; 1531 p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
1524 p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok; 1532 p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
1525 p_stats->tx_mac_bytes += port_stats.pmm.tbyte; 1533 p_stats->tx_mac_bytes += port_stats.eth.tbyte;
1526 p_stats->tx_mac_uc_packets += port_stats.pmm.txuca; 1534 p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
1527 p_stats->tx_mac_mc_packets += port_stats.pmm.txmca; 1535 p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
1528 p_stats->tx_mac_bc_packets += port_stats.pmm.txbca; 1536 p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
1529 p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf; 1537 p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
1530 for (j = 0; j < 8; j++) { 1538 for (j = 0; j < 8; j++) {
1531 p_stats->brb_truncates += port_stats.brb.brb_truncate[j]; 1539 p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
1532 p_stats->brb_discards += port_stats.brb.brb_discard[j]; 1540 p_stats->brb_discards += port_stats.brb.brb_discard[j];
@@ -2156,11 +2164,18 @@ static int qed_fp_cqe_completion(struct qed_dev *dev,
2156extern const struct qed_iov_hv_ops qed_iov_ops_pass; 2164extern const struct qed_iov_hv_ops qed_iov_ops_pass;
2157#endif 2165#endif
2158 2166
2167#ifdef CONFIG_DCB
2168extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
2169#endif
2170
2159static const struct qed_eth_ops qed_eth_ops_pass = { 2171static const struct qed_eth_ops qed_eth_ops_pass = {
2160 .common = &qed_common_ops_pass, 2172 .common = &qed_common_ops_pass,
2161#ifdef CONFIG_QED_SRIOV 2173#ifdef CONFIG_QED_SRIOV
2162 .iov = &qed_iov_ops_pass, 2174 .iov = &qed_iov_ops_pass,
2163#endif 2175#endif
2176#ifdef CONFIG_DCB
2177 .dcb = &qed_dcbnl_ops_pass,
2178#endif
2164 .fill_dev_info = &qed_fill_eth_dev_info, 2179 .fill_dev_info = &qed_fill_eth_dev_info,
2165 .register_ops = &qed_register_eth_ops, 2180 .register_ops = &qed_register_eth_ops,
2166 .check_mac = &qed_check_mac, 2181 .check_mac = &qed_check_mac,
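
Since .dcb, like .iov, is only populated when the matching Kconfig option is enabled, anything that dereferences it has to treat the pointer as optional. A hedged sketch of such a guard (the helper name is invented):

```c
/* Hypothetical guard: report whether the qed core was built with DCB
 * support.  Mirrors the #ifdef structure of qed_eth_ops_pass above.
 */
static bool example_dcb_supported(const struct qed_eth_ops *ops)
{
#ifdef CONFIG_DCB
	return ops->dcb != NULL;
#else
	return false;
#endif
}
```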
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index c7e01b303540..1f13abb5c316 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -207,6 +207,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
207 dev_info->pci_mem_start = cdev->pci_params.mem_start; 207 dev_info->pci_mem_start = cdev->pci_params.mem_start;
208 dev_info->pci_mem_end = cdev->pci_params.mem_end; 208 dev_info->pci_mem_end = cdev->pci_params.mem_end;
209 dev_info->pci_irq = cdev->pci_params.irq; 209 dev_info->pci_irq = cdev->pci_params.irq;
210 dev_info->rdma_supported =
211 (cdev->hwfns[0].hw_info.personality == QED_PCI_ETH_ROCE);
210 dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); 212 dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
211 ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); 213 ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
212 214
@@ -832,7 +834,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
832 goto err2; 834 goto err2;
833 } 835 }
834 836
 835 data = cdev->firmware->data; 837 /* First Dword used to differentiate between various sources */
838 data = cdev->firmware->data + sizeof(u32);
836 } 839 }
837 840
838 memset(&tunn_info, 0, sizeof(tunn_info)); 841 memset(&tunn_info, 0, sizeof(tunn_info));
@@ -900,7 +903,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
900 903
901 if (IS_PF(cdev)) { 904 if (IS_PF(cdev)) {
902 qed_free_stream_mem(cdev); 905 qed_free_stream_mem(cdev);
903 qed_sriov_disable(cdev, true); 906 if (IS_QED_ETH_IF(cdev))
907 qed_sriov_disable(cdev, true);
904 908
905 qed_nic_stop(cdev); 909 qed_nic_stop(cdev);
906 qed_slowpath_irq_free(cdev); 910 qed_slowpath_irq_free(cdev);
@@ -991,8 +995,7 @@ static bool qed_can_link_change(struct qed_dev *cdev)
991 return true; 995 return true;
992} 996}
993 997
994static int qed_set_link(struct qed_dev *cdev, 998static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
995 struct qed_link_params *params)
996{ 999{
997 struct qed_hwfn *hwfn; 1000 struct qed_hwfn *hwfn;
998 struct qed_mcp_link_params *link_params; 1001 struct qed_mcp_link_params *link_params;
@@ -1032,7 +1035,7 @@ static int qed_set_link(struct qed_dev *cdev,
1032 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; 1035 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
1033 if (params->adv_speeds & 0) 1036 if (params->adv_speeds & 0)
1034 link_params->speed.advertised_speeds |= 1037 link_params->speed.advertised_speeds |=
1035 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G; 1038 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
1036 } 1039 }
1037 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) 1040 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
1038 link_params->speed.forced_speed = params->forced_speed; 1041 link_params->speed.forced_speed = params->forced_speed;
@@ -1053,19 +1056,19 @@ static int qed_set_link(struct qed_dev *cdev,
1053 if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { 1056 if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
1054 switch (params->loopback_mode) { 1057 switch (params->loopback_mode) {
1055 case QED_LINK_LOOPBACK_INT_PHY: 1058 case QED_LINK_LOOPBACK_INT_PHY:
1056 link_params->loopback_mode = PMM_LOOPBACK_INT_PHY; 1059 link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
1057 break; 1060 break;
1058 case QED_LINK_LOOPBACK_EXT_PHY: 1061 case QED_LINK_LOOPBACK_EXT_PHY:
1059 link_params->loopback_mode = PMM_LOOPBACK_EXT_PHY; 1062 link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
1060 break; 1063 break;
1061 case QED_LINK_LOOPBACK_EXT: 1064 case QED_LINK_LOOPBACK_EXT:
1062 link_params->loopback_mode = PMM_LOOPBACK_EXT; 1065 link_params->loopback_mode = ETH_LOOPBACK_EXT;
1063 break; 1066 break;
1064 case QED_LINK_LOOPBACK_MAC: 1067 case QED_LINK_LOOPBACK_MAC:
1065 link_params->loopback_mode = PMM_LOOPBACK_MAC; 1068 link_params->loopback_mode = ETH_LOOPBACK_MAC;
1066 break; 1069 break;
1067 default: 1070 default:
1068 link_params->loopback_mode = PMM_LOOPBACK_NONE; 1071 link_params->loopback_mode = ETH_LOOPBACK_NONE;
1069 break; 1072 break;
1070 } 1073 }
1071 } 1074 }
@@ -1185,7 +1188,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
1185 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1188 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1186 if_link->advertised_caps |= 0; 1189 if_link->advertised_caps |= 0;
1187 if (params.speed.advertised_speeds & 1190 if (params.speed.advertised_speeds &
1188 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G) 1191 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1189 if_link->advertised_caps |= 0; 1192 if_link->advertised_caps |= 0;
1190 1193
1191 if (link_caps.speed_capabilities & 1194 if (link_caps.speed_capabilities &
@@ -1202,7 +1205,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
1202 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1205 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1203 if_link->supported_caps |= 0; 1206 if_link->supported_caps |= 0;
1204 if (link_caps.speed_capabilities & 1207 if (link_caps.speed_capabilities &
1205 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G) 1208 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1206 if_link->supported_caps |= 0; 1209 if_link->supported_caps |= 0;
1207 1210
1208 if (link.link_up) 1211 if (link.link_up)
@@ -1301,6 +1304,38 @@ static int qed_drain(struct qed_dev *cdev)
1301 return 0; 1304 return 0;
1302} 1305}
1303 1306
1307static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
1308{
1309 *rx_coal = cdev->rx_coalesce_usecs;
1310 *tx_coal = cdev->tx_coalesce_usecs;
1311}
1312
1313static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
1314 u8 qid, u16 sb_id)
1315{
1316 struct qed_hwfn *hwfn;
1317 struct qed_ptt *ptt;
1318 int hwfn_index;
1319 int status = 0;
1320
1321 hwfn_index = qid % cdev->num_hwfns;
1322 hwfn = &cdev->hwfns[hwfn_index];
1323 ptt = qed_ptt_acquire(hwfn);
1324 if (!ptt)
1325 return -EAGAIN;
1326
1327 status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal,
1328 qid / cdev->num_hwfns, sb_id);
1329 if (status)
1330 goto out;
1331 status = qed_set_txq_coalesce(hwfn, ptt, tx_coal,
1332 qid / cdev->num_hwfns, sb_id);
1333out:
1334 qed_ptt_release(hwfn, ptt);
1335
1336 return status;
1337}
1338
1304static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) 1339static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
1305{ 1340{
1306 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 1341 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
@@ -1347,5 +1382,7 @@ const struct qed_common_ops qed_common_ops_pass = {
1347 .update_msglvl = &qed_init_dp, 1382 .update_msglvl = &qed_init_dp,
1348 .chain_alloc = &qed_chain_alloc, 1383 .chain_alloc = &qed_chain_alloc,
1349 .chain_free = &qed_chain_free, 1384 .chain_free = &qed_chain_free,
1385 .get_coalesce = &qed_get_coalesce,
1386 .set_coalesce = &qed_set_coalesce,
1350 .set_led = &qed_set_led, 1387 .set_led = &qed_set_led,
1351}; 1388};
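
The two new callbacks split responsibilities: get_coalesce() reports the device-wide defaults, while set_coalesce() programs one queue/SB pair. Note how qed_set_coalesce() de-interleaves the queue id for 100G devices: qid % num_hwfns picks the engine, qid / num_hwfns the queue within it. A hypothetical composition of the two ops (not qede's actual handler):

```c
/* Sketch: skip reprogramming when the requested values already match
 * the stored ones.  Signatures follow the ops added above.
 */
static int example_update_coalesce(struct qed_dev *cdev,
				   const struct qed_common_ops *ops,
				   u16 rx_usecs, u16 tx_usecs,
				   u8 qid, u16 sb_id)
{
	u16 cur_rx, cur_tx;

	ops->get_coalesce(cdev, &cur_rx, &cur_tx);
	if (cur_rx == rx_usecs && cur_tx == tx_usecs)
		return 0;

	return ops->set_coalesce(cdev, rx_usecs, tx_usecs, qid, sb_id);
}
```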
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 1182361798b5..a240f26344a4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -531,9 +531,9 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
531 transceiver_data))); 531 transceiver_data)));
532 532
533 transceiver_state = GET_FIELD(transceiver_state, 533 transceiver_state = GET_FIELD(transceiver_state,
534 PMM_TRANSCEIVER_STATE); 534 ETH_TRANSCEIVER_STATE);
535 535
536 if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT) 536 if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
537 DP_NOTICE(p_hwfn, "Transceiver is present.\n"); 537 DP_NOTICE(p_hwfn, "Transceiver is present.\n");
538 else 538 else
539 DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n"); 539 DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
@@ -668,14 +668,12 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
668 qed_link_update(p_hwfn); 668 qed_link_update(p_hwfn);
669} 669}
670 670
671int qed_mcp_set_link(struct qed_hwfn *p_hwfn, 671int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
672 struct qed_ptt *p_ptt,
673 bool b_up)
674{ 672{
675 struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input; 673 struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
676 struct qed_mcp_mb_params mb_params; 674 struct qed_mcp_mb_params mb_params;
677 union drv_union_data union_data; 675 union drv_union_data union_data;
678 struct pmm_phy_cfg *phy_cfg; 676 struct eth_phy_cfg *phy_cfg;
679 int rc = 0; 677 int rc = 0;
680 u32 cmd; 678 u32 cmd;
681 679
@@ -685,9 +683,9 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
685 cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET; 683 cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
686 if (!params->speed.autoneg) 684 if (!params->speed.autoneg)
687 phy_cfg->speed = params->speed.forced_speed; 685 phy_cfg->speed = params->speed.forced_speed;
688 phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0; 686 phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
689 phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0; 687 phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
690 phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0; 688 phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
691 phy_cfg->adv_speed = params->speed.advertised_speeds; 689 phy_cfg->adv_speed = params->speed.advertised_speeds;
692 phy_cfg->loopback_mode = params->loopback_mode; 690 phy_cfg->loopback_mode = params->loopback_mode;
693 691
@@ -773,6 +771,34 @@ static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
773 return size; 771 return size;
774} 772}
775 773
774int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
775 struct qed_ptt *p_ptt, u8 *p_pf)
776{
777 struct public_func shmem_info;
778 int i;
779
780 /* Find first Ethernet interface in port */
781 for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->cdev);
782 i += p_hwfn->cdev->num_ports_in_engines) {
783 qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
784 MCP_PF_ID_BY_REL(p_hwfn, i));
785
786 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
787 continue;
788
789 if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
790 FUNC_MF_CFG_PROTOCOL_ETHERNET) {
791 *p_pf = (u8)i;
792 return 0;
793 }
794 }
795
796 DP_NOTICE(p_hwfn,
797 "Failed to find on port an ethernet interface in MF_SI mode\n");
798
799 return -EINVAL;
800}
801
776static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, 802static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
777 struct qed_ptt *p_ptt) 803 struct qed_ptt *p_ptt)
778{ 804{
@@ -951,7 +977,18 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
951 977
952 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { 978 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
953 case FUNC_MF_CFG_PROTOCOL_ETHERNET: 979 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
954 *p_proto = QED_PCI_ETH; 980 if (test_bit(QED_DEV_CAP_ROCE,
981 &p_hwfn->hw_info.device_capabilities))
982 *p_proto = QED_PCI_ETH_ROCE;
983 else
984 *p_proto = QED_PCI_ETH;
985 break;
986 case FUNC_MF_CFG_PROTOCOL_ISCSI:
987 *p_proto = QED_PCI_ISCSI;
988 break;
989 case FUNC_MF_CFG_PROTOCOL_ROCE:
990 DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
991 rc = -EINVAL;
955 break; 992 break;
956 default: 993 default:
957 rc = -EINVAL; 994 rc = -EINVAL;
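
Two things worth spelling out in this file's hunks: qed_hw_init_first_eth() finds the first non-hidden Ethernet PF on the caller's port by striding over the engine's PFs, and the protocol switch now maps FUNC_MF_CFG_PROTOCOL_* onto the wider personality set (with a plain RoCE protocol rejected as invalid). A standalone illustration of the stride, assuming the usual round-robin PF-to-port layout:

```c
#include <stdio.h>

/* With 8 engine PFs and 2 ports per engine, consecutive PFs alternate
 * ports, so stepping by ports_in_engine visits only one port's PFs.
 * Assumed layout for illustration; the driver reads the real mapping
 * from shmem.
 */
int main(void)
{
	int num_eng_pfs = 8, ports_in_engine = 2, port = 0;
	int i;

	for (i = port; i < num_eng_pfs; i += ports_in_engine)
		printf("port %d owns PF %d\n", port, i);
	return 0;
}
```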
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 6dd59eb7f4c6..7f319aa1b229 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -457,4 +457,7 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
457 struct qed_ptt *p_ptt, 457 struct qed_ptt *p_ptt,
458 struct qed_mcp_link_state *p_link, 458 struct qed_mcp_link_state *p_link,
459 u8 min_bw); 459 u8 min_bw);
460
461int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
462 struct qed_ptt *p_ptt, u8 *p_pf);
460#endif 463#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 3a6c506f0d71..f6b86ca1ff79 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -27,6 +27,35 @@
27#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \ 27#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \
28 0xff << 24) 28 0xff << 24)
29 29
30#define CDU_REG_SEGMENT0_PARAMS \
31 0x580904UL
32#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK \
33 (0xfff << 0)
34#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT \
35 0
36#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE \
37 (0xff << 16)
38#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT \
39 16
40#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE \
41 (0xff << 24)
42#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT \
43 24
44#define CDU_REG_SEGMENT1_PARAMS \
45 0x580908UL
46#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK \
47 (0xfff << 0)
48#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT \
49 0
50#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE \
51 (0xff << 16)
52#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT \
53 16
54#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE \
55 (0xff << 24)
56#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT \
57 24
58
30#define XSDM_REG_OPERATION_GEN \ 59#define XSDM_REG_OPERATION_GEN \
31 0xf80408UL 60 0xf80408UL
32#define NIG_REG_RX_BRB_OUT_EN \ 61#define NIG_REG_RX_BRB_OUT_EN \
@@ -51,6 +80,8 @@
51 0x1f00000UL 80 0x1f00000UL
52#define BAR0_MAP_REG_TSDM_RAM \ 81#define BAR0_MAP_REG_TSDM_RAM \
53 0x1c80000UL 82 0x1c80000UL
83#define BAR0_MAP_REG_XSDM_RAM \
84 0x1e00000UL
54#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \ 85#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
55 0x5011f4UL 86 0x5011f4UL
56#define PRS_REG_SEARCH_TCP \ 87#define PRS_REG_SEARCH_TCP \
@@ -167,6 +198,10 @@
167 0x1800004UL 198 0x1800004UL
168#define NIG_REG_CM_HDR \ 199#define NIG_REG_CM_HDR \
169 0x500840UL 200 0x500840UL
201#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR \
202 0x50196cUL
203#define NIG_REG_LLH_CLS_TYPE_DUALMODE \
204 0x501964UL
170#define NCSI_REG_CONFIG \ 205#define NCSI_REG_CONFIG \
171 0x040200UL 206 0x040200UL
172#define PBF_REG_INIT \ 207#define PBF_REG_INIT \
@@ -219,6 +254,10 @@
219 0x230000UL 254 0x230000UL
220#define PRS_REG_SOFT_RST \ 255#define PRS_REG_SOFT_RST \
221 0x1f0000UL 256 0x1f0000UL
257#define PRS_REG_MSG_INFO \
258 0x1f0a1cUL
259#define PRS_REG_ROCE_DEST_QP_MAX_PF \
260 0x1f0430UL
222#define PSDM_REG_ENABLE_IN1 \ 261#define PSDM_REG_ENABLE_IN1 \
223 0xfa0004UL 262 0xfa0004UL
224#define PSEM_REG_ENABLE_IN \ 263#define PSEM_REG_ENABLE_IN \
@@ -227,6 +266,8 @@
227 0x280020UL 266 0x280020UL
228#define PSWRQ2_REG_CDUT_P_SIZE \ 267#define PSWRQ2_REG_CDUT_P_SIZE \
229 0x24000cUL 268 0x24000cUL
269#define PSWRQ2_REG_ILT_MEMORY \
270 0x260000UL
230#define PSWHST_REG_DISCARD_INTERNAL_WRITES \ 271#define PSWHST_REG_DISCARD_INTERNAL_WRITES \
231 0x2a0040UL 272 0x2a0040UL
232#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \ 273#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
@@ -460,7 +501,7 @@
460#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE (0x1 << 2) 501#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE (0x1 << 2)
461#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2 502#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2
462 503
463#define NIG_REG_VXLAN_PORT 0x50105cUL 504#define NIG_REG_VXLAN_CTRL 0x50105cUL
464#define PBF_REG_VXLAN_PORT 0xd80518UL 505#define PBF_REG_VXLAN_PORT 0xd80518UL
465#define PBF_REG_NGE_PORT 0xd8051cUL 506#define PBF_REG_NGE_PORT 0xd8051cUL
466#define PRS_REG_NGE_PORT 0x1f086cUL 507#define PRS_REG_NGE_PORT 0x1f086cUL
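
The new CDU segment registers pack three fields into one 32-bit word, each described by a mask/shift pair. The driver extracts them through its GET_FIELD() helper; a standalone decode of an invented register value shows the same arithmetic:

```c
#include <stdio.h>
#include <stdint.h>

#define T0_NUM_TIDS_IN_BLOCK		(0xfffu << 0)
#define T0_NUM_TIDS_IN_BLOCK_SHIFT	0
#define T0_TID_BLOCK_WASTE		(0xffu << 16)
#define T0_TID_BLOCK_WASTE_SHIFT	16
#define T0_TID_SIZE			(0xffu << 24)
#define T0_TID_SIZE_SHIFT		24

int main(void)
{
	uint32_t reg = (0x20u << 24) | (0x04u << 16) | 0x100u; /* sample */

	printf("tids/block=0x%x waste=0x%x tid_size=0x%x\n",
	       (unsigned)((reg & T0_NUM_TIDS_IN_BLOCK) >> T0_NUM_TIDS_IN_BLOCK_SHIFT),
	       (unsigned)((reg & T0_TID_BLOCK_WASTE) >> T0_TID_BLOCK_WASTE_SHIFT),
	       (unsigned)((reg & T0_TID_SIZE) >> T0_TID_SIZE_SHIFT));
	return 0;
}
```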
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index ea4e9ce53e0a..a548504c3420 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -63,6 +63,32 @@ union ramrod_data {
63 struct vport_update_ramrod_data vport_update; 63 struct vport_update_ramrod_data vport_update;
64 struct vport_filter_update_ramrod_data vport_filter_update; 64 struct vport_filter_update_ramrod_data vport_filter_update;
65 65
66 struct rdma_init_func_ramrod_data rdma_init_func;
67 struct rdma_close_func_ramrod_data rdma_close_func;
68 struct rdma_register_tid_ramrod_data rdma_register_tid;
69 struct rdma_deregister_tid_ramrod_data rdma_deregister_tid;
70 struct roce_create_qp_resp_ramrod_data roce_create_qp_resp;
71 struct roce_create_qp_req_ramrod_data roce_create_qp_req;
72 struct roce_modify_qp_resp_ramrod_data roce_modify_qp_resp;
73 struct roce_modify_qp_req_ramrod_data roce_modify_qp_req;
74 struct roce_query_qp_resp_ramrod_data roce_query_qp_resp;
75 struct roce_query_qp_req_ramrod_data roce_query_qp_req;
76 struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
77 struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
78 struct rdma_create_cq_ramrod_data rdma_create_cq;
79 struct rdma_resize_cq_ramrod_data rdma_resize_cq;
80 struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
81 struct rdma_srq_create_ramrod_data rdma_create_srq;
82 struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
83 struct rdma_srq_modify_ramrod_data rdma_modify_srq;
84
85 struct iscsi_slow_path_hdr iscsi_empty;
86 struct iscsi_init_ramrod_params iscsi_init;
87 struct iscsi_spe_func_dstry iscsi_destroy;
88 struct iscsi_spe_conn_offload iscsi_conn_offload;
89 struct iscsi_conn_update_ramrod_params iscsi_conn_update;
90 struct iscsi_spe_conn_termination iscsi_conn_terminate;
91
66 struct vf_start_ramrod_data vf_start; 92 struct vf_start_ramrod_data vf_start;
67 struct vf_stop_ramrod_data vf_stop; 93 struct vf_stop_ramrod_data vf_stop;
68}; 94};
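
Because all ramrods live in one union, every slow-path queue element now reserves space for the largest RDMA/RoCE or iSCSI ramrod as well; the union's size is simply that of its biggest member. A toy shape making the sizing rule explicit (member sizes are stand-ins):

```c
/* Illustration only: the union is as large as its largest member, so
 * growing one ramrod grows every SPQ element.
 */
union example_ramrod_data {
	struct { char pad[64]; } vport_start;		/* stand-in size */
	struct { char pad[128]; } roce_create_qp;	/* largest wins */
};

_Static_assert(sizeof(union example_ramrod_data) == 128,
	       "union size follows the largest member");
```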
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 67f6ce3c84c8..a52f3fc051f5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -308,6 +308,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
308 struct qed_spq_entry *p_ent = NULL; 308 struct qed_spq_entry *p_ent = NULL;
309 struct qed_sp_init_data init_data; 309 struct qed_sp_init_data init_data;
310 int rc = -EINVAL; 310 int rc = -EINVAL;
311 u8 page_cnt;
311 312
312 /* update initial eq producer */ 313 /* update initial eq producer */
313 qed_eq_prod_update(p_hwfn, 314 qed_eq_prod_update(p_hwfn,
@@ -332,7 +333,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
332 p_ramrod->path_id = QED_PATH_ID(p_hwfn); 333 p_ramrod->path_id = QED_PATH_ID(p_hwfn);
333 p_ramrod->dont_log_ramrods = 0; 334 p_ramrod->dont_log_ramrods = 0;
334 p_ramrod->log_type_mask = cpu_to_le16(0xf); 335 p_ramrod->log_type_mask = cpu_to_le16(0xf);
335 p_ramrod->mf_mode = mode; 336
336 switch (mode) { 337 switch (mode) {
337 case QED_MF_DEFAULT: 338 case QED_MF_DEFAULT:
338 case QED_MF_NPAR: 339 case QED_MF_NPAR:
@@ -350,24 +351,41 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
350 /* Place EQ address in RAMROD */ 351 /* Place EQ address in RAMROD */
351 DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, 352 DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
352 p_hwfn->p_eq->chain.pbl.p_phys_table); 353 p_hwfn->p_eq->chain.pbl.p_phys_table);
353 p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt; 354 page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
354 355 p_ramrod->event_ring_num_pages = page_cnt;
355 DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, 356 DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
356 p_hwfn->p_consq->chain.pbl.p_phys_table); 357 p_hwfn->p_consq->chain.pbl.p_phys_table);
357 358
358 qed_tunn_set_pf_start_params(p_hwfn, p_tunn, 359 qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
359 &p_ramrod->tunnel_config); 360 &p_ramrod->tunnel_config);
360 p_hwfn->hw_info.personality = PERSONALITY_ETH;
361 361
362 if (IS_MF_SI(p_hwfn)) 362 if (IS_MF_SI(p_hwfn))
363 p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch; 363 p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
364 364
365 switch (p_hwfn->hw_info.personality) {
366 case QED_PCI_ETH:
367 p_ramrod->personality = PERSONALITY_ETH;
368 break;
369 case QED_PCI_ISCSI:
370 p_ramrod->personality = PERSONALITY_ISCSI;
371 break;
372 case QED_PCI_ETH_ROCE:
373 p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
374 break;
375 default:
 376 DP_NOTICE(p_hwfn, "Unknown personality %d\n",
377 p_hwfn->hw_info.personality);
378 p_ramrod->personality = PERSONALITY_ETH;
379 }
380
365 if (p_hwfn->cdev->p_iov_info) { 381 if (p_hwfn->cdev->p_iov_info) {
366 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; 382 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
367 383
368 p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf; 384 p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
369 p_ramrod->num_vfs = (u8) p_iov->total_vfs; 385 p_ramrod->num_vfs = (u8) p_iov->total_vfs;
370 } 386 }
387 p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
388 p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
371 389
372 DP_VERBOSE(p_hwfn, QED_MSG_SPQ, 390 DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
373 "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n", 391 "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index b122f6013b6c..97ffeae262bb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -339,6 +339,7 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
339 if (qed_chain_alloc(p_hwfn->cdev, 339 if (qed_chain_alloc(p_hwfn->cdev,
340 QED_CHAIN_USE_TO_PRODUCE, 340 QED_CHAIN_USE_TO_PRODUCE,
341 QED_CHAIN_MODE_PBL, 341 QED_CHAIN_MODE_PBL,
342 QED_CHAIN_CNT_TYPE_U16,
342 num_elem, 343 num_elem,
343 sizeof(union event_ring_element), 344 sizeof(union event_ring_element),
344 &p_eq->chain)) { 345 &p_eq->chain)) {
@@ -412,10 +413,10 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
412***************************************************************************/ 413***************************************************************************/
413void qed_spq_setup(struct qed_hwfn *p_hwfn) 414void qed_spq_setup(struct qed_hwfn *p_hwfn)
414{ 415{
415 struct qed_spq *p_spq = p_hwfn->p_spq; 416 struct qed_spq *p_spq = p_hwfn->p_spq;
416 struct qed_spq_entry *p_virt = NULL; 417 struct qed_spq_entry *p_virt = NULL;
417 dma_addr_t p_phys = 0; 418 dma_addr_t p_phys = 0;
418 unsigned int i = 0; 419 u32 i, capacity;
419 420
420 INIT_LIST_HEAD(&p_spq->pending); 421 INIT_LIST_HEAD(&p_spq->pending);
421 INIT_LIST_HEAD(&p_spq->completion_pending); 422 INIT_LIST_HEAD(&p_spq->completion_pending);
@@ -427,7 +428,8 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
427 p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod); 428 p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
428 p_virt = p_spq->p_virt; 429 p_virt = p_spq->p_virt;
429 430
430 for (i = 0; i < p_spq->chain.capacity; i++) { 431 capacity = qed_chain_get_capacity(&p_spq->chain);
432 for (i = 0; i < capacity; i++) {
431 DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys); 433 DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
432 434
433 list_add_tail(&p_virt->list, &p_spq->free_pool); 435 list_add_tail(&p_virt->list, &p_spq->free_pool);
@@ -455,9 +457,10 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
455 457
456int qed_spq_alloc(struct qed_hwfn *p_hwfn) 458int qed_spq_alloc(struct qed_hwfn *p_hwfn)
457{ 459{
458 struct qed_spq *p_spq = NULL; 460 struct qed_spq_entry *p_virt = NULL;
459 dma_addr_t p_phys = 0; 461 struct qed_spq *p_spq = NULL;
460 struct qed_spq_entry *p_virt = NULL; 462 dma_addr_t p_phys = 0;
463 u32 capacity;
461 464
462 /* SPQ struct */ 465 /* SPQ struct */
463 p_spq = 466 p_spq =
@@ -471,6 +474,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
471 if (qed_chain_alloc(p_hwfn->cdev, 474 if (qed_chain_alloc(p_hwfn->cdev,
472 QED_CHAIN_USE_TO_PRODUCE, 475 QED_CHAIN_USE_TO_PRODUCE,
473 QED_CHAIN_MODE_SINGLE, 476 QED_CHAIN_MODE_SINGLE,
477 QED_CHAIN_CNT_TYPE_U16,
474 0, /* N/A when the mode is SINGLE */ 478 0, /* N/A when the mode is SINGLE */
475 sizeof(struct slow_path_element), 479 sizeof(struct slow_path_element),
476 &p_spq->chain)) { 480 &p_spq->chain)) {
@@ -479,11 +483,11 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
479 } 483 }
480 484
481 /* allocate and fill the SPQ elements (incl. ramrod data list) */ 485 /* allocate and fill the SPQ elements (incl. ramrod data list) */
486 capacity = qed_chain_get_capacity(&p_spq->chain);
482 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 487 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
483 p_spq->chain.capacity * 488 capacity *
484 sizeof(struct qed_spq_entry), 489 sizeof(struct qed_spq_entry),
485 &p_phys, 490 &p_phys, GFP_KERNEL);
486 GFP_KERNEL);
487 491
488 if (!p_virt) 492 if (!p_virt)
489 goto spq_allocate_fail; 493 goto spq_allocate_fail;
@@ -503,16 +507,18 @@ spq_allocate_fail:
503void qed_spq_free(struct qed_hwfn *p_hwfn) 507void qed_spq_free(struct qed_hwfn *p_hwfn)
504{ 508{
505 struct qed_spq *p_spq = p_hwfn->p_spq; 509 struct qed_spq *p_spq = p_hwfn->p_spq;
510 u32 capacity;
506 511
507 if (!p_spq) 512 if (!p_spq)
508 return; 513 return;
509 514
510 if (p_spq->p_virt) 515 if (p_spq->p_virt) {
516 capacity = qed_chain_get_capacity(&p_spq->chain);
511 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 517 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
512 p_spq->chain.capacity * 518 capacity *
513 sizeof(struct qed_spq_entry), 519 sizeof(struct qed_spq_entry),
514 p_spq->p_virt, 520 p_spq->p_virt, p_spq->p_phys);
515 p_spq->p_phys); 521 }
516 522
517 qed_chain_free(p_hwfn->cdev, &p_spq->chain); 523 qed_chain_free(p_hwfn->cdev, &p_spq->chain);
518 ; 524 ;
@@ -882,9 +888,9 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
882 if (qed_chain_alloc(p_hwfn->cdev, 888 if (qed_chain_alloc(p_hwfn->cdev,
883 QED_CHAIN_USE_TO_PRODUCE, 889 QED_CHAIN_USE_TO_PRODUCE,
884 QED_CHAIN_MODE_PBL, 890 QED_CHAIN_MODE_PBL,
891 QED_CHAIN_CNT_TYPE_U16,
885 QED_CHAIN_PAGE_SIZE / 0x80, 892 QED_CHAIN_PAGE_SIZE / 0x80,
886 0x80, 893 0x80, &p_consq->chain)) {
887 &p_consq->chain)) {
888 DP_NOTICE(p_hwfn, "Failed to allocate consq chain"); 894 DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
889 goto consq_allocate_fail; 895 goto consq_allocate_fail;
890 } 896 }
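
Every qed_chain_alloc() call in this file gains a QED_CHAIN_CNT_TYPE_U16 argument, making the width of the producer/consumer indices an explicit property of the chain. Why the width matters is easiest to see in the wraparound accounting; a standalone sketch (not driver code):

```c
#include <stdio.h>
#include <stdint.h>

/* With u16 indices, prod/cons differences must be computed modulo 2^16;
 * the free-element count then falls out of plain unsigned wraparound.
 */
static uint16_t chain_free_elems_u16(uint16_t prod, uint16_t cons,
				     uint16_t capacity)
{
	return (uint16_t)(capacity - (uint16_t)(prod - cons));
}

int main(void)
{
	/* prod has wrapped past cons: 5 elements in flight out of 1024 */
	printf("free=%u\n", chain_free_elems_u16(3, 65534, 1024));
	return 0;
}
```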
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index c325ee857ecd..4d161c751c12 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -21,18 +21,18 @@
21#include "qed_vf.h" 21#include "qed_vf.h"
22 22
23/* IOV ramrods */ 23/* IOV ramrods */
24static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, 24static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
25 u32 concrete_vfid, u16 opaque_vfid)
26{ 25{
27 struct vf_start_ramrod_data *p_ramrod = NULL; 26 struct vf_start_ramrod_data *p_ramrod = NULL;
28 struct qed_spq_entry *p_ent = NULL; 27 struct qed_spq_entry *p_ent = NULL;
29 struct qed_sp_init_data init_data; 28 struct qed_sp_init_data init_data;
30 int rc = -EINVAL; 29 int rc = -EINVAL;
30 u8 fp_minor;
31 31
32 /* Get SPQ entry */ 32 /* Get SPQ entry */
33 memset(&init_data, 0, sizeof(init_data)); 33 memset(&init_data, 0, sizeof(init_data));
34 init_data.cid = qed_spq_get_cid(p_hwfn); 34 init_data.cid = qed_spq_get_cid(p_hwfn);
35 init_data.opaque_fid = opaque_vfid; 35 init_data.opaque_fid = p_vf->opaque_fid;
36 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 36 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
37 37
38 rc = qed_sp_init_request(p_hwfn, &p_ent, 38 rc = qed_sp_init_request(p_hwfn, &p_ent,
@@ -43,10 +43,39 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
43 43
44 p_ramrod = &p_ent->ramrod.vf_start; 44 p_ramrod = &p_ent->ramrod.vf_start;
45 45
46 p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); 46 p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
47 p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid); 47 p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);
48 48
49 p_ramrod->personality = PERSONALITY_ETH; 49 switch (p_hwfn->hw_info.personality) {
50 case QED_PCI_ETH:
51 p_ramrod->personality = PERSONALITY_ETH;
52 break;
53 case QED_PCI_ETH_ROCE:
54 p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
55 break;
56 default:
57 DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
58 p_hwfn->hw_info.personality);
59 return -EINVAL;
60 }
61
62 fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
63 if (fp_minor > ETH_HSI_VER_MINOR) {
64 DP_VERBOSE(p_hwfn,
65 QED_MSG_IOV,
66 "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
67 p_vf->abs_vf_id,
68 ETH_HSI_VER_MAJOR,
69 fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
70 fp_minor = ETH_HSI_VER_MINOR;
71 }
72
73 p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
74 p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
75
76 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
77 "VF[%d] - Starting using HSI %02x.%02x\n",
78 p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
50 79
51 return qed_spq_post(p_hwfn, p_ent, NULL); 80 return qed_spq_post(p_hwfn, p_ent, NULL);
52} 81}
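
The start path now negotiates the fastpath HSI minor: the PF clamps whatever the VF requested down to its own ETH_HSI_VER_MINOR before writing it into the ramrod. A standalone sketch with a stand-in constant:

```c
#include <stdio.h>

#define PF_HSI_MINOR 10	/* stand-in for ETH_HSI_VER_MINOR */

/* The PF never configures a minor newer than its own. */
static int negotiate_fp_minor(int vf_requested)
{
	return vf_requested > PF_HSI_MINOR ? PF_HSI_MINOR : vf_requested;
}

int main(void)
{
	printf("VF asked 12 -> gets %d\n", negotiate_fp_minor(12));
	printf("VF asked  8 -> gets %d\n", negotiate_fp_minor(8));
	return 0;
}
```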
@@ -117,6 +146,45 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
117 return vf; 146 return vf;
118} 147}
119 148
149static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
150 struct qed_vf_info *p_vf, u16 rx_qid)
151{
152 if (rx_qid >= p_vf->num_rxqs)
153 DP_VERBOSE(p_hwfn,
154 QED_MSG_IOV,
155 "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
156 p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
157 return rx_qid < p_vf->num_rxqs;
158}
159
160static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
161 struct qed_vf_info *p_vf, u16 tx_qid)
162{
163 if (tx_qid >= p_vf->num_txqs)
164 DP_VERBOSE(p_hwfn,
165 QED_MSG_IOV,
166 "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
167 p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
168 return tx_qid < p_vf->num_txqs;
169}
170
171static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
172 struct qed_vf_info *p_vf, u16 sb_idx)
173{
174 int i;
175
176 for (i = 0; i < p_vf->num_sbs; i++)
177 if (p_vf->igu_sbs[i] == sb_idx)
178 return true;
179
180 DP_VERBOSE(p_hwfn,
181 QED_MSG_IOV,
182 "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
183 p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
184
185 return false;
186}
187
120int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn, 188int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
121 int vfid, struct qed_ptt *p_ptt) 189 int vfid, struct qed_ptt *p_ptt)
122{ 190{
@@ -293,6 +361,9 @@ static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
293 vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) | 361 vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
294 (vf->abs_vf_id << 8); 362 (vf->abs_vf_id << 8);
295 vf->vport_id = idx + 1; 363 vf->vport_id = idx + 1;
364
365 vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
366 vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
296 } 367 }
297} 368}
298 369
@@ -598,17 +669,6 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
598 /* unpretend */ 669 /* unpretend */
599 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 670 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
600 671
601 if (vf->state != VF_STOPPED) {
602 DP_NOTICE(p_hwfn, "VF[%02x] is already started\n",
603 vf->abs_vf_id);
604 return -EINVAL;
605 }
606
607 /* Start VF */
608 rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
609 if (rc)
610 DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
611
612 vf->state = VF_FREE; 672 vf->state = VF_FREE;
613 673
614 return rc; 674 return rc;
@@ -852,7 +912,6 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
852 struct qed_mcp_link_params params; 912 struct qed_mcp_link_params params;
853 struct qed_mcp_link_state link; 913 struct qed_mcp_link_state link;
854 struct qed_vf_info *vf = NULL; 914 struct qed_vf_info *vf = NULL;
855 int rc = 0;
856 915
857 vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 916 vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
858 if (!vf) { 917 if (!vf) {
@@ -874,18 +933,8 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
874 memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); 933 memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
875 qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps); 934 qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
876 935
877 if (vf->state != VF_STOPPED) { 936 /* Forget the VF's acquisition message */
878 /* Stopping the VF */ 937 memset(&vf->acquire, 0, sizeof(vf->acquire));
879 rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);
880
881 if (rc != 0) {
882 DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
883 rc);
884 return rc;
885 }
886
887 vf->state = VF_STOPPED;
888 }
889 938
 890 /* disabling interrupts and resetting permission table was done during 939
891 * vf-close, however, we could get here without going through vf_close 940 * vf-close, however, we could get here without going through vf_close
@@ -1116,8 +1165,6 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
1116 1165
1117 p_vf->vf_bulletin = 0; 1166 p_vf->vf_bulletin = 0;
1118 p_vf->vport_instance = 0; 1167 p_vf->vport_instance = 0;
1119 p_vf->num_mac_filters = 0;
1120 p_vf->num_vlan_filters = 0;
1121 p_vf->configured_features = 0; 1168 p_vf->configured_features = 0;
1122 1169
1123 /* If VF previously requested less resources, go back to default */ 1170 /* If VF previously requested less resources, go back to default */
@@ -1130,9 +1177,95 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
1130 p_vf->vf_queues[i].rxq_active = 0; 1177 p_vf->vf_queues[i].rxq_active = 0;
1131 1178
1132 memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); 1179 memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
1180 memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
1133 qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id); 1181 qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
1134} 1182}
1135 1183
1184static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
1185 struct qed_ptt *p_ptt,
1186 struct qed_vf_info *p_vf,
1187 struct vf_pf_resc_request *p_req,
1188 struct pf_vf_resc *p_resp)
1189{
1190 int i;
1191
1192 /* Queue related information */
1193 p_resp->num_rxqs = p_vf->num_rxqs;
1194 p_resp->num_txqs = p_vf->num_txqs;
1195 p_resp->num_sbs = p_vf->num_sbs;
1196
1197 for (i = 0; i < p_resp->num_sbs; i++) {
1198 p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
1199 p_resp->hw_sbs[i].sb_qid = 0;
1200 }
1201
1202 /* These fields are filled for backward compatibility.
 1203 * Unused by modern VFs.
1204 */
1205 for (i = 0; i < p_resp->num_rxqs; i++) {
1206 qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
1207 (u16 *)&p_resp->hw_qid[i]);
1208 p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
1209 }
1210
1211 /* Filter related information */
1212 p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
1213 p_req->num_mac_filters);
1214 p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
1215 p_req->num_vlan_filters);
1216
1217 /* This isn't really needed/enforced, but some legacy VFs might depend
1218 * on the correct filling of this field.
1219 */
1220 p_resp->num_mc_filters = QED_MAX_MC_ADDRS;
1221
1222 /* Validate sufficient resources for VF */
1223 if (p_resp->num_rxqs < p_req->num_rxqs ||
1224 p_resp->num_txqs < p_req->num_txqs ||
1225 p_resp->num_sbs < p_req->num_sbs ||
1226 p_resp->num_mac_filters < p_req->num_mac_filters ||
1227 p_resp->num_vlan_filters < p_req->num_vlan_filters ||
1228 p_resp->num_mc_filters < p_req->num_mc_filters) {
1229 DP_VERBOSE(p_hwfn,
1230 QED_MSG_IOV,
1231 "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
1232 p_vf->abs_vf_id,
1233 p_req->num_rxqs,
1234 p_resp->num_rxqs,
 1235 p_req->num_txqs,
1236 p_resp->num_txqs,
1237 p_req->num_sbs,
1238 p_resp->num_sbs,
1239 p_req->num_mac_filters,
1240 p_resp->num_mac_filters,
1241 p_req->num_vlan_filters,
1242 p_resp->num_vlan_filters,
1243 p_req->num_mc_filters, p_resp->num_mc_filters);
1244 return PFVF_STATUS_NO_RESOURCE;
1245 }
1246
1247 return PFVF_STATUS_SUCCESS;
1248}
1249
1250static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
1251 struct pfvf_stats_info *p_stats)
1252{
1253 p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
1254 offsetof(struct mstorm_vf_zone,
1255 non_trigger.eth_queue_stat);
1256 p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
1257 p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
1258 offsetof(struct ustorm_vf_zone,
1259 non_trigger.eth_queue_stat);
1260 p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
1261 p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
1262 offsetof(struct pstorm_vf_zone,
1263 non_trigger.eth_queue_stat);
1264 p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
1265 p_stats->tstats.address = 0;
1266 p_stats->tstats.len = 0;
1267}
1268
1136static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, 1269static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
1137 struct qed_ptt *p_ptt, 1270 struct qed_ptt *p_ptt,
1138 struct qed_vf_info *vf) 1271 struct qed_vf_info *vf)
@@ -1141,25 +1274,27 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
1141 struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp; 1274 struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
1142 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; 1275 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
1143 struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire; 1276 struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
1144 u8 i, vfpf_status = PFVF_STATUS_SUCCESS; 1277 u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1145 struct pf_vf_resc *resc = &resp->resc; 1278 struct pf_vf_resc *resc = &resp->resc;
1279 int rc;
1280
1281 memset(resp, 0, sizeof(*resp));
1146 1282
1147 /* Validate FW compatibility */ 1283 /* Validate FW compatibility */
1148 if (req->vfdev_info.fw_major != FW_MAJOR_VERSION || 1284 if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
1149 req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
1150 req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
1151 req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
1152 DP_INFO(p_hwfn, 1285 DP_INFO(p_hwfn,
1153 "VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n", 1286 "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
1154 vf->abs_vf_id, 1287 vf->abs_vf_id,
1155 req->vfdev_info.fw_major, 1288 req->vfdev_info.eth_fp_hsi_major,
1156 req->vfdev_info.fw_minor, 1289 req->vfdev_info.eth_fp_hsi_minor,
1157 req->vfdev_info.fw_revision, 1290 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
1158 req->vfdev_info.fw_engineering, 1291
1159 FW_MAJOR_VERSION, 1292 /* Write the PF version so that VF would know which version
1160 FW_MINOR_VERSION, 1293 * is supported.
1161 FW_REVISION_VERSION, FW_ENGINEERING_VERSION); 1294 */
1162 vfpf_status = PFVF_STATUS_NOT_SUPPORTED; 1295 pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
1296 pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
1297
1163 goto out; 1298 goto out;
1164 } 1299 }
1165 1300
@@ -1169,16 +1304,13 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
1169 DP_INFO(p_hwfn, 1304 DP_INFO(p_hwfn,
1170 "VF[%d] is running an old driver that doesn't support 100g\n", 1305 "VF[%d] is running an old driver that doesn't support 100g\n",
1171 vf->abs_vf_id); 1306 vf->abs_vf_id);
1172 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1173 goto out; 1307 goto out;
1174 } 1308 }
1175 1309
1176 memset(resp, 0, sizeof(*resp)); 1310 /* Store the acquire message */
1311 memcpy(&vf->acquire, req, sizeof(vf->acquire));
1177 1312
1178 /* Fill in vf info stuff */
1179 vf->opaque_fid = req->vfdev_info.opaque_fid; 1313 vf->opaque_fid = req->vfdev_info.opaque_fid;
1180 vf->num_mac_filters = 1;
1181 vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
1182 1314
1183 vf->vf_bulletin = req->bulletin_addr; 1315 vf->vf_bulletin = req->bulletin_addr;
1184 vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ? 1316 vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
@@ -1194,26 +1326,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
1194 if (p_hwfn->cdev->num_hwfns > 1) 1326 if (p_hwfn->cdev->num_hwfns > 1)
1195 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; 1327 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
1196 1328
1197 pfdev_info->stats_info.mstats.address = 1329 qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
1198 PXP_VF_BAR0_START_MSDM_ZONE_B +
1199 offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
1200 pfdev_info->stats_info.mstats.len =
1201 sizeof(struct eth_mstorm_per_queue_stat);
1202
1203 pfdev_info->stats_info.ustats.address =
1204 PXP_VF_BAR0_START_USDM_ZONE_B +
1205 offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
1206 pfdev_info->stats_info.ustats.len =
1207 sizeof(struct eth_ustorm_per_queue_stat);
1208
1209 pfdev_info->stats_info.pstats.address =
1210 PXP_VF_BAR0_START_PSDM_ZONE_B +
1211 offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
1212 pfdev_info->stats_info.pstats.len =
1213 sizeof(struct eth_pstorm_per_queue_stat);
1214
1215 pfdev_info->stats_info.tstats.address = 0;
1216 pfdev_info->stats_info.tstats.len = 0;
1217 1330
1218 memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); 1331 memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
1219 1332
@@ -1221,36 +1334,31 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
1221 pfdev_info->fw_minor = FW_MINOR_VERSION; 1334 pfdev_info->fw_minor = FW_MINOR_VERSION;
1222 pfdev_info->fw_rev = FW_REVISION_VERSION; 1335 pfdev_info->fw_rev = FW_REVISION_VERSION;
1223 pfdev_info->fw_eng = FW_ENGINEERING_VERSION; 1336 pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1337 pfdev_info->minor_fp_hsi = min_t(u8,
1338 ETH_HSI_VER_MINOR,
1339 req->vfdev_info.eth_fp_hsi_minor);
1224 pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX; 1340 pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
1225 qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL); 1341 qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
1226 1342
1227 pfdev_info->dev_type = p_hwfn->cdev->type; 1343 pfdev_info->dev_type = p_hwfn->cdev->type;
1228 pfdev_info->chip_rev = p_hwfn->cdev->chip_rev; 1344 pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
1229 1345
1230 resc->num_rxqs = vf->num_rxqs; 1346 /* Fill resources available to VF; Make sure there are enough to
1231 resc->num_txqs = vf->num_txqs; 1347 * satisfy the VF's request.
1232 resc->num_sbs = vf->num_sbs; 1348 */
1233 for (i = 0; i < resc->num_sbs; i++) { 1349 vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
1234 resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i]; 1350 &req->resc_request, resc);
1235 resc->hw_sbs[i].sb_qid = 0; 1351 if (vfpf_status != PFVF_STATUS_SUCCESS)
1236 } 1352 goto out;
1237 1353
1238 for (i = 0; i < resc->num_rxqs; i++) { 1354 /* Start the VF in FW */
1239 qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid, 1355 rc = qed_sp_vf_start(p_hwfn, vf);
1240 (u16 *)&resc->hw_qid[i]); 1356 if (rc) {
1241 resc->cid[i] = vf->vf_queues[i].fw_cid; 1357 DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
1358 vfpf_status = PFVF_STATUS_FAILURE;
1359 goto out;
1242 } 1360 }
1243 1361
1244 resc->num_mac_filters = min_t(u8, vf->num_mac_filters,
1245 req->resc_request.num_mac_filters);
1246 resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters,
1247 req->resc_request.num_vlan_filters);
1248
1249 /* This isn't really required as VF isn't limited, but some VFs might
1250 * actually test this value, so need to provide it.
1251 */
1252 resc->num_mc_filters = req->resc_request.num_mc_filters;
1253
1254 /* Fill agreed size of bulletin board in response */ 1362 /* Fill agreed size of bulletin board in response */
1255 resp->bulletin_size = vf->bulletin.size; 1363 resp->bulletin_size = vf->bulletin.size;
1256 qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt); 1364 qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
@@ -1585,10 +1693,6 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
1585 sizeof(struct pfvf_def_resp_tlv), status); 1693 sizeof(struct pfvf_def_resp_tlv), status);
1586} 1694}
1587 1695
1588#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
1589#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
1590 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
1591
1592static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, 1696static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
1593 struct qed_ptt *p_ptt, 1697 struct qed_ptt *p_ptt,
1594 struct qed_vf_info *vf, u8 status) 1698 struct qed_vf_info *vf, u8 status)
@@ -1606,16 +1710,11 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
1606 1710
1607 /* Update the TLV with the response */ 1711 /* Update the TLV with the response */
1608 if (status == PFVF_STATUS_SUCCESS) { 1712 if (status == PFVF_STATUS_SUCCESS) {
1609 u16 hw_qid = 0;
1610
1611 req = &mbx->req_virt->start_rxq; 1713 req = &mbx->req_virt->start_rxq;
1612 qed_fw_l2_queue(p_hwfn, vf->vf_queues[req->rx_qid].fw_rx_qid, 1714 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
1613 &hw_qid); 1715 offsetof(struct mstorm_vf_zone,
1614 1716 non_trigger.eth_rx_queue_producers) +
1615 p_tlv->offset = MSTORM_QZONE_START(p_hwfn->cdev) + 1717 sizeof(struct eth_rx_prod_data) * req->rx_qid;
1616 hw_qid * MSTORM_QZONE_SIZE +
1617 offsetof(struct mstorm_eth_queue_zone,
1618 rx_producers);
1619 } 1718 }
1620 1719
1621 qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status); 1720 qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
@@ -1627,13 +1726,19 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
1627{ 1726{
1628 struct qed_queue_start_common_params params; 1727 struct qed_queue_start_common_params params;
1629 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 1728 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1630 u8 status = PFVF_STATUS_SUCCESS; 1729 u8 status = PFVF_STATUS_NO_RESOURCE;
1631 struct vfpf_start_rxq_tlv *req; 1730 struct vfpf_start_rxq_tlv *req;
1632 int rc; 1731 int rc;
1633 1732
1634 memset(&params, 0, sizeof(params)); 1733 memset(&params, 0, sizeof(params));
1635 req = &mbx->req_virt->start_rxq; 1734 req = &mbx->req_virt->start_rxq;
1735
1736 if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
1737 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
1738 goto out;
1739
1636 params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid; 1740 params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
1741 params.vf_qid = req->rx_qid;
1637 params.vport_id = vf->vport_id; 1742 params.vport_id = vf->vport_id;
1638 params.sb = req->hw_sb; 1743 params.sb = req->hw_sb;
1639 params.sb_idx = req->sb_index; 1744 params.sb_idx = req->sb_index;
@@ -1649,22 +1754,48 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
1649 if (rc) { 1754 if (rc) {
1650 status = PFVF_STATUS_FAILURE; 1755 status = PFVF_STATUS_FAILURE;
1651 } else { 1756 } else {
1757 status = PFVF_STATUS_SUCCESS;
1652 vf->vf_queues[req->rx_qid].rxq_active = true; 1758 vf->vf_queues[req->rx_qid].rxq_active = true;
1653 vf->num_active_rxqs++; 1759 vf->num_active_rxqs++;
1654 } 1760 }
1655 1761
1762out:
1656 qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status); 1763 qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
1657} 1764}
1658 1765
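
The new qed_iov_validate_rxq()/qed_iov_validate_sb() guards check queue and status-block ids coming from the (untrusted) VF before they index any PF-side array; a bad id now yields PFVF_STATUS_NO_RESOURCE instead of an out-of-range access. A simplified sketch, assuming a plain upper-bound check (the real SB check walks the VF's igu_sbs list):

```c
#include <stdio.h>
#include <stdint.h>

struct vf { uint8_t num_rxqs, num_sbs; };

static int validate_rxq(const struct vf *vf, uint16_t rx_qid)
{
	return rx_qid < vf->num_rxqs;	/* index stays inside vf_queues[] */
}

static int validate_sb(const struct vf *vf, uint16_t sb_idx)
{
	return sb_idx < vf->num_sbs;	/* simplified; real code matches igu_sbs */
}

int main(void)
{
	struct vf vf = { .num_rxqs = 4, .num_sbs = 4 };

	/* An out-of-range id makes the PF reply PFVF_STATUS_NO_RESOURCE */
	printf("qid 7 valid? %d; sb 2 valid? %d\n",
	       validate_rxq(&vf, 7), validate_sb(&vf, 2));
	return 0;
}
```
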
1766static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
1767 struct qed_ptt *p_ptt,
1768 struct qed_vf_info *p_vf, u8 status)
1769{
1770 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
1771 struct pfvf_start_queue_resp_tlv *p_tlv;
1772
1773 mbx->offset = (u8 *)mbx->reply_virt;
1774
1775 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
1776 sizeof(*p_tlv));
1777 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1778 sizeof(struct channel_list_end_tlv));
1779
1780 /* Update the TLV with the response */
1781 if (status == PFVF_STATUS_SUCCESS) {
1782 u16 qid = mbx->req_virt->start_txq.tx_qid;
1783
1784 p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid,
1785 DQ_DEMS_LEGACY);
1786 }
1787
1788 qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_tlv), status);
1789}
1790
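
The TXQ response now hands the VF a doorbell offset derived from the queue's fw_cid, and the VF side (see the qed_vf.c hunk further down) just adds it to its doorbell BAR. A sketch of that handshake; the linear cid * stride layout below is an assumption for illustration only, the real encoding lives in qed_db_addr()/DQ_DEMS_LEGACY:

```c
#include <stdio.h>
#include <stdint.h>

#define DB_STRIDE 0x80			/* assumed per-CID doorbell stride */

static uint32_t pf_db_offset(uint32_t fw_cid)	/* PF side, put in the TLV */
{
	return fw_cid * DB_STRIDE;
}

static uintptr_t vf_db_addr(uintptr_t db_bar, uint32_t offset)	/* VF side */
{
	return db_bar + offset;
}

int main(void)
{
	uint32_t off = pf_db_offset(5);

	printf("Txq doorbell at %#lx [offset %#x]\n",
	       (unsigned long)vf_db_addr(0xd0000000UL, off), off);
	return 0;
}
```
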
1659static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, 1791static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
1660 struct qed_ptt *p_ptt, 1792 struct qed_ptt *p_ptt,
1661 struct qed_vf_info *vf) 1793 struct qed_vf_info *vf)
1662{ 1794{
1663 u16 length = sizeof(struct pfvf_def_resp_tlv);
1664 struct qed_queue_start_common_params params; 1795 struct qed_queue_start_common_params params;
1665 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 1796 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1797 u8 status = PFVF_STATUS_NO_RESOURCE;
1666 union qed_qm_pq_params pq_params; 1798 union qed_qm_pq_params pq_params;
1667 u8 status = PFVF_STATUS_SUCCESS;
1668 struct vfpf_start_txq_tlv *req; 1799 struct vfpf_start_txq_tlv *req;
1669 int rc; 1800 int rc;
1670 1801
@@ -1675,6 +1806,11 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
1675 1806
1676 memset(&params, 0, sizeof(params)); 1807 memset(&params, 0, sizeof(params));
1677 req = &mbx->req_virt->start_txq; 1808 req = &mbx->req_virt->start_txq;
1809
1810 if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
1811 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
1812 goto out;
1813
1678 params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid; 1814 params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
1679 params.vport_id = vf->vport_id; 1815 params.vport_id = vf->vport_id;
1680 params.sb = req->hw_sb; 1816 params.sb = req->hw_sb;
@@ -1688,13 +1824,15 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
1688 req->pbl_addr, 1824 req->pbl_addr,
1689 req->pbl_size, &pq_params); 1825 req->pbl_size, &pq_params);
1690 1826
1691 if (rc) 1827 if (rc) {
1692 status = PFVF_STATUS_FAILURE; 1828 status = PFVF_STATUS_FAILURE;
1693 else 1829 } else {
1830 status = PFVF_STATUS_SUCCESS;
1694 vf->vf_queues[req->tx_qid].txq_active = true; 1831 vf->vf_queues[req->tx_qid].txq_active = true;
1832 }
1695 1833
1696 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ, 1834out:
1697 length, status); 1835 qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
1698} 1836}
1699 1837
1700static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, 1838static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
@@ -2119,6 +2257,16 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
2119 u16 length; 2257 u16 length;
2120 int rc; 2258 int rc;
2121 2259
2260 /* Validate VF can send such a request */
2261 if (!vf->vport_instance) {
2262 DP_VERBOSE(p_hwfn,
2263 QED_MSG_IOV,
2264 "No VPORT instance available for VF[%d], failing vport update\n",
2265 vf->abs_vf_id);
2266 status = PFVF_STATUS_FAILURE;
2267 goto out;
2268 }
2269
2122 memset(&params, 0, sizeof(params)); 2270 memset(&params, 0, sizeof(params));
2123 params.opaque_fid = vf->opaque_fid; 2271 params.opaque_fid = vf->opaque_fid;
2124 params.vport_id = vf->vport_id; 2272 params.vport_id = vf->vport_id;
@@ -2161,15 +2309,12 @@ out:
2161 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); 2309 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2162} 2310}
2163 2311
2164static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn, 2312static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
2165 struct qed_vf_info *p_vf, 2313 struct qed_vf_info *p_vf,
2166 struct qed_filter_ucast *p_params) 2314 struct qed_filter_ucast *p_params)
2167{ 2315{
2168 int i; 2316 int i;
2169 2317
2170 if (p_params->type == QED_FILTER_MAC)
2171 return 0;
2172
2173 /* First remove entries and then add new ones */ 2318 /* First remove entries and then add new ones */
2174 if (p_params->opcode == QED_FILTER_REMOVE) { 2319 if (p_params->opcode == QED_FILTER_REMOVE) {
2175 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) 2320 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
@@ -2222,6 +2367,80 @@ static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
2222 return 0; 2367 return 0;
2223} 2368}
2224 2369
2370static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
2371 struct qed_vf_info *p_vf,
2372 struct qed_filter_ucast *p_params)
2373{
2374 int i;
2375
2376 /* If we're in forced-mode, we don't allow any change */
2377 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
2378 return 0;
2379
2380 /* First remove entries and then add new ones */
2381 if (p_params->opcode == QED_FILTER_REMOVE) {
2382 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
2383 if (ether_addr_equal(p_vf->shadow_config.macs[i],
2384 p_params->mac)) {
2385 memset(p_vf->shadow_config.macs[i], 0,
2386 ETH_ALEN);
2387 break;
2388 }
2389 }
2390
2391 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
2392 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2393 "MAC isn't configured\n");
2394 return -EINVAL;
2395 }
2396 } else if (p_params->opcode == QED_FILTER_REPLACE ||
2397 p_params->opcode == QED_FILTER_FLUSH) {
2398 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
2399 memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN);
2400 }
2401
2402 /* List the new MAC address */
2403 if (p_params->opcode != QED_FILTER_ADD &&
2404 p_params->opcode != QED_FILTER_REPLACE)
2405 return 0;
2406
2407 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
2408 if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
2409 ether_addr_copy(p_vf->shadow_config.macs[i],
2410 p_params->mac);
2411 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2412 "Added MAC at entry %d in shadow\n", i);
2413 break;
2414 }
2415 }
2416
2417 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
2418 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
2419 return -EINVAL;
2420 }
2421
2422 return 0;
2423}
2424
2425static int
2426qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
2427 struct qed_vf_info *p_vf,
2428 struct qed_filter_ucast *p_params)
2429{
2430 int rc = 0;
2431
2432 if (p_params->type == QED_FILTER_MAC) {
2433 rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
2434 if (rc)
2435 return rc;
2436 }
2437
2438 if (p_params->type == QED_FILTER_VLAN)
2439 rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
2440
2441 return rc;
2442}
2443
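
The new qed_iov_vf_update_mac_shadow() gives MACs the same shadow bookkeeping VLANs already had: REMOVE clears a matching slot, REPLACE/FLUSH wipe the table, ADD/REPLACE then claim the first empty slot, and everything is skipped while a forced MAC is in effect. A user-space model with simplified opcode names:

```c
#include <stdio.h>
#include <string.h>

#define NUM_MACS 1	/* mirrors QED_ETH_VF_NUM_MAC_FILTERS */
#define ETH_ALEN 6

enum op { OP_ADD, OP_REMOVE, OP_REPLACE, OP_FLUSH };

static unsigned char shadow[NUM_MACS][ETH_ALEN];

static int update_mac_shadow(enum op op, const unsigned char *mac)
{
	static const unsigned char zero[ETH_ALEN];
	int i;

	if (op == OP_REMOVE) {
		for (i = 0; i < NUM_MACS; i++)
			if (!memcmp(shadow[i], mac, ETH_ALEN)) {
				memset(shadow[i], 0, ETH_ALEN);
				break;
			}
		if (i == NUM_MACS)
			return -1;		/* MAC isn't configured */
	} else if (op == OP_REPLACE || op == OP_FLUSH) {
		memset(shadow, 0, sizeof(shadow));
	}

	if (op != OP_ADD && op != OP_REPLACE)
		return 0;

	for (i = 0; i < NUM_MACS; i++)
		if (!memcmp(shadow[i], zero, ETH_ALEN)) {
			memcpy(shadow[i], mac, ETH_ALEN);
			return 0;		/* listed in first free slot */
		}

	return -1;				/* no available slot */
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x00, 0x0e, 0x1e, 0x00, 0x00, 0x01 };
	int a = update_mac_shadow(OP_ADD, mac);
	int b = update_mac_shadow(OP_ADD, mac);	/* table already full */
	int c = update_mac_shadow(OP_REMOVE, mac);

	printf("add=%d re-add=%d remove=%d\n", a, b, c);
	return 0;
}
```
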
2225int qed_iov_chk_ucast(struct qed_hwfn *hwfn, 2444int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
2226 int vfid, struct qed_filter_ucast *params) 2445 int vfid, struct qed_filter_ucast *params)
2227{ 2446{
@@ -2366,11 +2585,27 @@ static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
2366 struct qed_vf_info *p_vf) 2585 struct qed_vf_info *p_vf)
2367{ 2586{
2368 u16 length = sizeof(struct pfvf_def_resp_tlv); 2587 u16 length = sizeof(struct pfvf_def_resp_tlv);
2588 u8 status = PFVF_STATUS_SUCCESS;
2589 int rc = 0;
2369 2590
2370 qed_iov_vf_cleanup(p_hwfn, p_vf); 2591 qed_iov_vf_cleanup(p_hwfn, p_vf);
2371 2592
2593 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
2594 /* Stopping the VF */
2595 rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
2596 p_vf->opaque_fid);
2597
2598 if (rc) {
2599 DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
2600 rc);
2601 status = PFVF_STATUS_FAILURE;
2602 }
2603
2604 p_vf->state = VF_STOPPED;
2605 }
2606
2372 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE, 2607 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
2373 length, PFVF_STATUS_SUCCESS); 2608 length, status);
2374} 2609}
2375 2610
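
Release now also stops the VF in FW: the stop ramrod is issued only when the VF is neither FREE nor already STOPPED, and the state is forced to VF_STOPPED either way, keeping repeated releases idempotent. A condensed model with stand-in states and a stubbed ramrod:

```c
#include <stdio.h>

enum vf_state { VF_FREE, VF_ACQUIRED, VF_ENABLED, VF_STOPPED };

static int sp_vf_stop(void) { return 0; }	/* pretend the ramrod succeeds */

static int release_vf(enum vf_state *state)
{
	int status = 0;

	if (*state != VF_STOPPED && *state != VF_FREE) {
		if (sp_vf_stop())
			status = -1;	/* maps to PFVF_STATUS_FAILURE */
		*state = VF_STOPPED;	/* stopped either way */
	}
	return status;
}

int main(void)
{
	enum vf_state st = VF_ENABLED;
	int rc = release_vf(&st);

	printf("first release: %d (state %d)\n", rc, st);
	rc = release_vf(&st);	/* no second ramrod */
	printf("second release: %d (state %d)\n", rc, st);
	return 0;
}
```
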
2376static int 2611static int
@@ -2622,7 +2857,6 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
2622{ 2857{
2623 struct qed_iov_vf_mbx *mbx; 2858 struct qed_iov_vf_mbx *mbx;
2624 struct qed_vf_info *p_vf; 2859 struct qed_vf_info *p_vf;
2625 int i;
2626 2860
2627 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 2861 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
2628 if (!p_vf) 2862 if (!p_vf)
@@ -2631,9 +2865,8 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
2631 mbx = &p_vf->vf_mbx; 2865 mbx = &p_vf->vf_mbx;
2632 2866
2633 /* qed_iov_process_mbx_request */ 2867 /* qed_iov_process_mbx_request */
2634 DP_VERBOSE(p_hwfn, 2868 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2635 QED_MSG_IOV, 2869 "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
2636 "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);
2637 2870
2638 mbx->first_tlv = mbx->req_virt->first_tlv; 2871 mbx->first_tlv = mbx->req_virt->first_tlv;
2639 2872
@@ -2687,15 +2920,28 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
2687 * support them. Or this may be because someone wrote a crappy 2920 * support them. Or this may be because someone wrote a crappy
2688 * VF driver and is sending garbage over the channel. 2921 * VF driver and is sending garbage over the channel.
2689 */ 2922 */
2690 DP_ERR(p_hwfn, 2923 DP_NOTICE(p_hwfn,
2691 "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n", 2924 "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
2692 mbx->first_tlv.tl.type, mbx->first_tlv.tl.length); 2925 p_vf->abs_vf_id,
2693 2926 mbx->first_tlv.tl.type,
2694 for (i = 0; i < 20; i++) { 2927 mbx->first_tlv.tl.length,
2928 mbx->first_tlv.padding, mbx->first_tlv.reply_address);
2929
2930 /* Try replying in case reply address matches the acquisition's
2931 * posted address.
2932 */
2933 if (p_vf->acquire.first_tlv.reply_address &&
2934 (mbx->first_tlv.reply_address ==
2935 p_vf->acquire.first_tlv.reply_address)) {
2936 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
2937 mbx->first_tlv.tl.type,
2938 sizeof(struct pfvf_def_resp_tlv),
2939 PFVF_STATUS_NOT_SUPPORTED);
2940 } else {
2695 DP_VERBOSE(p_hwfn, 2941 DP_VERBOSE(p_hwfn,
2696 QED_MSG_IOV, 2942 QED_MSG_IOV,
2697 "%x ", 2943 "VF[%02x]: Can't respond to TLV - no valid reply address\n",
2698 mbx->req_virt->tlv_buf_size.tlv_buffer[i]); 2944 p_vf->abs_vf_id);
2699 } 2945 }
2700 } 2946 }
2701} 2947}
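
For an unknown TLV the PF no longer hexdumps the mailbox; it replies PFVF_STATUS_NOT_SUPPORTED, but only when the request's reply address matches the one recorded from the VF's ACQUIRE, so garbage on the channel can't steer the PF into posting a response to an arbitrary address. A sketch of that guard with simplified types:

```c
#include <stdio.h>
#include <stdint.h>

struct first_tlv { uint16_t type; uint64_t reply_address; };

/* Returns 1 if a NOT_SUPPORTED response may be posted */
static int may_reply(const struct first_tlv *acquired,
		     const struct first_tlv *req)
{
	return acquired->reply_address &&
	       req->reply_address == acquired->reply_address;
}

int main(void)
{
	struct first_tlv acq = { .type = 1,    .reply_address = 0x1000 };
	struct first_tlv bad = { .type = 0xff, .reply_address = 0x2000 };
	struct first_tlv ok  = { .type = 0xff, .reply_address = 0x1000 };

	printf("spoofed: %d, matching: %d\n",
	       may_reply(&acq, &bad), may_reply(&acq, &ok));
	return 0;
}
```
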
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index c90b2b6ad969..0dd23e409b3f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -10,6 +10,9 @@
10#define _QED_SRIOV_H 10#define _QED_SRIOV_H
11#include <linux/types.h> 11#include <linux/types.h>
12#include "qed_vf.h" 12#include "qed_vf.h"
13
14#define QED_ETH_VF_NUM_MAC_FILTERS 1
15#define QED_ETH_VF_NUM_VLAN_FILTERS 2
13#define QED_VF_ARRAY_LENGTH (3) 16#define QED_VF_ARRAY_LENGTH (3)
14 17
15#ifdef CONFIG_QED_SRIOV 18#ifdef CONFIG_QED_SRIOV
@@ -24,7 +27,6 @@
24#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) 27#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
25 28
26#define QED_MAX_VF_CHAINS_PER_PF 16 29#define QED_MAX_VF_CHAINS_PER_PF 16
27#define QED_ETH_VF_NUM_VLAN_FILTERS 2
28 30
29#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \ 31#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \
30 (MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS) 32 (MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)
@@ -120,6 +122,8 @@ struct qed_vf_shadow_config {
120 /* Shadow copy of all guest vlans */ 122 /* Shadow copy of all guest vlans */
121 struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1]; 123 struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];
122 124
125 /* Shadow copy of all configured MACs; Empty if forcing MACs */
126 u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
123 u8 inner_vlan_removal; 127 u8 inner_vlan_removal;
124}; 128};
125 129
@@ -133,6 +137,9 @@ struct qed_vf_info {
133 struct qed_bulletin bulletin; 137 struct qed_bulletin bulletin;
134 dma_addr_t vf_bulletin; 138 dma_addr_t vf_bulletin;
135 139
140 /* PF saves a copy of the last VF acquire message */
141 struct vfpf_acquire_tlv acquire;
142
136 u32 concrete_fid; 143 u32 concrete_fid;
137 u16 opaque_fid; 144 u16 opaque_fid;
138 u16 mtu; 145 u16 mtu;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 72e69c0ec10d..9819230947bf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -117,36 +117,64 @@ exit:
117} 117}
118 118
119#define VF_ACQUIRE_THRESH 3 119#define VF_ACQUIRE_THRESH 3
120#define VF_ACQUIRE_MAC_FILTERS 1 120static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
121 struct vf_pf_resc_request *p_req,
122 struct pf_vf_resc *p_resp)
123{
124 DP_VERBOSE(p_hwfn,
125 QED_MSG_IOV,
126 "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
127 p_req->num_rxqs,
128 p_resp->num_rxqs,
129 p_req->num_txqs,
130 p_resp->num_txqs,
131 p_req->num_sbs,
132 p_resp->num_sbs,
133 p_req->num_mac_filters,
134 p_resp->num_mac_filters,
135 p_req->num_vlan_filters,
136 p_resp->num_vlan_filters,
137 p_req->num_mc_filters, p_resp->num_mc_filters);
138
139 /* humble our request */
140 p_req->num_txqs = p_resp->num_txqs;
141 p_req->num_rxqs = p_resp->num_rxqs;
142 p_req->num_sbs = p_resp->num_sbs;
143 p_req->num_mac_filters = p_resp->num_mac_filters;
144 p_req->num_vlan_filters = p_resp->num_vlan_filters;
145 p_req->num_mc_filters = p_resp->num_mc_filters;
146}
121 147
122static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) 148static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
123{ 149{
124 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; 150 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
125 struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp; 151 struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
126 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; 152 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
127 u8 rx_count = 1, tx_count = 1, num_sbs = 1; 153 struct vf_pf_resc_request *p_resc;
128 u8 num_mac = VF_ACQUIRE_MAC_FILTERS;
129 bool resources_acquired = false; 154 bool resources_acquired = false;
130 struct vfpf_acquire_tlv *req; 155 struct vfpf_acquire_tlv *req;
131 int rc = 0, attempts = 0; 156 int rc = 0, attempts = 0;
132 157
133 /* clear mailbox and prep first tlv */ 158 /* clear mailbox and prep first tlv */
134 req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req)); 159 req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
160 p_resc = &req->resc_request;
135 161
136 /* start filling the request */ 162
137 req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid; 163 req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
138 164
139 req->resc_request.num_rxqs = rx_count; 165 p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
140 req->resc_request.num_txqs = tx_count; 166 p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
141 req->resc_request.num_sbs = num_sbs; 167 p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
142 req->resc_request.num_mac_filters = num_mac; 168 p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
143 req->resc_request.num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; 169 p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
144 170
145 req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX; 171 req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
146 req->vfdev_info.fw_major = FW_MAJOR_VERSION; 172 req->vfdev_info.fw_major = FW_MAJOR_VERSION;
147 req->vfdev_info.fw_minor = FW_MINOR_VERSION; 173 req->vfdev_info.fw_minor = FW_MINOR_VERSION;
148 req->vfdev_info.fw_revision = FW_REVISION_VERSION; 174 req->vfdev_info.fw_revision = FW_REVISION_VERSION;
149 req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION; 175 req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
176 req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
177 req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;
150 178
151 /* Fill capability field with any non-deprecated config we support */ 179 /* Fill capability field with any non-deprecated config we support */
152 req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G; 180 req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
@@ -185,21 +213,21 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
185 resources_acquired = true; 213 resources_acquired = true;
186 } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE && 214 } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
187 attempts < VF_ACQUIRE_THRESH) { 215 attempts < VF_ACQUIRE_THRESH) {
188 DP_VERBOSE(p_hwfn, 216 qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
189 QED_MSG_IOV, 217 &resp->resc);
190 "PF unwilling to fullfill resource request. Try PF recommended amount\n");
191
192 /* humble our request */
193 req->resc_request.num_txqs = resp->resc.num_txqs;
194 req->resc_request.num_rxqs = resp->resc.num_rxqs;
195 req->resc_request.num_sbs = resp->resc.num_sbs;
196 req->resc_request.num_mac_filters =
197 resp->resc.num_mac_filters;
198 req->resc_request.num_vlan_filters =
199 resp->resc.num_vlan_filters;
200 218
201 /* Clear response buffer */ 219 /* Clear response buffer */
202 memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); 220 memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
221 } else if ((resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) &&
222 pfdev_info->major_fp_hsi &&
223 (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
224 DP_NOTICE(p_hwfn,
225 "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
226 pfdev_info->major_fp_hsi,
227 pfdev_info->minor_fp_hsi,
228 ETH_HSI_VER_MAJOR,
229 ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi);
230 return -EINVAL;
203 } else { 231 } else {
204 DP_ERR(p_hwfn, 232 DP_ERR(p_hwfn,
205 "PF returned error %d to VF acquisition request\n", 233 "PF returned error %d to VF acquisition request\n",
@@ -225,6 +253,13 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
225 } 253 }
226 } 254 }
227 255
256 if (ETH_HSI_VER_MINOR &&
257 (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
258 DP_INFO(p_hwfn,
259 "PF is using older fastpath HSI; %02x.%02x is configured\n",
260 ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
261 }
262
228 return 0; 263 return 0;
229} 264}
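
The acquire flow is now a proper negotiation: the VF starts by asking for the maximum, humbles its request to the PF-recommended amounts on PFVF_STATUS_NO_RESOURCE (up to VF_ACQUIRE_THRESH attempts), and bails out immediately on a fastpath-HSI major mismatch. A condensed model with a faked PF and simplified status codes:

```c
#include <stdio.h>

#define VF_ACQUIRE_THRESH 3

enum status { ST_SUCCESS, ST_NO_RESOURCE, ST_NOT_SUPPORTED };

/* Fake PF: grants at most 4 queues, succeeds once the ask fits */
static enum status pf_acquire(int req, int *granted)
{
	*granted = req < 4 ? req : 4;
	return req <= 4 ? ST_SUCCESS : ST_NO_RESOURCE;
}

int main(void)
{
	int req = 16, granted, attempts = 0;

	for (;;) {
		enum status st = pf_acquire(req, &granted);

		attempts++;
		if (st == ST_SUCCESS) {
			printf("acquired %d queues after %d attempt(s)\n",
			       granted, attempts);
			return 0;
		}
		if (st == ST_NO_RESOURCE && attempts < VF_ACQUIRE_THRESH) {
			req = granted;	/* humble our request */
			continue;
		}
		fprintf(stderr, "acquire failed (status %d)\n", st);
		return 1;
	}
}
```
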
230 265
@@ -405,8 +440,8 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
405 u16 pbl_size, void __iomem **pp_doorbell) 440 u16 pbl_size, void __iomem **pp_doorbell)
406{ 441{
407 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; 442 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
443 struct pfvf_start_queue_resp_tlv *resp;
408 struct vfpf_start_txq_tlv *req; 444 struct vfpf_start_txq_tlv *req;
409 struct pfvf_def_resp_tlv *resp;
410 int rc; 445 int rc;
411 446
412 /* clear mailbox and prep first tlv */ 447 /* clear mailbox and prep first tlv */
@@ -424,20 +459,24 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
424 qed_add_tlv(p_hwfn, &p_iov->offset, 459 qed_add_tlv(p_hwfn, &p_iov->offset,
425 CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); 460 CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
426 461
427 resp = &p_iov->pf2vf_reply->default_resp; 462 resp = &p_iov->pf2vf_reply->queue_start;
428 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 463 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
429 if (rc) 464 if (rc)
430 return rc; 465 goto exit;
431 466
432 if (resp->hdr.status != PFVF_STATUS_SUCCESS) 467 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
433 return -EINVAL; 468 rc = -EINVAL;
469 goto exit;
470 }
434 471
435 if (pp_doorbell) { 472 if (pp_doorbell) {
436 u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id]; 473 *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
437 474
438 *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + 475 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
439 qed_db_addr(cid, DQ_DEMS_LEGACY); 476 "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
477 tx_queue_id, *pp_doorbell, resp->offset);
440 } 478 }
479exit:
441 480
442 return rc; 481 return rc;
443} 482}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index b82fda964bbd..b23ce58e932f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -96,7 +96,9 @@ struct vfpf_acquire_tlv {
96 u32 driver_version; 96 u32 driver_version;
97 u16 opaque_fid; /* ME register value */ 97 u16 opaque_fid; /* ME register value */
98 u8 os_type; /* VFPF_ACQUIRE_OS_* value */ 98 u8 os_type; /* VFPF_ACQUIRE_OS_* value */
99 u8 padding[5]; 99 u8 eth_fp_hsi_major;
100 u8 eth_fp_hsi_minor;
101 u8 padding[3];
100 } vfdev_info; 102 } vfdev_info;
101 103
102 struct vf_pf_resc_request resc_request; 104 struct vf_pf_resc_request resc_request;
@@ -171,7 +173,14 @@ struct pfvf_acquire_resp_tlv {
171 struct pfvf_stats_info stats_info; 173 struct pfvf_stats_info stats_info;
172 174
173 u8 port_mac[ETH_ALEN]; 175 u8 port_mac[ETH_ALEN];
174 u8 padding2[2]; 176
177 /* It's possible PF had to configure an older fastpath HSI
178 * [in case VF is newer than PF]. This is communicated back
179 * to the VF. It can also be used in case of error due to
180 * non-matching versions, to give the VF insight into the failure.
181 */
182 u8 major_fp_hsi;
183 u8 minor_fp_hsi;
175 } pfdev_info; 184 } pfdev_info;
176 185
177 struct pf_vf_resc { 186 struct pf_vf_resc {
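
The new eth_fp_hsi_major/minor and major_fp_hsi/minor_fp_hsi fields let both sides negotiate the fastpath HSI: a major mismatch is fatal, while a PF running an older minor is merely reported. A sketch of that rule with illustrative version numbers (not the real ETH_HSI_VER_* values):

```c
#include <stdio.h>

#define VF_HSI_MAJOR 3
#define VF_HSI_MINOR 1

static int check_hsi(unsigned char pf_major, unsigned char pf_minor)
{
	if (pf_major != VF_HSI_MAJOR) {
		fprintf(stderr,
			"PF uses incompatible fastpath HSI %02x.%02x [VF requires %02x.xx]\n",
			pf_major, pf_minor, VF_HSI_MAJOR);
		return -1;
	}
	if (pf_minor < VF_HSI_MINOR)
		printf("PF is using older fastpath HSI; %02x.%02x configured\n",
		       pf_major, pf_minor);
	return 0;
}

int main(void)
{
	check_hsi(3, 0);	/* older minor: warn, keep going */
	check_hsi(2, 9);	/* major mismatch: fail the ACQUIRE */
	return 0;
}
```
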
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 06ff90d87572..74a49850d74d 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -1,3 +1,4 @@
1obj-$(CONFIG_QEDE) := qede.o 1obj-$(CONFIG_QEDE) := qede.o
2 2
3qede-y := qede_main.o qede_ethtool.o 3qede-y := qede_main.o qede_ethtool.o
4qede-$(CONFIG_DCB) += qede_dcbnl.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 47d6b22252f6..1441c8f6d414 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -304,6 +304,9 @@ union qede_reload_args {
304 u16 mtu; 304 u16 mtu;
305}; 305};
306 306
307#ifdef CONFIG_DCB
308void qede_set_dcbnl_ops(struct net_device *ndev);
309#endif
307void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level); 310void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
308void qede_set_ethtool_ops(struct net_device *netdev); 311void qede_set_ethtool_ops(struct net_device *netdev);
309void qede_reload(struct qede_dev *edev, 312void qede_reload(struct qede_dev *edev,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
new file mode 100644
index 000000000000..03e8c0212433
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
@@ -0,0 +1,348 @@
1/* QLogic qede NIC Driver
2* Copyright (c) 2015 QLogic Corporation
3*
4* This software is available under the terms of the GNU General Public License
5* (GPL) Version 2, available from the file COPYING in the main directory of
6* this source tree.
7*/
8
9#include <linux/types.h>
10#include <linux/netdevice.h>
11#include <linux/rtnetlink.h>
12#include <net/dcbnl.h>
13#include "qede.h"
14
15static u8 qede_dcbnl_getstate(struct net_device *netdev)
16{
17 struct qede_dev *edev = netdev_priv(netdev);
18
19 return edev->ops->dcb->getstate(edev->cdev);
20}
21
22static u8 qede_dcbnl_setstate(struct net_device *netdev, u8 state)
23{
24 struct qede_dev *edev = netdev_priv(netdev);
25
26 return edev->ops->dcb->setstate(edev->cdev, state);
27}
28
29static void qede_dcbnl_getpermhwaddr(struct net_device *netdev,
30 u8 *perm_addr)
31{
32 memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
33}
34
35static void qede_dcbnl_getpgtccfgtx(struct net_device *netdev, int prio,
36 u8 *prio_type, u8 *pgid, u8 *bw_pct,
37 u8 *up_map)
38{
39 struct qede_dev *edev = netdev_priv(netdev);
40
41 edev->ops->dcb->getpgtccfgtx(edev->cdev, prio, prio_type,
42 pgid, bw_pct, up_map);
43}
44
45static void qede_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
46 int pgid, u8 *bw_pct)
47{
48 struct qede_dev *edev = netdev_priv(netdev);
49
50 edev->ops->dcb->getpgbwgcfgtx(edev->cdev, pgid, bw_pct);
51}
52
53static void qede_dcbnl_getpgtccfgrx(struct net_device *netdev, int prio,
54 u8 *prio_type, u8 *pgid, u8 *bw_pct,
55 u8 *up_map)
56{
57 struct qede_dev *edev = netdev_priv(netdev);
58
59 edev->ops->dcb->getpgtccfgrx(edev->cdev, prio, prio_type, pgid, bw_pct,
60 up_map);
61}
62
63static void qede_dcbnl_getpgbwgcfgrx(struct net_device *netdev,
64 int pgid, u8 *bw_pct)
65{
66 struct qede_dev *edev = netdev_priv(netdev);
67
68 edev->ops->dcb->getpgbwgcfgrx(edev->cdev, pgid, bw_pct);
69}
70
71static void qede_dcbnl_getpfccfg(struct net_device *netdev, int prio,
72 u8 *setting)
73{
74 struct qede_dev *edev = netdev_priv(netdev);
75
76 edev->ops->dcb->getpfccfg(edev->cdev, prio, setting);
77}
78
79static void qede_dcbnl_setpfccfg(struct net_device *netdev, int prio,
80 u8 setting)
81{
82 struct qede_dev *edev = netdev_priv(netdev);
83
84 edev->ops->dcb->setpfccfg(edev->cdev, prio, setting);
85}
86
87static u8 qede_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
88{
89 struct qede_dev *edev = netdev_priv(netdev);
90
91 return edev->ops->dcb->getcap(edev->cdev, capid, cap);
92}
93
94static int qede_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
95{
96 struct qede_dev *edev = netdev_priv(netdev);
97
98 return edev->ops->dcb->getnumtcs(edev->cdev, tcid, num);
99}
100
101static u8 qede_dcbnl_getpfcstate(struct net_device *netdev)
102{
103 struct qede_dev *edev = netdev_priv(netdev);
104
105 return edev->ops->dcb->getpfcstate(edev->cdev);
106}
107
108static int qede_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
109{
110 struct qede_dev *edev = netdev_priv(netdev);
111
112 return edev->ops->dcb->getapp(edev->cdev, idtype, id);
113}
114
115static u8 qede_dcbnl_getdcbx(struct net_device *netdev)
116{
117 struct qede_dev *edev = netdev_priv(netdev);
118
119 return edev->ops->dcb->getdcbx(edev->cdev);
120}
121
122static void qede_dcbnl_setpgtccfgtx(struct net_device *netdev, int prio,
123 u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
124{
125 struct qede_dev *edev = netdev_priv(netdev);
126
127 return edev->ops->dcb->setpgtccfgtx(edev->cdev, prio, pri_type, pgid,
128 bw_pct, up_map);
129}
130
131static void qede_dcbnl_setpgtccfgrx(struct net_device *netdev, int prio,
132 u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
133{
134 struct qede_dev *edev = netdev_priv(netdev);
135
136 return edev->ops->dcb->setpgtccfgrx(edev->cdev, prio, pri_type, pgid,
137 bw_pct, up_map);
138}
139
140static void qede_dcbnl_setpgbwgcfgtx(struct net_device *netdev, int pgid,
141 u8 bw_pct)
142{
143 struct qede_dev *edev = netdev_priv(netdev);
144
145 return edev->ops->dcb->setpgbwgcfgtx(edev->cdev, pgid, bw_pct);
146}
147
148static void qede_dcbnl_setpgbwgcfgrx(struct net_device *netdev, int pgid,
149 u8 bw_pct)
150{
151 struct qede_dev *edev = netdev_priv(netdev);
152
153 return edev->ops->dcb->setpgbwgcfgrx(edev->cdev, pgid, bw_pct);
154}
155
156static u8 qede_dcbnl_setall(struct net_device *netdev)
157{
158 struct qede_dev *edev = netdev_priv(netdev);
159
160 return edev->ops->dcb->setall(edev->cdev);
161}
162
163static int qede_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
164{
165 struct qede_dev *edev = netdev_priv(netdev);
166
167 return edev->ops->dcb->setnumtcs(edev->cdev, tcid, num);
168}
169
170static void qede_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
171{
172 struct qede_dev *edev = netdev_priv(netdev);
173
174 return edev->ops->dcb->setpfcstate(edev->cdev, state);
175}
176
177static int qede_dcbnl_setapp(struct net_device *netdev, u8 idtype, u16 idval,
178 u8 up)
179{
180 struct qede_dev *edev = netdev_priv(netdev);
181
182 return edev->ops->dcb->setapp(edev->cdev, idtype, idval, up);
183}
184
185static u8 qede_dcbnl_setdcbx(struct net_device *netdev, u8 state)
186{
187 struct qede_dev *edev = netdev_priv(netdev);
188
189 return edev->ops->dcb->setdcbx(edev->cdev, state);
190}
191
192static u8 qede_dcbnl_getfeatcfg(struct net_device *netdev, int featid,
193 u8 *flags)
194{
195 struct qede_dev *edev = netdev_priv(netdev);
196
197 return edev->ops->dcb->getfeatcfg(edev->cdev, featid, flags);
198}
199
200static u8 qede_dcbnl_setfeatcfg(struct net_device *netdev, int featid, u8 flags)
201{
202 struct qede_dev *edev = netdev_priv(netdev);
203
204 return edev->ops->dcb->setfeatcfg(edev->cdev, featid, flags);
205}
206
207static int qede_dcbnl_peer_getappinfo(struct net_device *netdev,
208 struct dcb_peer_app_info *info,
209 u16 *count)
210{
211 struct qede_dev *edev = netdev_priv(netdev);
212
213 return edev->ops->dcb->peer_getappinfo(edev->cdev, info, count);
214}
215
216static int qede_dcbnl_peer_getapptable(struct net_device *netdev,
217 struct dcb_app *app)
218{
219 struct qede_dev *edev = netdev_priv(netdev);
220
221 return edev->ops->dcb->peer_getapptable(edev->cdev, app);
222}
223
224static int qede_dcbnl_cee_peer_getpfc(struct net_device *netdev,
225 struct cee_pfc *pfc)
226{
227 struct qede_dev *edev = netdev_priv(netdev);
228
229 return edev->ops->dcb->cee_peer_getpfc(edev->cdev, pfc);
230}
231
232static int qede_dcbnl_cee_peer_getpg(struct net_device *netdev,
233 struct cee_pg *pg)
234{
235 struct qede_dev *edev = netdev_priv(netdev);
236
237 return edev->ops->dcb->cee_peer_getpg(edev->cdev, pg);
238}
239
240static int qede_dcbnl_ieee_getpfc(struct net_device *netdev,
241 struct ieee_pfc *pfc)
242{
243 struct qede_dev *edev = netdev_priv(netdev);
244
245 return edev->ops->dcb->ieee_getpfc(edev->cdev, pfc);
246}
247
248static int qede_dcbnl_ieee_setpfc(struct net_device *netdev,
249 struct ieee_pfc *pfc)
250{
251 struct qede_dev *edev = netdev_priv(netdev);
252
253 return edev->ops->dcb->ieee_setpfc(edev->cdev, pfc);
254}
255
256static int qede_dcbnl_ieee_getets(struct net_device *netdev,
257 struct ieee_ets *ets)
258{
259 struct qede_dev *edev = netdev_priv(netdev);
260
261 return edev->ops->dcb->ieee_getets(edev->cdev, ets);
262}
263
264static int qede_dcbnl_ieee_setets(struct net_device *netdev,
265 struct ieee_ets *ets)
266{
267 struct qede_dev *edev = netdev_priv(netdev);
268
269 return edev->ops->dcb->ieee_setets(edev->cdev, ets);
270}
271
272static int qede_dcbnl_ieee_getapp(struct net_device *netdev,
273 struct dcb_app *app)
274{
275 struct qede_dev *edev = netdev_priv(netdev);
276
277 return edev->ops->dcb->ieee_getapp(edev->cdev, app);
278}
279
280static int qede_dcbnl_ieee_setapp(struct net_device *netdev,
281 struct dcb_app *app)
282{
283 struct qede_dev *edev = netdev_priv(netdev);
284
285 return edev->ops->dcb->ieee_setapp(edev->cdev, app);
286}
287
288static int qede_dcbnl_ieee_peer_getpfc(struct net_device *netdev,
289 struct ieee_pfc *pfc)
290{
291 struct qede_dev *edev = netdev_priv(netdev);
292
293 return edev->ops->dcb->ieee_peer_getpfc(edev->cdev, pfc);
294}
295
296static int qede_dcbnl_ieee_peer_getets(struct net_device *netdev,
297 struct ieee_ets *ets)
298{
299 struct qede_dev *edev = netdev_priv(netdev);
300
301 return edev->ops->dcb->ieee_peer_getets(edev->cdev, ets);
302}
303
304static const struct dcbnl_rtnl_ops qede_dcbnl_ops = {
305 .ieee_getpfc = qede_dcbnl_ieee_getpfc,
306 .ieee_setpfc = qede_dcbnl_ieee_setpfc,
307 .ieee_getets = qede_dcbnl_ieee_getets,
308 .ieee_setets = qede_dcbnl_ieee_setets,
309 .ieee_getapp = qede_dcbnl_ieee_getapp,
310 .ieee_setapp = qede_dcbnl_ieee_setapp,
311 .getdcbx = qede_dcbnl_getdcbx,
312 .ieee_peer_getpfc = qede_dcbnl_ieee_peer_getpfc,
313 .ieee_peer_getets = qede_dcbnl_ieee_peer_getets,
314 .getstate = qede_dcbnl_getstate,
315 .setstate = qede_dcbnl_setstate,
316 .getpermhwaddr = qede_dcbnl_getpermhwaddr,
317 .getpgtccfgtx = qede_dcbnl_getpgtccfgtx,
318 .getpgbwgcfgtx = qede_dcbnl_getpgbwgcfgtx,
319 .getpgtccfgrx = qede_dcbnl_getpgtccfgrx,
320 .getpgbwgcfgrx = qede_dcbnl_getpgbwgcfgrx,
321 .getpfccfg = qede_dcbnl_getpfccfg,
322 .setpfccfg = qede_dcbnl_setpfccfg,
323 .getcap = qede_dcbnl_getcap,
324 .getnumtcs = qede_dcbnl_getnumtcs,
325 .getpfcstate = qede_dcbnl_getpfcstate,
326 .getapp = qede_dcbnl_getapp,
328 .setpgtccfgtx = qede_dcbnl_setpgtccfgtx,
329 .setpgtccfgrx = qede_dcbnl_setpgtccfgrx,
330 .setpgbwgcfgtx = qede_dcbnl_setpgbwgcfgtx,
331 .setpgbwgcfgrx = qede_dcbnl_setpgbwgcfgrx,
332 .setall = qede_dcbnl_setall,
333 .setnumtcs = qede_dcbnl_setnumtcs,
334 .setpfcstate = qede_dcbnl_setpfcstate,
335 .setapp = qede_dcbnl_setapp,
336 .setdcbx = qede_dcbnl_setdcbx,
337 .setfeatcfg = qede_dcbnl_setfeatcfg,
338 .getfeatcfg = qede_dcbnl_getfeatcfg,
339 .peer_getappinfo = qede_dcbnl_peer_getappinfo,
340 .peer_getapptable = qede_dcbnl_peer_getapptable,
341 .cee_peer_getpfc = qede_dcbnl_cee_peer_getpfc,
342 .cee_peer_getpg = qede_dcbnl_cee_peer_getpg,
343};
344
345void qede_set_dcbnl_ops(struct net_device *dev)
346{
347 dev->dcbnl_ops = &qede_dcbnl_ops;
348}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index ad3cae3b7243..c5c658ab0724 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -426,6 +426,59 @@ static u32 qede_get_link(struct net_device *dev)
426 return current_link.link_up; 426 return current_link.link_up;
427} 427}
428 428
429static int qede_get_coalesce(struct net_device *dev,
430 struct ethtool_coalesce *coal)
431{
432 struct qede_dev *edev = netdev_priv(dev);
433 u16 rxc, txc;
434
435 memset(coal, 0, sizeof(struct ethtool_coalesce));
436 edev->ops->common->get_coalesce(edev->cdev, &rxc, &txc);
437
438 coal->rx_coalesce_usecs = rxc;
439 coal->tx_coalesce_usecs = txc;
440
441 return 0;
442}
443
444static int qede_set_coalesce(struct net_device *dev,
445 struct ethtool_coalesce *coal)
446{
447 struct qede_dev *edev = netdev_priv(dev);
448 int i, rc = 0;
449 u16 rxc, txc;
450 u8 sb_id;
451
452 if (!netif_running(dev)) {
453 DP_INFO(edev, "Interface is down\n");
454 return -EINVAL;
455 }
456
457 if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
458 coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
459 DP_INFO(edev,
460 "Can't support requested %s coalesce value [max supported value %d]\n",
461 coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx"
462 : "tx",
463 QED_COALESCE_MAX);
464 return -EINVAL;
465 }
466
467 rxc = (u16)coal->rx_coalesce_usecs;
468 txc = (u16)coal->tx_coalesce_usecs;
469 for_each_rss(i) {
470 sb_id = edev->fp_array[i].sb_info->igu_sb_id;
471 rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
472 (u8)i, sb_id);
473 if (rc) {
474 DP_INFO(edev, "Set coalesce error, rc = %d\n", rc);
475 return rc;
476 }
477 }
478
479 return rc;
480}
481
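
qede_set_coalesce() validates both values against the device maximum before programming every RSS queue, stopping at the first per-queue failure. A model of that flow; QED_COALESCE_MAX's real value and the per-queue hook signature are stand-ins:

```c
#include <stdio.h>
#include <stdint.h>

#define COALESCE_MAX 0xff	/* assumed device limit, in usecs */
#define NUM_QUEUES 4

static int set_queue_coalesce(int q, uint16_t rxc, uint16_t txc)
{
	printf("queue %d: rx %u us, tx %u us\n", q, rxc, txc);
	return 0;		/* pretend the device accepted it */
}

static int set_coalesce(unsigned int rx_usecs, unsigned int tx_usecs)
{
	int i, rc;

	if (rx_usecs > COALESCE_MAX || tx_usecs > COALESCE_MAX) {
		fprintf(stderr, "Can't support requested %s coalesce value\n",
			rx_usecs > COALESCE_MAX ? "rx" : "tx");
		return -1;
	}

	for (i = 0; i < NUM_QUEUES; i++) {
		rc = set_queue_coalesce(i, (uint16_t)rx_usecs,
					(uint16_t)tx_usecs);
		if (rc)
			return rc;	/* report the first failure */
	}
	return 0;
}

int main(void)
{
	return set_coalesce(24, 48);
}
```
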
429static void qede_get_ringparam(struct net_device *dev, 482static void qede_get_ringparam(struct net_device *dev,
430 struct ethtool_ringparam *ering) 483 struct ethtool_ringparam *ering)
431{ 484{
@@ -910,6 +963,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
910 memset(first_bd, 0, sizeof(*first_bd)); 963 memset(first_bd, 0, sizeof(*first_bd));
911 val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; 964 val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
912 first_bd->data.bd_flags.bitfields = val; 965 first_bd->data.bd_flags.bitfields = val;
966 val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
967 first_bd->data.bitfields |= (val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
913 968
914 /* Map skb linear data for DMA and set in the first BD */ 969 /* Map skb linear data for DMA and set in the first BD */
915 mapping = dma_map_single(&edev->pdev->dev, skb->data, 970 mapping = dma_map_single(&edev->pdev->dev, skb->data,
@@ -1137,6 +1192,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
1137 .set_msglevel = qede_set_msglevel, 1192 .set_msglevel = qede_set_msglevel,
1138 .nway_reset = qede_nway_reset, 1193 .nway_reset = qede_nway_reset,
1139 .get_link = qede_get_link, 1194 .get_link = qede_get_link,
1195 .get_coalesce = qede_get_coalesce,
1196 .set_coalesce = qede_set_coalesce,
1140 .get_ringparam = qede_get_ringparam, 1197 .get_ringparam = qede_get_ringparam,
1141 .set_ringparam = qede_set_ringparam, 1198 .set_ringparam = qede_set_ringparam,
1142 .get_pauseparam = qede_get_pauseparam, 1199 .get_pauseparam = qede_get_pauseparam,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index f8e11f953acb..19bc631e1f04 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -24,12 +24,7 @@
24#include <linux/netdev_features.h> 24#include <linux/netdev_features.h>
25#include <linux/udp.h> 25#include <linux/udp.h>
26#include <linux/tcp.h> 26#include <linux/tcp.h>
27#ifdef CONFIG_QEDE_VXLAN 27#include <net/udp_tunnel.h>
28#include <net/vxlan.h>
29#endif
30#ifdef CONFIG_QEDE_GENEVE
31#include <net/geneve.h>
32#endif
33#include <linux/ip.h> 28#include <linux/ip.h>
34#include <net/ipv6.h> 29#include <net/ipv6.h>
35#include <net/tcp.h> 30#include <net/tcp.h>
@@ -579,8 +574,6 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
579 574
580 /* Fill the parsing flags & params according to the requested offload */ 575 /* Fill the parsing flags & params according to the requested offload */
581 if (xmit_type & XMIT_L4_CSUM) { 576 if (xmit_type & XMIT_L4_CSUM) {
582 u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT;
583
584 /* We don't re-calculate IP checksum as it is already done by 577 /* We don't re-calculate IP checksum as it is already done by
585 * the upper stack 578 * the upper stack
586 */ 579 */
@@ -590,14 +583,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
590 if (xmit_type & XMIT_ENC) { 583 if (xmit_type & XMIT_ENC) {
591 first_bd->data.bd_flags.bitfields |= 584 first_bd->data.bd_flags.bitfields |=
592 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; 585 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
593 } else { 586 first_bd->data.bitfields |=
594 /* In cases when OS doesn't indicate for inner offloads 587 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
595 * when packet is tunnelled, we need to override the HW
596 * tunnel configuration so that packets are treated as
597 * regular non tunnelled packets and no inner offloads
598 * are done by the hardware.
599 */
600 first_bd->data.bitfields |= cpu_to_le16(temp);
601 } 588 }
602 589
603 /* If the packet is IPv6 with extension header, indicate that 590 /* If the packet is IPv6 with extension header, indicate that
@@ -655,6 +642,10 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
655 tx_data_bd = (struct eth_tx_bd *)third_bd; 642 tx_data_bd = (struct eth_tx_bd *)third_bd;
656 data_split = true; 643 data_split = true;
657 } 644 }
645 } else {
646 first_bd->data.bitfields |=
647 (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
648 ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
658 } 649 }
659 650
660 /* Handle fragmented skb */ 651 /* Handle fragmented skb */
@@ -2116,75 +2107,75 @@ int qede_set_features(struct net_device *dev, netdev_features_t features)
2116 return 0; 2107 return 0;
2117} 2108}
2118 2109
2119#ifdef CONFIG_QEDE_VXLAN 2110static void qede_udp_tunnel_add(struct net_device *dev,
2120static void qede_add_vxlan_port(struct net_device *dev, 2111 struct udp_tunnel_info *ti)
2121 sa_family_t sa_family, __be16 port)
2122{ 2112{
2123 struct qede_dev *edev = netdev_priv(dev); 2113 struct qede_dev *edev = netdev_priv(dev);
2124 u16 t_port = ntohs(port); 2114 u16 t_port = ntohs(ti->port);
2125 2115
2126 if (edev->vxlan_dst_port) 2116 switch (ti->type) {
2127 return; 2117 case UDP_TUNNEL_TYPE_VXLAN:
2118 if (edev->vxlan_dst_port)
2119 return;
2128 2120
2129 edev->vxlan_dst_port = t_port; 2121 edev->vxlan_dst_port = t_port;
2130 2122
2131 DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", t_port); 2123 DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d",
2124 t_port);
2132 2125
2133 set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); 2126 set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
2134 schedule_delayed_work(&edev->sp_task, 0); 2127 break;
2135} 2128 case UDP_TUNNEL_TYPE_GENEVE:
2129 if (edev->geneve_dst_port)
2130 return;
2136 2131
2137static void qede_del_vxlan_port(struct net_device *dev, 2132 edev->geneve_dst_port = t_port;
2138 sa_family_t sa_family, __be16 port)
2139{
2140 struct qede_dev *edev = netdev_priv(dev);
2141 u16 t_port = ntohs(port);
2142 2133
2143 if (t_port != edev->vxlan_dst_port) 2134 DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d",
2135 t_port);
2136 set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
2137 break;
2138 default:
2144 return; 2139 return;
2140 }
2145 2141
2146 edev->vxlan_dst_port = 0;
2147
2148 DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", t_port);
2149
2150 set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
2151 schedule_delayed_work(&edev->sp_task, 0); 2142 schedule_delayed_work(&edev->sp_task, 0);
2152} 2143}
2153#endif
2154 2144
2155#ifdef CONFIG_QEDE_GENEVE 2145static void qede_udp_tunnel_del(struct net_device *dev,
2156static void qede_add_geneve_port(struct net_device *dev, 2146 struct udp_tunnel_info *ti)
2157 sa_family_t sa_family, __be16 port)
2158{ 2147{
2159 struct qede_dev *edev = netdev_priv(dev); 2148 struct qede_dev *edev = netdev_priv(dev);
2160 u16 t_port = ntohs(port); 2149 u16 t_port = ntohs(ti->port);
2161 2150
2162 if (edev->geneve_dst_port) 2151 switch (ti->type) {
2163 return; 2152 case UDP_TUNNEL_TYPE_VXLAN:
2153 if (t_port != edev->vxlan_dst_port)
2154 return;
2164 2155
2165 edev->geneve_dst_port = t_port; 2156 edev->vxlan_dst_port = 0;
2166 2157
2167 DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", t_port); 2158 DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d",
2168 set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); 2159 t_port);
2169 schedule_delayed_work(&edev->sp_task, 0);
2170}
2171 2160
2172static void qede_del_geneve_port(struct net_device *dev, 2161 set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
2173 sa_family_t sa_family, __be16 port) 2162 break;
2174{ 2163 case UDP_TUNNEL_TYPE_GENEVE:
2175 struct qede_dev *edev = netdev_priv(dev); 2164 if (t_port != edev->geneve_dst_port)
2176 u16 t_port = ntohs(port); 2165 return;
2177 2166
2178 if (t_port != edev->geneve_dst_port) 2167 edev->geneve_dst_port = 0;
2179 return;
2180 2168
2181 edev->geneve_dst_port = 0; 2169 DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d",
2170 t_port);
2171 set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
2172 break;
2173 default:
2174 return;
2175 }
2182 2176
2183 DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", t_port);
2184 set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
2185 schedule_delayed_work(&edev->sp_task, 0); 2177 schedule_delayed_work(&edev->sp_task, 0);
2186} 2178}
2187#endif
2188 2179
2189static const struct net_device_ops qede_netdev_ops = { 2180static const struct net_device_ops qede_netdev_ops = {
2190 .ndo_open = qede_open, 2181 .ndo_open = qede_open,
@@ -2208,14 +2199,8 @@ static const struct net_device_ops qede_netdev_ops = {
2208 .ndo_get_vf_config = qede_get_vf_config, 2199 .ndo_get_vf_config = qede_get_vf_config,
2209 .ndo_set_vf_rate = qede_set_vf_rate, 2200 .ndo_set_vf_rate = qede_set_vf_rate,
2210#endif 2201#endif
2211#ifdef CONFIG_QEDE_VXLAN 2202 .ndo_udp_tunnel_add = qede_udp_tunnel_add,
2212 .ndo_add_vxlan_port = qede_add_vxlan_port, 2203 .ndo_udp_tunnel_del = qede_udp_tunnel_del,
2213 .ndo_del_vxlan_port = qede_del_vxlan_port,
2214#endif
2215#ifdef CONFIG_QEDE_GENEVE
2216 .ndo_add_geneve_port = qede_add_geneve_port,
2217 .ndo_del_geneve_port = qede_del_geneve_port,
2218#endif
2219}; 2204};
2220 2205
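
Both tunnel types now funnel through a single pair of ndo_udp_tunnel_add/del callbacks that switch on the tunnel type, replacing the per-protocol ndo hooks behind CONFIG_QEDE_VXLAN/GENEVE. A skeleton of the add path with simplified stand-in types:

```c
#include <stdio.h>
#include <stdint.h>

enum tun_type { TUN_VXLAN, TUN_GENEVE };

struct udp_tun_info { enum tun_type type; uint16_t port; };

static uint16_t vxlan_dst_port, geneve_dst_port;

static void udp_tunnel_add(const struct udp_tun_info *ti)
{
	switch (ti->type) {
	case TUN_VXLAN:
		if (vxlan_dst_port)	/* only one port per type */
			return;
		vxlan_dst_port = ti->port;
		printf("Added vxlan port=%u\n", ti->port);
		break;
	case TUN_GENEVE:
		if (geneve_dst_port)
			return;
		geneve_dst_port = ti->port;
		printf("Added geneve port=%u\n", ti->port);
		break;
	default:
		return;
	}
	/* the real driver schedules sp_task here to program the device */
}

int main(void)
{
	struct udp_tun_info v = { TUN_VXLAN,  4789 };
	struct udp_tun_info g = { TUN_GENEVE, 6081 };

	udp_tunnel_add(&v);
	udp_tunnel_add(&g);
	udp_tunnel_add(&v);	/* duplicate: silently ignored */
	return 0;
}
```
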
2221/* ------------------------------------------------------------------------- 2206/* -------------------------------------------------------------------------
@@ -2505,6 +2490,10 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
2505 2490
2506 edev->ops->register_ops(cdev, &qede_ll_ops, edev); 2491 edev->ops->register_ops(cdev, &qede_ll_ops, edev);
2507 2492
2493#ifdef CONFIG_DCB
2494 qede_set_dcbnl_ops(edev->ndev);
2495#endif
2496
2508 INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); 2497 INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
2509 mutex_init(&edev->qede_lock); 2498 mutex_init(&edev->qede_lock);
2510 2499
@@ -2823,6 +2812,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
2823 rc = edev->ops->common->chain_alloc(edev->cdev, 2812 rc = edev->ops->common->chain_alloc(edev->cdev,
2824 QED_CHAIN_USE_TO_CONSUME_PRODUCE, 2813 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
2825 QED_CHAIN_MODE_NEXT_PTR, 2814 QED_CHAIN_MODE_NEXT_PTR,
2815 QED_CHAIN_CNT_TYPE_U16,
2826 RX_RING_SIZE, 2816 RX_RING_SIZE,
2827 sizeof(struct eth_rx_bd), 2817 sizeof(struct eth_rx_bd),
2828 &rxq->rx_bd_ring); 2818 &rxq->rx_bd_ring);
@@ -2834,6 +2824,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
2834 rc = edev->ops->common->chain_alloc(edev->cdev, 2824 rc = edev->ops->common->chain_alloc(edev->cdev,
2835 QED_CHAIN_USE_TO_CONSUME, 2825 QED_CHAIN_USE_TO_CONSUME,
2836 QED_CHAIN_MODE_PBL, 2826 QED_CHAIN_MODE_PBL,
2827 QED_CHAIN_CNT_TYPE_U16,
2837 RX_RING_SIZE, 2828 RX_RING_SIZE,
2838 sizeof(union eth_rx_cqe), 2829 sizeof(union eth_rx_cqe),
2839 &rxq->rx_comp_ring); 2830 &rxq->rx_comp_ring);
@@ -2885,9 +2876,9 @@ static int qede_alloc_mem_txq(struct qede_dev *edev,
2885 rc = edev->ops->common->chain_alloc(edev->cdev, 2876 rc = edev->ops->common->chain_alloc(edev->cdev,
2886 QED_CHAIN_USE_TO_CONSUME_PRODUCE, 2877 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
2887 QED_CHAIN_MODE_PBL, 2878 QED_CHAIN_MODE_PBL,
2879 QED_CHAIN_CNT_TYPE_U16,
2888 NUM_TX_BDS_MAX, 2880 NUM_TX_BDS_MAX,
2889 sizeof(*p_virt), 2881 sizeof(*p_virt), &txq->tx_pbl);
2890 &txq->tx_pbl);
2891 if (rc) 2882 if (rc)
2892 goto err; 2883 goto err;
2893 2884
@@ -3578,12 +3569,8 @@ static int qede_open(struct net_device *ndev)
3578 if (rc) 3569 if (rc)
3579 return rc; 3570 return rc;
3580 3571
3581#ifdef CONFIG_QEDE_VXLAN 3572 udp_tunnel_get_rx_info(ndev);
3582 vxlan_get_rx_port(ndev); 3573
3583#endif
3584#ifdef CONFIG_QEDE_GENEVE
3585 geneve_get_rx_port(ndev);
3586#endif
3587 return 0; 3574 return 0;
3588} 3575}
3589 3576
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index caf6ddb7ea76..fd973f4f16c7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1026,10 +1026,8 @@ struct qlcnic_ipaddr {
1026#define QLCNIC_HAS_PHYS_PORT_ID 0x40000 1026#define QLCNIC_HAS_PHYS_PORT_ID 0x40000
1027#define QLCNIC_TSS_RSS 0x80000 1027#define QLCNIC_TSS_RSS 0x80000
1028 1028
1029#ifdef CONFIG_QLCNIC_VXLAN
1030#define QLCNIC_ADD_VXLAN_PORT 0x100000 1029#define QLCNIC_ADD_VXLAN_PORT 0x100000
1031#define QLCNIC_DEL_VXLAN_PORT 0x200000 1030#define QLCNIC_DEL_VXLAN_PORT 0x200000
1032#endif
1033 1031
1034#define QLCNIC_VLAN_FILTERING 0x800000 1032#define QLCNIC_VLAN_FILTERING 0x800000
1035 1033
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index f9640d5ce6ba..bdbcd2b088a0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2159,7 +2159,6 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
2159 struct qlcnic_cmd_args cmd; 2159 struct qlcnic_cmd_args cmd;
2160 u32 mac_low, mac_high; 2160 u32 mac_low, mac_high;
2161 2161
2162 function = 0;
2163 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); 2162 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
2164 if (err) 2163 if (err)
2165 return err; 2164 return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index bf892160dd5f..a496390b8632 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1020,7 +1020,6 @@ static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter,
1020 return 0; 1020 return 0;
1021} 1021}
1022 1022
1023#ifdef CONFIG_QLCNIC_VXLAN
1024#define QLC_83XX_ENCAP_TYPE_VXLAN BIT_1 1023#define QLC_83XX_ENCAP_TYPE_VXLAN BIT_1
1025#define QLC_83XX_MATCH_ENCAP_ID BIT_2 1024#define QLC_83XX_MATCH_ENCAP_ID BIT_2
1026#define QLC_83XX_SET_VXLAN_UDP_DPORT BIT_3 1025#define QLC_83XX_SET_VXLAN_UDP_DPORT BIT_3
@@ -1089,14 +1088,12 @@ static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter,
1089 1088
1090 return ret; 1089 return ret;
1091} 1090}
1092#endif
1093 1091
1094static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter) 1092static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
1095{ 1093{
1096 if (adapter->fhash.fnum) 1094 if (adapter->fhash.fnum)
1097 qlcnic_prune_lb_filters(adapter); 1095 qlcnic_prune_lb_filters(adapter);
1098 1096
1099#ifdef CONFIG_QLCNIC_VXLAN
1100 if (adapter->flags & QLCNIC_ADD_VXLAN_PORT) { 1097 if (adapter->flags & QLCNIC_ADD_VXLAN_PORT) {
1101 if (qlcnic_set_vxlan_port(adapter)) 1098 if (qlcnic_set_vxlan_port(adapter))
1102 return; 1099 return;
@@ -1112,7 +1109,6 @@ static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
1112 adapter->ahw->vxlan_port = 0; 1109 adapter->ahw->vxlan_port = 0;
1113 adapter->flags &= ~QLCNIC_DEL_VXLAN_PORT; 1110 adapter->flags &= ~QLCNIC_DEL_VXLAN_PORT;
1114 } 1111 }
1115#endif
1116} 1112}
1117 1113
1118/** 1114/**
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 1c29105b6c36..3ebef27e0964 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -16,9 +16,7 @@
16#include <linux/aer.h> 16#include <linux/aer.h>
17#include <linux/log2.h> 17#include <linux/log2.h>
18#include <linux/pci.h> 18#include <linux/pci.h>
19#ifdef CONFIG_QLCNIC_VXLAN
20#include <net/vxlan.h> 19#include <net/vxlan.h>
21#endif
22 20
23#include "qlcnic.h" 21#include "qlcnic.h"
24#include "qlcnic_sriov.h" 22#include "qlcnic_sriov.h"
@@ -474,13 +472,15 @@ static int qlcnic_get_phys_port_id(struct net_device *netdev,
474 return 0; 472 return 0;
475} 473}
476 474
477#ifdef CONFIG_QLCNIC_VXLAN
478static void qlcnic_add_vxlan_port(struct net_device *netdev, 475static void qlcnic_add_vxlan_port(struct net_device *netdev,
479 sa_family_t sa_family, __be16 port) 476 struct udp_tunnel_info *ti)
480{ 477{
481 struct qlcnic_adapter *adapter = netdev_priv(netdev); 478 struct qlcnic_adapter *adapter = netdev_priv(netdev);
482 struct qlcnic_hardware_context *ahw = adapter->ahw; 479 struct qlcnic_hardware_context *ahw = adapter->ahw;
483 480
481 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
482 return;
483
484 /* Adapter supports only one VXLAN port. Use very first port 484 /* Adapter supports only one VXLAN port. Use very first port
485 * for enabling offload 485 * for enabling offload
486 */ 486 */
@@ -488,23 +488,26 @@ static void qlcnic_add_vxlan_port(struct net_device *netdev,
488 return; 488 return;
489 if (!ahw->vxlan_port_count) { 489 if (!ahw->vxlan_port_count) {
490 ahw->vxlan_port_count = 1; 490 ahw->vxlan_port_count = 1;
491 ahw->vxlan_port = ntohs(port); 491 ahw->vxlan_port = ntohs(ti->port);
492 adapter->flags |= QLCNIC_ADD_VXLAN_PORT; 492 adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
493 return; 493 return;
494 } 494 }
495 if (ahw->vxlan_port == ntohs(port)) 495 if (ahw->vxlan_port == ntohs(ti->port))
496 ahw->vxlan_port_count++; 496 ahw->vxlan_port_count++;
497 497
498} 498}
499 499
500static void qlcnic_del_vxlan_port(struct net_device *netdev, 500static void qlcnic_del_vxlan_port(struct net_device *netdev,
501 sa_family_t sa_family, __be16 port) 501 struct udp_tunnel_info *ti)
502{ 502{
503 struct qlcnic_adapter *adapter = netdev_priv(netdev); 503 struct qlcnic_adapter *adapter = netdev_priv(netdev);
504 struct qlcnic_hardware_context *ahw = adapter->ahw; 504 struct qlcnic_hardware_context *ahw = adapter->ahw;
505 505
506 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
507 return;
508
506 if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count || 509 if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count ||
507 (ahw->vxlan_port != ntohs(port))) 510 (ahw->vxlan_port != ntohs(ti->port)))
508 return; 511 return;
509 512
510 ahw->vxlan_port_count--; 513 ahw->vxlan_port_count--;
@@ -519,7 +522,6 @@ static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
519 features = vlan_features_check(skb, features); 522 features = vlan_features_check(skb, features);
520 return vxlan_features_check(skb, features); 523 return vxlan_features_check(skb, features);
521} 524}
522#endif
523 525
524static const struct net_device_ops qlcnic_netdev_ops = { 526static const struct net_device_ops qlcnic_netdev_ops = {
525 .ndo_open = qlcnic_open, 527 .ndo_open = qlcnic_open,
@@ -539,11 +541,9 @@ static const struct net_device_ops qlcnic_netdev_ops = {
539 .ndo_fdb_del = qlcnic_fdb_del, 541 .ndo_fdb_del = qlcnic_fdb_del,
540 .ndo_fdb_dump = qlcnic_fdb_dump, 542 .ndo_fdb_dump = qlcnic_fdb_dump,
541 .ndo_get_phys_port_id = qlcnic_get_phys_port_id, 543 .ndo_get_phys_port_id = qlcnic_get_phys_port_id,
542#ifdef CONFIG_QLCNIC_VXLAN 544 .ndo_udp_tunnel_add = qlcnic_add_vxlan_port,
543 .ndo_add_vxlan_port = qlcnic_add_vxlan_port, 545 .ndo_udp_tunnel_del = qlcnic_del_vxlan_port,
544 .ndo_del_vxlan_port = qlcnic_del_vxlan_port,
545 .ndo_features_check = qlcnic_features_check, 546 .ndo_features_check = qlcnic_features_check,
546#endif
547#ifdef CONFIG_NET_POLL_CONTROLLER 547#ifdef CONFIG_NET_POLL_CONTROLLER
548 .ndo_poll_controller = qlcnic_poll_controller, 548 .ndo_poll_controller = qlcnic_poll_controller,
549#endif 549#endif
@@ -2015,10 +2015,8 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
2015 2015
2016 qlcnic_create_sysfs_entries(adapter); 2016 qlcnic_create_sysfs_entries(adapter);
2017 2017
2018#ifdef CONFIG_QLCNIC_VXLAN
2019 if (qlcnic_encap_rx_offload(adapter)) 2018 if (qlcnic_encap_rx_offload(adapter))
2020 vxlan_get_rx_port(netdev); 2019 udp_tunnel_get_rx_info(netdev);
2021#endif
2022 2020
2023 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC; 2021 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
2024 return 0; 2022 return 0;
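
qlcnic keeps supporting only one VXLAN port through the conversion: the first offered port is programmed and later adds of the same port just bump a count, so the offload is torn down only when the last user deletes it. A user-space model of that bookkeeping (the counters are stand-ins for the ahw fields):

```c
#include <stdio.h>
#include <stdint.h>

static uint16_t vxlan_port;
static unsigned int vxlan_port_count;

static void add_vxlan_port(uint16_t port)
{
	if (!vxlan_port_count) {
		vxlan_port_count = 1;
		vxlan_port = port;	/* program offload for this port */
		return;
	}
	if (vxlan_port == port)
		vxlan_port_count++;	/* another user of the same port */
}

static void del_vxlan_port(uint16_t port)
{
	if (!vxlan_port_count || vxlan_port != port)
		return;
	if (!--vxlan_port_count)
		vxlan_port = 0;		/* last user gone: drop offload */
}

int main(void)
{
	add_vxlan_port(4789);
	add_vxlan_port(4789);
	del_vxlan_port(4789);
	printf("port=%u count=%u\n", vxlan_port, vxlan_port_count);
	return 0;
}
```
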
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 6b541e57c96a..7a7a395d0512 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -200,7 +200,6 @@ struct r6040_private {
200 struct mii_bus *mii_bus; 200 struct mii_bus *mii_bus;
201 struct napi_struct napi; 201 struct napi_struct napi;
202 void __iomem *base; 202 void __iomem *base;
203 struct phy_device *phydev;
204 int old_link; 203 int old_link;
205 int old_duplex; 204 int old_duplex;
206}; 205};
@@ -474,7 +473,7 @@ static void r6040_down(struct net_device *dev)
474 iowrite16(adrp[1], ioaddr + MID_0M); 473 iowrite16(adrp[1], ioaddr + MID_0M);
475 iowrite16(adrp[2], ioaddr + MID_0H); 474 iowrite16(adrp[2], ioaddr + MID_0H);
476 475
477 phy_stop(lp->phydev); 476 phy_stop(dev->phydev);
478} 477}
479 478
480static int r6040_close(struct net_device *dev) 479static int r6040_close(struct net_device *dev)
@@ -515,12 +514,10 @@ static int r6040_close(struct net_device *dev)
515 514
516static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 515static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
517{ 516{
518 struct r6040_private *lp = netdev_priv(dev); 517 if (!dev->phydev)
519
520 if (!lp->phydev)
521 return -EINVAL; 518 return -EINVAL;
522 519
523 return phy_mii_ioctl(lp->phydev, rq, cmd); 520 return phy_mii_ioctl(dev->phydev, rq, cmd);
524} 521}
525 522
526static int r6040_rx(struct net_device *dev, int limit) 523static int r6040_rx(struct net_device *dev, int limit)
@@ -732,7 +729,7 @@ static int r6040_up(struct net_device *dev)
732 /* Initialize all MAC registers */ 729 /* Initialize all MAC registers */
733 r6040_init_mac_regs(dev); 730 r6040_init_mac_regs(dev);
734 731
735 phy_start(lp->phydev); 732 phy_start(dev->phydev);
736 733
737 return 0; 734 return 0;
738} 735}
@@ -957,26 +954,12 @@ static void netdev_get_drvinfo(struct net_device *dev,
957 strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info)); 954 strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
958} 955}
959 956
960static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
961{
962 struct r6040_private *rp = netdev_priv(dev);
963
964 return phy_ethtool_gset(rp->phydev, cmd);
965}
966
967static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
968{
969 struct r6040_private *rp = netdev_priv(dev);
970
971 return phy_ethtool_sset(rp->phydev, cmd);
972}
973
974static const struct ethtool_ops netdev_ethtool_ops = { 957static const struct ethtool_ops netdev_ethtool_ops = {
975 .get_drvinfo = netdev_get_drvinfo, 958 .get_drvinfo = netdev_get_drvinfo,
976 .get_settings = netdev_get_settings,
977 .set_settings = netdev_set_settings,
978 .get_link = ethtool_op_get_link, 959 .get_link = ethtool_op_get_link,
979 .get_ts_info = ethtool_op_get_ts_info, 960 .get_ts_info = ethtool_op_get_ts_info,
961 .get_link_ksettings = phy_ethtool_get_link_ksettings,
962 .set_link_ksettings = phy_ethtool_set_link_ksettings,
980}; 963};
981 964
982static const struct net_device_ops r6040_netdev_ops = { 965static const struct net_device_ops r6040_netdev_ops = {
@@ -998,7 +981,7 @@ static const struct net_device_ops r6040_netdev_ops = {
998static void r6040_adjust_link(struct net_device *dev) 981static void r6040_adjust_link(struct net_device *dev)
999{ 982{
1000 struct r6040_private *lp = netdev_priv(dev); 983 struct r6040_private *lp = netdev_priv(dev);
1001 struct phy_device *phydev = lp->phydev; 984 struct phy_device *phydev = dev->phydev;
1002 int status_changed = 0; 985 int status_changed = 0;
1003 void __iomem *ioaddr = lp->base; 986 void __iomem *ioaddr = lp->base;
1004 987
@@ -1057,7 +1040,6 @@ static int r6040_mii_probe(struct net_device *dev)
1057 | SUPPORTED_TP); 1040 | SUPPORTED_TP);
1058 1041
1059 phydev->advertising = phydev->supported; 1042 phydev->advertising = phydev->supported;
1060 lp->phydev = phydev;
1061 lp->old_link = 0; 1043 lp->old_link = 0;
1062 lp->old_duplex = -1; 1044 lp->old_duplex = -1;
1063 1045
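
The r6040 hunks rely on phy_connect() recording the attached PHY in the net_device itself, so the duplicate pointer in the driver's private struct can go and every path reads dev->phydev. A minimal sketch of the pattern, with a hypothetical foo driver and MII bus id:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static void foo_adjust_link(struct net_device *dev)
{
	/* link-change handler; dev->phydev is valid here as well */
}

static int foo_mii_probe(struct net_device *dev)
{
	struct phy_device *phydev;

	/* On success phy_connect() has already stored the PHY in
	 * dev->phydev, so nothing needs caching in netdev_priv(dev).
	 */
	phydev = phy_connect(dev, "foo-mii-0:01", foo_adjust_link,
			     PHY_INTERFACE_MODE_MII);
	return PTR_ERR_OR_ZERO(phydev);
}

static int foo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	if (!dev->phydev)
		return -EINVAL;
	return phy_mii_ioctl(dev->phydev, rq, cmd);
}
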
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 867caf6e7a5a..5349284203ff 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -362,8 +362,6 @@ static void ravb_emac_init(struct net_device *ndev)
362 ravb_write(ndev, 362 ravb_write(ndev,
363 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); 363 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
364 364
365 ravb_write(ndev, 1, MPR);
366
367 /* E-MAC status register clear */ 365 /* E-MAC status register clear */
368 ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR); 366 ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
369 367
@@ -402,7 +400,8 @@ static int ravb_dmac_init(struct net_device *ndev)
402#endif 400#endif
403 401
404 /* Set AVB RX */ 402 /* Set AVB RX */
405 ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR); 403 ravb_write(ndev,
404 RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
406 405
407 /* Set FIFO size */ 406 /* Set FIFO size */
408 ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC); 407 ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
@@ -2111,8 +2110,7 @@ static int ravb_runtime_nop(struct device *dev)
2111} 2110}
2112 2111
2113static const struct dev_pm_ops ravb_dev_pm_ops = { 2112static const struct dev_pm_ops ravb_dev_pm_ops = {
2114 .runtime_suspend = ravb_runtime_nop, 2113 SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
2115 .runtime_resume = ravb_runtime_nop,
2116}; 2114};
2117 2115
2118#define RAVB_PM_OPS (&ravb_dev_pm_ops) 2116#define RAVB_PM_OPS (&ravb_dev_pm_ops)
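
The ravb_dev_pm_ops change swaps the open-coded .runtime_suspend/.runtime_resume initializers for the SET_RUNTIME_PM_OPS() helper from <linux/pm.h>. A minimal sketch of the same pattern:

#include <linux/pm.h>

static int foo_runtime_nop(struct device *dev)
{
	/* Runtime PM is handled elsewhere; nothing to do here. */
	return 0;
}

/* SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) fills in
 * .runtime_suspend/.runtime_resume/.runtime_idle when CONFIG_PM is set
 * and expands to nothing otherwise, so the table needs no #ifdef.
 */
static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_nop, foo_runtime_nop, NULL)
};
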
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
index 45019649bbbd..5cb51b609f02 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -475,7 +475,6 @@ struct sxgbe_priv_data {
475 int rxcsum_insertion; 475 int rxcsum_insertion;
476 spinlock_t stats_lock; /* lock for tx/rx statistics */ 476
477 477
478 struct phy_device *phydev;
479 int oldlink; 478 int oldlink;
480 int speed; 479 int speed;
481 int oldduplex; 480 int oldduplex;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index c0981ae45874..542b67d436df 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -147,7 +147,7 @@ static int sxgbe_get_eee(struct net_device *dev,
147 edata->eee_active = priv->eee_active; 147 edata->eee_active = priv->eee_active;
148 edata->tx_lpi_timer = priv->tx_lpi_timer; 148 edata->tx_lpi_timer = priv->tx_lpi_timer;
149 149
150 return phy_ethtool_get_eee(priv->phydev, edata); 150 return phy_ethtool_get_eee(dev->phydev, edata);
151} 151}
152 152
153static int sxgbe_set_eee(struct net_device *dev, 153static int sxgbe_set_eee(struct net_device *dev,
@@ -172,7 +172,7 @@ static int sxgbe_set_eee(struct net_device *dev,
172 priv->tx_lpi_timer = edata->tx_lpi_timer; 172 priv->tx_lpi_timer = edata->tx_lpi_timer;
173 } 173 }
174 174
175 return phy_ethtool_set_eee(priv->phydev, edata); 175 return phy_ethtool_set_eee(dev->phydev, edata);
176} 176}
177 177
178static void sxgbe_getdrvinfo(struct net_device *dev, 178static void sxgbe_getdrvinfo(struct net_device *dev,
@@ -182,27 +182,6 @@ static void sxgbe_getdrvinfo(struct net_device *dev,
182 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 182 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
183} 183}
184 184
185static int sxgbe_getsettings(struct net_device *dev,
186 struct ethtool_cmd *cmd)
187{
188 struct sxgbe_priv_data *priv = netdev_priv(dev);
189
190 if (priv->phydev)
191 return phy_ethtool_gset(priv->phydev, cmd);
192
193 return -EOPNOTSUPP;
194}
195
196static int sxgbe_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
197{
198 struct sxgbe_priv_data *priv = netdev_priv(dev);
199
200 if (priv->phydev)
201 return phy_ethtool_sset(priv->phydev, cmd);
202
203 return -EOPNOTSUPP;
204}
205
206static u32 sxgbe_getmsglevel(struct net_device *dev) 185static u32 sxgbe_getmsglevel(struct net_device *dev)
207{ 186{
208 struct sxgbe_priv_data *priv = netdev_priv(dev); 187 struct sxgbe_priv_data *priv = netdev_priv(dev);
@@ -255,7 +234,7 @@ static void sxgbe_get_ethtool_stats(struct net_device *dev,
255 char *p; 234 char *p;
256 235
257 if (priv->eee_enabled) { 236 if (priv->eee_enabled) {
258 int val = phy_get_eee_err(priv->phydev); 237 int val = phy_get_eee_err(dev->phydev);
259 238
260 if (val) 239 if (val)
261 priv->xstats.eee_wakeup_error_n = val; 240 priv->xstats.eee_wakeup_error_n = val;
@@ -499,8 +478,6 @@ static int sxgbe_get_regs_len(struct net_device *dev)
499 478
500static const struct ethtool_ops sxgbe_ethtool_ops = { 479static const struct ethtool_ops sxgbe_ethtool_ops = {
501 .get_drvinfo = sxgbe_getdrvinfo, 480 .get_drvinfo = sxgbe_getdrvinfo,
502 .get_settings = sxgbe_getsettings,
503 .set_settings = sxgbe_setsettings,
504 .get_msglevel = sxgbe_getmsglevel, 481 .get_msglevel = sxgbe_getmsglevel,
505 .set_msglevel = sxgbe_setmsglevel, 482 .set_msglevel = sxgbe_setmsglevel,
506 .get_link = ethtool_op_get_link, 483 .get_link = ethtool_op_get_link,
@@ -516,6 +493,8 @@ static const struct ethtool_ops sxgbe_ethtool_ops = {
516 .get_regs_len = sxgbe_get_regs_len, 493 .get_regs_len = sxgbe_get_regs_len,
517 .get_eee = sxgbe_get_eee, 494 .get_eee = sxgbe_get_eee,
518 .set_eee = sxgbe_set_eee, 495 .set_eee = sxgbe_set_eee,
496 .get_link_ksettings = phy_ethtool_get_link_ksettings,
497 .set_link_ksettings = phy_ethtool_set_link_ksettings,
519}; 498};
520 499
521void sxgbe_set_ethtool_ops(struct net_device *netdev) 500void sxgbe_set_ethtool_ops(struct net_device *netdev)
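
With the PHY reachable through dev->phydev, the sxgbe-specific get_settings/set_settings wrappers above collapse into the generic phylib helpers. A sketch of the resulting ethtool_ops for a hypothetical foo driver; besides removing boilerplate, the ksettings API lifts the 32-bit limit on link-mode masks that the legacy struct ethtool_cmd imposed:

#include <linux/ethtool.h>
#include <linux/phy.h>

static const struct ethtool_ops foo_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	/* Generic helpers: both operate on dev->phydev and return
	 * -ENODEV if no PHY is attached, so no per-driver wrapper
	 * or NULL check is needed.
	 */
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
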
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 413ea14ab91f..ea44a2456ce1 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -124,12 +124,13 @@ static void sxgbe_eee_ctrl_timer(unsigned long arg)
124 */ 124 */
125bool sxgbe_eee_init(struct sxgbe_priv_data * const priv) 125bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
126{ 126{
127 struct net_device *ndev = priv->dev;
127 bool ret = false; 128 bool ret = false;
128 129
129 /* MAC core supports the EEE feature. */ 130 /* MAC core supports the EEE feature. */
130 if (priv->hw_cap.eee) { 131 if (priv->hw_cap.eee) {
131 /* Check if the PHY supports EEE */ 132 /* Check if the PHY supports EEE */
132 if (phy_init_eee(priv->phydev, 1)) 133 if (phy_init_eee(ndev->phydev, 1))
133 return false; 134 return false;
134 135
135 priv->eee_active = 1; 136 priv->eee_active = 1;
@@ -152,12 +153,14 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
152 153
153static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv) 154static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
154{ 155{
156 struct net_device *ndev = priv->dev;
157
155 /* When EEE has already been initialised, we have to modify 158
156 * the PLS bit in the LPI ctrl & status reg according to 159
157 * the PHY link status. 160
158 */ 161 */
159 if (priv->eee_enabled) 162 if (priv->eee_enabled)
160 priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link); 163 priv->hw->mac->set_eee_pls(priv->ioaddr, ndev->phydev->link);
161} 164}
162 165
163/** 166/**
@@ -203,7 +206,7 @@ static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
203static void sxgbe_adjust_link(struct net_device *dev) 206static void sxgbe_adjust_link(struct net_device *dev)
204{ 207{
205 struct sxgbe_priv_data *priv = netdev_priv(dev); 208 struct sxgbe_priv_data *priv = netdev_priv(dev);
206 struct phy_device *phydev = priv->phydev; 209 struct phy_device *phydev = dev->phydev;
207 u8 new_state = 0; 210 u8 new_state = 0;
208 u8 speed = 0xff; 211 u8 speed = 0xff;
209 212
@@ -306,9 +309,6 @@ static int sxgbe_init_phy(struct net_device *ndev)
306 netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n", 309 netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
307 __func__, phydev->phy_id, phydev->link); 310 __func__, phydev->phy_id, phydev->link);
308 311
309 /* save phy device in private structure */
310 priv->phydev = phydev;
311
312 return 0; 312 return 0;
313} 313}
314 314
@@ -1173,8 +1173,8 @@ static int sxgbe_open(struct net_device *dev)
1173 priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES); 1173 priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
1174 priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES); 1174 priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);
1175 1175
1176 if (priv->phydev) 1176 if (dev->phydev)
1177 phy_start(priv->phydev); 1177 phy_start(dev->phydev);
1178 1178
1179 /* initialise TX coalesce parameters */ 1179 /* initialise TX coalesce parameters */
1180 sxgbe_tx_init_coalesce(priv); 1180 sxgbe_tx_init_coalesce(priv);
@@ -1194,8 +1194,8 @@ static int sxgbe_open(struct net_device *dev)
1194 1194
1195init_error: 1195init_error:
1196 free_dma_desc_resources(priv); 1196 free_dma_desc_resources(priv);
1197 if (priv->phydev) 1197 if (dev->phydev)
1198 phy_disconnect(priv->phydev); 1198 phy_disconnect(dev->phydev);
1199phy_error: 1199phy_error:
1200 clk_disable_unprepare(priv->sxgbe_clk); 1200 clk_disable_unprepare(priv->sxgbe_clk);
1201 1201
@@ -1216,10 +1216,9 @@ static int sxgbe_release(struct net_device *dev)
1216 del_timer_sync(&priv->eee_ctrl_timer); 1216 del_timer_sync(&priv->eee_ctrl_timer);
1217 1217
1218 /* Stop and disconnect the PHY */ 1218 /* Stop and disconnect the PHY */
1219 if (priv->phydev) { 1219 if (dev->phydev) {
1220 phy_stop(priv->phydev); 1220 phy_stop(dev->phydev);
1221 phy_disconnect(priv->phydev); 1221 phy_disconnect(dev->phydev);
1222 priv->phydev = NULL;
1223 } 1222 }
1224 1223
1225 netif_tx_stop_all_queues(dev); 1224 netif_tx_stop_all_queues(dev);
@@ -1969,7 +1968,6 @@ static void sxgbe_poll_controller(struct net_device *dev)
1969 */ 1968 */
1970static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1969static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1971{ 1970{
1972 struct sxgbe_priv_data *priv = netdev_priv(dev);
1973 int ret = -EOPNOTSUPP; 1971 int ret = -EOPNOTSUPP;
1974 1972
1975 if (!netif_running(dev)) 1973 if (!netif_running(dev))
@@ -1979,9 +1977,9 @@ static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1979 case SIOCGMIIPHY: 1977 case SIOCGMIIPHY:
1980 case SIOCGMIIREG: 1978 case SIOCGMIIREG:
1981 case SIOCSMIIREG: 1979 case SIOCSMIIREG:
1982 if (!priv->phydev) 1980 if (!dev->phydev)
1983 return -EINVAL; 1981 return -EINVAL;
1984 ret = phy_mii_ioctl(priv->phydev, rq, cmd); 1982 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
1985 break; 1983 break;
1986 default: 1984 default:
1987 break; 1985 break;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 1f309127457d..f658fee74f18 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -50,14 +50,34 @@ enum {
50#define HUNT_FILTER_TBL_ROWS 8192 50#define HUNT_FILTER_TBL_ROWS 8192
51 51
52#define EFX_EF10_FILTER_ID_INVALID 0xffff 52#define EFX_EF10_FILTER_ID_INVALID 0xffff
53
54#define EFX_EF10_FILTER_DEV_UC_MAX 32
55#define EFX_EF10_FILTER_DEV_MC_MAX 256
56
57/* VLAN list entry */
58struct efx_ef10_vlan {
59 struct list_head list;
60 u16 vid;
61};
62
63/* Per-VLAN filters information */
64struct efx_ef10_filter_vlan {
65 struct list_head list;
66 u16 vid;
67 u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
68 u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
69 u16 ucdef;
70 u16 bcast;
71 u16 mcdef;
72};
73
53struct efx_ef10_dev_addr { 74struct efx_ef10_dev_addr {
54 u8 addr[ETH_ALEN]; 75 u8 addr[ETH_ALEN];
55 u16 id;
56}; 76};
57 77
58struct efx_ef10_filter_table { 78struct efx_ef10_filter_table {
59/* The RX match field masks supported by this fw & hw, in order of priority */ 79/* The MCDI match masks supported by this fw & hw, in order of priority */
60 enum efx_filter_match_flags rx_match_flags[ 80 u32 rx_match_mcdi_flags[
61 MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM]; 81 MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
62 unsigned int rx_match_count; 82 unsigned int rx_match_count;
63 83
@@ -73,16 +93,16 @@ struct efx_ef10_filter_table {
73 } *entry; 93 } *entry;
74 wait_queue_head_t waitq; 94 wait_queue_head_t waitq;
75/* Shadow of net_device address lists, guarded by mac_lock */ 95/* Shadow of net_device address lists, guarded by mac_lock */
76#define EFX_EF10_FILTER_DEV_UC_MAX 32
77#define EFX_EF10_FILTER_DEV_MC_MAX 256
78 struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX]; 96 struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
79 struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX]; 97 struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
80 int dev_uc_count; 98 int dev_uc_count;
81 int dev_mc_count; 99 int dev_mc_count;
82/* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */ 100 bool uc_promisc;
83 u16 ucdef_id; 101 bool mc_promisc;
84 u16 bcast_id; 102/* Whether in multicast promiscuous mode when last changed */
85 u16 mcdef_id; 103 bool mc_promisc_last;
104 bool vlan_filter;
105 struct list_head vlan_list;
86}; 106};
87 107
88/* An arbitrary search limit for the software hash table */ 108/* An arbitrary search limit for the software hash table */
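
The two hunks above replace the single set of tracked filter IDs with per-VLAN state: each struct efx_ef10_filter_vlan carries one hardware filter ID per address plus the ucdef/bcast/mcdef defaults, each initialised to EFX_EF10_FILTER_ID_INVALID until a filter is actually inserted. A reduced sketch of that bookkeeping, with made-up foo_* names:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>

#define FOO_FILTER_ID_INVALID 0xffff

struct foo_vlan {
	struct list_head list;
	u16 vid;
	u16 uc[32];	/* one HW filter id per tracked unicast address */
};

static struct foo_vlan *foo_vlan_add(struct list_head *vlans, u16 vid)
{
	struct foo_vlan *vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	unsigned int i;

	if (!vlan)
		return NULL;
	vlan->vid = vid;
	for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
		vlan->uc[i] = FOO_FILTER_ID_INVALID; /* nothing inserted yet */
	list_add_tail(&vlan->list, vlans);
	return vlan;
}
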
@@ -90,6 +110,10 @@ struct efx_ef10_filter_table {
90 110
91static void efx_ef10_rx_free_indir_table(struct efx_nic *efx); 111static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
92static void efx_ef10_filter_table_remove(struct efx_nic *efx); 112static void efx_ef10_filter_table_remove(struct efx_nic *efx);
113static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
114static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
115 struct efx_ef10_filter_vlan *vlan);
116static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
93 117
94static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) 118static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
95{ 119{
@@ -275,6 +299,131 @@ static ssize_t efx_ef10_show_primary_flag(struct device *dev,
275 ? 1 : 0); 299 ? 1 : 0);
276} 300}
277 301
302static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
303{
304 struct efx_ef10_nic_data *nic_data = efx->nic_data;
305 struct efx_ef10_vlan *vlan;
306
307 WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));
308
309 list_for_each_entry(vlan, &nic_data->vlan_list, list) {
310 if (vlan->vid == vid)
311 return vlan;
312 }
313
314 return NULL;
315}
316
317static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
318{
319 struct efx_ef10_nic_data *nic_data = efx->nic_data;
320 struct efx_ef10_vlan *vlan;
321 int rc;
322
323 mutex_lock(&nic_data->vlan_lock);
324
325 vlan = efx_ef10_find_vlan(efx, vid);
326 if (vlan) {
327 /* We add VID 0 on init. 8021q adds it on module init
328 * for all interfaces with the VLAN filtering feature.
329 */
330 if (vid == 0)
331 goto done_unlock;
332 netif_warn(efx, drv, efx->net_dev,
333 "VLAN %u already added\n", vid);
334 rc = -EALREADY;
335 goto fail_exist;
336 }
337
338 rc = -ENOMEM;
339 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
340 if (!vlan)
341 goto fail_alloc;
342
343 vlan->vid = vid;
344
345 list_add_tail(&vlan->list, &nic_data->vlan_list);
346
347 if (efx->filter_state) {
348 mutex_lock(&efx->mac_lock);
349 down_write(&efx->filter_sem);
350 rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
351 up_write(&efx->filter_sem);
352 mutex_unlock(&efx->mac_lock);
353 if (rc)
354 goto fail_filter_add_vlan;
355 }
356
357done_unlock:
358 mutex_unlock(&nic_data->vlan_lock);
359 return 0;
360
361fail_filter_add_vlan:
362 list_del(&vlan->list);
363 kfree(vlan);
364fail_alloc:
365fail_exist:
366 mutex_unlock(&nic_data->vlan_lock);
367 return rc;
368}
369
370static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
371 struct efx_ef10_vlan *vlan)
372{
373 struct efx_ef10_nic_data *nic_data = efx->nic_data;
374
375 WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));
376
377 if (efx->filter_state) {
378 down_write(&efx->filter_sem);
379 efx_ef10_filter_del_vlan(efx, vlan->vid);
380 up_write(&efx->filter_sem);
381 }
382
383 list_del(&vlan->list);
384 kfree(vlan);
385}
386
387static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
388{
389 struct efx_ef10_nic_data *nic_data = efx->nic_data;
390 struct efx_ef10_vlan *vlan;
391 int rc = 0;
392
393 /* 8021q removes VID 0 on module unload for all interfaces
394 * with the VLAN filtering feature. We need to keep it to receive
395 * untagged traffic.
396 */
397 if (vid == 0)
398 return 0;
399
400 mutex_lock(&nic_data->vlan_lock);
401
402 vlan = efx_ef10_find_vlan(efx, vid);
403 if (!vlan) {
404 netif_err(efx, drv, efx->net_dev,
405 "VLAN %u to be deleted not found\n", vid);
406 rc = -ENOENT;
407 } else {
408 efx_ef10_del_vlan_internal(efx, vlan);
409 }
410
411 mutex_unlock(&nic_data->vlan_lock);
412
413 return rc;
414}
415
416static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
417{
418 struct efx_ef10_nic_data *nic_data = efx->nic_data;
419 struct efx_ef10_vlan *vlan, *next_vlan;
420
421 mutex_lock(&nic_data->vlan_lock);
422 list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
423 efx_ef10_del_vlan_internal(efx, vlan);
424 mutex_unlock(&nic_data->vlan_lock);
425}
426
278static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag, 427static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
279 NULL); 428 NULL);
280static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL); 429static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);
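
efx_ef10_cleanup_vlans() above frees entries while it walks the list, which is only safe with the _safe iterator that pre-fetches the next node. The idiom in isolation, reusing the hypothetical foo_vlan from the previous sketch:

static void foo_cleanup_vlans(struct list_head *vlans)
{
	struct foo_vlan *vlan, *next;

	/* 'next' is saved before each body runs, so deleting and
	 * freeing the current entry cannot corrupt the traversal.
	 */
	list_for_each_entry_safe(vlan, next, vlans, list) {
		list_del(&vlan->list);
		kfree(vlan);
	}
}
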
@@ -421,8 +570,30 @@ static int efx_ef10_probe(struct efx_nic *efx)
421#endif 570#endif
422 ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr); 571 ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);
423 572
573 INIT_LIST_HEAD(&nic_data->vlan_list);
574 mutex_init(&nic_data->vlan_lock);
575
576 /* Add unspecified VID to support VLAN filtering being disabled */
577 rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
578 if (rc)
579 goto fail_add_vid_unspec;
580
581 /* If VLAN filtering is enabled, we need VID 0 to get untagged
582 * traffic. It is added automatically if the 8021q module is loaded,
583 * but we can't rely on that since the module may not be loaded.
584 */
585 rc = efx_ef10_add_vlan(efx, 0);
586 if (rc)
587 goto fail_add_vid_0;
588
424 return 0; 589 return 0;
425 590
591fail_add_vid_0:
592 efx_ef10_cleanup_vlans(efx);
593fail_add_vid_unspec:
594 mutex_destroy(&nic_data->vlan_lock);
595 efx_ptp_remove(efx);
596 efx_mcdi_mon_remove(efx);
426fail5: 597fail5:
427 device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); 598 device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
428fail4: 599fail4:
@@ -676,6 +847,9 @@ static void efx_ef10_remove(struct efx_nic *efx)
676 } 847 }
677#endif 848#endif
678 849
850 efx_ef10_cleanup_vlans(efx);
851 mutex_destroy(&nic_data->vlan_lock);
852
679 efx_ptp_remove(efx); 853 efx_ptp_remove(efx);
680 854
681 efx_mcdi_mon_remove(efx); 855 efx_mcdi_mon_remove(efx);
@@ -704,6 +878,45 @@ static int efx_ef10_probe_pf(struct efx_nic *efx)
704 return efx_ef10_probe(efx); 878 return efx_ef10_probe(efx);
705} 879}
706 880
881int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
882 u32 *port_flags, u32 *vadaptor_flags,
883 unsigned int *vlan_tags)
884{
885 struct efx_ef10_nic_data *nic_data = efx->nic_data;
886 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN);
887 MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN);
888 size_t outlen;
889 int rc;
890
891 if (nic_data->datapath_caps &
892 (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) {
893 MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID,
894 port_id);
895
896 rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf),
897 outbuf, sizeof(outbuf), &outlen);
898 if (rc)
899 return rc;
900
901 if (outlen < sizeof(outbuf)) {
902 rc = -EIO;
903 return rc;
904 }
905 }
906
907 if (port_flags)
908 *port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS);
909 if (vadaptor_flags)
910 *vadaptor_flags =
911 MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS);
912 if (vlan_tags)
913 *vlan_tags =
914 MCDI_DWORD(outbuf,
915 VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS);
916
917 return 0;
918}
919
707int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id) 920int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
708{ 921{
709 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN); 922 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
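
The new efx_ef10_vadaptor_query() follows the driver's standard MCDI call shape: declare request and response buffers, encode the request DWORDs, issue efx_mcdi_rpc(), validate the reply length, then decode. That shape in miniature; MC_CMD_FOO and its fields are hypothetical stand-ins for a real firmware command:

static int foo_mcdi_query(struct efx_nic *efx, u32 arg, u32 *result)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FOO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FOO_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FOO_IN_ARG, arg);		/* encode request */
	rc = efx_mcdi_rpc(efx, MC_CMD_FOO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_FOO_OUT_LEN)		/* short reply */
		return -EIO;
	*result = MCDI_DWORD(outbuf, FOO_OUT_RESULT);	/* decode reply */
	return 0;
}
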
@@ -3040,15 +3253,55 @@ static int efx_ef10_filter_push(struct efx_nic *efx,
3040 return rc; 3253 return rc;
3041} 3254}
3042 3255
3043static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table, 3256static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
3044 enum efx_filter_match_flags match_flags)
3045{ 3257{
3258 unsigned int match_flags = spec->match_flags;
3259 u32 mcdi_flags = 0;
3260
3261 if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
3262 match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
3263 mcdi_flags |=
3264 is_multicast_ether_addr(spec->loc_mac) ?
3265 (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) :
3266 (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN);
3267 }
3268
3269#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field) { \
3270 unsigned int old_match_flags = match_flags; \
3271 match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \
3272 if (match_flags != old_match_flags) \
3273 mcdi_flags |= \
3274 (1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
3275 mcdi_field ## _LBN); \
3276 }
3277 MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP);
3278 MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP);
3279 MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC);
3280 MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT);
3281 MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC);
3282 MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT);
3283 MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE);
3284 MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN);
3285 MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN);
3286 MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO);
3287#undef MAP_FILTER_TO_MCDI_FLAG
3288
3289 /* Did we map them all? */
3290 WARN_ON_ONCE(match_flags);
3291
3292 return mcdi_flags;
3293}
3294
3295static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
3296 const struct efx_filter_spec *spec)
3297{
3298 u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
3046 unsigned int match_pri; 3299 unsigned int match_pri;
3047 3300
3048 for (match_pri = 0; 3301 for (match_pri = 0;
3049 match_pri < table->rx_match_count; 3302 match_pri < table->rx_match_count;
3050 match_pri++) 3303 match_pri++)
3051 if (table->rx_match_flags[match_pri] == match_flags) 3304 if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
3052 return match_pri; 3305 return match_pri;
3053 3306
3054 return -EPROTONOSUPPORT; 3307 return -EPROTONOSUPPORT;
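
efx_ef10_filter_pri() now compares specs against the table in the firmware's own namespace: efx_ef10_filter_mcdi_flags_from_spec() clears each generic match flag as it is translated to an MCDI bit, then warns if anything was left untranslated. A reduced sketch of that clear-as-you-translate pattern, with all flag values hypothetical:

#include <linux/kernel.h>

#define FOO_MATCH_LOC_MAC	0x0001	/* generic match flags */
#define FOO_MATCH_OUTER_VID	0x0002
#define FOO_MCDI_DST_MAC_LBN	4	/* firmware bit positions */
#define FOO_MCDI_OUTER_VLAN_LBN	9

static u32 foo_gen_to_mcdi(unsigned int match_flags)
{
	u32 mcdi_flags = 0;

	if (match_flags & FOO_MATCH_LOC_MAC) {
		match_flags &= ~FOO_MATCH_LOC_MAC;
		mcdi_flags |= 1 << FOO_MCDI_DST_MAC_LBN;
	}
	if (match_flags & FOO_MATCH_OUTER_VID) {
		match_flags &= ~FOO_MATCH_OUTER_VID;
		mcdi_flags |= 1 << FOO_MCDI_OUTER_VLAN_LBN;
	}
	/* Anything still set had no firmware equivalent. */
	WARN_ON_ONCE(match_flags);
	return mcdi_flags;
}
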
@@ -3074,7 +3327,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
3074 EFX_FILTER_FLAG_RX) 3327 EFX_FILTER_FLAG_RX)
3075 return -EINVAL; 3328 return -EINVAL;
3076 3329
3077 rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags); 3330 rc = efx_ef10_filter_pri(table, spec);
3078 if (rc < 0) 3331 if (rc < 0)
3079 return rc; 3332 return rc;
3080 match_pri = rc; 3333 match_pri = rc;
@@ -3313,7 +3566,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
3313 spec = efx_ef10_filter_entry_spec(table, filter_idx); 3566 spec = efx_ef10_filter_entry_spec(table, filter_idx);
3314 if (!spec || 3567 if (!spec ||
3315 (!by_index && 3568 (!by_index &&
3316 efx_ef10_filter_rx_match_pri(table, spec->match_flags) != 3569 efx_ef10_filter_pri(table, spec) !=
3317 filter_id / HUNT_FILTER_TBL_ROWS)) { 3570 filter_id / HUNT_FILTER_TBL_ROWS)) {
3318 rc = -ENOENT; 3571 rc = -ENOENT;
3319 goto out_unlock; 3572 goto out_unlock;
@@ -3394,12 +3647,13 @@ static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id)
3394 return filter_id % HUNT_FILTER_TBL_ROWS; 3647 return filter_id % HUNT_FILTER_TBL_ROWS;
3395} 3648}
3396 3649
3397static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx, 3650static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
3398 enum efx_filter_priority priority, 3651 enum efx_filter_priority priority,
3399 u32 filter_id) 3652 u32 filter_id)
3400{ 3653{
3401 return efx_ef10_filter_remove_internal(efx, 1U << priority, 3654 if (filter_id == EFX_EF10_FILTER_ID_INVALID)
3402 filter_id, true); 3655 return;
3656 efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, true);
3403} 3657}
3404 3658
3405static int efx_ef10_filter_get_safe(struct efx_nic *efx, 3659static int efx_ef10_filter_get_safe(struct efx_nic *efx,
@@ -3414,7 +3668,7 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
3414 spin_lock_bh(&efx->filter_lock); 3668 spin_lock_bh(&efx->filter_lock);
3415 saved_spec = efx_ef10_filter_entry_spec(table, filter_idx); 3669 saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
3416 if (saved_spec && saved_spec->priority == priority && 3670 if (saved_spec && saved_spec->priority == priority &&
3417 efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) == 3671 efx_ef10_filter_pri(table, saved_spec) ==
3418 filter_id / HUNT_FILTER_TBL_ROWS) { 3672 filter_id / HUNT_FILTER_TBL_ROWS) {
3419 *spec = *saved_spec; 3673 *spec = *saved_spec;
3420 rc = 0; 3674 rc = 0;
@@ -3487,8 +3741,7 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
3487 count = -EMSGSIZE; 3741 count = -EMSGSIZE;
3488 break; 3742 break;
3489 } 3743 }
3490 buf[count++] = (efx_ef10_filter_rx_match_pri( 3744 buf[count++] = (efx_ef10_filter_pri(table, spec) *
3491 table, spec->match_flags) *
3492 HUNT_FILTER_TBL_ROWS + 3745 HUNT_FILTER_TBL_ROWS +
3493 filter_idx); 3746 filter_idx);
3494 } 3747 }
@@ -3724,15 +3977,58 @@ static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
3724 return match_flags; 3977 return match_flags;
3725} 3978}
3726 3979
3980static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx)
3981{
3982 struct efx_ef10_filter_table *table = efx->filter_state;
3983 struct efx_ef10_filter_vlan *vlan, *next_vlan;
3984
3985 /* See comment in efx_ef10_filter_table_remove() */
3986 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
3987 return;
3988
3989 if (!table)
3990 return;
3991
3992 list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
3993 efx_ef10_filter_del_vlan_internal(efx, vlan);
3994}
3995
3996static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
3997 enum efx_filter_match_flags match_flags)
3998{
3999 unsigned int match_pri;
4000 int mf;
4001
4002 for (match_pri = 0;
4003 match_pri < table->rx_match_count;
4004 match_pri++) {
4005 mf = efx_ef10_filter_match_flags_from_mcdi(
4006 table->rx_match_mcdi_flags[match_pri]);
4007 if (mf == match_flags)
4008 return true;
4009 }
4010
4011 return false;
4012}
4013
3727static int efx_ef10_filter_table_probe(struct efx_nic *efx) 4014static int efx_ef10_filter_table_probe(struct efx_nic *efx)
3728{ 4015{
3729 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN); 4016 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
3730 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); 4017 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
4018 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4019 struct net_device *net_dev = efx->net_dev;
3731 unsigned int pd_match_pri, pd_match_count; 4020 unsigned int pd_match_pri, pd_match_count;
3732 struct efx_ef10_filter_table *table; 4021 struct efx_ef10_filter_table *table;
4022 struct efx_ef10_vlan *vlan;
3733 size_t outlen; 4023 size_t outlen;
3734 int rc; 4024 int rc;
3735 4025
4026 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4027 return -EINVAL;
4028
4029 if (efx->filter_state) /* already probed */
4030 return 0;
4031
3736 table = kzalloc(sizeof(*table), GFP_KERNEL); 4032 table = kzalloc(sizeof(*table), GFP_KERNEL);
3737 if (!table) 4033 if (!table)
3738 return -ENOMEM; 4034 return -ENOMEM;
@@ -3765,24 +4061,48 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
3765 "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n", 4061 "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
3766 __func__, mcdi_flags, pd_match_pri, 4062 __func__, mcdi_flags, pd_match_pri,
3767 rc, table->rx_match_count); 4063 rc, table->rx_match_count);
3768 table->rx_match_flags[table->rx_match_count++] = rc; 4064 table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
4065 table->rx_match_count++;
3769 } 4066 }
3770 } 4067 }
3771 4068
4069 if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
4070 !(efx_ef10_filter_match_supported(table,
4071 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
4072 efx_ef10_filter_match_supported(table,
4073 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
4074 netif_info(efx, probe, net_dev,
4075 "VLAN filters are not supported in this firmware variant\n");
4076 net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4077 efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4078 net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4079 }
4080
3772 table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry)); 4081 table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
3773 if (!table->entry) { 4082 if (!table->entry) {
3774 rc = -ENOMEM; 4083 rc = -ENOMEM;
3775 goto fail; 4084 goto fail;
3776 } 4085 }
3777 4086
3778 table->ucdef_id = EFX_EF10_FILTER_ID_INVALID; 4087 table->mc_promisc_last = false;
3779 table->bcast_id = EFX_EF10_FILTER_ID_INVALID; 4088 table->vlan_filter =
3780 table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; 4089 !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
4090 INIT_LIST_HEAD(&table->vlan_list);
3781 4091
3782 efx->filter_state = table; 4092 efx->filter_state = table;
3783 init_waitqueue_head(&table->waitq); 4093 init_waitqueue_head(&table->waitq);
4094
4095 list_for_each_entry(vlan, &nic_data->vlan_list, list) {
4096 rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
4097 if (rc)
4098 goto fail_add_vlan;
4099 }
4100
3784 return 0; 4101 return 0;
3785 4102
4103fail_add_vlan:
4104 efx_ef10_filter_cleanup_vlans(efx);
4105 efx->filter_state = NULL;
3786fail: 4106fail:
3787 kfree(table); 4107 kfree(table);
3788 return rc; 4108 return rc;
@@ -3843,7 +4163,6 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
3843 nic_data->must_restore_filters = false; 4163 nic_data->must_restore_filters = false;
3844} 4164}
3845 4165
3846/* Caller must hold efx->filter_sem for write */
3847static void efx_ef10_filter_table_remove(struct efx_nic *efx) 4166static void efx_ef10_filter_table_remove(struct efx_nic *efx)
3848{ 4167{
3849 struct efx_ef10_filter_table *table = efx->filter_state; 4168 struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3852,7 +4171,17 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
3852 unsigned int filter_idx; 4171 unsigned int filter_idx;
3853 int rc; 4172 int rc;
3854 4173
4174 efx_ef10_filter_cleanup_vlans(efx);
3855 efx->filter_state = NULL; 4175 efx->filter_state = NULL;
4176 /* If we were called without locking, then it's not safe to free
4177 * the table as others might be using it. So we just WARN, leak
4178 * the memory, and potentially get an inconsistent filter table
4179 * state.
4180 * This should never actually happen.
4181 */
4182 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4183 return;
4184
3856 if (!table) 4185 if (!table)
3857 return; 4186 return;
3858 4187
@@ -3880,37 +4209,54 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
3880 kfree(table); 4209 kfree(table);
3881} 4210}
3882 4211
3883#define EFX_EF10_FILTER_DO_MARK_OLD(id) \ 4212static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
3884 if (id != EFX_EF10_FILTER_ID_INVALID) { \
3885 filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \
3886 if (!table->entry[filter_idx].spec) \
3887 netif_dbg(efx, drv, efx->net_dev, \
3888 "%s: marked null spec old %04x:%04x\n", \
3889 __func__, id, filter_idx); \
3890 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;\
3891 }
3892static void efx_ef10_filter_mark_old(struct efx_nic *efx)
3893{ 4213{
3894 struct efx_ef10_filter_table *table = efx->filter_state; 4214 struct efx_ef10_filter_table *table = efx->filter_state;
3895 unsigned int filter_idx, i; 4215 unsigned int filter_idx;
3896 4216
3897 if (!table) 4217 if (*id != EFX_EF10_FILTER_ID_INVALID) {
3898 return; 4218 filter_idx = efx_ef10_filter_get_unsafe_id(efx, *id);
4219 if (!table->entry[filter_idx].spec)
4220 netif_dbg(efx, drv, efx->net_dev,
4221 "marked null spec old %04x:%04x\n", *id,
4222 filter_idx);
4223 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
4224 *id = EFX_EF10_FILTER_ID_INVALID;
4225 }
4226}
4227
4228/* Mark old per-VLAN filters that may need to be removed */
4229static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx,
4230 struct efx_ef10_filter_vlan *vlan)
4231{
4232 struct efx_ef10_filter_table *table = efx->filter_state;
4233 unsigned int i;
3899 4234
3900 /* Mark old filters that may need to be removed */
3901 spin_lock_bh(&efx->filter_lock);
3902 for (i = 0; i < table->dev_uc_count; i++) 4235 for (i = 0; i < table->dev_uc_count; i++)
3903 EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id); 4236 efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]);
3904 for (i = 0; i < table->dev_mc_count; i++) 4237 for (i = 0; i < table->dev_mc_count; i++)
3905 EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id); 4238 efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]);
3906 EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id); 4239 efx_ef10_filter_mark_one_old(efx, &vlan->ucdef);
3907 EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id); 4240 efx_ef10_filter_mark_one_old(efx, &vlan->bcast);
3908 EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id); 4241 efx_ef10_filter_mark_one_old(efx, &vlan->mcdef);
4242}
4243
4244/* Mark old filters that may need to be removed.
4245 * Caller must hold efx->filter_sem for read if race against
4246 * efx_ef10_filter_table_remove() is possible
4247 */
4248static void efx_ef10_filter_mark_old(struct efx_nic *efx)
4249{
4250 struct efx_ef10_filter_table *table = efx->filter_state;
4251 struct efx_ef10_filter_vlan *vlan;
4252
4253 spin_lock_bh(&efx->filter_lock);
4254 list_for_each_entry(vlan, &table->vlan_list, list)
4255 _efx_ef10_filter_vlan_mark_old(efx, vlan);
3909 spin_unlock_bh(&efx->filter_lock); 4256 spin_unlock_bh(&efx->filter_lock);
3910} 4257}
3911#undef EFX_EF10_FILTER_DO_MARK_OLD
3912 4258
3913static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc) 4259static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
3914{ 4260{
3915 struct efx_ef10_filter_table *table = efx->filter_state; 4261 struct efx_ef10_filter_table *table = efx->filter_state;
3916 struct net_device *net_dev = efx->net_dev; 4262 struct net_device *net_dev = efx->net_dev;
@@ -3918,45 +4264,38 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc)
3918 int addr_count; 4264 int addr_count;
3919 unsigned int i; 4265 unsigned int i;
3920 4266
3921 table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
3922 addr_count = netdev_uc_count(net_dev); 4267 addr_count = netdev_uc_count(net_dev);
3923 if (net_dev->flags & IFF_PROMISC) 4268 table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
3924 *promisc = true;
3925 table->dev_uc_count = 1 + addr_count; 4269 table->dev_uc_count = 1 + addr_count;
3926 ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); 4270 ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
3927 i = 1; 4271 i = 1;
3928 netdev_for_each_uc_addr(uc, net_dev) { 4272 netdev_for_each_uc_addr(uc, net_dev) {
3929 if (i >= EFX_EF10_FILTER_DEV_UC_MAX) { 4273 if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
3930 *promisc = true; 4274 table->uc_promisc = true;
3931 break; 4275 break;
3932 } 4276 }
3933 ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); 4277 ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
3934 table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
3935 i++; 4278 i++;
3936 } 4279 }
3937} 4280}
3938 4281
3939static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc) 4282static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
3940{ 4283{
3941 struct efx_ef10_filter_table *table = efx->filter_state; 4284 struct efx_ef10_filter_table *table = efx->filter_state;
3942 struct net_device *net_dev = efx->net_dev; 4285 struct net_device *net_dev = efx->net_dev;
3943 struct netdev_hw_addr *mc; 4286 struct netdev_hw_addr *mc;
3944 unsigned int i, addr_count; 4287 unsigned int i, addr_count;
3945 4288
3946 table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; 4289 table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
3947 table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
3948 if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
3949 *promisc = true;
3950 4290
3951 addr_count = netdev_mc_count(net_dev); 4291 addr_count = netdev_mc_count(net_dev);
3952 i = 0; 4292 i = 0;
3953 netdev_for_each_mc_addr(mc, net_dev) { 4293 netdev_for_each_mc_addr(mc, net_dev) {
3954 if (i >= EFX_EF10_FILTER_DEV_MC_MAX) { 4294 if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
3955 *promisc = true; 4295 table->mc_promisc = true;
3956 break; 4296 break;
3957 } 4297 }
3958 ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); 4298 ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
3959 table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
3960 i++; 4299 i++;
3961 } 4300 }
3962 4301
@@ -3964,7 +4303,8 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc)
3964} 4303}
3965 4304
3966static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, 4305static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
3967 bool multicast, bool rollback) 4306 struct efx_ef10_filter_vlan *vlan,
4307 bool multicast, bool rollback)
3968{ 4308{
3969 struct efx_ef10_filter_table *table = efx->filter_state; 4309 struct efx_ef10_filter_table *table = efx->filter_state;
3970 struct efx_ef10_dev_addr *addr_list; 4310 struct efx_ef10_dev_addr *addr_list;
@@ -3973,14 +4313,17 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
3973 u8 baddr[ETH_ALEN]; 4313 u8 baddr[ETH_ALEN];
3974 unsigned int i, j; 4314 unsigned int i, j;
3975 int addr_count; 4315 int addr_count;
4316 u16 *ids;
3976 int rc; 4317 int rc;
3977 4318
3978 if (multicast) { 4319 if (multicast) {
3979 addr_list = table->dev_mc_list; 4320 addr_list = table->dev_mc_list;
3980 addr_count = table->dev_mc_count; 4321 addr_count = table->dev_mc_count;
4322 ids = vlan->mc;
3981 } else { 4323 } else {
3982 addr_list = table->dev_uc_list; 4324 addr_list = table->dev_uc_list;
3983 addr_count = table->dev_uc_count; 4325 addr_count = table->dev_uc_count;
4326 ids = vlan->uc;
3984 } 4327 }
3985 4328
3986 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; 4329 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
@@ -3988,8 +4331,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
3988 /* Insert/renew filters */ 4331 /* Insert/renew filters */
3989 for (i = 0; i < addr_count; i++) { 4332 for (i = 0; i < addr_count; i++) {
3990 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 4333 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
3991 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, 4334 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
3992 addr_list[i].addr);
3993 rc = efx_ef10_filter_insert(efx, &spec, true); 4335 rc = efx_ef10_filter_insert(efx, &spec, true);
3994 if (rc < 0) { 4336 if (rc < 0) {
3995 if (rollback) { 4337 if (rollback) {
@@ -3998,12 +4340,10 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
3998 rc); 4340 rc);
3999 /* Fall back to promiscuous */ 4341 /* Fall back to promiscuous */
4000 for (j = 0; j < i; j++) { 4342 for (j = 0; j < i; j++) {
4001 if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
4002 continue;
4003 efx_ef10_filter_remove_unsafe( 4343 efx_ef10_filter_remove_unsafe(
4004 efx, EFX_FILTER_PRI_AUTO, 4344 efx, EFX_FILTER_PRI_AUTO,
4005 addr_list[j].id); 4345 ids[j]);
4006 addr_list[j].id = EFX_EF10_FILTER_ID_INVALID; 4346 ids[j] = EFX_EF10_FILTER_ID_INVALID;
4007 } 4347 }
4008 return rc; 4348 return rc;
4009 } else { 4349 } else {
@@ -4011,40 +4351,40 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
4011 rc = EFX_EF10_FILTER_ID_INVALID; 4351 rc = EFX_EF10_FILTER_ID_INVALID;
4012 } 4352 }
4013 } 4353 }
4014 addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc); 4354 ids[i] = efx_ef10_filter_get_unsafe_id(efx, rc);
4015 } 4355 }
4016 4356
4017 if (multicast && rollback) { 4357 if (multicast && rollback) {
4018 /* Also need an Ethernet broadcast filter */ 4358 /* Also need an Ethernet broadcast filter */
4019 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 4359 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
4020 eth_broadcast_addr(baddr); 4360 eth_broadcast_addr(baddr);
4021 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); 4361 efx_filter_set_eth_local(&spec, vlan->vid, baddr);
4022 rc = efx_ef10_filter_insert(efx, &spec, true); 4362 rc = efx_ef10_filter_insert(efx, &spec, true);
4023 if (rc < 0) { 4363 if (rc < 0) {
4024 netif_warn(efx, drv, efx->net_dev, 4364 netif_warn(efx, drv, efx->net_dev,
4025 "Broadcast filter insert failed rc=%d\n", rc); 4365 "Broadcast filter insert failed rc=%d\n", rc);
4026 /* Fall back to promiscuous */ 4366 /* Fall back to promiscuous */
4027 for (j = 0; j < i; j++) { 4367 for (j = 0; j < i; j++) {
4028 if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
4029 continue;
4030 efx_ef10_filter_remove_unsafe( 4368 efx_ef10_filter_remove_unsafe(
4031 efx, EFX_FILTER_PRI_AUTO, 4369 efx, EFX_FILTER_PRI_AUTO,
4032 addr_list[j].id); 4370 ids[j]);
4033 addr_list[j].id = EFX_EF10_FILTER_ID_INVALID; 4371 ids[j] = EFX_EF10_FILTER_ID_INVALID;
4034 } 4372 }
4035 return rc; 4373 return rc;
4036 } else { 4374 } else {
4037 table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc); 4375 EFX_WARN_ON_PARANOID(vlan->bcast !=
4376 EFX_EF10_FILTER_ID_INVALID);
4377 vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc);
4038 } 4378 }
4039 } 4379 }
4040 4380
4041 return 0; 4381 return 0;
4042} 4382}
4043 4383
4044static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, 4384static int efx_ef10_filter_insert_def(struct efx_nic *efx,
4045 bool rollback) 4385 struct efx_ef10_filter_vlan *vlan,
4386 bool multicast, bool rollback)
4046{ 4387{
4047 struct efx_ef10_filter_table *table = efx->filter_state;
4048 struct efx_ef10_nic_data *nic_data = efx->nic_data; 4388 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4049 enum efx_filter_flags filter_flags; 4389 enum efx_filter_flags filter_flags;
4050 struct efx_filter_spec spec; 4390 struct efx_filter_spec spec;
@@ -4060,6 +4400,9 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
4060 else 4400 else
4061 efx_filter_set_uc_def(&spec); 4401 efx_filter_set_uc_def(&spec);
4062 4402
4403 if (vlan->vid != EFX_FILTER_VID_UNSPEC)
4404 efx_filter_set_eth_local(&spec, vlan->vid, NULL);
4405
4063 rc = efx_ef10_filter_insert(efx, &spec, true); 4406 rc = efx_ef10_filter_insert(efx, &spec, true);
4064 if (rc < 0) { 4407 if (rc < 0) {
4065 netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING, 4408 netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING,
@@ -4067,14 +4410,14 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
4067 "%scast mismatch filter insert failed rc=%d\n", 4410 "%scast mismatch filter insert failed rc=%d\n",
4068 multicast ? "Multi" : "Uni", rc); 4411 multicast ? "Multi" : "Uni", rc);
4069 } else if (multicast) { 4412 } else if (multicast) {
4070 table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc); 4413 EFX_WARN_ON_PARANOID(vlan->mcdef != EFX_EF10_FILTER_ID_INVALID);
4414 vlan->mcdef = efx_ef10_filter_get_unsafe_id(efx, rc);
4071 if (!nic_data->workaround_26807) { 4415 if (!nic_data->workaround_26807) {
4072 /* Also need an Ethernet broadcast filter */ 4416 /* Also need an Ethernet broadcast filter */
4073 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 4417 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
4074 filter_flags, 0); 4418 filter_flags, 0);
4075 eth_broadcast_addr(baddr); 4419 eth_broadcast_addr(baddr);
4076 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, 4420 efx_filter_set_eth_local(&spec, vlan->vid, baddr);
4077 baddr);
4078 rc = efx_ef10_filter_insert(efx, &spec, true); 4421 rc = efx_ef10_filter_insert(efx, &spec, true);
4079 if (rc < 0) { 4422 if (rc < 0) {
4080 netif_warn(efx, drv, efx->net_dev, 4423 netif_warn(efx, drv, efx->net_dev,
@@ -4084,17 +4427,20 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
4084 /* Roll back the mc_def filter */ 4427 /* Roll back the mc_def filter */
4085 efx_ef10_filter_remove_unsafe( 4428 efx_ef10_filter_remove_unsafe(
4086 efx, EFX_FILTER_PRI_AUTO, 4429 efx, EFX_FILTER_PRI_AUTO,
4087 table->mcdef_id); 4430 vlan->mcdef);
4088 table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; 4431 vlan->mcdef = EFX_EF10_FILTER_ID_INVALID;
4089 return rc; 4432 return rc;
4090 } 4433 }
4091 } else { 4434 } else {
4092 table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc); 4435 EFX_WARN_ON_PARANOID(vlan->bcast !=
4436 EFX_EF10_FILTER_ID_INVALID);
4437 vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc);
4093 } 4438 }
4094 } 4439 }
4095 rc = 0; 4440 rc = 0;
4096 } else { 4441 } else {
4097 table->ucdef_id = rc; 4442 EFX_WARN_ON_PARANOID(vlan->ucdef != EFX_EF10_FILTER_ID_INVALID);
4443 vlan->ucdef = rc;
4098 rc = 0; 4444 rc = 0;
4099 } 4445 }
4100 return rc; 4446 return rc;
@@ -4203,64 +4549,55 @@ reset_nic:
4203/* Caller must hold efx->filter_sem for read if race against 4549/* Caller must hold efx->filter_sem for read if race against
4204 * efx_ef10_filter_table_remove() is possible 4550 * efx_ef10_filter_table_remove() is possible
4205 */ 4551 */
4206static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) 4552static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
4553 struct efx_ef10_filter_vlan *vlan)
4207{ 4554{
4208 struct efx_ef10_filter_table *table = efx->filter_state; 4555 struct efx_ef10_filter_table *table = efx->filter_state;
4209 struct efx_ef10_nic_data *nic_data = efx->nic_data; 4556 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4210 struct net_device *net_dev = efx->net_dev;
4211 bool uc_promisc = false, mc_promisc = false;
4212 4557
4213 if (!efx_dev_registered(efx)) 4558 /* Do not install unspecified VID if VLAN filtering is enabled.
4214 return; 4559 * Do not install all specified VIDs if VLAN filtering is disabled.
4215
4216 if (!table)
4217 return;
4218
4219 efx_ef10_filter_mark_old(efx);
4220
4221 /* Copy/convert the address lists; add the primary station
4222 * address and broadcast address
4223 */ 4560 */
4224 netif_addr_lock_bh(net_dev); 4561 if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
4225 efx_ef10_filter_uc_addr_list(efx, &uc_promisc); 4562 return;
4226 efx_ef10_filter_mc_addr_list(efx, &mc_promisc);
4227 netif_addr_unlock_bh(net_dev);
4228 4563
4229 /* Insert/renew unicast filters */ 4564 /* Insert/renew unicast filters */
4230 if (uc_promisc) { 4565 if (table->uc_promisc) {
4231 efx_ef10_filter_insert_def(efx, false, false); 4566 efx_ef10_filter_insert_def(efx, vlan, false, false);
4232 efx_ef10_filter_insert_addr_list(efx, false, false); 4567 efx_ef10_filter_insert_addr_list(efx, vlan, false, false);
4233 } else { 4568 } else {
4234 /* If any of the filters failed to insert, fall back to 4569 /* If any of the filters failed to insert, fall back to
4235 * promiscuous mode - add in the uc_def filter. But keep 4570 * promiscuous mode - add in the uc_def filter. But keep
4236 * our individual unicast filters. 4571 * our individual unicast filters.
4237 */ 4572 */
4238 if (efx_ef10_filter_insert_addr_list(efx, false, false)) 4573 if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false))
4239 efx_ef10_filter_insert_def(efx, false, false); 4574 efx_ef10_filter_insert_def(efx, vlan, false, false);
4240 } 4575 }
4241 4576
4242 /* Insert/renew multicast filters */ 4577 /* Insert/renew multicast filters */
4243 /* If changing promiscuous state with cascaded multicast filters, remove 4578 /* If changing promiscuous state with cascaded multicast filters, remove
4244 * old filters first, so that packets are dropped rather than duplicated 4579 * old filters first, so that packets are dropped rather than duplicated
4245 */ 4580 */
4246 if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc) 4581 if (nic_data->workaround_26807 &&
4582 table->mc_promisc_last != table->mc_promisc)
4247 efx_ef10_filter_remove_old(efx); 4583 efx_ef10_filter_remove_old(efx);
4248 if (mc_promisc) { 4584 if (table->mc_promisc) {
4249 if (nic_data->workaround_26807) { 4585 if (nic_data->workaround_26807) {
4250 /* If we failed to insert promiscuous filters, rollback 4586 /* If we failed to insert promiscuous filters, rollback
4251 * and fall back to individual multicast filters 4587 * and fall back to individual multicast filters
4252 */ 4588 */
4253 if (efx_ef10_filter_insert_def(efx, true, true)) { 4589 if (efx_ef10_filter_insert_def(efx, vlan, true, true)) {
4254 /* Changing promisc state, so remove old filters */ 4590 /* Changing promisc state, so remove old filters */
4255 efx_ef10_filter_remove_old(efx); 4591 efx_ef10_filter_remove_old(efx);
4256 efx_ef10_filter_insert_addr_list(efx, true, false); 4592 efx_ef10_filter_insert_addr_list(efx, vlan,
4593 true, false);
4257 } 4594 }
4258 } else { 4595 } else {
4259 /* If we failed to insert promiscuous filters, don't 4596 /* If we failed to insert promiscuous filters, don't
4260 * rollback. Regardless, also insert the mc_list 4597 * rollback. Regardless, also insert the mc_list
4261 */ 4598 */
4262 efx_ef10_filter_insert_def(efx, true, false); 4599 efx_ef10_filter_insert_def(efx, vlan, true, false);
4263 efx_ef10_filter_insert_addr_list(efx, true, false); 4600 efx_ef10_filter_insert_addr_list(efx, vlan, true, false);
4264 } 4601 }
4265 } else { 4602 } else {
4266 /* If any filters failed to insert, rollback and fall back to 4603 /* If any filters failed to insert, rollback and fall back to
@@ -4268,17 +4605,153 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
4268 * that fails, roll back again and insert as many of our 4605 * that fails, roll back again and insert as many of our
4269 * individual multicast filters as we can. 4606 * individual multicast filters as we can.
4270 */ 4607 */
4271 if (efx_ef10_filter_insert_addr_list(efx, true, true)) { 4608 if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) {
4272 /* Changing promisc state, so remove old filters */ 4609 /* Changing promisc state, so remove old filters */
4273 if (nic_data->workaround_26807) 4610 if (nic_data->workaround_26807)
4274 efx_ef10_filter_remove_old(efx); 4611 efx_ef10_filter_remove_old(efx);
4275 if (efx_ef10_filter_insert_def(efx, true, true)) 4612 if (efx_ef10_filter_insert_def(efx, vlan, true, true))
4276 efx_ef10_filter_insert_addr_list(efx, true, false); 4613 efx_ef10_filter_insert_addr_list(efx, vlan,
4614 true, false);
4277 } 4615 }
4278 } 4616 }
4617}
4618
4619/* Caller must hold efx->filter_sem for read if race against
4620 * efx_ef10_filter_table_remove() is possible
4621 */
4622static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
4623{
4624 struct efx_ef10_filter_table *table = efx->filter_state;
4625 struct net_device *net_dev = efx->net_dev;
4626 struct efx_ef10_filter_vlan *vlan;
4627 bool vlan_filter;
4628
4629 if (!efx_dev_registered(efx))
4630 return;
4631
4632 if (!table)
4633 return;
4634
4635 efx_ef10_filter_mark_old(efx);
4636
4637 /* Copy/convert the address lists; add the primary station
4638 * address and broadcast address
4639 */
4640 netif_addr_lock_bh(net_dev);
4641 efx_ef10_filter_uc_addr_list(efx);
4642 efx_ef10_filter_mc_addr_list(efx);
4643 netif_addr_unlock_bh(net_dev);
4644
4645 /* If the VLAN filtering state has changed, all old filters must
4646 * really be removed. Do it up front to avoid conflicts between
4647 * untagged unicast and VLAN 0 tagged filters.
4648 */
4649 vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
4650 if (table->vlan_filter != vlan_filter) {
4651 table->vlan_filter = vlan_filter;
4652 efx_ef10_filter_remove_old(efx);
4653 }
4654
4655 list_for_each_entry(vlan, &table->vlan_list, list)
4656 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
4279 4657
4280 efx_ef10_filter_remove_old(efx); 4658 efx_ef10_filter_remove_old(efx);
4281 efx->mc_promisc = mc_promisc; 4659 table->mc_promisc_last = table->mc_promisc;
4660}
4661
4662static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid)
4663{
4664 struct efx_ef10_filter_table *table = efx->filter_state;
4665 struct efx_ef10_filter_vlan *vlan;
4666
4667 WARN_ON(!rwsem_is_locked(&efx->filter_sem));
4668
4669 list_for_each_entry(vlan, &table->vlan_list, list) {
4670 if (vlan->vid == vid)
4671 return vlan;
4672 }
4673
4674 return NULL;
4675}
4676
4677static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid)
4678{
4679 struct efx_ef10_filter_table *table = efx->filter_state;
4680 struct efx_ef10_filter_vlan *vlan;
4681 unsigned int i;
4682
4683 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4684 return -EINVAL;
4685
4686 vlan = efx_ef10_filter_find_vlan(efx, vid);
4687 if (WARN_ON(vlan)) {
4688 netif_err(efx, drv, efx->net_dev,
4689 "VLAN %u already added\n", vid);
4690 return -EALREADY;
4691 }
4692
4693 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
4694 if (!vlan)
4695 return -ENOMEM;
4696
4697 vlan->vid = vid;
4698
4699 for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
4700 vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
4701 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
4702 vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
4703 vlan->ucdef = EFX_EF10_FILTER_ID_INVALID;
4704 vlan->bcast = EFX_EF10_FILTER_ID_INVALID;
4705 vlan->mcdef = EFX_EF10_FILTER_ID_INVALID;
4706
4707 list_add_tail(&vlan->list, &table->vlan_list);
4708
4709 if (efx_dev_registered(efx))
4710 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
4711
4712 return 0;
4713}
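
Note that every filter ID in the new vlan structure starts out as EFX_EF10_FILTER_ID_INVALID, which lets teardown be unconditional: efx_ef10_filter_del_vlan_internal() below attempts removal of every slot, on the (assumed here) basis that efx_ef10_filter_remove_unsafe() treats the invalid ID as a harmless no-op, so slots that never received a filter need no special casing.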
4714
4715static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
4716 struct efx_ef10_filter_vlan *vlan)
4717{
4718 unsigned int i;
4719
4720 /* See comment in efx_ef10_filter_table_remove() */
4721 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4722 return;
4723
4724 list_del(&vlan->list);
4725
4726 for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
4727 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
4728 vlan->uc[i]);
4729 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
4730 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
4731 vlan->mc[i]);
4732 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->ucdef);
4733 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->bcast);
4734 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->mcdef);
4735
4736 kfree(vlan);
4737}
4738
4739static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid)
4740{
4741 struct efx_ef10_filter_vlan *vlan;
4742
4743 /* See comment in efx_ef10_filter_table_remove() */
4744 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4745 return;
4746
4747 vlan = efx_ef10_filter_find_vlan(efx, vid);
4748 if (!vlan) {
4749 netif_err(efx, drv, efx->net_dev,
4750 "VLAN %u not found in filter state\n", vid);
4751 return;
4752 }
4753
4754 efx_ef10_filter_del_vlan_internal(efx, vlan);
4282} 4755}
4283 4756
4284static int efx_ef10_set_mac_address(struct efx_nic *efx) 4757static int efx_ef10_set_mac_address(struct efx_nic *efx)
@@ -4290,6 +4763,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
4290 4763
4291 efx_device_detach_sync(efx); 4764 efx_device_detach_sync(efx);
4292 efx_net_stop(efx->net_dev); 4765 efx_net_stop(efx->net_dev);
4766
4767 mutex_lock(&efx->mac_lock);
4293 down_write(&efx->filter_sem); 4768 down_write(&efx->filter_sem);
4294 efx_ef10_filter_table_remove(efx); 4769 efx_ef10_filter_table_remove(efx);
4295 4770
@@ -4302,6 +4777,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
4302 4777
4303 efx_ef10_filter_table_probe(efx); 4778 efx_ef10_filter_table_probe(efx);
4304 up_write(&efx->filter_sem); 4779 up_write(&efx->filter_sem);
4780 mutex_unlock(&efx->mac_lock);
4781
4305 if (was_enabled) 4782 if (was_enabled)
4306 efx_net_open(efx->net_dev); 4783 efx_net_open(efx->net_dev);
4307 netif_device_attach(efx->net_dev); 4784 netif_device_attach(efx->net_dev);
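
The two hunks above also establish a consistent lock order around filter table replacement: mac_lock is always taken outside filter_sem. A minimal sketch of the pattern (error handling elided):

    mutex_lock(&efx->mac_lock);          /* 1: serialise MAC/feature updates  */
    down_write(&efx->filter_sem);        /* 2: exclude all filter table users */
    efx_ef10_filter_table_remove(efx);
    /* ... update the vadaptor / MAC address ... */
    efx_ef10_filter_table_probe(efx);
    up_write(&efx->filter_sem);          /* release in reverse order */
    mutex_unlock(&efx->mac_lock);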
@@ -4703,6 +5180,29 @@ static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
4703 } 5180 }
4704} 5181}
4705 5182
5183static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid)
5184{
5185 if (proto != htons(ETH_P_8021Q))
5186 return -EINVAL;
5187
5188 return efx_ef10_add_vlan(efx, vid);
5189}
5190
5191static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid)
5192{
5193 if (proto != htons(ETH_P_8021Q))
5194 return -EINVAL;
5195
5196 return efx_ef10_del_vlan(efx, vid);
5197}
5198
5199#define EF10_OFFLOAD_FEATURES \
5200 (NETIF_F_IP_CSUM | \
5201 NETIF_F_HW_VLAN_CTAG_FILTER | \
5202 NETIF_F_IPV6_CSUM | \
5203 NETIF_F_RXHASH | \
5204 NETIF_F_NTUPLE)
5205
4706const struct efx_nic_type efx_hunt_a0_vf_nic_type = { 5206const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
4707 .is_vf = true, 5207 .is_vf = true,
4708 .mem_bar = EFX_MEM_VF_BAR, 5208 .mem_bar = EFX_MEM_VF_BAR,
@@ -4780,6 +5280,8 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
4780#endif 5280#endif
4781 .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf, 5281 .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
4782 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf, 5282 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
5283 .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
5284 .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
4783#ifdef CONFIG_SFC_SRIOV 5285#ifdef CONFIG_SFC_SRIOV
4784 .vswitching_probe = efx_ef10_vswitching_probe_vf, 5286 .vswitching_probe = efx_ef10_vswitching_probe_vf,
4785 .vswitching_restore = efx_ef10_vswitching_restore_vf, 5287 .vswitching_restore = efx_ef10_vswitching_restore_vf,
@@ -4798,8 +5300,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
4798 .always_rx_scatter = true, 5300 .always_rx_scatter = true,
4799 .max_interrupt_mode = EFX_INT_MODE_MSIX, 5301 .max_interrupt_mode = EFX_INT_MODE_MSIX,
4800 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, 5302 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
4801 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 5303 .offload_features = EF10_OFFLOAD_FEATURES,
4802 NETIF_F_RXHASH | NETIF_F_NTUPLE),
4803 .mcdi_max_ver = 2, 5304 .mcdi_max_ver = 2,
4804 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, 5305 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
4805 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 5306 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
@@ -4891,6 +5392,8 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
4891 .ptp_write_host_time = efx_ef10_ptp_write_host_time, 5392 .ptp_write_host_time = efx_ef10_ptp_write_host_time,
4892 .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, 5393 .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
4893 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, 5394 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
5395 .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
5396 .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
4894#ifdef CONFIG_SFC_SRIOV 5397#ifdef CONFIG_SFC_SRIOV
4895 .sriov_configure = efx_ef10_sriov_configure, 5398 .sriov_configure = efx_ef10_sriov_configure,
4896 .sriov_init = efx_ef10_sriov_init, 5399 .sriov_init = efx_ef10_sriov_init,
@@ -4919,8 +5422,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
4919 .always_rx_scatter = true, 5422 .always_rx_scatter = true,
4920 .max_interrupt_mode = EFX_INT_MODE_MSIX, 5423 .max_interrupt_mode = EFX_INT_MODE_MSIX,
4921 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, 5424 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
4922 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 5425 .offload_features = EF10_OFFLOAD_FEATURES,
4923 NETIF_F_RXHASH | NETIF_F_NTUPLE),
4924 .mcdi_max_ver = 2, 5426 .mcdi_max_ver = 2,
4925 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, 5427 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
4926 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 5428 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 3c17f274e802..a949b9d27329 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -232,6 +232,35 @@ fail:
232 return rc; 232 return rc;
233} 233}
234 234
235static int efx_ef10_vadaptor_alloc_set_features(struct efx_nic *efx)
236{
237 struct efx_ef10_nic_data *nic_data = efx->nic_data;
238 u32 port_flags;
239 int rc;
240
241 rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
242 if (rc)
243 goto fail_vadaptor_alloc;
244
245 rc = efx_ef10_vadaptor_query(efx, nic_data->vport_id,
246 &port_flags, NULL, NULL);
247 if (rc)
248 goto fail_vadaptor_query;
249
250 if (port_flags &
251 (1 << MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN))
252 efx->fixed_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
253 else
254 efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
255
256 return 0;
257
258fail_vadaptor_query:
259 efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
260fail_vadaptor_alloc:
261 return rc;
262}
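
This helper is where a vPort property becomes a fixed netdev feature. The end-to-end effect, as read from this patch together with the efx.c hunks later in the series (an outline, not a verbatim call chain):

    /* efx_ef10_vadaptor_alloc_set_features():
     *     VLAN_RESTRICT flag set on the vPort
     *         -> efx->fixed_features |= NETIF_F_HW_VLAN_CTAG_FILTER
     * efx_start_datapath():
     *     net_dev->hw_features &= ~efx->fixed_features;  fixed bits not toggleable
     *     net_dev->features   |=  efx->fixed_features;   fixed bits forced on
     */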
263
235/* On top of the default firmware vswitch setup, create a VEB vswitch and 264/* On top of the default firmware vswitch setup, create a VEB vswitch and
236 * expansion vport for use by this function. 265 * expansion vport for use by this function.
237 */ 266 */
@@ -243,7 +272,7 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
243 272
244 if (pci_sriov_get_totalvfs(efx->pci_dev) <= 0) { 273 if (pci_sriov_get_totalvfs(efx->pci_dev) <= 0) {
245 /* vswitch not needed as we have no VFs */ 274 /* vswitch not needed as we have no VFs */
246 efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); 275 efx_ef10_vadaptor_alloc_set_features(efx);
247 return 0; 276 return 0;
248 } 277 }
249 278
@@ -263,7 +292,7 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
263 goto fail3; 292 goto fail3;
264 ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr); 293 ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr);
265 294
266 rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); 295 rc = efx_ef10_vadaptor_alloc_set_features(efx);
267 if (rc) 296 if (rc)
268 goto fail4; 297 goto fail4;
269 298
@@ -282,9 +311,7 @@ fail1:
282 311
283int efx_ef10_vswitching_probe_vf(struct efx_nic *efx) 312int efx_ef10_vswitching_probe_vf(struct efx_nic *efx)
284{ 313{
285 struct efx_ef10_nic_data *nic_data = efx->nic_data; 314 return efx_ef10_vadaptor_alloc_set_features(efx);
286
287 return efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
288} 315}
289 316
290int efx_ef10_vswitching_restore_pf(struct efx_nic *efx) 317int efx_ef10_vswitching_restore_pf(struct efx_nic *efx)
@@ -554,6 +581,7 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
554 efx_device_detach_sync(vf->efx); 581 efx_device_detach_sync(vf->efx);
555 efx_net_stop(vf->efx->net_dev); 582 efx_net_stop(vf->efx->net_dev);
556 583
584 mutex_lock(&vf->efx->mac_lock);
557 down_write(&vf->efx->filter_sem); 585 down_write(&vf->efx->filter_sem);
558 vf->efx->type->filter_table_remove(vf->efx); 586 vf->efx->type->filter_table_remove(vf->efx);
559 587
@@ -630,6 +658,7 @@ restore_filters:
630 goto reset_nic_up_write; 658 goto reset_nic_up_write;
631 659
632 up_write(&vf->efx->filter_sem); 660 up_write(&vf->efx->filter_sem);
661 mutex_unlock(&vf->efx->mac_lock);
633 662
634 up_write(&vf->efx->filter_sem); 663 up_write(&vf->efx->filter_sem);
635 664
@@ -642,9 +671,10 @@ restore_filters:
642 return rc; 671 return rc;
643 672
644reset_nic_up_write: 673reset_nic_up_write:
645 if (vf->efx) 674 if (vf->efx) {
646 up_write(&vf->efx->filter_sem); 675 up_write(&vf->efx->filter_sem);
647 676 mutex_unlock(&vf->efx->mac_lock);
677 }
648reset_nic: 678reset_nic:
649 if (vf->efx) { 679 if (vf->efx) {
650 netif_err(efx, drv, efx->net_dev, 680 netif_err(efx, drv, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
index 6d25b92cb45e..9ceb7ef0a210 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.h
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -70,6 +70,9 @@ int efx_ef10_vport_add_mac(struct efx_nic *efx,
70int efx_ef10_vport_del_mac(struct efx_nic *efx, 70int efx_ef10_vport_del_mac(struct efx_nic *efx,
71 unsigned int port_id, u8 *mac); 71 unsigned int port_id, u8 *mac);
72int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id); 72int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id);
73int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
74 u32 *port_flags, u32 *vadaptor_flags,
75 unsigned int *vlan_tags);
73int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id); 76int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id);
74 77
75#endif /* EF10_SRIOV_H */ 78#endif /* EF10_SRIOV_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 097f363f1630..14b821b1c880 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -600,6 +600,7 @@ fail:
600 */ 600 */
601static void efx_start_datapath(struct efx_nic *efx) 601static void efx_start_datapath(struct efx_nic *efx)
602{ 602{
603 netdev_features_t old_features = efx->net_dev->features;
603 bool old_rx_scatter = efx->rx_scatter; 604 bool old_rx_scatter = efx->rx_scatter;
604 struct efx_tx_queue *tx_queue; 605 struct efx_tx_queue *tx_queue;
605 struct efx_rx_queue *rx_queue; 606 struct efx_rx_queue *rx_queue;
@@ -644,6 +645,15 @@ static void efx_start_datapath(struct efx_nic *efx)
644 efx->rx_dma_len, efx->rx_page_buf_step, 645 efx->rx_dma_len, efx->rx_page_buf_step,
645 efx->rx_bufs_per_page, efx->rx_pages_per_batch); 646 efx->rx_bufs_per_page, efx->rx_pages_per_batch);
646 647
 648	 /* Restore previously fixed features to hw_features, and remove
 649	  * any features which have now become fixed
650 */
651 efx->net_dev->hw_features |= efx->net_dev->features;
652 efx->net_dev->hw_features &= ~efx->fixed_features;
653 efx->net_dev->features |= efx->fixed_features;
654 if (efx->net_dev->features != old_features)
655 netdev_features_change(efx->net_dev);
656
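
A worked example of the three statements above, assuming for illustration that efx->fixed_features contains only NETIF_F_HW_VLAN_CTAG_FILTER and that the device previously offered SG and TSO:

    /* before:  features = SG|TSO          hw_features = SG|TSO             */
    hw_features |= features;          /* SG|TSO: previously fixed bits reappear */
    hw_features &= ~fixed_features;   /* SG|TSO: CTAG_FILTER cannot be toggled  */
    features    |= fixed_features;    /* SG|TSO|CTAG_FILTER: forced on          */
    /* features changed, so netdev_features_change() notifies listeners    */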
647 /* RX filters may also have scatter-enabled flags */ 657 /* RX filters may also have scatter-enabled flags */
648 if (efx->rx_scatter != old_rx_scatter) 658 if (efx->rx_scatter != old_rx_scatter)
649 efx->type->filter_update_rx_scatter(efx); 659 efx->type->filter_update_rx_scatter(efx);
@@ -1719,6 +1729,7 @@ static int efx_probe_filters(struct efx_nic *efx)
1719 1729
1720 spin_lock_init(&efx->filter_lock); 1730 spin_lock_init(&efx->filter_lock);
1721 init_rwsem(&efx->filter_sem); 1731 init_rwsem(&efx->filter_sem);
1732 mutex_lock(&efx->mac_lock);
1722 down_write(&efx->filter_sem); 1733 down_write(&efx->filter_sem);
1723 rc = efx->type->filter_table_probe(efx); 1734 rc = efx->type->filter_table_probe(efx);
1724 if (rc) 1735 if (rc)
@@ -1757,6 +1768,7 @@ static int efx_probe_filters(struct efx_nic *efx)
1757#endif 1768#endif
1758out_unlock: 1769out_unlock:
1759 up_write(&efx->filter_sem); 1770 up_write(&efx->filter_sem);
1771 mutex_unlock(&efx->mac_lock);
1760 return rc; 1772 return rc;
1761} 1773}
1762 1774
@@ -2312,14 +2324,46 @@ static void efx_set_rx_mode(struct net_device *net_dev)
2312static int efx_set_features(struct net_device *net_dev, netdev_features_t data) 2324static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
2313{ 2325{
2314 struct efx_nic *efx = netdev_priv(net_dev); 2326 struct efx_nic *efx = netdev_priv(net_dev);
2327 int rc;
2315 2328
2316 /* If disabling RX n-tuple filtering, clear existing filters */ 2329 /* If disabling RX n-tuple filtering, clear existing filters */
2317 if (net_dev->features & ~data & NETIF_F_NTUPLE) 2330 if (net_dev->features & ~data & NETIF_F_NTUPLE) {
2318 return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); 2331 rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
2332 if (rc)
2333 return rc;
2334 }
2335
2336 /* If Rx VLAN filter is changed, update filters via mac_reconfigure */
2337 if ((net_dev->features ^ data) & NETIF_F_HW_VLAN_CTAG_FILTER) {
2338 /* efx_set_rx_mode() will schedule MAC work to update filters
 2339		 * when the new features are finally set in net_dev.
2340 */
2341 efx_set_rx_mode(net_dev);
2342 }
2319 2343
2320 return 0; 2344 return 0;
2321} 2345}
2322 2346
2347static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
2348{
2349 struct efx_nic *efx = netdev_priv(net_dev);
2350
2351 if (efx->type->vlan_rx_add_vid)
2352 return efx->type->vlan_rx_add_vid(efx, proto, vid);
2353 else
2354 return -EOPNOTSUPP;
2355}
2356
2357static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
2358{
2359 struct efx_nic *efx = netdev_priv(net_dev);
2360
2361 if (efx->type->vlan_rx_kill_vid)
2362 return efx->type->vlan_rx_kill_vid(efx, proto, vid);
2363 else
2364 return -EOPNOTSUPP;
2365}
2366
2323static const struct net_device_ops efx_netdev_ops = { 2367static const struct net_device_ops efx_netdev_ops = {
2324 .ndo_open = efx_net_open, 2368 .ndo_open = efx_net_open,
2325 .ndo_stop = efx_net_stop, 2369 .ndo_stop = efx_net_stop,
@@ -2332,6 +2376,8 @@ static const struct net_device_ops efx_netdev_ops = {
2332 .ndo_set_mac_address = efx_set_mac_address, 2376 .ndo_set_mac_address = efx_set_mac_address,
2333 .ndo_set_rx_mode = efx_set_rx_mode, 2377 .ndo_set_rx_mode = efx_set_rx_mode,
2334 .ndo_set_features = efx_set_features, 2378 .ndo_set_features = efx_set_features,
2379 .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
2380 .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
2335#ifdef CONFIG_SFC_SRIOV 2381#ifdef CONFIG_SFC_SRIOV
2336 .ndo_set_vf_mac = efx_sriov_set_vf_mac, 2382 .ndo_set_vf_mac = efx_sriov_set_vf_mac,
2337 .ndo_set_vf_vlan = efx_sriov_set_vf_vlan, 2383 .ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
@@ -3147,17 +3193,25 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
3147 return -ENOMEM; 3193 return -ENOMEM;
3148 efx = netdev_priv(net_dev); 3194 efx = netdev_priv(net_dev);
3149 efx->type = (const struct efx_nic_type *) entry->driver_data; 3195 efx->type = (const struct efx_nic_type *) entry->driver_data;
3196 efx->fixed_features |= NETIF_F_HIGHDMA;
3150 net_dev->features |= (efx->type->offload_features | NETIF_F_SG | 3197 net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
3151 NETIF_F_HIGHDMA | NETIF_F_TSO | 3198 NETIF_F_TSO | NETIF_F_RXCSUM);
3152 NETIF_F_RXCSUM);
3153 if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) 3199 if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
3154 net_dev->features |= NETIF_F_TSO6; 3200 net_dev->features |= NETIF_F_TSO6;
3155 /* Mask for features that also apply to VLAN devices */ 3201 /* Mask for features that also apply to VLAN devices */
3156 net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG | 3202 net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
3157 NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | 3203 NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
3158 NETIF_F_RXCSUM); 3204 NETIF_F_RXCSUM);
3159 /* All offloads can be toggled */ 3205
3160 net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA; 3206 net_dev->hw_features = net_dev->features & ~efx->fixed_features;
3207
3208 /* Disable VLAN filtering by default. It may be enforced if
3209 * the feature is fixed (i.e. VLAN filters are required to
3210 * receive VLAN tagged packets due to vPort restrictions).
3211 */
3212 net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
3213 net_dev->features |= efx->fixed_features;
3214
3161 pci_set_drvdata(pci_dev, efx); 3215 pci_set_drvdata(pci_dev, efx);
3162 SET_NETDEV_DEV(net_dev, &pci_dev->dev); 3216 SET_NETDEV_DEV(net_dev, &pci_dev->dev);
3163 rc = efx_init_struct(efx, pci_dev, net_dev); 3217 rc = efx_init_struct(efx, pci_dev, net_dev);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 5e3f93f04e62..c3ae739e9c7a 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -274,4 +274,13 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
274 netif_tx_unlock_bh(dev); 274 netif_tx_unlock_bh(dev);
275} 275}
276 276
277static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
278{
279 if (WARN_ON(down_read_trylock(sem))) {
280 up_read(sem);
281 return false;
282 }
283 return true;
284}
285
277#endif /* EFX_EFX_H */ 286#endif /* EFX_EFX_H */
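
The new assertion helper works by contradiction: if down_read_trylock() succeeds, no writer can be holding the semaphore, so the expected write lock is missing. A minimal usage sketch (the function name is illustrative):

    static void example_filter_update(struct efx_nic *efx)
    {
            /* WARNs and returns false when filter_sem is not held for write */
            if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
                    return;
            /* ... safe to modify the filter table here ... */
    }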
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 4cc772164a79..c9a5b003caaf 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -273,6 +273,9 @@
273 * have already installed filters. See the comment at 273 * have already installed filters. See the comment at
274 * MC_CMD_WORKAROUND_BUG26807. */ 274 * MC_CMD_WORKAROUND_BUG26807. */
275#define MC_CMD_ERR_FILTERS_PRESENT 0x1014 275#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
 276/* The clock whose frequency you've attempted to set
277 * doesn't exist on this NIC */
278#define MC_CMD_ERR_NO_CLOCK 0x1015
276 279
277#define MC_CMD_ERR_CODE_OFST 0 280#define MC_CMD_ERR_CODE_OFST 0
278 281
@@ -292,9 +295,11 @@
292/* Point to the copycode entry point. */ 295/* Point to the copycode entry point. */
293#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4) 296#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
294#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4) 297#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
298#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4)
295/* Points to the recovery mode entry point. */ 299/* Points to the recovery mode entry point. */
296#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4) 300#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
297#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4) 301#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
302#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4)
298 303
299/* The command set exported by the boot ROM (MCDI v0) */ 304/* The command set exported by the boot ROM (MCDI v0) */
300#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \ 305#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
@@ -686,6 +691,12 @@
686#define FCDI_EVENT_CODE_PTP_STATUS 0x9 691#define FCDI_EVENT_CODE_PTP_STATUS 0x9
687/* enum: Port id config to map MC-FC port idx */ 692/* enum: Port id config to map MC-FC port idx */
688#define FCDI_EVENT_CODE_PORT_CONFIG 0xa 693#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
694/* enum: Boot result or error code */
695#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
696#define FCDI_EVENT_REBOOT_SRC_LBN 36
697#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
698#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
699#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
689#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0 700#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
690#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0 701#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
691#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32 702#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
@@ -717,6 +728,11 @@
717#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0 728#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
718#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0 729#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
719#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32 730#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
731#define FCDI_EVENT_BOOT_RESULT_OFST 0
732/* Enum values, see field(s): */
733/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
734#define FCDI_EVENT_BOOT_RESULT_LBN 0
735#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
720 736
721/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events 737/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
 722 * to the MC. Note that this structure is overlaid over a normal FCDI event 738
@@ -1649,15 +1665,30 @@
1649 1665
1650/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */ 1666/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
1651#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16 1667#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
1652/* Uncorrected error on transmit timestamps in NIC clock format */ 1668/* Uncorrected error on PTP transmit timestamps in NIC clock format */
1653#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0 1669#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
1654/* Uncorrected error on receive timestamps in NIC clock format */ 1670/* Uncorrected error on PTP receive timestamps in NIC clock format */
1655#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4 1671#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
1656/* Uncorrected error on PPS output in NIC clock format */ 1672/* Uncorrected error on PPS output in NIC clock format */
1657#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8 1673#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
1658/* Uncorrected error on PPS input in NIC clock format */ 1674/* Uncorrected error on PPS input in NIC clock format */
1659#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12 1675#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
1660 1676
1677/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
1678#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
1679/* Uncorrected error on PTP transmit timestamps in NIC clock format */
1680#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
1681/* Uncorrected error on PTP receive timestamps in NIC clock format */
1682#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
1683/* Uncorrected error on PPS output in NIC clock format */
1684#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
1685/* Uncorrected error on PPS input in NIC clock format */
1686#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
1687/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
1688#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
1689/* Uncorrected error on non-PTP receive timestamps in NIC clock format */
1690#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
1691
1661/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */ 1692/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
1662#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4 1693#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
1663/* Results of testing */ 1694/* Results of testing */
@@ -2158,8 +2189,12 @@
2158 2189
2159/* MC_CMD_DRV_ATTACH_IN msgrequest */ 2190/* MC_CMD_DRV_ATTACH_IN msgrequest */
2160#define MC_CMD_DRV_ATTACH_IN_LEN 12 2191#define MC_CMD_DRV_ATTACH_IN_LEN 12
2161/* new state (0=detached, 1=attached) to set if UPDATE=1 */ 2192/* new state to set if UPDATE=1 */
2162#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0 2193#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
2194#define MC_CMD_DRV_ATTACH_LBN 0
2195#define MC_CMD_DRV_ATTACH_WIDTH 1
2196#define MC_CMD_DRV_PREBOOT_LBN 1
2197#define MC_CMD_DRV_PREBOOT_WIDTH 1
2163/* 1 to set new state, or 0 to just report the existing state */ 2198/* 1 to set new state, or 0 to just report the existing state */
2164#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4 2199#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
2165/* preferred datapath firmware (for Huntington; ignored for Siena) */ 2200/* preferred datapath firmware (for Huntington; ignored for Siena) */
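
Throughout this header a field inside a dword is described by an _LBN (lowest bit number) and _WIDTH pair, as with the DRV_ATTACH and DRV_PREBOOT bits just added. An illustrative extractor for such fields (not part of the patch; the driver itself uses its MCDI_* accessor macros):

    static inline u32 mcdi_field(u32 dword, unsigned int lbn, unsigned int width)
    {
            return (dword >> lbn) & ((1U << width) - 1);  /* assumes width < 32 */
    }

    /* e.g.: attached = mcdi_field(old_state, MC_CMD_DRV_ATTACH_LBN,
     *                             MC_CMD_DRV_ATTACH_WIDTH);
     */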
@@ -2181,12 +2216,12 @@
2181 2216
2182/* MC_CMD_DRV_ATTACH_OUT msgresponse */ 2217/* MC_CMD_DRV_ATTACH_OUT msgresponse */
2183#define MC_CMD_DRV_ATTACH_OUT_LEN 4 2218#define MC_CMD_DRV_ATTACH_OUT_LEN 4
2184/* previous or existing state (0=detached, 1=attached) */ 2219/* previous or existing state, see the bitmask at NEW_STATE */
2185#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0 2220#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
2186 2221
2187/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */ 2222/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
2188#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8 2223#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
2189/* previous or existing state (0=detached, 1=attached) */ 2224/* previous or existing state, see the bitmask at NEW_STATE */
2190#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0 2225#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
2191/* Flags associated with this function */ 2226/* Flags associated with this function */
2192#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4 2227#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
@@ -2198,6 +2233,10 @@
2198#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1 2233#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1
2199/* enum: The function can perform privileged operations */ 2234/* enum: The function can perform privileged operations */
2200#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2 2235#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2
2236/* enum: The function does not have an active port associated with it. The port
2237 * refers to the Sorrento external FPGA port.
2238 */
2239#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3
2201 2240
2202 2241
2203/***********************************/ 2242/***********************************/
@@ -2892,7 +2931,7 @@
2892 */ 2931 */
2893#define MC_CMD_SET_MAC 0x2c 2932#define MC_CMD_SET_MAC 0x2c
2894 2933
2895#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK 2934#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
2896 2935
2897/* MC_CMD_SET_MAC_IN msgrequest */ 2936/* MC_CMD_SET_MAC_IN msgrequest */
2898#define MC_CMD_SET_MAC_IN_LEN 28 2937#define MC_CMD_SET_MAC_IN_LEN 28
@@ -2927,9 +2966,66 @@
2927#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0 2966#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
2928#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1 2967#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
2929 2968
2969/* MC_CMD_SET_MAC_EXT_IN msgrequest */
2970#define MC_CMD_SET_MAC_EXT_IN_LEN 32
2971/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
2972 * EtherII, VLAN, bug16011 padding).
2973 */
2974#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
2975#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
2976#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
2977#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
2978#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
2979#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
2980#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
2981#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
2982#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
2983#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
2984#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
2985#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
2986/* enum: Flow control is off. */
2987/* MC_CMD_FCNTL_OFF 0x0 */
2988/* enum: Respond to flow control. */
2989/* MC_CMD_FCNTL_RESPOND 0x1 */
2990/* enum: Respond to and Issue flow control. */
2991/* MC_CMD_FCNTL_BIDIR 0x2 */
2992/* enum: Auto neg flow control. */
2993/* MC_CMD_FCNTL_AUTO 0x3 */
2994/* enum: Priority flow control (eftest builds only). */
2995/* MC_CMD_FCNTL_QBB 0x4 */
2996/* enum: Issue flow control. */
2997/* MC_CMD_FCNTL_GENERATE 0x5 */
2998#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
2999#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
3000#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
3001/* Select which parameters to configure. A parameter will only be modified if
3002 * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in
3003 * capabilities then this field is ignored (and all flags are assumed to be
3004 * set).
3005 */
3006#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
3007#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
3008#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
3009#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
3010#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1
3011#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2
3012#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1
3013#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3
3014#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1
3015#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4
3016#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1
3017
2930/* MC_CMD_SET_MAC_OUT msgresponse */ 3018/* MC_CMD_SET_MAC_OUT msgresponse */
2931#define MC_CMD_SET_MAC_OUT_LEN 0 3019#define MC_CMD_SET_MAC_OUT_LEN 0
2932 3020
3021/* MC_CMD_SET_MAC_V2_OUT msgresponse */
3022#define MC_CMD_SET_MAC_V2_OUT_LEN 4
3023/* MTU as configured after processing the request. See comment at
3024 * MC_CMD_SET_MAC_IN/MTU. To query MTU without doing any changes, set CONTROL
3025 * to 0.
3026 */
3027#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
3028
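
A hedged sketch of issuing the extended request to change only the MTU, using the driver's usual MCDI buffer macros (illustrative; not code from this patch, and assuming the standard efx_mcdi_rpc() calling convention):

    MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MAC_EXT_IN_LEN);
    int rc;

    MCDI_SET_DWORD(inbuf, SET_MAC_EXT_IN_MTU, new_mtu);
    MCDI_POPULATE_DWORD_1(inbuf, SET_MAC_EXT_IN_CONTROL,
                          SET_MAC_EXT_IN_CFG_MTU, 1);   /* touch the MTU only */
    rc = efx_mcdi_rpc(efx, MC_CMD_SET_MAC, inbuf, sizeof(inbuf),
                      NULL, 0, NULL);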
2933 3029
2934/***********************************/ 3030/***********************************/
2935/* MC_CMD_PHY_STATS 3031/* MC_CMD_PHY_STATS
@@ -3521,6 +3617,26 @@
3521#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 3617#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
3522#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20 3618#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
3523 3619
3620/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
3621#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
3622#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
3623/* Enum values, see field(s): */
3624/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
3625#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
3626#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
3627#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
3628#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
3629#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
3630#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
3631#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
3632#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
3633#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
3634#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
3635#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
3636/* Writes must be multiples of this size. Added to support the MUM on Sorrento.
3637 */
3638#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
3639
3524 3640
3525/***********************************/ 3641/***********************************/
3526/* MC_CMD_NVRAM_UPDATE_START 3642/* MC_CMD_NVRAM_UPDATE_START
@@ -3561,6 +3677,37 @@
3561/* amount to read in bytes */ 3677/* amount to read in bytes */
3562#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8 3678#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
3563 3679
3680/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
3681#define MC_CMD_NVRAM_READ_IN_V2_LEN 16
3682#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
3683/* Enum values, see field(s): */
3684/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
3685#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
3686/* amount to read in bytes */
3687#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
3688/* Optional control info. If a partition is stored with an A/B versioning
3689 * scheme (i.e. in more than one physical partition in NVRAM) the host can set
3690 * this to control which underlying physical partition is used to read data
3691 * from. This allows it to perform a read-modify-write-verify with the write
3692 * lock continuously held by calling NVRAM_UPDATE_START, reading the old
3693 * contents using MODE=TARGET_CURRENT, overwriting the old partition and then
3694 * verifying by reading with MODE=TARGET_BACKUP.
3695 */
3696#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
3697/* enum: Same as omitting MODE: caller sees data in current partition unless it
 3698 * holds the write lock, in which case it sees data in the partition it is
3699 * updating.
3700 */
3701#define MC_CMD_NVRAM_READ_IN_V2_DEFAULT 0x0
3702/* enum: Read from the current partition of an A/B pair, even if holding the
3703 * write lock.
3704 */
3705#define MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT 0x1
3706/* enum: Read from the non-current (i.e. to be updated) partition of an A/B
3707 * pair
3708 */
3709#define MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP 0x2
3710
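
Taken together with the MODE comment above, the intended A/B read-modify-write-verify sequence looks like this (an outline of the MCDI calls involved, not driver code):

    /* 1. MC_CMD_NVRAM_UPDATE_START (type)           - take the write lock
     * 2. MC_CMD_NVRAM_READ V2, MODE=TARGET_CURRENT  - read current contents
     * 3. MC_CMD_NVRAM_WRITE (type, ...)             - write the updated image
     * 4. MC_CMD_NVRAM_READ V2, MODE=TARGET_BACKUP   - verify the written copy
     * 5. MC_CMD_NVRAM_UPDATE_FINISH (type)          - commit, drop the lock
     */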
3564/* MC_CMD_NVRAM_READ_OUT msgresponse */ 3711/* MC_CMD_NVRAM_READ_OUT msgresponse */
3565#define MC_CMD_NVRAM_READ_OUT_LENMIN 1 3712#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
3566#define MC_CMD_NVRAM_READ_OUT_LENMAX 252 3713#define MC_CMD_NVRAM_READ_OUT_LENMAX 252
@@ -3895,6 +4042,8 @@
3895#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39 4042#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
3896/* enum: CCOM AVREG 1v8 supply (external ADC): mV */ 4043/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
3897#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a 4044#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
4045/* enum: CCOM RTS temperature: degC */
4046#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b
3898/* enum: Not a sensor: reserved for the next page flag */ 4047/* enum: Not a sensor: reserved for the next page flag */
3899#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f 4048#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
3900/* enum: controller internal temperature sensor voltage on master core 4049/* enum: controller internal temperature sensor voltage on master core
@@ -3931,6 +4080,12 @@
3931#define MC_CMD_SENSOR_PHY0_VCC 0x4c 4080#define MC_CMD_SENSOR_PHY0_VCC 0x4c
3932/* enum: Voltage supplied to the QSFP #1 from their power supply: mV */ 4081/* enum: Voltage supplied to the QSFP #1 from their power supply: mV */
3933#define MC_CMD_SENSOR_PHY1_VCC 0x4d 4082#define MC_CMD_SENSOR_PHY1_VCC 0x4d
4083/* enum: Controller die temperature (TDIODE): degC */
4084#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e
4085/* enum: Board temperature (front): degC */
4086#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
4087/* enum: Board temperature (back): degC */
4088#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
3934/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ 4089/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
3935#define MC_CMD_SENSOR_ENTRY_OFST 4 4090#define MC_CMD_SENSOR_ENTRY_OFST 4
3936#define MC_CMD_SENSOR_ENTRY_LEN 8 4091#define MC_CMD_SENSOR_ENTRY_LEN 8
@@ -4007,7 +4162,7 @@
4007 4162
4008/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */ 4163/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
4009#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12 4164#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
4010/* DMA address of host buffer for sensor readings */ 4165/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
4011#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0 4166#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
4012#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8 4167#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
4013#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0 4168#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
@@ -4608,6 +4763,10 @@
4608 * operations 4763 * operations
4609 */ 4764 */
4610#define MC_CMD_MUM_OP_QSFP 0xc 4765#define MC_CMD_MUM_OP_QSFP 0xc
4766/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage
4767 * level) from MUM
4768 */
4769#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd
4611 4770
4612/* MC_CMD_MUM_IN_NULL msgrequest */ 4771/* MC_CMD_MUM_IN_NULL msgrequest */
4613#define MC_CMD_MUM_IN_NULL_LEN 4 4772#define MC_CMD_MUM_IN_NULL_LEN 4
@@ -4793,6 +4952,10 @@
4793#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8 4952#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
4794#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0 4953#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
4795#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1 4954#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
4955#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
4956#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1
4957#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2
4958#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1
4796 4959
4797/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */ 4960/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
4798#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8 4961#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
@@ -4862,6 +5025,11 @@
4862#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4 5025#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
4863#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8 5026#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
4864 5027
5028/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
5029#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
5030/* MUM cmd header */
5031/* MC_CMD_MUM_IN_CMD_OFST 0 */
5032
4865/* MC_CMD_MUM_OUT msgresponse */ 5033/* MC_CMD_MUM_OUT msgresponse */
4866#define MC_CMD_MUM_OUT_LEN 0 5034#define MC_CMD_MUM_OUT_LEN 0
4867 5035
@@ -5004,6 +5172,69 @@
5004#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4 5172#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
5005#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0 5173#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
5006 5174
5175/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
5176#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
5177#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
5178#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
5179/* Discrete (soldered) DDR resistor strap info */
5180#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
5181#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
5182#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
5183#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
5184#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
5185/* Number of SODIMM info records */
5186#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
5187/* Array of SODIMM info records */
5188#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
5189#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
5190#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
5191#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
5192#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
5193#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
5194#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
5195#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
5196/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
5197#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
 5198/* enum: SODIMM bank 2 (Bottom SODIMM for Sorrento) */
5199#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
5200/* enum: Total number of SODIMM banks */
5201#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
5202#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
5203#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
5204#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
5205#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
5206#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
5207#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
5208#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
5209#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */
5210#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */
5211#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
5212/* enum: Values 5-15 are reserved for future usage */
5213#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
5214#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
5215#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
5216#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
5217#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
5218#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
5219#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
5220/* enum: No module present */
5221#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0
5222/* enum: Module present supported and powered on */
5223#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1
5224/* enum: Module present but bad type */
5225#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2
5226/* enum: Module present but incompatible voltage */
5227#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3
5228/* enum: Module present but unknown SPD */
5229#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4
5230/* enum: Module present but slot cannot support it */
5231#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5
 5232/* enum: Modules may or may not be present, but contact cannot be established over I2C
5233 */
5234#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
5235#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
5236#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
5237
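
Each SODIMM info record is a 64-bit word carved up by the LBN/WIDTH pairs above. An illustrative decode using plain shifts (the record variable is hypothetical; the RESERVED2 field at LBN 52 is ignored):

    u64 rec;   /* one SODIMM_INFO_RECORD from the response */

    unsigned int bank    = rec & 0xff;            /* BANK_ID: LBN 0,  width 8  */
    unsigned int type    = (rec >> 8)  & 0xff;    /* TYPE:    LBN 8,  width 8  */
    unsigned int rank    = (rec >> 16) & 0xf;     /* RANK:    LBN 16, width 4  */
    unsigned int voltage = (rec >> 20) & 0xf;     /* VOLTAGE: LBN 20, width 4  */
    unsigned int size    = (rec >> 24) & 0xff;    /* SIZE:    LBN 24, width 8  */
    unsigned int speed   = (rec >> 32) & 0xffff;  /* SPEED:   LBN 32, width 16 */
    unsigned int state   = (rec >> 48) & 0xf;     /* STATE:   LBN 48, width 4  */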
5007/* MC_CMD_RESOURCE_SPECIFIER enum */ 5238/* MC_CMD_RESOURCE_SPECIFIER enum */
5008/* enum: Any */ 5239/* enum: Any */
5009#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff 5240#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
@@ -5076,6 +5307,8 @@
5076#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500 5307#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
5077/* enum: Expansion ROM configuration data for port 0 */ 5308/* enum: Expansion ROM configuration data for port 0 */
5078#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600 5309#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
5310/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */
5311#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600
5079/* enum: Expansion ROM configuration data for port 1 */ 5312/* enum: Expansion ROM configuration data for port 1 */
5080#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601 5313#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
5081/* enum: Expansion ROM configuration data for port 2 */ 5314/* enum: Expansion ROM configuration data for port 2 */
@@ -5084,6 +5317,8 @@
5084#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603 5317#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
5085/* enum: Non-volatile log output partition */ 5318/* enum: Non-volatile log output partition */
5086#define NVRAM_PARTITION_TYPE_LOG 0x700 5319#define NVRAM_PARTITION_TYPE_LOG 0x700
5320/* enum: Non-volatile log output of second core on dual-core device */
5321#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
5087/* enum: Device state dump output partition */ 5322/* enum: Device state dump output partition */
5088#define NVRAM_PARTITION_TYPE_DUMP 0x800 5323#define NVRAM_PARTITION_TYPE_DUMP 0x800
5089/* enum: Application license key storage partition */ 5324/* enum: Application license key storage partition */
@@ -5116,6 +5351,20 @@
5116#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05 5351#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05
5117/* enum: MUM fuses and lockbits partition. */ 5352/* enum: MUM fuses and lockbits partition. */
5118#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06 5353#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
5354/* enum: UEFI expansion ROM if separate from PXE */
5355#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
5356/* enum: Spare partition 0 */
5357#define NVRAM_PARTITION_TYPE_SPARE_0 0x1000
5358/* enum: Spare partition 1 */
5359#define NVRAM_PARTITION_TYPE_SPARE_1 0x1100
5360/* enum: Spare partition 2 */
5361#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200
5362/* enum: Spare partition 3 */
5363#define NVRAM_PARTITION_TYPE_SPARE_3 0x1300
5364/* enum: Spare partition 4 */
5365#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
5366/* enum: Spare partition 5 */
5367#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500
5119/* enum: Start of reserved value range (firmware may use for any purpose) */ 5368/* enum: Start of reserved value range (firmware may use for any purpose) */
5120#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 5369#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
5121/* enum: End of reserved value range (firmware may use for any purpose) */ 5370/* enum: End of reserved value range (firmware may use for any purpose) */
@@ -5149,6 +5398,90 @@
5149#define LICENSED_APP_ID_ID_LBN 0 5398#define LICENSED_APP_ID_ID_LBN 0
5150#define LICENSED_APP_ID_ID_WIDTH 32 5399#define LICENSED_APP_ID_ID_WIDTH 32
5151 5400
5401/* LICENSED_FEATURES structuredef */
5402#define LICENSED_FEATURES_LEN 8
5403/* Bitmask of licensed firmware features */
5404#define LICENSED_FEATURES_MASK_OFST 0
5405#define LICENSED_FEATURES_MASK_LEN 8
5406#define LICENSED_FEATURES_MASK_LO_OFST 0
5407#define LICENSED_FEATURES_MASK_HI_OFST 4
5408#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
5409#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
5410#define LICENSED_FEATURES_PIO_LBN 1
5411#define LICENSED_FEATURES_PIO_WIDTH 1
5412#define LICENSED_FEATURES_EVQ_TIMER_LBN 2
5413#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1
5414#define LICENSED_FEATURES_CLOCK_LBN 3
5415#define LICENSED_FEATURES_CLOCK_WIDTH 1
5416#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4
5417#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1
5418#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5
5419#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1
5420#define LICENSED_FEATURES_RX_SNIFF_LBN 6
5421#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1
5422#define LICENSED_FEATURES_TX_SNIFF_LBN 7
5423#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1
5424#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
5425#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
5426#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9
5427#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
5428#define LICENSED_FEATURES_MASK_LBN 0
5429#define LICENSED_FEATURES_MASK_WIDTH 64
5430
5431/* LICENSED_V3_APPS structuredef */
5432#define LICENSED_V3_APPS_LEN 8
5433/* Bitmask of licensed applications */
5434#define LICENSED_V3_APPS_MASK_OFST 0
5435#define LICENSED_V3_APPS_MASK_LEN 8
5436#define LICENSED_V3_APPS_MASK_LO_OFST 0
5437#define LICENSED_V3_APPS_MASK_HI_OFST 4
5438#define LICENSED_V3_APPS_ONLOAD_LBN 0
5439#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
5440#define LICENSED_V3_APPS_PTP_LBN 1
5441#define LICENSED_V3_APPS_PTP_WIDTH 1
5442#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2
5443#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1
5444#define LICENSED_V3_APPS_SOLARSECURE_LBN 3
5445#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1
5446#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4
5447#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1
5448#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5
5449#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1
5450#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6
5451#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
5452#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
5453#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
5454#define LICENSED_V3_APPS_MASK_LBN 0
5455#define LICENSED_V3_APPS_MASK_WIDTH 64
5456
5457/* LICENSED_V3_FEATURES structuredef */
5458#define LICENSED_V3_FEATURES_LEN 8
5459/* Bitmask of licensed firmware features */
5460#define LICENSED_V3_FEATURES_MASK_OFST 0
5461#define LICENSED_V3_FEATURES_MASK_LEN 8
5462#define LICENSED_V3_FEATURES_MASK_LO_OFST 0
5463#define LICENSED_V3_FEATURES_MASK_HI_OFST 4
5464#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0
5465#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1
5466#define LICENSED_V3_FEATURES_PIO_LBN 1
5467#define LICENSED_V3_FEATURES_PIO_WIDTH 1
5468#define LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2
5469#define LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1
5470#define LICENSED_V3_FEATURES_CLOCK_LBN 3
5471#define LICENSED_V3_FEATURES_CLOCK_WIDTH 1
5472#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4
5473#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1
5474#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5
5475#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1
5476#define LICENSED_V3_FEATURES_RX_SNIFF_LBN 6
5477#define LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1
5478#define LICENSED_V3_FEATURES_TX_SNIFF_LBN 7
5479#define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1
5480#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8
5481#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1
5482#define LICENSED_V3_FEATURES_MASK_LBN 0
5483#define LICENSED_V3_FEATURES_MASK_WIDTH 64
5484
5152/* TX_TIMESTAMP_EVENT structuredef */ 5485/* TX_TIMESTAMP_EVENT structuredef */
5153#define TX_TIMESTAMP_EVENT_LEN 6 5486#define TX_TIMESTAMP_EVENT_LEN 6
5154/* lower 16 bits of timestamp data */ 5487/* lower 16 bits of timestamp data */
@@ -5258,6 +5591,8 @@
5258#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1 5591#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
5259#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5 5592#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
5260#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1 5593#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
5594#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
5595#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
5261#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20 5596#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
5262/* enum: Disabled */ 5597/* enum: Disabled */
5263#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0 5598#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
@@ -5362,6 +5697,8 @@
5362#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1 5697#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
5363#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9 5698#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
5364#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1 5699#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
5700#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_LBN 10
5701#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
5365/* Owner ID to use if in buffer mode (zero if physical) */ 5702/* Owner ID to use if in buffer mode (zero if physical) */
5366#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20 5703#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
5367/* The port ID associated with the v-adaptor which should contain this DMAQ. */ 5704/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -5422,6 +5759,8 @@
5422#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */ 5759#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
5423#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 5760#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
5424#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 5761#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
5762#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19
5763#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
5425/* Owner ID to use if in buffer mode (zero if physical) */ 5764/* Owner ID to use if in buffer mode (zero if physical) */
5426#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20 5765#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
5427/* The port ID associated with the v-adaptor which should contain this DMAQ. */ 5766/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -5535,6 +5874,8 @@
5535#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 5874#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
5536#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 5875#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
5537#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 5876#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
5877#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12
5878#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
5538/* Owner ID to use if in buffer mode (zero if physical) */ 5879/* Owner ID to use if in buffer mode (zero if physical) */
5539#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20 5880#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
5540/* The port ID associated with the v-adaptor which should contain this DMAQ. */ 5881/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -5747,6 +6088,46 @@
5747#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44 6088#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
5748#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64 6089#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
5749 6090
6091/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
6092#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
6093#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
6094#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
6095#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
6096/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
 6097 * of blocks, each of the size STATUS_BLOCK_SIZE.
6098 */
6099#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
6100#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
6101#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
6102#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
6103/* Must be a power of 2 */
6104#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
6105/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
 6106 * of blocks, each of the size REQUEST_BLOCK_SIZE.
6107 */
6108#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
6109#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
6110#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
6111#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
6112/* Must be a power of 2 */
6113#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
6114/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
 6115 * of blocks, each of the size REPLY_BLOCK_SIZE. This buffer is only needed if
6116 * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
6117 */
6118#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
6119#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
6120#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
6121#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
6122/* Must be a power of 2, or zero if this buffer is not provided */
6123#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
6124/* Applies to all three buffers */
6125#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
6126/* A bit mask defining which MCDI operations may be proxied */
6127#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
6128#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
6129#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
6130
5750/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */ 6131/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
5751#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0 6132#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
5752 6133
@@ -6323,6 +6704,15 @@
6323 * client 6704 * client
6324 */ 6705 */
6325#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2 6706#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
6707/* enum: read properties relating to security rules (Medford-only; for use by
6708 * SolarSecure apps, not directly by drivers. See SF-114946-SW.)
6709 */
6710#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3
6711/* enum: read the list of supported RX filter matches for VXLAN/NVGRE
6712 * encapsulated frames, which follow a different match sequence to normal
6713 * frames (Medford only)
6714 */
6715#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4
6326 6716
6327/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */ 6717/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
6328#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8 6718#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -6356,7 +6746,10 @@
6356 6746
6357/***********************************/ 6747/***********************************/
6358/* MC_CMD_PARSER_DISP_RW 6748/* MC_CMD_PARSER_DISP_RW
6359 * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging 6749 * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
6750 * Please note that this interface is only of use to debug tools which have
6751 * knowledge of firmware and hardware data structures; nothing here is intended
6752 * for use by normal driver code.
6360 */ 6753 */
6361#define MC_CMD_PARSER_DISP_RW 0xe5 6754#define MC_CMD_PARSER_DISP_RW 0xe5
6362 6755
@@ -6374,6 +6767,12 @@
6374#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2 6767#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
6375/* enum: Lookup engine (with requested metadata format) */ 6768/* enum: Lookup engine (with requested metadata format) */
6376#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3 6769#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
6770/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */
6771#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
6772/* enum: RX1 dispatcher CPU (only valid for Medford) */
6773#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
6774/* enum: Miscellaneous other state (only valid for Medford) */
6775#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
6377/* identifies the type of operation requested */ 6776/* identifies the type of operation requested */
6378#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4 6777#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
6379/* enum: read a word of DICPU DMEM or a LUE entry */ 6778/* enum: read a word of DICPU DMEM or a LUE entry */
@@ -6382,8 +6781,12 @@
6382#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1 6781#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
6383/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */ 6782/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
6384#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2 6783#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
6385/* data memory address or LUE index */ 6784/* data memory address (DICPU targets) or LUE index (LUE targets) */
6386#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8 6785#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
6786/* selector (for MISC_STATE target) */
6787#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
6788/* enum: Port to datapath mapping */
6789#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
6387/* value to write (for DMEM writes) */ 6790/* value to write (for DMEM writes) */
6388#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12 6791#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
6389/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */ 6792/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
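
The read-modify-write rule quoted in the comment above, new = (old & mask) ^ value, can be modelled directly; a one-line host-side sketch (a model of what the firmware computes, not driver code):

#include <stdint.h>

/* Model of the DMEM RMW: keep only the masked bits, then XOR in the value.
 * e.g. dmem_rmw(old, ~0x1u, 0x1u) forces bit 0 to 1 and keeps the rest.
 */
static uint32_t dmem_rmw(uint32_t old, uint32_t mask, uint32_t value)
{
	return (old & mask) ^ value;
}
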
@@ -6408,6 +6811,12 @@
6408 */ 6811 */
6409#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20 6812#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
6410#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32 6813#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
6814/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */
6815#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0
6816#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4
6817#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4
6818#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
6819#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
6411 6820
6412 6821
6413/***********************************/ 6822/***********************************/
@@ -7071,6 +7480,24 @@
7071#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20 7480#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
7072/* First word of flags. */ 7481/* First word of flags. */
7073#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0 7482#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
7483#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
7484#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
7485#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
7486#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1
7487#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5
7488#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1
7489#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
7490#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
7491#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7
7492#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
7493#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8
7494#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
7495#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9
7496#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1
7497#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
7498#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
7499#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
7500#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
7074#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 7501#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
7075#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 7502#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
7076#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13 7503#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
@@ -7138,6 +7565,8 @@
7138#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 7565#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
7139/* enum: RXDP Test firmware image 8 */ 7566/* enum: RXDP Test firmware image 8 */
7140#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 7567#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
7568/* enum: RXDP Test firmware image 9 */
7569#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
7141/* TxDPCPU firmware id. */ 7570/* TxDPCPU firmware id. */
7142#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6 7571#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
7143#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2 7572#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
@@ -7153,6 +7582,8 @@
7153#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 7582#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
7154/* enum: TXDP Test firmware image 2 */ 7583/* enum: TXDP Test firmware image 2 */
7155#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 7584#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
7585/* enum: TXDP CSR bus test firmware */
7586#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103
7156#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8 7587#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
7157#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2 7588#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
7158#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0 7589#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
@@ -7227,6 +7658,258 @@
7227/* Licensed capabilities */ 7658/* Licensed capabilities */
7228#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16 7659#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
7229 7660
7661/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
7662#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
7663
7664/* MC_CMD_GET_CAPABILITIES_V2_OUT msgresponse */
7665#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
7666/* First word of flags. */
7667#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
7668#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
7669#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
7670#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
7671#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1
7672#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5
7673#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1
7674#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
7675#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
7676#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7
7677#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
7678#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8
7679#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
7680#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9
7681#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1
7682#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
7683#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
7684#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
7685#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
7686#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
7687#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
7688#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13
7689#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
7690#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14
7691#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1
7692#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
7693#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
7694#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16
7695#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1
7696#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17
7697#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1
7698#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18
7699#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1
7700#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19
7701#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1
7702#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20
7703#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1
7704#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21
7705#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1
7706#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22
7707#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1
7708#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23
7709#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1
7710#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24
7711#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1
7712#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25
7713#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1
7714#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26
7715#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1
7716#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27
7717#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
7718#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28
7719#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1
7720#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
7721#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
7722#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30
7723#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1
7724#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31
7725#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1
7726/* RxDPCPU firmware id. */
7727#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4
7728#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2
7729/* enum: Standard RXDP firmware */
7730#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0
7731/* enum: Low latency RXDP firmware */
7732#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
7733/* enum: Packed stream RXDP firmware */
7734#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
7735/* enum: BIST RXDP firmware */
7736#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
7737/* enum: RXDP Test firmware image 1 */
7738#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
7739/* enum: RXDP Test firmware image 2 */
7740#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
7741/* enum: RXDP Test firmware image 3 */
7742#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
7743/* enum: RXDP Test firmware image 4 */
7744#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
7745/* enum: RXDP Test firmware image 5 */
7746#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105
7747/* enum: RXDP Test firmware image 6 */
7748#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
7749/* enum: RXDP Test firmware image 7 */
7750#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
7751/* enum: RXDP Test firmware image 8 */
7752#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
7753/* enum: RXDP Test firmware image 9 */
7754#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
7755/* TxDPCPU firmware id. */
7756#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
7757#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
7758/* enum: Standard TXDP firmware */
7759#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0
7760/* enum: Low latency TXDP firmware */
7761#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
7762/* enum: High packet rate TXDP firmware */
7763#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
7764/* enum: BIST TXDP firmware */
7765#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
7766/* enum: TXDP Test firmware image 1 */
7767#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
7768/* enum: TXDP Test firmware image 2 */
7769#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
7770/* enum: TXDP CSR bus test firmware */
7771#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103
7772#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8
7773#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2
7774#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0
7775#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12
7776#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12
7777#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
7778/* enum: reserved value - do not use (may indicate alternative interpretation
7779 * of REV field in future)
7780 */
7781#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0
7782/* enum: Trivial RX PD firmware for early Huntington development (Huntington
7783 * development only)
7784 */
7785#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
7786/* enum: RX PD firmware with approximately Siena-compatible behaviour
7787 * (Huntington development only)
7788 */
7789#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
7790/* enum: Virtual switching (full feature) RX PD production firmware */
7791#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
7792/* enum: siena_compat variant RX PD firmware using PM rather than MAC
7793 * (Huntington development only)
7794 */
7795#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
7796/* enum: Low latency RX PD production firmware */
7797#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
7798/* enum: Packed stream RX PD production firmware */
7799#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
7800/* enum: RX PD firmware handling layer 2 only for high packet rate performance
7801 * tests (Medford development only)
7802 */
7803#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
7804/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
7805#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
7806/* enum: RX PD firmware parsing but not filtering network overlay tunnel
7807 * encapsulations (Medford development only)
7808 */
7809#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
7810#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10
7811#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2
7812#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0
7813#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12
7814#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12
7815#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
7816/* enum: reserved value - do not use (may indicate alternative interpretation
7817 * of REV field in future)
7818 */
7819#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0
7820/* enum: Trivial TX PD firmware for early Huntington development (Huntington
7821 * development only)
7822 */
7823#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
7824/* enum: TX PD firmware with approximately Siena-compatible behaviour
7825 * (Huntington development only)
7826 */
7827#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
7828/* enum: Virtual switching (full feature) TX PD production firmware */
7829#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
7830/* enum: siena_compat variant TX PD firmware using PM rather than MAC
7831 * (Huntington development only)
7832 */
7833#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
7834#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
7835/* enum: TX PD firmware handling layer 2 only for high packet rate performance
7836 * tests (Medford development only)
7837 */
7838#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
7839/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
7840#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
7841/* Hardware capabilities of NIC */
7842#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
7843/* Licensed capabilities */
7844#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
7845/* Second word of flags. Not present on older firmware (check the length). */
7846#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
7847#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
7848#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
7849#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
7850#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1
7851#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2
7852#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1
7853#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3
7854#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1
7855#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4
7856#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1
7857/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
7858 * on older firmware (check the length).
7859 */
7860#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
7861#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
7862/* One byte per PF containing the number of the external port assigned to this
7863 * PF, indexed by PF number. Special values indicate that a PF is either not
7864 * present or not assigned.
7865 */
7866#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
7867#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
7868#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
7869/* enum: The caller is not permitted to access information on this PF. */
7870#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff
7871/* enum: PF does not exist. */
7872#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe
7873/* enum: PF does exist but is not assigned to any external port. */
7874#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd
7875/* enum: This value indicates that the PF is assigned, but the assignment
7876 * cannot be expressed in this field. It is intended for a possible future
7877 * situation where a more complex scheme of PF-to-port mapping is in use. A
7878 * future driver should look for a new field supporting the new scheme;
7879 * current/old drivers should treat this value as PF_NOT_ASSIGNED.
7880 */
7881#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
7882/* One byte per PF containing the number of its VFs, indexed by PF number. A
7883 * special value indicates that a PF is not present.
7884 */
7885#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42
7886#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1
7887#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16
7888/* enum: The caller is not permitted to access information on this PF. */
7889/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
7890/* enum: PF does not exist. */
7891/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
7892/* Number of VIs available for each external port */
7893#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
7894#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
7895#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
7896/* Size of the RX descriptor cache, expressed as a binary logarithm. The
7897 * actual size equals (2 ^ RX_DESC_CACHE_SIZE).
7898 */
7899#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66
7900#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1
7901/* Size of the TX descriptor cache, expressed as a binary logarithm. The
7902 * actual size equals (2 ^ TX_DESC_CACHE_SIZE).
7903 */
7904#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67
7905#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1
7906/* Total number of available PIO buffers */
7907#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68
7908#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2
7909/* Size of a single PIO buffer */
7910#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70
7911#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
7912
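
A sketch of consuming this response, assuming outbuf/outlen hold the little-endian response bytes and their length (mcdi_dword() is a hypothetical helper, not the driver's API). It shows the length check the comments above require, since FLAGS2 and later fields are absent on older firmware, plus the usual LBN shift for a flag and the binary-logarithm decode of the descriptor cache size.

#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static uint32_t mcdi_dword(const uint8_t *buf, unsigned int ofst)
{
	uint32_t v;

	memcpy(&v, buf + ofst, sizeof(v));	/* fields are little-endian */
	return le32toh(v);
}

static bool caps_v2_has_tx_tso_v2(const uint8_t *outbuf, size_t outlen)
{
	uint32_t flags2;

	/* FLAGS2 is "not present on older firmware (check the length)" */
	if (outlen < MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST + 4)
		return false;
	flags2 = mcdi_dword(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST);
	return (flags2 >> MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN) & 1;
}

/* RX_DESC_CACHE_SIZE is a binary logarithm: the actual size is 2^n. */
static uint32_t caps_v2_rx_desc_cache(const uint8_t *outbuf)
{
	return 1u << outbuf[MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST];
}
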
7230 7913
7231/***********************************/ 7914/***********************************/
7232/* MC_CMD_V2_EXTN 7915/* MC_CMD_V2_EXTN
@@ -7475,6 +8158,25 @@
7475 8158
7476 8159
7477/***********************************/ 8160/***********************************/
8161/* MC_CMD_VSWITCH_QUERY
8162 * read some config of v-switch. For now this command is an empty placeholder.
8163 * It may be used to check if a v-switch is connected to a given EVB port (if
8164 * not, then the command returns ENOENT).
8165 */
8166#define MC_CMD_VSWITCH_QUERY 0x63
8167
8168#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
8169
8170/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
8171#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
8172/* The port to which the v-switch is connected. */
8173#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
8174
8175/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
8176#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
8177
8178
8179/***********************************/
7478/* MC_CMD_VPORT_ALLOC 8180/* MC_CMD_VPORT_ALLOC
7479 * allocate a v-port. 8181 * allocate a v-port.
7480 */ 8182 */
@@ -7510,6 +8212,8 @@
7510#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8 8212#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
7511#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0 8213#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
7512#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1 8214#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
8215#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
8216#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1
7513/* The number of VLAN tags to insert/remove. An error will be returned if 8217/* The number of VLAN tags to insert/remove. An error will be returned if
7514 * incompatible with the number of VLAN tags specified for the upstream 8218 * incompatible with the number of VLAN tags specified for the upstream
7515 * v-switch. 8219 * v-switch.
@@ -7561,6 +8265,8 @@
7561#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8 8265#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
7562#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0 8266#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
7563#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1 8267#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
8268#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
8269#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
7564/* The number of VLAN tags to strip on receive */ 8270/* The number of VLAN tags to strip on receive */
7565#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12 8271#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
7566/* The number of VLAN tags to transparently insert/remove. */ 8272/* The number of VLAN tags to transparently insert/remove. */
@@ -7639,6 +8345,29 @@
7639 8345
7640 8346
7641/***********************************/ 8347/***********************************/
8348/* MC_CMD_VADAPTOR_QUERY
8349 * read some config of v-adaptor.
8350 */
8351#define MC_CMD_VADAPTOR_QUERY 0x61
8352
8353#define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL
8354
8355/* MC_CMD_VADAPTOR_QUERY_IN msgrequest */
8356#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4
8357/* The port to which the v-adaptor is connected. */
8358#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
8359
8360/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
8361#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
8362/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
8363#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
8364/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
8365#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
8366/* The number of VLAN tags that may still be added */
8367#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
8368
8369
8370/***********************************/
7642/* MC_CMD_EVB_PORT_ASSIGN 8371/* MC_CMD_EVB_PORT_ASSIGN
7643 * assign a port to a PCI function. 8372 * assign a port to a PCI function.
7644 */ 8373 */
@@ -7875,10 +8604,17 @@
7875#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8 8604#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
7876/* The handle of the RSS context */ 8605/* The handle of the RSS context */
7877#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 8606#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
7878/* Hash control flags. The _EN bits are always supported. The _MODE bits only 8607/* Hash control flags. The _EN bits are always supported, but new modes are
7879 * work when the firmware reports ADDITIONAL_RSS_MODES in 8608 * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
7880 * MC_CMD_GET_CAPABILITIES and override the _EN bits if any of them are not 0. 8609 * in this case, the MODE fields may be set to non-zero values, and will take
7881 * See the RSS_MODE structure for the meaning of the mode bits. 8610 * effect regardless of the settings of the _EN flags. See the RSS_MODE
8611 * structure for the meaning of the mode bits. Drivers must check the
8612 * capability before trying to set any _MODE fields, as older firmware will
8613 * reject any attempt to set the FLAGS field to a value > 0xff with EINVAL. In
8614 * the case where all the _MODE flags are zero, the _EN flags take effect,
8615 * providing backward compatibility for existing drivers. (Setting all _MODE
8616 * *and* all _EN flags to zero is valid, to disable RSS spreading for that
8617 * particular packet type.)
7882 */ 8618 */
7883#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4 8619#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
7884#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0 8620#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
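
The rule in the comment above reduces to a capability gate on the driver side; a sketch, on the assumption (implied by the "value > 0xff" remark) that the legacy _EN bits occupy the low byte of FLAGS and the _MODE fields sit above it:

#include <stdbool.h>
#include <stdint.h>

/* Strip the _MODE fields for firmware without ADDITIONAL_RSS_MODES, which
 * rejects any FLAGS value above 0xff with EINVAL.
 */
static uint32_t rss_set_flags_for_fw(bool additional_rss_modes, uint32_t flags)
{
	return additional_rss_modes ? flags : (flags & 0xff);
}
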
@@ -7923,11 +8659,18 @@
7923 8659
7924/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */ 8660/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
7925#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8 8661#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
7926/* Hash control flags. If any _MODE bits are non-zero (which will only be true 8662/* Hash control flags. If all _MODE bits are zero (which will always be true
7927 * when the firmware reports ADDITIONAL_RSS_MODES) then the _EN bits should be 8663 * for older firmware which does not report the ADDITIONAL_RSS_MODES
7928 * disregarded (but are guaranteed to be consistent with the _MODE bits if 8664 * capability), the _EN bits report the state. If any _MODE bits are non-zero
7929 * RSS_CONTEXT_SET_FLAGS has never been called for this context since it was 8665 * (which will only be true when the firmware reports ADDITIONAL_RSS_MODES)
7930 * allocated). 8666 * then the _EN bits should be disregarded, although the _MODE flags are
8667 * guaranteed to be consistent with the _EN flags for a freshly-allocated RSS
8668 * context and in the case where the _EN flags were used in the SET. This
8669 * provides backward compatibility: old drivers will not be attempting to
8670 * derive any meaning from the _MODE bits (and can never set them to any value
8671 * not representable by the _EN bits); new drivers can always determine the
8672 * mode by looking only at the _MODE bits; the value returned by a GET can
8673 * always be used for a SET regardless of old/new driver vs. old/new firmware.
7931 */ 8674 */
7932#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4 8675#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
7933#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0 8676#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
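
Reading the flags back follows the mirror-image rule: if any _MODE bits are set they are authoritative, otherwise fall back to the _EN bits. A sketch under the same low-byte assumption as the SET sketch above:

#include <stdbool.h>
#include <stdint.h>

/* True if the _MODE fields (anything above the legacy _EN byte) are in
 * use, in which case the _EN bits must be disregarded.
 */
static bool rss_get_flags_use_modes(uint32_t flags)
{
	return (flags & ~0xffu) != 0;
}
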
@@ -8155,6 +8898,74 @@
8155 8898
8156 8899
8157/***********************************/ 8900/***********************************/
8901/* MC_CMD_VPORT_RECONFIGURE
8902 * Replace VLAN tags and/or MAC addresses of an existing v-port. If the v-port
8903 * has already been passed to another function (v-port's user), then that
8904 * function will be reset before applying the changes.
8905 */
8906#define MC_CMD_VPORT_RECONFIGURE 0xeb
8907
8908#define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
8909
8910/* MC_CMD_VPORT_RECONFIGURE_IN msgrequest */
8911#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
8912/* The handle of the v-port */
8913#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
8914/* Flags requesting what should be changed. */
8915#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
8916#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
8917#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
8918#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
8919#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1
8920/* The number of VLAN tags to insert/remove. An error will be returned if
8921 * incompatible with the number of VLAN tags specified for the upstream
8922 * v-switch.
8923 */
8924#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
8925/* The actual VLAN tags to insert/remove */
8926#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
8927#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
8928#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
8929#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
8930#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
8931/* The number of MAC addresses to add */
8932#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
8933/* MAC addresses to add */
8934#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
8935#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
8936#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_NUM 4
8937
8938/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
8939#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
8940#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
8941#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
8942#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
8943
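
A sketch of filling the 44-byte request to replace a v-port's single MAC address; put_dword() is a hypothetical little-endian store, and the caller is assumed to send the buffer via its usual MCDI transport:

#include <stdint.h>
#include <string.h>

static void put_dword(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst + 0] = v & 0xff;
	buf[ofst + 1] = (v >> 8) & 0xff;
	buf[ofst + 2] = (v >> 16) & 0xff;
	buf[ofst + 3] = (v >> 24) & 0xff;
}

static void build_vport_set_mac(uint8_t *inbuf, uint32_t vport_id,
				const uint8_t mac[6])
{
	memset(inbuf, 0, MC_CMD_VPORT_RECONFIGURE_IN_LEN);
	put_dword(inbuf, MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST, vport_id);
	/* Replace MAC addresses only; leave the VLAN tags untouched */
	put_dword(inbuf, MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST,
		  1u << MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN);
	put_dword(inbuf, MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST, 1);
	memcpy(inbuf + MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST, mac,
	       MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN);
}
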
8944
8945/***********************************/
8946/* MC_CMD_EVB_PORT_QUERY
8947 * read some config of v-port.
8948 */
8949#define MC_CMD_EVB_PORT_QUERY 0x62
8950
8951#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
8952
8953/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
8954#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
8955/* The handle of the v-port */
8956#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
8957
8958/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
8959#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
8960/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
8961#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
8962/* The number of VLAN tags that may be used on a v-adaptor connected to this
8963 * EVB port.
8964 */
8965#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
8966
8967
8968/***********************************/
8158/* MC_CMD_DUMP_BUFTBL_ENTRIES 8969/* MC_CMD_DUMP_BUFTBL_ENTRIES
8159 * Dump buffer table entries, mainly for command client debug use. Dumps 8970 * Dump buffer table entries, mainly for command client debug use. Dumps
8160 * absolute entries, and does not use chunk handles. All entries must be in 8971 * absolute entries, and does not use chunk handles. All entries must be in
@@ -8196,6 +9007,14 @@
8196#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0 9007#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
8197#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0 9008#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
8198#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1 9009#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
9010#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
9011#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
9012/* enum: pad to 64 bytes */
9013#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
9014/* enum: pad to 128 bytes (Medford only) */
9015#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
9016/* enum: pad to 256 bytes (Medford only) */
9017#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
8199 9018
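
The DATA dword packs both pad controls; a sketch using the LBN/WIDTH pairs above (the field positions come straight from the definitions, the helper itself is illustrative):

#include <stdbool.h>
#include <stdint.h>

/* pad_host_len is one of the MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_* enums. */
static uint32_t rxdp_config_data(bool pad_host_dma, uint32_t pad_host_len)
{
	uint32_t data = 0;

	if (pad_host_dma)
		data |= 1u << MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN;
	data |= (pad_host_len & 0x3)	/* 2-bit field */
		<< MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN;
	return data;
}

/* e.g. rxdp_config_data(true, MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128) */
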
8200/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */ 9019/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
8201#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0 9020#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
@@ -8217,6 +9036,10 @@
8217#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0 9036#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
8218#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0 9037#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
8219#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1 9038#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
9039#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
9040#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2
9041/* Enum values, see field(s): */
9042/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */
8220 9043
8221 9044
8222/***********************************/ 9045/***********************************/
@@ -8788,32 +9611,38 @@
8788#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63 9611#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
8789#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 9612#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
8790#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 9613#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
8791/* enum: Attenuation (0-15, TBD for Medford) */ 9614/* enum: Attenuation (0-15, Huntington) */
8792#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0 9615#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
8793/* enum: CTLE Boost (0-15, TBD for Medford) */ 9616/* enum: CTLE Boost (0-15, Huntington) */
8794#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1 9617#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
8795/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive, TBD 9618/* enum: Edge DFE Tap1 (Huntington - 0 - max negative, 64 - zero, 127 - max
8796 * for Medford) 9619 * positive, Medford - 0-31)
8797 */ 9620 */
8798#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2 9621#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
8799/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive, TBD for 9622/* enum: Edge DFE Tap2 (Huntington - 0 - max negative, 32 - zero, 63 - max
8800 * Medford) 9623 * positive, Medford - 0-31)
8801 */ 9624 */
8802#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3 9625#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
8803/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive, TBD for 9626/* enum: Edge DFE Tap3 (Huntington - 0 - max negative, 32 - zero, 63 - max
8804 * Medford) 9627 * positive, Medford - 0-16)
8805 */ 9628 */
8806#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4 9629#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
8807/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive, TBD for 9630/* enum: Edge DFE Tap4 (Huntington - 0 - max negative, 32 - zero, 63 - max
8808 * Medford) 9631 * positive, Medford - 0-16)
8809 */ 9632 */
8810#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5 9633#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
8811/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive, TBD for 9634/* enum: Edge DFE Tap5 (Huntington - 0 - max negative, 32 - zero, 63 - max
8812 * Medford) 9635 * positive, Medford - 0-16)
8813 */ 9636 */
8814#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6 9637#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
8815/* enum: Edge DFE DLEV (TBD for Medford) */ 9638/* enum: Edge DFE DLEV (0-128 for Medford) */
8816#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7 9639#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7
9640/* enum: Variable Gain Amplifier (0-15, Medford) */
9641#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8
9642/* enum: CTLE EQ Capacitor (0-15, Medford) */
9643#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
9644/* enum: CTLE EQ Resistor (0-7, Medford) */
9645#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
8817#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 9646#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
8818#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3 9647#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
8819#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ 9648#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
@@ -8885,26 +9714,32 @@
8885#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63 9714#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
8886#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 9715#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
8887#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 9716#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
8888/* enum: TX Amplitude */ 9717/* enum: TX Amplitude (Huntington, Medford) */
8889#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0 9718#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
8890/* enum: De-Emphasis Tap1 Magnitude (0-7) */ 9719/* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */
8891#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1 9720#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
8892/* enum: De-Emphasis Tap1 Fine */ 9721/* enum: De-Emphasis Tap1 Fine */
8893#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2 9722#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
8894/* enum: De-Emphasis Tap2 Magnitude (0-6) */ 9723/* enum: De-Emphasis Tap2 Magnitude (0-6) (Huntington) */
8895#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3 9724#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
8896/* enum: De-Emphasis Tap2 Fine */ 9725/* enum: De-Emphasis Tap2 Fine (Huntington) */
8897#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4 9726#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
8898/* enum: Pre-Emphasis Magnitude */ 9727/* enum: Pre-Emphasis Magnitude (Huntington) */
8899#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5 9728#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
8900/* enum: Pre-Emphasis Fine */ 9729/* enum: Pre-Emphasis Fine (Huntington) */
8901#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6 9730#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
8902/* enum: TX Slew Rate Coarse control */ 9731/* enum: TX Slew Rate Coarse control (Huntington) */
8903#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7 9732#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
8904/* enum: TX Slew Rate Fine control */ 9733/* enum: TX Slew Rate Fine control (Huntington) */
8905#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8 9734#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
8906/* enum: TX Termination Impedance control */ 9735/* enum: TX Termination Impedance control (Huntington) */
8907#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9 9736#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
9737/* enum: TX Amplitude Fine control (Medford) */
9738#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa
9739/* enum: Pre-shoot Tap (Medford) */
9740#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb
9741/* enum: De-emphasis Tap (Medford) */
9742#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc
8908#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 9743#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
8909#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3 9744#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
8910#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */ 9745#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
@@ -9086,8 +9921,16 @@
9086#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5 9921#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
9087/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */ 9922/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
9088#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6 9923#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
9924/* enum: DFE DLev */
9925#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
9926/* enum: Figure of Merit */
9927#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
9928/* enum: CTLE EQ Capacitor (HF Gain) */
9929#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
9930/* enum: CTLE EQ Resistor (DC Gain) */
9931#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
9089#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 9932#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
9090#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 4 9933#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5
9091#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ 9934#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
9092#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ 9935#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
9093#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ 9936#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
@@ -9096,12 +9939,57 @@
9096#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */ 9939#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
9097#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */ 9940#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
9098#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */ 9941#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
9099#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x8 /* enum */ 9942#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
9100#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12 9943#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
9101#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 12 9944#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
9945#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
9946#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
9947#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
9948#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
9949#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
9950#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
9951#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13
9952#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
9953#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14
9954#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10
9102#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24 9955#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
9103#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 9956#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
9104 9957
9958/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */
9959#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8
9960#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252
9961#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
9962/* Requested operation */
9963#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0
9964#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1
9965/* Align the arguments to 32 bits */
9966#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1
9967#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3
9968/* RXEQ Parameter */
9969#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4
9970#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4
9971#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
9972#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
9973#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
9974#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
9975/* Enum values, see field(s): */
9976/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */
9977#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
9978#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5
9979/* Enum values, see field(s): */
9980/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
9981#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13
9982#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
9983#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14
9984#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2
9985#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
9986#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
9987#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
9988#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
9989
9990/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */
9991#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0
9992
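
A sketch of packing one PARAM word for the SET request from the LBN positions above; the request carries 1 to 62 such words and its total length is MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num), i.e. 4 + 4*num bytes:

#include <stdint.h>

static uint32_t pcie_rxeq_set_param(uint8_t id, uint8_t lane, int autocal,
				    uint8_t initial)
{
	return ((uint32_t)id << MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN) |
	       ((uint32_t)(lane & 0x1f) <<
			MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN) |
	       ((uint32_t)!!autocal <<
			MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN) |
	       ((uint32_t)initial <<
			MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN);
}
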
9105/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */ 9993/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
9106#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4 9994#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
9107/* Requested operation */ 9995/* Requested operation */
@@ -9176,6 +10064,7 @@
9176/***********************************/ 10064/***********************************/
9177/* MC_CMD_LICENSING 10065/* MC_CMD_LICENSING
9178 * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition 10066 * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
10067 * - not used for V3 licensing
9179 */ 10068 */
9180#define MC_CMD_LICENSING 0xf3 10069#define MC_CMD_LICENSING 0xf3
9181 10070
@@ -9220,6 +10109,93 @@
9220 10109
9221 10110
9222/***********************************/ 10111/***********************************/
10112/* MC_CMD_LICENSING_V3
10113 * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
10114 * - V3 licensing (Medford)
10115 */
10116#define MC_CMD_LICENSING_V3 0xd0
10117
10118#define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
10119
10120/* MC_CMD_LICENSING_V3_IN msgrequest */
10121#define MC_CMD_LICENSING_V3_IN_LEN 4
10122/* identifies the type of operation requested */
10123#define MC_CMD_LICENSING_V3_IN_OP_OFST 0
10124/* enum: re-read and apply licenses after a license key partition update; note
10125 * that this operation returns a zero-length response
10126 */
10127#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
10128/* enum: report counts of installed licenses */
10129#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
10130
10131/* MC_CMD_LICENSING_V3_OUT msgresponse */
10132#define MC_CMD_LICENSING_V3_OUT_LEN 88
10133/* count of keys which are valid */
10134#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
10135/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
10136 * MC_CMD_FC_OP_LICENSE)
10137 */
10138#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
10139/* count of keys which are invalid due to being unverifiable */
10140#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
10141/* count of keys which are invalid due to being for the wrong node */
10142#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
10143/* licensing state (for diagnostics; the exact meaning of the bits in this
10144 * field is private to the firmware)
10145 */
10146#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
10147/* licensing subsystem self-test report (for manftest) */
10148#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
10149/* enum: licensing subsystem self-test failed */
10150#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
10151/* enum: licensing subsystem self-test passed */
10152#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
10153/* bitmask of licensed applications */
10154#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
10155#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
10156#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24
10157#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28
10158/* reserved for future use */
10159#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32
10160#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24
10161/* bitmask of licensed features */
10162#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56
10163#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8
10164#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56
10165#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60
10166/* reserved for future use */
10167#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64
10168#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24
10169
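
The 64-bit masks in this response are split into LO/HI dwords; a sketch of reassembling LICENSED_APPS (get_dword() is the same kind of hypothetical little-endian reader sketched earlier):

#include <endian.h>
#include <stdint.h>
#include <string.h>

static uint32_t get_dword(const uint8_t *buf, unsigned int ofst)
{
	uint32_t v;

	memcpy(&v, buf + ofst, sizeof(v));
	return le32toh(v);
}

static uint64_t licensing_v3_apps(const uint8_t *outbuf)
{
	uint64_t lo = get_dword(outbuf, MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST);
	uint64_t hi = get_dword(outbuf, MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST);

	return lo | (hi << 32);
}
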
10170
10171/***********************************/
10172/* MC_CMD_LICENSING_GET_ID_V3
10173 * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
10174 * partition - V3 licensing (Medford)
10175 */
10176#define MC_CMD_LICENSING_GET_ID_V3 0xd1
10177
10178#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
10179
10180/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
10181#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
10182
10183/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
10184#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
10185#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
10186#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
10187/* type of license (e.g. 3) */
10188#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
10189/* length of the license ID (in bytes) */
10190#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
10191/* the unique license ID of the adapter */
10192#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
10193#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
10194#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
10195#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
10196
10197
10198/***********************************/
9223/* MC_CMD_MC2MC_PROXY 10199/* MC_CMD_MC2MC_PROXY
9224 * Execute an arbitrary MCDI command on the slave MC of a dual-core device. 10200 * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
9225 * This will fail on a single-core system. 10201 * This will fail on a single-core system.
@@ -9239,7 +10215,7 @@
9239/* MC_CMD_GET_LICENSED_APP_STATE 10215/* MC_CMD_GET_LICENSED_APP_STATE
9240 * Query the state of an individual licensed application. (Note that the actual 10216 * Query the state of an individual licensed application. (Note that the actual
9241 * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation 10217 * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
9242 * or a reboot of the MC.) 10218 * or a reboot of the MC.) Not used for V3 licensing.
9243 */ 10219 */
9244#define MC_CMD_GET_LICENSED_APP_STATE 0xf5 10220#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
9245 10221
@@ -9261,8 +10237,68 @@
9261 10237
9262 10238
9263/***********************************/ 10239/***********************************/
10240/* MC_CMD_GET_LICENSED_V3_APP_STATE
10241 * Query the state of an individual licensed application. (Note that the actual
10242 * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
10243 * operation or a reboot of the MC.) Used for V3 licensing (Medford)
10244 */
10245#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
10246
10247#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
10248
10249/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
10250#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
10251/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
10252 * mask
10253 */
10254#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
10255#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
10256#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
10257#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
10258
10259/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
10260#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
10261/* state of this application */
10262#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
10263/* enum: no (or invalid) license is present for the application */
10264#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
10265/* enum: a valid license is present for the application */
10266#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
10267
10268
10269/***********************************/
10270/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
10271 * Query the state of one or more licensed features. (Note that the actual
10272 * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
10273 * operation or a reboot of the MC.) Used for V3 licensing (Medford)
10274 */
10275#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
10276
10277#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
10278
10279/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
10280#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
10281/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
10282 * more bits set
10283 */
10284#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
10285#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
10286#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
10287#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
10288
10289/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
10290#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
10291/* states of these features - bit set for licensed, clear for not licensed */
10292#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
10293#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
10294#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
10295#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
10296
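
The exchange above is a pure 64-bit mask round trip: the IN FEATURES field and the OUT STATES field share the same LO/HI dword layout, and a queried feature is licensed exactly when its bit comes back set. A sketch:

#include <stdbool.h>
#include <stdint.h>

static void mask_to_lo_hi(uint64_t mask, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)mask;		/* written at ..._IN_FEATURES_LO_OFST */
	*hi = (uint32_t)(mask >> 32);	/* written at ..._IN_FEATURES_HI_OFST */
}

static bool feature_licensed(uint64_t states, uint64_t feature_bit)
{
	return (states & feature_bit) != 0;
}
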
10297
10298/***********************************/
9264/* MC_CMD_LICENSED_APP_OP 10299/* MC_CMD_LICENSED_APP_OP
9265 * Perform an action for an individual licensed application. 10300 * Perform an action for an individual licensed application - not used for V3
10301 * licensing.
9266 */ 10302 */
9267#define MC_CMD_LICENSED_APP_OP 0xf6 10303#define MC_CMD_LICENSED_APP_OP 0xf6
9268 10304
@@ -9328,6 +10364,67 @@
9328 10364
9329 10365
9330/***********************************/ 10366/***********************************/
10367/* MC_CMD_LICENSED_V3_VALIDATE_APP
10368 * Perform validation for an individual licensed application - V3 licensing
10369 * (Medford)
10370 */
10371#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
10372
10373#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
10374
10375/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
10376#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 72
10377/* application ID expressed as a single bit mask */
10378#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 0
10379#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
10380#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 0
10381#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 4
10382/* challenge for validation */
10383#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 8
10384#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 64
10385
10386/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
10387#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 72
10388/* application expiry time */
10389#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 0
10390/* application expiry units */
10391#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 4
10392/* enum: expiry units are accounting units */
10393#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
10394/* enum: expiry units are calendar days */
10395#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
10396/* validation response to challenge */
10397#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 8
10398#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 64
10399
10400
10401/***********************************/
10402/* MC_CMD_LICENSED_V3_MASK_FEATURES
10403 * Mask features - V3 licensing (Medford)
10404 */
10405#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
10406
10407#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
10408
10409/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
10410#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
10411/* mask to be applied to features to be changed */
10412#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
10413#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
10414#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
10415#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
10416/* whether to turn on or turn off the masked features */
10417#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
10418/* enum: turn the features off */
10419#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
10420/* enum: turn the features back on */
10421#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
10422
10423/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
10424#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
10425
10426
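Since the response is empty, masking features on or off reduces to a fire-and-forget request; a hedged sketch in the same style as above (helper name hypothetical):

	static int efx_mask_v3_features(struct efx_nic *efx, u64 mask, bool on)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN);

		MCDI_SET_QWORD(inbuf, LICENSED_V3_MASK_FEATURES_IN_MASK, mask);
		MCDI_SET_DWORD(inbuf, LICENSED_V3_MASK_FEATURES_IN_FLAG,
			       on ? MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON
				  : MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF);
		/* OUT_LEN is 0, so no response buffer is needed */
		return efx_mcdi_rpc(efx, MC_CMD_LICENSED_V3_MASK_FEATURES,
				    inbuf, sizeof(inbuf), NULL, 0, NULL);
	}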
10427/***********************************/
9331/* MC_CMD_SET_PORT_SNIFF_CONFIG 10428/* MC_CMD_SET_PORT_SNIFF_CONFIG
9332 * Configure RX port sniffing for the physical port associated with the calling 10429 * Configure RX port sniffing for the physical port associated with the calling
9333 * function. Only a privileged function may change the port sniffing 10430 * function. Only a privileged function may change the port sniffing
@@ -9696,12 +10793,27 @@
9696#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */ 10793#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
9697#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */ 10794#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
9698#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */ 10795#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
9699#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 /* enum */ 10796/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. */
10797#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
9700#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */ 10798#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
9701#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */ 10799#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
9702#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */ 10800#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
9703#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */ 10801#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
9704#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */ 10802#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
10803/* enum: Allows setting the TX packets' source MAC address to any arbitrary
10804 * MAC address.
10805 */
10806#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
10807/* enum: Privilege that allows a Function to change the MAC address configured
10808 * in its associated vAdapter/vPort.
10809 */
10810#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
10811/* enum: Privilege that allows a Function to install filters that specify VLANs
10812 * that are not in the permit list for the associated vPort. This privilege is
10813 * primarily to support ESX where vPorts are created that restrict traffic to
10814 * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
10815 */
10816#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
9705/* enum: Set this bit to indicate that a new privilege mask is to be set, 10817/* enum: Set this bit to indicate that a new privilege mask is to be set,
9706 * otherwise the command will only read the existing mask. 10818 * otherwise the command will only read the existing mask.
9707 */ 10819 */
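As an illustration of how the new bits compose with the deprecated one, a capability check might look like the following sketch (the mask is assumed to have already been read via MC_CMD_PRIVILEGE_MASK; the helper name is hypothetical):

	/* The deprecated MAC_SPOOFING bit is documented above as equivalent to
	 * MAC_SPOOFING_TX combined with CHANGE_MAC, so accept either.
	 */
	static bool efx_may_set_arbitrary_tx_mac(u32 privilege_mask)
	{
		return privilege_mask &
		       (MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX |
			MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING);
	}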
@@ -9951,7 +11063,7 @@
9951/* Sector type */ 11063/* Sector type */
9952#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4 11064#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
9953/* Enum values, see field(s): */ 11065/* Enum values, see field(s): */
9954/* MC_CMD_XPM_READ_SECTOR_OUT/TYPE */ 11066/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
9955/* Sector size */ 11067/* Sector size */
9956#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8 11068#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
9957/* Sector data */ 11069/* Sector data */
@@ -10067,4 +11179,123 @@
10067#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0 11179#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
10068 11180
10069 11181
11182/***********************************/
11183/* MC_CMD_EXEC_SIGNED
11184 * Check the CMAC of the contents of IMEM and DMEM against the value supplied
11185 * and if correct begin execution from the start of IMEM. The caller supplies a
11186 * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC
11187 * computation runs from the start of IMEM, and from the start of DMEM + 16k,
11188 * to match flash booting. The command will respond with EINVAL if the CMAC
11189 * does not match, otherwise it will respond with success before it jumps to IMEM.
11190 */
11191#define MC_CMD_EXEC_SIGNED 0x10c
11192
11193#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
11194
11195/* MC_CMD_EXEC_SIGNED_IN msgrequest */
11196#define MC_CMD_EXEC_SIGNED_IN_LEN 28
11197/* the length of code to include in the CMAC */
11198#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
11199/* the length of data to include in the CMAC */
11200#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
11201/* the XPM sector containing the key to use */
11202#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
11203/* the expected CMAC value */
11204#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
11205#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
11206
11207/* MC_CMD_EXEC_SIGNED_OUT msgresponse */
11208#define MC_CMD_EXEC_SIGNED_OUT_LEN 0
11209
11210
11211/***********************************/
11212/* MC_CMD_PREPARE_SIGNED
11213 * Prepare to upload a signed image. This will scrub the specified length of
11214 * the data region, which must be at least as large as the DATALEN supplied to
11215 * MC_CMD_EXEC_SIGNED.
11216 */
11217#define MC_CMD_PREPARE_SIGNED 0x10d
11218
11219#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
11220
11221/* MC_CMD_PREPARE_SIGNED_IN msgrequest */
11222#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
11223/* the length of data area to clear */
11224#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
11225
11226/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
11227#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
11228
11229
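The two commands pair up: scrub the data region, upload the new IMEM/DMEM contents, then validate and jump. A minimal sketch of the expected sequence, assuming the standard MCDI helpers (the function name and the upload step are hypothetical):

	static int efx_exec_signed(struct efx_nic *efx, u32 codelen, u32 datalen,
				   u32 keysector, const u8 *cmac)
	{
		MCDI_DECLARE_BUF(prep, MC_CMD_PREPARE_SIGNED_IN_LEN);
		MCDI_DECLARE_BUF(exec, MC_CMD_EXEC_SIGNED_IN_LEN);
		int rc;

		/* 1) scrub at least DATALEN bytes of the data region */
		MCDI_SET_DWORD(prep, PREPARE_SIGNED_IN_DATALEN, datalen);
		rc = efx_mcdi_rpc(efx, MC_CMD_PREPARE_SIGNED,
				  prep, sizeof(prep), NULL, 0, NULL);
		if (rc)
			return rc;

		/* 2) ...upload IMEM and DMEM contents here (not shown)... */

		/* 3) validate the CMAC and, on success, jump to IMEM */
		MCDI_SET_DWORD(exec, EXEC_SIGNED_IN_CODELEN, codelen);
		MCDI_SET_DWORD(exec, EXEC_SIGNED_IN_DATALEN, datalen);
		MCDI_SET_DWORD(exec, EXEC_SIGNED_IN_KEYSECTOR, keysector);
		memcpy(MCDI_PTR(exec, EXEC_SIGNED_IN_CMAC), cmac,
		       MC_CMD_EXEC_SIGNED_IN_CMAC_LEN);
		return efx_mcdi_rpc(efx, MC_CMD_EXEC_SIGNED,
				    exec, sizeof(exec), NULL, 0, NULL);
	}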
11230/***********************************/
11231/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
11232 * Configure UDP ports for tunnel encapsulation hardware acceleration. The
11233 * parser-dispatcher will attempt to parse traffic on these ports as tunnel
11234 * encapsulation PDUs and filter them using the tunnel encapsulation filter
11235 * chain rather than the standard filter chain. Note that this command can
11236 * cause all functions to see a reset. (Available on Medford only.)
11237 */
11238#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117
11239
11240#define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN
11241
11242/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */
11243#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4
11244#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68
11245#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num))
11246/* Flags */
11247#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0
11248#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2
11249#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0
11250#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1
11251/* The number of entries in the ENTRIES array */
11252#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST 2
11253#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN 2
11254/* Entries defining the UDP port to protocol mapping, each laid out as a
11255 * TUNNEL_ENCAP_UDP_PORT_ENTRY
11256 */
11257#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST 4
11258#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4
11259#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0
11260#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16
11261
11262/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */
11263#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2
11264/* Flags */
11265#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0
11266#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2
11267#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
11268#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
11269
11270
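A caller fills in the 16-bit flags, the entry count and up to 16 four-byte entries. The TUNNEL_ENCAP_UDP_PORT_ENTRY layout is not part of this hunk, so the sketch below treats each entry as an opaque little-endian 32-bit value and encodes the small fields by hand; put_unaligned_le16() is from <asm/unaligned.h>, and the helper name is hypothetical:

	static int efx_set_tunnel_ports(struct efx_nic *efx, const __le32 *entries,
					unsigned int count, bool unloading,
					bool *resetting)
	{
		MCDI_DECLARE_BUF(inbuf,
				 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX);
		MCDI_DECLARE_BUF(outbuf,
				 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN);
		u8 *req = (u8 *)inbuf;
		size_t outlen;
		int rc;

		if (count > MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM)
			return -EINVAL;

		memset(inbuf, 0, sizeof(inbuf));
		if (unloading)	/* UNLOADING is bit 0 of the 16-bit FLAGS */
			req[MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST] = 1;
		put_unaligned_le16(count,
			req + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST);
		memcpy(req + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST,
		       entries, count * 4);

		rc = efx_mcdi_rpc(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS,
				  inbuf,
				  MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(count),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;

		/* RESETTING is bit 0 of the 16-bit OUT FLAGS; if set, all
		 * functions should expect to see a reset.
		 */
		*resetting = outlen >= MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN &&
			     (((u8 *)outbuf)[MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST] & 1);
		return 0;
	}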
11271/***********************************/
11272/* MC_CMD_RX_BALANCING
11273 * Configure a port upconverter to distribute packets across both RX engines.
11274 * Packets are distributed based on a table with the destination vFIFO. The
11275 * table index is a hash of the IPv4 source and destination addresses and the
11276 * VLAN priority.
11277 */
11278#define MC_CMD_RX_BALANCING 0x118
11279
11280#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
11281
11282/* MC_CMD_RX_BALANCING_IN msgrequest */
11283#define MC_CMD_RX_BALANCING_IN_LEN 4
11284/* The RX port whose upconverter table will be modified */
11285#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
11286#define MC_CMD_RX_BALANCING_IN_PORT_LEN 1
11288/* The VLAN priority associated with the table index and vFIFO */
11288#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 1
11289#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 1
11290/* The resulting bit of SRC^DST for indexing the table */
11291#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 2
11292#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 1
11293/* The RX engine to which the vFIFO in the table entry will point */
11294#define MC_CMD_RX_BALANCING_IN_ENG_OFST 3
11295#define MC_CMD_RX_BALANCING_IN_ENG_LEN 1
11296
11297/* MC_CMD_RX_BALANCING_OUT msgresponse */
11298#define MC_CMD_RX_BALANCING_OUT_LEN 0
11299
11300
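All four request fields are single bytes and the response is empty, so a wrapper is trivial; a sketch (name hypothetical):

	static int efx_rx_balancing(struct efx_nic *efx, u8 port, u8 vlan_prio,
				    u8 src_dst, u8 eng)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_RX_BALANCING_IN_LEN);
		u8 *req = (u8 *)inbuf;

		memset(inbuf, 0, sizeof(inbuf));
		req[MC_CMD_RX_BALANCING_IN_PORT_OFST] = port;
		req[MC_CMD_RX_BALANCING_IN_PRIORITY_OFST] = vlan_prio;
		req[MC_CMD_RX_BALANCING_IN_SRC_DST_OFST] = src_dst;
		req[MC_CMD_RX_BALANCING_IN_ENG_OFST] = eng;

		/* MC_CMD_RX_BALANCING_OUT_LEN is 0, so no response buffer */
		return efx_mcdi_rpc(efx, MC_CMD_RX_BALANCING,
				    inbuf, sizeof(inbuf), NULL, 0, NULL);
	}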
10070#endif /* MCDI_PCOL_H */ 11301#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index d13ddf9703ff..9ff062a36ea8 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -868,6 +868,7 @@ struct vfdi_status;
868 * be held to modify it. 868 * be held to modify it.
869 * @port_initialized: Port initialized? 869 * @port_initialized: Port initialized?
870 * @net_dev: Operating system network device. Consider holding the rtnl lock 870 * @net_dev: Operating system network device. Consider holding the rtnl lock
871 * @fixed_features: Features which cannot be turned off
871 * @stats_buffer: DMA buffer for statistics 872 * @stats_buffer: DMA buffer for statistics
872 * @phy_type: PHY type 873 * @phy_type: PHY type
873 * @phy_op: PHY interface 874 * @phy_op: PHY interface
@@ -916,7 +917,6 @@ struct vfdi_status;
916 * @stats_lock: Statistics update lock. Must be held when calling 917 * @stats_lock: Statistics update lock. Must be held when calling
917 * efx_nic_type::{update,start,stop}_stats. 918 * efx_nic_type::{update,start,stop}_stats.
918 * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb 919 * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
919 * @mc_promisc: Whether in multicast promiscuous mode when last changed
920 * 920 *
921 * This is stored in the private area of the &struct net_device. 921 * This is stored in the private area of the &struct net_device.
922 */ 922 */
@@ -1008,6 +1008,8 @@ struct efx_nic {
1008 bool port_initialized; 1008 bool port_initialized;
1009 struct net_device *net_dev; 1009 struct net_device *net_dev;
1010 1010
1011 netdev_features_t fixed_features;
1012
1011 struct efx_buffer stats_buffer; 1013 struct efx_buffer stats_buffer;
1012 u64 rx_nodesc_drops_total; 1014 u64 rx_nodesc_drops_total;
1013 u64 rx_nodesc_drops_while_down; 1015 u64 rx_nodesc_drops_while_down;
@@ -1065,7 +1067,6 @@ struct efx_nic {
1065 int last_irq_cpu; 1067 int last_irq_cpu;
1066 spinlock_t stats_lock; 1068 spinlock_t stats_lock;
1067 atomic_t n_rx_noskb_drops; 1069 atomic_t n_rx_noskb_drops;
1068 bool mc_promisc;
1069}; 1070};
1070 1071
1071static inline int efx_dev_registered(struct efx_nic *efx) 1072static inline int efx_dev_registered(struct efx_nic *efx)
@@ -1333,6 +1334,8 @@ struct efx_nic_type {
1333 int (*ptp_set_ts_config)(struct efx_nic *efx, 1334 int (*ptp_set_ts_config)(struct efx_nic *efx,
1334 struct hwtstamp_config *init); 1335 struct hwtstamp_config *init);
1335 int (*sriov_configure)(struct efx_nic *efx, int num_vfs); 1336 int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
1337 int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
1338 int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
1336 int (*sriov_init)(struct efx_nic *efx); 1339 int (*sriov_init)(struct efx_nic *efx);
1337 void (*sriov_fini)(struct efx_nic *efx); 1340 void (*sriov_fini)(struct efx_nic *efx);
1338 bool (*sriov_wanted)(struct efx_nic *efx); 1341 bool (*sriov_wanted)(struct efx_nic *efx);
@@ -1521,4 +1524,16 @@ static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
1521 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1524 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1522} 1525}
1523 1526
1527/* Get all supported features.
1528 * If a feature is not fixed, it is present in hw_features.
1529 * If a feature is fixed, it is not present in hw_features, but is
1530 * always present in features.
1531 */
1532static inline netdev_features_t efx_supported_features(const struct efx_nic *efx)
1533{
1534 const struct net_device *net_dev = efx->net_dev;
1535
1536 return net_dev->features | net_dev->hw_features;
1537}
1538
1524#endif /* EFX_NET_DRIVER_H */ 1539#endif /* EFX_NET_DRIVER_H */
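A typical consumer of the new helper would test the combined mask rather than net_dev->features alone; for instance (illustrative only):

	static bool efx_vlan_filter_supported(const struct efx_nic *efx)
	{
		/* true whether RX VLAN filtering is switchable or fixed-on */
		return efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER;
	}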
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 0b536e27d3b2..96944c3c9d14 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -519,6 +519,9 @@ enum {
519#ifdef CONFIG_SFC_SRIOV 519#ifdef CONFIG_SFC_SRIOV
520 * @vf: Pointer to VF data structure 520 * @vf: Pointer to VF data structure
521#endif 521#endif
522 * @vport_mac: The MAC address on the vport, only for PFs; zero for VFs
523 * @vlan_list: List of VLANs added over the interface. Serialised by vlan_lock.
524 * @vlan_lock: Lock to serialise access to vlan_list.
522 */ 525 */
523struct efx_ef10_nic_data { 526struct efx_ef10_nic_data {
524 struct efx_buffer mcdi_buf; 527 struct efx_buffer mcdi_buf;
@@ -550,6 +553,8 @@ struct efx_ef10_nic_data {
550 struct ef10_vf *vf; 553 struct ef10_vf *vf;
551#endif 554#endif
552 u8 vport_mac[ETH_ALEN]; 555 u8 vport_mac[ETH_ALEN];
556 struct list_head vlan_list;
557 struct mutex vlan_lock;
553}; 558};
554 559
555int efx_init_sriov(void); 560int efx_init_sriov(void);
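The new list and mutex would presumably be set up when the EF10 nic_data is allocated, along these lines (probe-path placement assumed):

	/* sketch: probe-time initialisation of the new fields */
	mutex_init(&nic_data->vlan_lock);
	INIT_LIST_HEAD(&nic_data->vlan_list);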
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index cec147d1d34f..8f06a6621ab1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -40,7 +40,7 @@ config DWMAC_GENERIC
40config DWMAC_IPQ806X 40config DWMAC_IPQ806X
41 tristate "QCA IPQ806x DWMAC support" 41 tristate "QCA IPQ806x DWMAC support"
42 default ARCH_QCOM 42 default ARCH_QCOM
43 depends on OF 43 depends on OF && (ARCH_QCOM || COMPILE_TEST)
44 select MFD_SYSCON 44 select MFD_SYSCON
45 help 45 help
46 Support for QCA IPQ806X DWMAC Ethernet. 46 Support for QCA IPQ806X DWMAC Ethernet.
@@ -53,7 +53,7 @@ config DWMAC_IPQ806X
53config DWMAC_LPC18XX 53config DWMAC_LPC18XX
54 tristate "NXP LPC18xx/43xx DWMAC support" 54 tristate "NXP LPC18xx/43xx DWMAC support"
55 default ARCH_LPC18XX 55 default ARCH_LPC18XX
56 depends on OF 56 depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
57 select MFD_SYSCON 57 select MFD_SYSCON
58 ---help--- 58 ---help---
59 Support for NXP LPC18xx/43xx DWMAC Ethernet. 59 Support for NXP LPC18xx/43xx DWMAC Ethernet.
@@ -61,7 +61,7 @@ config DWMAC_LPC18XX
61config DWMAC_MESON 61config DWMAC_MESON
62 tristate "Amlogic Meson dwmac support" 62 tristate "Amlogic Meson dwmac support"
63 default ARCH_MESON 63 default ARCH_MESON
64 depends on OF 64 depends on OF && (ARCH_MESON || COMPILE_TEST)
65 help 65 help
66 Support for Ethernet controller on Amlogic Meson SoCs. 66 Support for Ethernet controller on Amlogic Meson SoCs.
67 67
@@ -72,7 +72,7 @@ config DWMAC_MESON
72config DWMAC_ROCKCHIP 72config DWMAC_ROCKCHIP
73 tristate "Rockchip dwmac support" 73 tristate "Rockchip dwmac support"
74 default ARCH_ROCKCHIP 74 default ARCH_ROCKCHIP
75 depends on OF 75 depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST)
76 select MFD_SYSCON 76 select MFD_SYSCON
77 help 77 help
78 Support for Ethernet controller on Rockchip RK3288 SoC. 78 Support for Ethernet controller on Rockchip RK3288 SoC.
@@ -83,7 +83,7 @@ config DWMAC_ROCKCHIP
83config DWMAC_SOCFPGA 83config DWMAC_SOCFPGA
84 tristate "SOCFPGA dwmac support" 84 tristate "SOCFPGA dwmac support"
85 default ARCH_SOCFPGA 85 default ARCH_SOCFPGA
86 depends on OF 86 depends on OF && (ARCH_SOCFPGA || COMPILE_TEST)
87 select MFD_SYSCON 87 select MFD_SYSCON
88 help 88 help
89 Support for ethernet controller on Altera SOCFPGA 89 Support for ethernet controller on Altera SOCFPGA
@@ -95,7 +95,7 @@ config DWMAC_SOCFPGA
95config DWMAC_STI 95config DWMAC_STI
96 tristate "STi GMAC support" 96 tristate "STi GMAC support"
97 default ARCH_STI 97 default ARCH_STI
98 depends on OF 98 depends on OF && (ARCH_STI || COMPILE_TEST)
99 select MFD_SYSCON 99 select MFD_SYSCON
100 ---help--- 100 ---help---
101 Support for ethernet controller on STi SOCs. 101 Support for ethernet controller on STi SOCs.
@@ -107,7 +107,7 @@ config DWMAC_STI
107config DWMAC_SUNXI 107config DWMAC_SUNXI
108 tristate "Allwinner GMAC support" 108 tristate "Allwinner GMAC support"
109 default ARCH_SUNXI 109 default ARCH_SUNXI
110 depends on OF 110 depends on OF && (ARCH_SUNXI || COMPILE_TEST)
111 ---help--- 111 ---help---
112 Support for Allwinner A20/A31 GMAC ethernet controllers. 112 Support for Allwinner A20/A31 GMAC ethernet controllers.
113 113
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index fc60368df2e7..2533b91f1421 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -232,6 +232,11 @@ struct stmmac_extra_stats {
232#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */ 232#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */
233#define DEFAULT_DMA_PBL 8 233#define DEFAULT_DMA_PBL 8
234 234
235/* PCS status and mask defines */
236#define PCS_ANE_IRQ BIT(2) /* PCS Auto-Negotiation */
237#define PCS_LINK_IRQ BIT(1) /* PCS Link */
238#define PCS_RGSMIIIS_IRQ BIT(0) /* RGMII or SMII Interrupt */
239
235/* Max/Min RI Watchdog Timer count value */ 240/* Max/Min RI Watchdog Timer count value */
236#define MAX_DMA_RIWT 0xff 241#define MAX_DMA_RIWT 0xff
237#define MIN_DMA_RIWT 0x20 242#define MIN_DMA_RIWT 0x20
@@ -272,9 +277,6 @@ enum dma_irq_status {
272#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2) 277#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2)
273#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3) 278#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3)
274 279
275#define CORE_PCS_ANE_COMPLETE (1 << 5)
276#define CORE_PCS_LINK_STATUS (1 << 6)
277#define CORE_RGMII_IRQ (1 << 7)
278#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8) 280#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8)
279 281
280/* Physical Coding Sublayer */ 282/* Physical Coding Sublayer */
@@ -469,9 +471,12 @@ struct stmmac_ops {
469 void (*reset_eee_mode)(struct mac_device_info *hw); 471 void (*reset_eee_mode)(struct mac_device_info *hw);
470 void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); 472 void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
471 void (*set_eee_pls)(struct mac_device_info *hw, int link); 473 void (*set_eee_pls)(struct mac_device_info *hw, int link);
472 void (*ctrl_ane)(struct mac_device_info *hw, bool restart);
473 void (*get_adv)(struct mac_device_info *hw, struct rgmii_adv *adv);
474 void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x); 474 void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x);
475 /* PCS calls */
476 void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
477 bool loopback);
478 void (*pcs_rane)(void __iomem *ioaddr, bool restart);
479 void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv);
475}; 480};
476 481
477/* PTP and HW Timer helpers */ 482/* PTP and HW Timer helpers */
@@ -524,6 +529,9 @@ struct mac_device_info {
524 int unicast_filter_entries; 529 int unicast_filter_entries;
525 int mcast_bits_log2; 530 int mcast_bits_log2;
526 unsigned int rx_csum; 531 unsigned int rx_csum;
532 unsigned int pcs;
533 unsigned int pmt;
534 unsigned int ps;
527}; 535};
528 536
529struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, 537struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
@@ -546,6 +554,7 @@ void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
546void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable); 554void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable);
547 555
548void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); 556void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
557
549extern const struct stmmac_mode_ops ring_mode_ops; 558extern const struct stmmac_mode_ops ring_mode_ops;
550extern const struct stmmac_mode_ops chain_mode_ops; 559extern const struct stmmac_mode_ops chain_mode_ops;
551extern const struct stmmac_desc_ops dwmac4_desc_ops; 560extern const struct stmmac_desc_ops dwmac4_desc_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 0cd3ecff768b..92105916ef40 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -46,6 +46,7 @@ struct rk_priv_data {
46 struct platform_device *pdev; 46 struct platform_device *pdev;
47 int phy_iface; 47 int phy_iface;
48 struct regulator *regulator; 48 struct regulator *regulator;
49 bool suspended;
49 const struct rk_gmac_ops *ops; 50 const struct rk_gmac_ops *ops;
50 51
51 bool clk_enabled; 52 bool clk_enabled;
@@ -72,6 +73,122 @@ struct rk_priv_data {
72#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16)) 73#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
73#define GRF_CLR_BIT(nr) (BIT(nr+16)) 74#define GRF_CLR_BIT(nr) (BIT(nr+16))
74 75
76#define RK3228_GRF_MAC_CON0 0x0900
77#define RK3228_GRF_MAC_CON1 0x0904
78
79/* RK3228_GRF_MAC_CON0 */
80#define RK3228_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
81#define RK3228_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
82
83/* RK3228_GRF_MAC_CON1 */
84#define RK3228_GMAC_PHY_INTF_SEL_RGMII \
85 (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
86#define RK3228_GMAC_PHY_INTF_SEL_RMII \
87 (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
88#define RK3228_GMAC_FLOW_CTRL GRF_BIT(3)
89#define RK3228_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
90#define RK3228_GMAC_SPEED_10M GRF_CLR_BIT(2)
91#define RK3228_GMAC_SPEED_100M GRF_BIT(2)
92#define RK3228_GMAC_RMII_CLK_25M GRF_BIT(7)
93#define RK3228_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
94#define RK3228_GMAC_CLK_125M (GRF_CLR_BIT(8) | GRF_CLR_BIT(9))
95#define RK3228_GMAC_CLK_25M (GRF_BIT(8) | GRF_BIT(9))
96#define RK3228_GMAC_CLK_2_5M (GRF_CLR_BIT(8) | GRF_BIT(9))
97#define RK3228_GMAC_RMII_MODE GRF_BIT(10)
98#define RK3228_GMAC_RMII_MODE_CLR GRF_CLR_BIT(10)
99#define RK3228_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
100#define RK3228_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
101#define RK3228_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
102#define RK3228_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(1)
103
104static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
105 int tx_delay, int rx_delay)
106{
107 struct device *dev = &bsp_priv->pdev->dev;
108
109 if (IS_ERR(bsp_priv->grf)) {
110 dev_err(dev, "Missing rockchip,grf property\n");
111 return;
112 }
113
114 regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
115 RK3228_GMAC_PHY_INTF_SEL_RGMII |
116 RK3228_GMAC_RMII_MODE_CLR |
117 RK3228_GMAC_RXCLK_DLY_ENABLE |
118 RK3228_GMAC_TXCLK_DLY_ENABLE);
119
120 regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0,
121 RK3228_GMAC_CLK_RX_DL_CFG(rx_delay) |
122 RK3228_GMAC_CLK_TX_DL_CFG(tx_delay));
123}
124
125static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv)
126{
127 struct device *dev = &bsp_priv->pdev->dev;
128
129 if (IS_ERR(bsp_priv->grf)) {
130 dev_err(dev, "Missing rockchip,grf property\n");
131 return;
132 }
133
134 regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
135 RK3228_GMAC_PHY_INTF_SEL_RMII |
136 RK3228_GMAC_RMII_MODE);
137
138 /* set MAC to RMII mode */
139 regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, GRF_BIT(11));
140}
141
142static void rk3228_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
143{
144 struct device *dev = &bsp_priv->pdev->dev;
145
146 if (IS_ERR(bsp_priv->grf)) {
147 dev_err(dev, "Missing rockchip,grf property\n");
148 return;
149 }
150
151 if (speed == 10)
152 regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
153 RK3228_GMAC_CLK_2_5M);
154 else if (speed == 100)
155 regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
156 RK3228_GMAC_CLK_25M);
157 else if (speed == 1000)
158 regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
159 RK3228_GMAC_CLK_125M);
160 else
161 dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
162}
163
164static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
165{
166 struct device *dev = &bsp_priv->pdev->dev;
167
168 if (IS_ERR(bsp_priv->grf)) {
169 dev_err(dev, "Missing rockchip,grf property\n");
170 return;
171 }
172
173 if (speed == 10)
174 regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
175 RK3228_GMAC_RMII_CLK_2_5M |
176 RK3228_GMAC_SPEED_10M);
177 else if (speed == 100)
178 regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
179 RK3228_GMAC_RMII_CLK_25M |
180 RK3228_GMAC_SPEED_100M);
181 else
182 dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
183}
184
185static const struct rk_gmac_ops rk3228_ops = {
186 .set_to_rgmii = rk3228_set_to_rgmii,
187 .set_to_rmii = rk3228_set_to_rmii,
188 .set_rgmii_speed = rk3228_set_rgmii_speed,
189 .set_rmii_speed = rk3228_set_rmii_speed,
190};
191
75#define RK3288_GRF_SOC_CON1 0x0248 192#define RK3288_GRF_SOC_CON1 0x0248
76#define RK3288_GRF_SOC_CON3 0x0250 193#define RK3288_GRF_SOC_CON3 0x0250
77 194
@@ -529,9 +646,8 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
529 return bsp_priv; 646 return bsp_priv;
530} 647}
531 648
532static int rk_gmac_init(struct platform_device *pdev, void *priv) 649static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
533{ 650{
534 struct rk_priv_data *bsp_priv = priv;
535 int ret; 651 int ret;
536 652
537 ret = phy_power_on(bsp_priv, true); 653 ret = phy_power_on(bsp_priv, true);
@@ -545,14 +661,50 @@ static int rk_gmac_init(struct platform_device *pdev, void *priv)
545 return 0; 661 return 0;
546} 662}
547 663
548static void rk_gmac_exit(struct platform_device *pdev, void *priv) 664static void rk_gmac_powerdown(struct rk_priv_data *gmac)
549{ 665{
550 struct rk_priv_data *gmac = priv;
551
552 phy_power_on(gmac, false); 666 phy_power_on(gmac, false);
553 gmac_clk_enable(gmac, false); 667 gmac_clk_enable(gmac, false);
554} 668}
555 669
670static int rk_gmac_init(struct platform_device *pdev, void *priv)
671{
672 struct rk_priv_data *bsp_priv = priv;
673
674 return rk_gmac_powerup(bsp_priv);
675}
676
677static void rk_gmac_exit(struct platform_device *pdev, void *priv)
678{
679 struct rk_priv_data *bsp_priv = priv;
680
681 rk_gmac_powerdown(bsp_priv);
682}
683
684static void rk_gmac_suspend(struct platform_device *pdev, void *priv)
685{
686 struct rk_priv_data *bsp_priv = priv;
687
688 /* Keep the PHY up if we use Wake-on-LAN. */
689 if (device_may_wakeup(&pdev->dev))
690 return;
691
692 rk_gmac_powerdown(bsp_priv);
693 bsp_priv->suspended = true;
694}
695
696static void rk_gmac_resume(struct platform_device *pdev, void *priv)
697{
698 struct rk_priv_data *bsp_priv = priv;
699
700 /* The PHY was up for Wake-on-LAN. */
701 if (!bsp_priv->suspended)
702 return;
703
704 rk_gmac_powerup(bsp_priv);
705 bsp_priv->suspended = false;
706}
707
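These hooks only take effect if the core platform driver invokes them around stmmac_suspend()/stmmac_resume(); a sketch of the expected glue in stmmac_platform.c, which is not shown in this excerpt:

	static int stmmac_pltfr_suspend(struct device *dev)
	{
		int ret;
		struct net_device *ndev = dev_get_drvdata(dev);
		struct stmmac_priv *priv = netdev_priv(ndev);
		struct platform_device *pdev = to_platform_device(dev);

		/* quiesce the MAC first, then let the BSP cut power/clocks */
		ret = stmmac_suspend(ndev);
		if (priv->plat->suspend)
			priv->plat->suspend(pdev, priv->plat->bsp_priv);

		return ret;
	}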
556static void rk_fix_speed(void *priv, unsigned int speed) 708static void rk_fix_speed(void *priv, unsigned int speed)
557{ 709{
558 struct rk_priv_data *bsp_priv = priv; 710 struct rk_priv_data *bsp_priv = priv;
@@ -591,6 +743,8 @@ static int rk_gmac_probe(struct platform_device *pdev)
591 plat_dat->init = rk_gmac_init; 743 plat_dat->init = rk_gmac_init;
592 plat_dat->exit = rk_gmac_exit; 744 plat_dat->exit = rk_gmac_exit;
593 plat_dat->fix_mac_speed = rk_fix_speed; 745 plat_dat->fix_mac_speed = rk_fix_speed;
746 plat_dat->suspend = rk_gmac_suspend;
747 plat_dat->resume = rk_gmac_resume;
594 748
595 plat_dat->bsp_priv = rk_gmac_setup(pdev, data); 749 plat_dat->bsp_priv = rk_gmac_setup(pdev, data);
596 if (IS_ERR(plat_dat->bsp_priv)) 750 if (IS_ERR(plat_dat->bsp_priv))
@@ -604,6 +758,7 @@ static int rk_gmac_probe(struct platform_device *pdev)
604} 758}
605 759
606static const struct of_device_id rk_gmac_dwmac_match[] = { 760static const struct of_device_id rk_gmac_dwmac_match[] = {
761 { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
607 { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, 762 { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
608 { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops }, 763 { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
609 { } 764 { }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index b0593a4268ee..ff3e5ab39bd0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -38,19 +38,26 @@
38#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */ 38#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
39 39
40#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */ 40#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
41enum dwmac1000_irq_status { 41#define GMAC_INT_STATUS_PMT BIT(3)
42 lpiis_irq = 0x400, 42#define GMAC_INT_STATUS_MMCIS BIT(4)
43 time_stamp_irq = 0x0200, 43#define GMAC_INT_STATUS_MMCRIS BIT(5)
44 mmc_rx_csum_offload_irq = 0x0080, 44#define GMAC_INT_STATUS_MMCTIS BIT(6)
45 mmc_tx_irq = 0x0040, 45#define GMAC_INT_STATUS_MMCCSUM BIT(7)
46 mmc_rx_irq = 0x0020, 46#define GMAC_INT_STATUS_TSTAMP BIT(9)
47 mmc_irq = 0x0010, 47#define GMAC_INT_STATUS_LPIIS BIT(10)
48 pmt_irq = 0x0008, 48
49 pcs_ane_irq = 0x0004, 49/* interrupt mask register */
50 pcs_link_irq = 0x0002, 50#define GMAC_INT_MASK 0x0000003c
51 rgmii_irq = 0x0001, 51#define GMAC_INT_DISABLE_RGMII BIT(0)
52}; 52#define GMAC_INT_DISABLE_PCSLINK BIT(1)
53#define GMAC_INT_MASK 0x0000003c /* interrupt mask register */ 53#define GMAC_INT_DISABLE_PCSAN BIT(2)
54#define GMAC_INT_DISABLE_PMT BIT(3)
55#define GMAC_INT_DISABLE_TIMESTAMP BIT(9)
56#define GMAC_INT_DISABLE_PCS (GMAC_INT_DISABLE_RGMII | \
57 GMAC_INT_DISABLE_PCSLINK | \
58 GMAC_INT_DISABLE_PCSAN)
59#define GMAC_INT_DEFAULT_MASK (GMAC_INT_DISABLE_TIMESTAMP | \
60 GMAC_INT_DISABLE_PCS)
54 61
55/* PMT Control and Status */ 62/* PMT Control and Status */
56#define GMAC_PMT 0x0000002c 63#define GMAC_PMT 0x0000002c
@@ -90,42 +97,23 @@ enum power_event {
90 (reg * 8)) 97 (reg * 8))
91#define GMAC_MAX_PERFECT_ADDRESSES 1 98#define GMAC_MAX_PERFECT_ADDRESSES 1
92 99
93/* PCS registers (AN/TBI/SGMII/RGMII) offset */ 100#define GMAC_PCS_BASE 0x000000c0 /* PCS register base */
94#define GMAC_AN_CTRL 0x000000c0 /* AN control */ 101#define GMAC_RGSMIIIS 0x000000d8 /* RGMII/SMII status */
95#define GMAC_AN_STATUS 0x000000c4 /* AN status */ 102
96#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */ 103/* SGMII/RGMII status register */
97#define GMAC_ANE_LPA 0x000000cc /* Auto-Neg. link partener ability */ 104#define GMAC_RGSMIIIS_LNKMODE BIT(0)
98#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */ 105#define GMAC_RGSMIIIS_SPEED GENMASK(2, 1)
99#define GMAC_TBI 0x000000d4 /* TBI extend status */ 106#define GMAC_RGSMIIIS_SPEED_SHIFT 1
100#define GMAC_S_R_GMII 0x000000d8 /* SGMII RGMII status */ 107#define GMAC_RGSMIIIS_LNKSTS BIT(3)
101 108#define GMAC_RGSMIIIS_JABTO BIT(4)
102/* AN Configuration defines */ 109#define GMAC_RGSMIIIS_FALSECARDET BIT(5)
103#define GMAC_AN_CTRL_RAN 0x00000200 /* Restart Auto-Negotiation */ 110#define GMAC_RGSMIIIS_SMIDRXS BIT(16)
104#define GMAC_AN_CTRL_ANE 0x00001000 /* Auto-Negotiation Enable */ 111/* LNKMOD */
105#define GMAC_AN_CTRL_ELE 0x00004000 /* External Loopback Enable */ 112#define GMAC_RGSMIIIS_LNKMOD_MASK 0x1
106#define GMAC_AN_CTRL_ECD 0x00010000 /* Enable Comma Detect */ 113/* LNKSPEED */
107#define GMAC_AN_CTRL_LR 0x00020000 /* Lock to Reference */ 114#define GMAC_RGSMIIIS_SPEED_125 0x2
108#define GMAC_AN_CTRL_SGMRAL 0x00040000 /* SGMII RAL Control */ 115#define GMAC_RGSMIIIS_SPEED_25 0x1
109 116#define GMAC_RGSMIIIS_SPEED_2_5 0x0
110/* AN Status defines */
111#define GMAC_AN_STATUS_LS 0x00000004 /* Link Status 0:down 1:up */
112#define GMAC_AN_STATUS_ANA 0x00000008 /* Auto-Negotiation Ability */
113#define GMAC_AN_STATUS_ANC 0x00000020 /* Auto-Negotiation Complete */
114#define GMAC_AN_STATUS_ES 0x00000100 /* Extended Status */
115
116/* Register 54 (SGMII/RGMII status register) */
117#define GMAC_S_R_GMII_LINK 0x8
118#define GMAC_S_R_GMII_SPEED 0x5
119#define GMAC_S_R_GMII_SPEED_SHIFT 0x1
120#define GMAC_S_R_GMII_MODE 0x1
121#define GMAC_S_R_GMII_SPEED_125 2
122#define GMAC_S_R_GMII_SPEED_25 1
123
124/* Common ADV and LPA defines */
125#define GMAC_ANE_FD (1 << 5)
126#define GMAC_ANE_HD (1 << 6)
127#define GMAC_ANE_PSE (3 << 7)
128#define GMAC_ANE_PSE_SHIFT 7
129 117
130/* GMAC Configuration defines */ 118/* GMAC Configuration defines */
131#define GMAC_CONTROL_2K 0x08000000 /* IEEE 802.3as 2K packets */ 119#define GMAC_CONTROL_2K 0x08000000 /* IEEE 802.3as 2K packets */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index fb1eb578e34e..cbefe9e2207c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -30,22 +30,48 @@
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/ethtool.h> 31#include <linux/ethtool.h>
32#include <asm/io.h> 32#include <asm/io.h>
33#include "stmmac_pcs.h"
33#include "dwmac1000.h" 34#include "dwmac1000.h"
34 35
35static void dwmac1000_core_init(struct mac_device_info *hw, int mtu) 36static void dwmac1000_core_init(struct mac_device_info *hw, int mtu)
36{ 37{
37 void __iomem *ioaddr = hw->pcsr; 38 void __iomem *ioaddr = hw->pcsr;
38 u32 value = readl(ioaddr + GMAC_CONTROL); 39 u32 value = readl(ioaddr + GMAC_CONTROL);
40
41 /* Configure GMAC core */
39 value |= GMAC_CORE_INIT; 42 value |= GMAC_CORE_INIT;
43
40 if (mtu > 1500) 44 if (mtu > 1500)
41 value |= GMAC_CONTROL_2K; 45 value |= GMAC_CONTROL_2K;
42 if (mtu > 2000) 46 if (mtu > 2000)
43 value |= GMAC_CONTROL_JE; 47 value |= GMAC_CONTROL_JE;
44 48
49 if (hw->ps) {
50 value |= GMAC_CONTROL_TE;
51
52 if (hw->ps == SPEED_1000) {
53 value &= ~GMAC_CONTROL_PS;
54 } else {
55 value |= GMAC_CONTROL_PS;
56
57 if (hw->ps == SPEED_10)
58 value &= ~GMAC_CONTROL_FES;
59 else
60 value |= GMAC_CONTROL_FES;
61 }
62 }
63
45 writel(value, ioaddr + GMAC_CONTROL); 64 writel(value, ioaddr + GMAC_CONTROL);
46 65
47 /* Mask GMAC interrupts */ 66 /* Mask GMAC interrupts */
48 writel(0x207, ioaddr + GMAC_INT_MASK); 67 value = GMAC_INT_DEFAULT_MASK;
68
69 if (hw->pmt)
70 value &= ~GMAC_INT_DISABLE_PMT;
71 if (hw->pcs)
72 value &= ~GMAC_INT_DISABLE_PCS;
73
74 writel(value, ioaddr + GMAC_INT_MASK);
49 75
50#ifdef STMMAC_VLAN_TAG_USED 76#ifdef STMMAC_VLAN_TAG_USED
51 /* Tag detection without filtering */ 77 /* Tag detection without filtering */
@@ -241,6 +267,39 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode)
241 writel(pmt, ioaddr + GMAC_PMT); 267 writel(pmt, ioaddr + GMAC_PMT);
242} 268}
243 269
270/* RGMII or SMII interface */
271static void dwmac1000_rgsmii(void __iomem *ioaddr, struct stmmac_extra_stats *x)
272{
273 u32 status;
274
275 status = readl(ioaddr + GMAC_RGSMIIIS);
276 x->irq_rgmii_n++;
277
278 /* Check the link status */
279 if (status & GMAC_RGSMIIIS_LNKSTS) {
280 int speed_value;
281
282 x->pcs_link = 1;
283
284 speed_value = ((status & GMAC_RGSMIIIS_SPEED) >>
285 GMAC_RGSMIIIS_SPEED_SHIFT);
286 if (speed_value == GMAC_RGSMIIIS_SPEED_125)
287 x->pcs_speed = SPEED_1000;
288 else if (speed_value == GMAC_RGSMIIIS_SPEED_25)
289 x->pcs_speed = SPEED_100;
290 else
291 x->pcs_speed = SPEED_10;
292
293 x->pcs_duplex = (status & GMAC_RGSMIIIS_LNKMOD_MASK);
294
295 pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
296 x->pcs_duplex ? "Full" : "Half");
297 } else {
298 x->pcs_link = 0;
299 pr_info("Link is Down\n");
300 }
301}
302
244static int dwmac1000_irq_status(struct mac_device_info *hw, 303static int dwmac1000_irq_status(struct mac_device_info *hw,
245 struct stmmac_extra_stats *x) 304 struct stmmac_extra_stats *x)
246{ 305{
@@ -249,19 +308,20 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
249 int ret = 0; 308 int ret = 0;
250 309
251 /* Not used events (e.g. MMC interrupts) are not handled. */ 310 /* Not used events (e.g. MMC interrupts) are not handled. */
252 if ((intr_status & mmc_tx_irq)) 311 if ((intr_status & GMAC_INT_STATUS_MMCTIS))
253 x->mmc_tx_irq_n++; 312 x->mmc_tx_irq_n++;
254 if (unlikely(intr_status & mmc_rx_irq)) 313 if (unlikely(intr_status & GMAC_INT_STATUS_MMCRIS))
255 x->mmc_rx_irq_n++; 314 x->mmc_rx_irq_n++;
256 if (unlikely(intr_status & mmc_rx_csum_offload_irq)) 315 if (unlikely(intr_status & GMAC_INT_STATUS_MMCCSUM))
257 x->mmc_rx_csum_offload_irq_n++; 316 x->mmc_rx_csum_offload_irq_n++;
258 if (unlikely(intr_status & pmt_irq)) { 317 if (unlikely(intr_status & GMAC_INT_DISABLE_PMT)) {
259 /* clear the PMT bits 5 and 6 by reading the PMT status reg */ 318 /* clear the PMT bits 5 and 6 by reading the PMT status reg */
260 readl(ioaddr + GMAC_PMT); 319 readl(ioaddr + GMAC_PMT);
261 x->irq_receive_pmt_irq_n++; 320 x->irq_receive_pmt_irq_n++;
262 } 321 }
263 /* MAC trx/rx EEE LPI entry/exit interrupts */ 322
264 if (intr_status & lpiis_irq) { 323 /* MAC tx/rx EEE LPI entry/exit interrupts */
324 if (intr_status & GMAC_INT_STATUS_LPIIS) {
265 /* Clean LPI interrupt by reading the Reg 12 */ 325 /* Clean LPI interrupt by reading the Reg 12 */
266 ret = readl(ioaddr + LPI_CTRL_STATUS); 326 ret = readl(ioaddr + LPI_CTRL_STATUS);
267 327
@@ -275,36 +335,10 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
275 x->irq_rx_path_exit_lpi_mode_n++; 335 x->irq_rx_path_exit_lpi_mode_n++;
276 } 336 }
277 337
278 if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) { 338 dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
279 readl(ioaddr + GMAC_AN_STATUS);
280 x->irq_pcs_ane_n++;
281 }
282 if (intr_status & rgmii_irq) {
283 u32 status = readl(ioaddr + GMAC_S_R_GMII);
284 x->irq_rgmii_n++;
285
286 /* Save and dump the link status. */
287 if (status & GMAC_S_R_GMII_LINK) {
288 int speed_value = (status & GMAC_S_R_GMII_SPEED) >>
289 GMAC_S_R_GMII_SPEED_SHIFT;
290 x->pcs_duplex = (status & GMAC_S_R_GMII_MODE);
291
292 if (speed_value == GMAC_S_R_GMII_SPEED_125)
293 x->pcs_speed = SPEED_1000;
294 else if (speed_value == GMAC_S_R_GMII_SPEED_25)
295 x->pcs_speed = SPEED_100;
296 else
297 x->pcs_speed = SPEED_10;
298 339
299 x->pcs_link = 1; 340 if (intr_status & PCS_RGSMIIIS_IRQ)
300 pr_debug("%s: Link is Up - %d/%s\n", __func__, 341 dwmac1000_rgsmii(ioaddr, x);
301 (int)x->pcs_speed,
302 x->pcs_duplex ? "Full" : "Half");
303 } else {
304 x->pcs_link = 0;
305 pr_debug("%s: Link is Down\n", __func__);
306 }
307 }
308 342
309 return ret; 343 return ret;
310} 344}
@@ -363,38 +397,20 @@ static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
363 writel(value, ioaddr + LPI_TIMER_CTRL); 397 writel(value, ioaddr + LPI_TIMER_CTRL);
364} 398}
365 399
366static void dwmac1000_ctrl_ane(struct mac_device_info *hw, bool restart) 400static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
401 bool loopback)
367{ 402{
368 void __iomem *ioaddr = hw->pcsr; 403 dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
369 /* auto negotiation enable and External Loopback enable */
370 u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
371
372 if (restart)
373 value |= GMAC_AN_CTRL_RAN;
374
375 writel(value, ioaddr + GMAC_AN_CTRL);
376} 404}
377 405
378static void dwmac1000_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv) 406static void dwmac1000_rane(void __iomem *ioaddr, bool restart)
379{ 407{
380 void __iomem *ioaddr = hw->pcsr; 408 dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
381 u32 value = readl(ioaddr + GMAC_ANE_ADV); 409}
382
383 if (value & GMAC_ANE_FD)
384 adv->duplex = DUPLEX_FULL;
385 if (value & GMAC_ANE_HD)
386 adv->duplex |= DUPLEX_HALF;
387
388 adv->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
389
390 value = readl(ioaddr + GMAC_ANE_LPA);
391
392 if (value & GMAC_ANE_FD)
393 adv->lp_duplex = DUPLEX_FULL;
394 if (value & GMAC_ANE_HD)
395 adv->lp_duplex = DUPLEX_HALF;
396 410
397 adv->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; 411static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
412{
413 dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
398} 414}
399 415
400static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) 416static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
@@ -485,9 +501,10 @@ static const struct stmmac_ops dwmac1000_ops = {
485 .reset_eee_mode = dwmac1000_reset_eee_mode, 501 .reset_eee_mode = dwmac1000_reset_eee_mode,
486 .set_eee_timer = dwmac1000_set_eee_timer, 502 .set_eee_timer = dwmac1000_set_eee_timer,
487 .set_eee_pls = dwmac1000_set_eee_pls, 503 .set_eee_pls = dwmac1000_set_eee_pls,
488 .ctrl_ane = dwmac1000_ctrl_ane,
489 .get_adv = dwmac1000_get_adv,
490 .debug = dwmac1000_debug, 504 .debug = dwmac1000_debug,
505 .pcs_ctrl_ane = dwmac1000_ctrl_ane,
506 .pcs_rane = dwmac1000_rane,
507 .pcs_get_adv_lp = dwmac1000_get_adv_lp,
491}; 508};
492 509
493struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, 510struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
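The shared PCS helpers referenced above live in the new stmmac_pcs.h header, which this hunk does not reproduce. Based on how they are called, and on the AN register/bit definitions removed from dwmac1000.h and dwmac4.h, they are presumably inline functions parameterised by the per-core PCS register base, roughly as sketched below; the register offsets and bit names are inferred, not confirmed by this patch:

	/* sketch of stmmac_pcs.h, assuming AN control/status at base + 0x0/0x4 */
	#define GMAC_AN_CTRL(x)		(x)		/* AN control */
	#define GMAC_AN_STATUS(x)	((x) + 0x4)	/* AN status */

	#define GMAC_AN_CTRL_RAN	BIT(9)	/* Restart Auto-Negotiation */
	#define GMAC_AN_CTRL_ANE	BIT(12)	/* Auto-Negotiation Enable */
	#define GMAC_AN_CTRL_ELE	BIT(14)	/* External Loopback Enable */
	#define GMAC_AN_CTRL_SGMRAL	BIT(18)	/* SGMII RAL Control */

	static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane,
					  bool srgmi_ral, bool loopback)
	{
		u32 value = readl(ioaddr + GMAC_AN_CTRL(reg));

		/* enable and restart auto-negotiation */
		if (ane)
			value |= GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_RAN;

		/* in a MAC-to-MAC setup the block follows the MAC conf register */
		if (srgmi_ral)
			value |= GMAC_AN_CTRL_SGMRAL;

		if (loopback)
			value |= GMAC_AN_CTRL_ELE;

		writel(value, ioaddr + GMAC_AN_CTRL(reg));
	}

	static inline void dwmac_rane(void __iomem *ioaddr, u32 reg, bool restart)
	{
		u32 value = readl(ioaddr + GMAC_AN_CTRL(reg));

		if (restart)
			value |= GMAC_AN_CTRL_RAN;

		writel(value, ioaddr + GMAC_AN_CTRL(reg));
	}

dwmac_get_adv_lp() and dwmac_pcs_isr() would follow the same pattern, reading the ADV/LPA and AN status registers relative to the supplied base and bumping the irq_pcs_* counters in struct stmmac_extra_stats.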
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index bc50952a18e7..6f4f5ce25114 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -24,10 +24,8 @@
24#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4) 24#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
25#define GMAC_INT_STATUS 0x000000b0 25#define GMAC_INT_STATUS 0x000000b0
26#define GMAC_INT_EN 0x000000b4 26#define GMAC_INT_EN 0x000000b4
27#define GMAC_AN_CTRL 0x000000e0 27#define GMAC_PCS_BASE 0x000000e0
28#define GMAC_AN_STATUS 0x000000e4 28#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8
29#define GMAC_AN_ADV 0x000000e8
30#define GMAC_AN_LPA 0x000000ec
31#define GMAC_PMT 0x000000c0 29#define GMAC_PMT 0x000000c0
32#define GMAC_VERSION 0x00000110 30#define GMAC_VERSION 0x00000110
33#define GMAC_DEBUG 0x00000114 31#define GMAC_DEBUG 0x00000114
@@ -54,9 +52,18 @@
54#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16 52#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16
55 53
56/* MAC Interrupt bitmap*/ 54/* MAC Interrupt bitmap*/
55#define GMAC_INT_RGSMIIS BIT(0)
56#define GMAC_INT_PCS_LINK BIT(1)
57#define GMAC_INT_PCS_ANE BIT(2)
58#define GMAC_INT_PCS_PHYIS BIT(3)
57#define GMAC_INT_PMT_EN BIT(4) 59#define GMAC_INT_PMT_EN BIT(4)
58#define GMAC_INT_LPI_EN BIT(5) 60#define GMAC_INT_LPI_EN BIT(5)
59 61
62#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \
63 GMAC_INT_PCS_ANE)
64
65#define GMAC_INT_DEFAULT_MASK GMAC_INT_PMT_EN
66
60enum dwmac4_irq_status { 67enum dwmac4_irq_status {
61 time_stamp_irq = 0x00001000, 68 time_stamp_irq = 0x00001000,
62 mmc_rx_csum_offload_irq = 0x00000800, 69 mmc_rx_csum_offload_irq = 0x00000800,
@@ -64,19 +71,8 @@ enum dwmac4_irq_status {
64 mmc_rx_irq = 0x00000200, 71 mmc_rx_irq = 0x00000200,
65 mmc_irq = 0x00000100, 72 mmc_irq = 0x00000100,
66 pmt_irq = 0x00000010, 73 pmt_irq = 0x00000010,
67 pcs_ane_irq = 0x00000004,
68 pcs_link_irq = 0x00000002,
69}; 74};
70 75
71/* MAC Auto-Neg bitmap*/
72#define GMAC_AN_CTRL_RAN BIT(9)
73#define GMAC_AN_CTRL_ANE BIT(12)
74#define GMAC_AN_CTRL_ELE BIT(14)
75#define GMAC_AN_FD BIT(5)
76#define GMAC_AN_HD BIT(6)
77#define GMAC_AN_PSE_MASK GENMASK(8, 7)
78#define GMAC_AN_PSE_SHIFT 7
79
80/* MAC PMT bitmap */ 76/* MAC PMT bitmap */
81enum power_event { 77enum power_event {
82 pointer_reset = 0x80000000, 78 pointer_reset = 0x80000000,
@@ -250,6 +246,23 @@ enum power_event {
250#define MTL_DEBUG_RRCSTS_FLUSH 3 246#define MTL_DEBUG_RRCSTS_FLUSH 3
251#define MTL_DEBUG_RWCSTS BIT(0) 247#define MTL_DEBUG_RWCSTS BIT(0)
252 248
249/* SGMII/RGMII status register */
250#define GMAC_PHYIF_CTRLSTATUS_TC BIT(0)
251#define GMAC_PHYIF_CTRLSTATUS_LUD BIT(1)
252#define GMAC_PHYIF_CTRLSTATUS_SMIDRXS BIT(4)
253#define GMAC_PHYIF_CTRLSTATUS_LNKMOD BIT(16)
254#define GMAC_PHYIF_CTRLSTATUS_SPEED GENMASK(18, 17)
255#define GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT 17
256#define GMAC_PHYIF_CTRLSTATUS_LNKSTS BIT(19)
257#define GMAC_PHYIF_CTRLSTATUS_JABTO BIT(20)
258#define GMAC_PHYIF_CTRLSTATUS_FALSECARDET BIT(21)
259/* LNKMOD */
260#define GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK 0x1
261/* LNKSPEED */
262#define GMAC_PHYIF_CTRLSTATUS_SPEED_125 0x2
263#define GMAC_PHYIF_CTRLSTATUS_SPEED_25 0x1
264#define GMAC_PHYIF_CTRLSTATUS_SPEED_2_5 0x0
265
253extern const struct stmmac_dma_ops dwmac4_dma_ops; 266extern const struct stmmac_dma_ops dwmac4_dma_ops;
254extern const struct stmmac_dma_ops dwmac410_dma_ops; 267extern const struct stmmac_dma_ops dwmac410_dma_ops;
255#endif /* __DWMAC4_H__ */ 268#endif /* __DWMAC4_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 44da877d2483..df5580dcdfed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -17,6 +17,7 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/ethtool.h> 18#include <linux/ethtool.h>
19#include <linux/io.h> 19#include <linux/io.h>
20#include "stmmac_pcs.h"
20#include "dwmac4.h" 21#include "dwmac4.h"
21 22
22static void dwmac4_core_init(struct mac_device_info *hw, int mtu) 23static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
@@ -31,10 +32,31 @@ static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
31 if (mtu > 2000) 32 if (mtu > 2000)
32 value |= GMAC_CONFIG_JE; 33 value |= GMAC_CONFIG_JE;
33 34
35 if (hw->ps) {
36 value |= GMAC_CONFIG_TE;
37
38 if (hw->ps == SPEED_1000) {
39 value &= ~GMAC_CONFIG_PS;
40 } else {
41 value |= GMAC_CONFIG_PS;
42
43 if (hw->ps == SPEED_10)
44 value &= ~GMAC_CONFIG_FES;
45 else
46 value |= GMAC_CONFIG_FES;
47 }
48 }
49
34 writel(value, ioaddr + GMAC_CONFIG); 50 writel(value, ioaddr + GMAC_CONFIG);
35 51
36 /* Mask GMAC interrupts */ 52 /* Mask GMAC interrupts */
37 writel(GMAC_INT_PMT_EN, ioaddr + GMAC_INT_EN); 53 value = GMAC_INT_DEFAULT_MASK;
54 if (hw->pmt)
55 value |= GMAC_INT_PMT_EN;
56 if (hw->pcs)
57 value |= GMAC_PCS_IRQ_DEFAULT;
58
59 writel(value, ioaddr + GMAC_INT_EN);
38} 60}
39 61
40static void dwmac4_dump_regs(struct mac_device_info *hw) 62static void dwmac4_dump_regs(struct mac_device_info *hw)
@@ -190,39 +212,53 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
190 } 212 }
191} 213}
192 214
193static void dwmac4_ctrl_ane(struct mac_device_info *hw, bool restart) 215static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
216 bool loopback)
194{ 217{
195 void __iomem *ioaddr = hw->pcsr; 218 dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
196 219}
197 /* auto negotiation enable and External Loopback enable */
198 u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
199 220
200 if (restart) 221static void dwmac4_rane(void __iomem *ioaddr, bool restart)
201 value |= GMAC_AN_CTRL_RAN; 222{
223 dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
224}
202 225
203 writel(value, ioaddr + GMAC_AN_CTRL); 226static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
227{
228 dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
204} 229}
205 230
206static void dwmac4_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv) 231/* RGMII or SMII interface */
232static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
207{ 233{
208 void __iomem *ioaddr = hw->pcsr; 234 u32 status;
209 u32 value = readl(ioaddr + GMAC_AN_ADV);
210 235
211 if (value & GMAC_AN_FD) 236 status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
212 adv->duplex = DUPLEX_FULL; 237 x->irq_rgmii_n++;
213 if (value & GMAC_AN_HD)
214 adv->duplex |= DUPLEX_HALF;
215 238
216 adv->pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT; 239 /* Check the link status */
240 if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
241 int speed_value;
217 242
218 value = readl(ioaddr + GMAC_AN_LPA); 243 x->pcs_link = 1;
244
245 speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
246 GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
247 if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
248 x->pcs_speed = SPEED_1000;
249 else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
250 x->pcs_speed = SPEED_100;
251 else
252 x->pcs_speed = SPEED_10;
219 253
220 if (value & GMAC_AN_FD) 254 x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);
221 adv->lp_duplex = DUPLEX_FULL;
222 if (value & GMAC_AN_HD)
223 adv->lp_duplex = DUPLEX_HALF;
224 255
225 adv->lp_pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT; 256 pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
257 x->pcs_duplex ? "Full" : "Half");
258 } else {
259 x->pcs_link = 0;
260 pr_info("Link is Down\n");
261 }
226} 262}
227 263
228static int dwmac4_irq_status(struct mac_device_info *hw, 264static int dwmac4_irq_status(struct mac_device_info *hw,
@@ -248,11 +284,6 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
248 x->irq_receive_pmt_irq_n++; 284 x->irq_receive_pmt_irq_n++;
249 } 285 }
250 286
251 if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
252 readl(ioaddr + GMAC_AN_STATUS);
253 x->irq_pcs_ane_n++;
254 }
255
256 mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS); 287 mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
257 /* Check MTL Interrupt: Currently only one queue is used: Q0. */ 288 /* Check MTL Interrupt: Currently only one queue is used: Q0. */
258 if (mtl_int_qx_status & MTL_INT_Q0) { 289 if (mtl_int_qx_status & MTL_INT_Q0) {
@@ -267,6 +298,10 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
267 } 298 }
268 } 299 }
269 300
301 dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
302 if (intr_status & PCS_RGSMIIIS_IRQ)
303 dwmac4_phystatus(ioaddr, x);
304
270 return ret; 305 return ret;
271} 306}
272 307
@@ -363,8 +398,9 @@ static const struct stmmac_ops dwmac4_ops = {
363 .pmt = dwmac4_pmt, 398 .pmt = dwmac4_pmt,
364 .set_umac_addr = dwmac4_set_umac_addr, 399 .set_umac_addr = dwmac4_set_umac_addr,
365 .get_umac_addr = dwmac4_get_umac_addr, 400 .get_umac_addr = dwmac4_get_umac_addr,
366 .ctrl_ane = dwmac4_ctrl_ane, 401 .pcs_ctrl_ane = dwmac4_ctrl_ane,
367 .get_adv = dwmac4_get_adv, 402 .pcs_rane = dwmac4_rane,
403 .pcs_get_adv_lp = dwmac4_get_adv_lp,
368 .debug = dwmac4_debug, 404 .debug = dwmac4_debug,
369 .set_filter = dwmac4_set_filter, 405 .set_filter = dwmac4_set_filter,
370}; 406};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 59ae6088cd22..8dc9056c1001 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -117,7 +117,6 @@ struct stmmac_priv {
117 int eee_enabled; 117 int eee_enabled;
118 int eee_active; 118 int eee_active;
119 int tx_lpi_timer; 119 int tx_lpi_timer;
120 int pcs;
121 unsigned int mode; 120 unsigned int mode;
122 int extend_desc; 121 int extend_desc;
123 struct ptp_clock *ptp_clock; 122 struct ptp_clock *ptp_clock;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index e2b98b01647e..1e06173fc9d7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -276,7 +276,8 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
 	struct phy_device *phy = priv->phydev;
 	int rc;
 
-	if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
+	if (priv->hw->pcs & STMMAC_PCS_RGMII ||
+	    priv->hw->pcs & STMMAC_PCS_SGMII) {
 		struct rgmii_adv adv;
 
 		if (!priv->xstats.pcs_link) {
@@ -289,10 +290,10 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
 		ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed);
 
 		/* Get and convert ADV/LP_ADV from the HW AN registers */
-		if (!priv->hw->mac->get_adv)
+		if (!priv->hw->mac->pcs_get_adv_lp)
 			return -EOPNOTSUPP;	/* should never happen indeed */
 
-		priv->hw->mac->get_adv(priv->hw, &adv);
+		priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv);
 
 		/* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
 
@@ -361,7 +362,8 @@ static int stmmac_ethtool_setsettings(struct net_device *dev,
 	struct phy_device *phy = priv->phydev;
 	int rc;
 
-	if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
+	if (priv->hw->pcs & STMMAC_PCS_RGMII ||
+	    priv->hw->pcs & STMMAC_PCS_SGMII) {
 		u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;
 
 		/* Only support ANE */
@@ -376,8 +378,11 @@ static int stmmac_ethtool_setsettings(struct net_device *dev,
 			   ADVERTISED_10baseT_Full);
 
 		spin_lock(&priv->lock);
-		if (priv->hw->mac->ctrl_ane)
-			priv->hw->mac->ctrl_ane(priv->hw, 1);
+
+		if (priv->hw->mac->pcs_ctrl_ane)
+			priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1,
+						    priv->hw->ps, 0);
+
 		spin_unlock(&priv->lock);
 
 		return 0;
@@ -452,11 +457,22 @@ stmmac_get_pauseparam(struct net_device *netdev,
 {
 	struct stmmac_priv *priv = netdev_priv(netdev);
 
-	if (priv->pcs)	/* FIXME */
-		return;
-
 	pause->rx_pause = 0;
 	pause->tx_pause = 0;
+
+	if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
+		struct rgmii_adv adv_lp;
+
+		pause->autoneg = 1;
+		priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
+		if (!adv_lp.pause)
+			return;
+	} else {
+		if (!(priv->phydev->supported & SUPPORTED_Pause) ||
+		    !(priv->phydev->supported & SUPPORTED_Asym_Pause))
+			return;
+	}
+
 	pause->autoneg = priv->phydev->autoneg;
 
 	if (priv->flow_ctrl & FLOW_RX)
@@ -473,10 +489,19 @@ stmmac_set_pauseparam(struct net_device *netdev,
 	struct stmmac_priv *priv = netdev_priv(netdev);
 	struct phy_device *phy = priv->phydev;
 	int new_pause = FLOW_OFF;
-	int ret = 0;
 
-	if (priv->pcs)	/* FIXME */
-		return -EOPNOTSUPP;
+	if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
+		struct rgmii_adv adv_lp;
+
+		pause->autoneg = 1;
+		priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
+		if (!adv_lp.pause)
+			return -EOPNOTSUPP;
+	} else {
+		if (!(phy->supported & SUPPORTED_Pause) ||
+		    !(phy->supported & SUPPORTED_Asym_Pause))
+			return -EOPNOTSUPP;
+	}
 
 	if (pause->rx_pause)
 		new_pause |= FLOW_RX;
@@ -488,11 +513,12 @@ stmmac_set_pauseparam(struct net_device *netdev,
 
 	if (phy->autoneg) {
 		if (netif_running(netdev))
-			ret = phy_start_aneg(phy);
-	} else
-		priv->hw->mac->flow_ctrl(priv->hw, phy->duplex,
-					 priv->flow_ctrl, priv->pause);
-	return ret;
+			return phy_start_aneg(phy);
+	}
+
+	priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl,
+				 priv->pause);
+	return 0;
 }
 
 static void stmmac_get_ethtool_stats(struct net_device *dev,
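The pauseparam rework above replaces the old `/* FIXME */` early return: with a PCS, pause support is read back from the auto-negotiated link-partner word; without one, it is gated on the attached PHY's advertised capabilities. A minimal sketch of that decision flow (illustrative helper, not part of the patch):

	static bool stmmac_pause_supported(struct stmmac_priv *priv)
	{
		if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
			struct rgmii_adv adv_lp;

			/* PCS case: pause resolution comes from the AN words */
			priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
			return adv_lp.pause != 0;
		}

		/* PHY case: both pause bits must be supported */
		return (priv->phydev->supported & SUPPORTED_Pause) &&
		       (priv->phydev->supported & SUPPORTED_Asym_Pause);
	}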
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a473c182c91d..aab777c1ba33 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -285,8 +285,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 	/* Using PCS we cannot dial with the phy registers at this stage
 	 * so we do not support extra feature like EEE.
 	 */
-	if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
-	    (priv->pcs == STMMAC_PCS_RTBI))
+	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
+	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
+	    (priv->hw->pcs == STMMAC_PCS_RTBI))
 		goto out;
 
 	/* MAC core supports the EEE feature. */
@@ -799,10 +800,10 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
 			pr_debug("STMMAC: PCS RGMII support enable\n");
-			priv->pcs = STMMAC_PCS_RGMII;
+			priv->hw->pcs = STMMAC_PCS_RGMII;
 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
 			pr_debug("STMMAC: PCS SGMII support enable\n");
-			priv->pcs = STMMAC_PCS_SGMII;
+			priv->hw->pcs = STMMAC_PCS_SGMII;
 		}
 	}
 }
@@ -1665,6 +1666,19 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 	if (priv->plat->bus_setup)
 		priv->plat->bus_setup(priv->ioaddr);
 
+	/* PS and related bits will be programmed according to the speed */
+	if (priv->hw->pcs) {
+		int speed = priv->plat->mac_port_sel_speed;
+
+		if ((speed == SPEED_10) || (speed == SPEED_100) ||
+		    (speed == SPEED_1000)) {
+			priv->hw->ps = speed;
+		} else {
+			dev_warn(priv->device, "invalid port speed\n");
+			priv->hw->ps = 0;
+		}
+	}
+
 	/* Initialize the MAC Core */
 	priv->hw->mac->core_init(priv->hw, dev->mtu);
 
@@ -1714,8 +1728,8 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
 	}
 
-	if (priv->pcs && priv->hw->mac->ctrl_ane)
-		priv->hw->mac->ctrl_ane(priv->hw, 0);
+	if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
+		priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
 
 	/* set TX ring length */
 	if (priv->hw->dma->set_tx_ring_len)
@@ -1748,8 +1762,9 @@ static int stmmac_open(struct net_device *dev)
 
 	stmmac_check_ether_addr(priv);
 
-	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
-	    priv->pcs != STMMAC_PCS_RTBI) {
+	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+	    priv->hw->pcs != STMMAC_PCS_TBI &&
+	    priv->hw->pcs != STMMAC_PCS_RTBI) {
 		ret = stmmac_init_phy(dev);
 		if (ret) {
 			pr_err("%s: Cannot attach to PHY (error: %d)\n",
@@ -2809,6 +2824,14 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
 						       priv->rx_tail_addr,
 						       STMMAC_CHAN0);
 		}
+
+		/* PCS link status */
+		if (priv->hw->pcs) {
+			if (priv->xstats.pcs_link)
+				netif_carrier_on(dev);
+			else
+				netif_carrier_off(dev);
+		}
 	}
 
 	/* To handle DMA interrupts */
@@ -3130,6 +3153,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 		 */
 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
+		priv->hw->pmt = priv->plat->pmt;
 
 		/* TXCOE doesn't work in thresh DMA mode */
 		if (priv->plat->force_thresh_dma_mode)
@@ -3325,8 +3349,9 @@ int stmmac_dvr_probe(struct device *device,
 
 	stmmac_check_pcs_mode(priv);
 
-	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
-	    priv->pcs != STMMAC_PCS_RTBI) {
+	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+	    priv->hw->pcs != STMMAC_PCS_TBI &&
+	    priv->hw->pcs != STMMAC_PCS_RTBI) {
 		/* MDIO bus Registration */
 		ret = stmmac_mdio_register(ndev);
 		if (ret < 0) {
@@ -3376,8 +3401,9 @@ int stmmac_dvr_remove(struct device *dev)
 	reset_control_assert(priv->stmmac_rst);
 	clk_disable_unprepare(priv->pclk);
 	clk_disable_unprepare(priv->stmmac_clk);
-	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
-	    priv->pcs != STMMAC_PCS_RTBI)
+	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+	    priv->hw->pcs != STMMAC_PCS_TBI &&
+	    priv->hw->pcs != STMMAC_PCS_RTBI)
 		stmmac_mdio_unregister(ndev);
 	free_netdev(ndev);
 
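Two details in the stmmac_main.c changes are worth calling out: `stmmac_hw_setup()` now seeds `priv->hw->ps` from the optional DT speed before the MAC core is initialised, and auto-negotiation is always enabled (`pcs_ctrl_ane(..., 1, ...)`) when a PCS is present. Condensed, the setup-time flow is roughly (illustrative, assuming the fields sketched earlier):

	/* stmmac_hw_setup(), PCS-related steps only (sketch) */
	if (priv->hw->pcs) {
		int speed = priv->plat->mac_port_sel_speed; /* "snps,ps-speed" */

		/* only the three speeds the PS bits can encode are accepted */
		priv->hw->ps = (speed == SPEED_10 || speed == SPEED_100 ||
				speed == SPEED_1000) ? speed : 0;
	}
	/* ... core_init(), DMA setup ... */
	if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
		priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);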
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
new file mode 100644
index 000000000000..eba41c24b7a7
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
@@ -0,0 +1,159 @@
+/*
+ * stmmac_pcs.h: Physical Coding Sublayer Header File
+ *
+ * Copyright (C) 2016 STMicroelectronics (R&D) Limited
+ * Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __STMMAC_PCS_H__
+#define __STMMAC_PCS_H__
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include "common.h"
+
+/* PCS registers (AN/TBI/SGMII/RGMII) offsets */
+#define GMAC_AN_CTRL(x)		(x)		/* AN control */
+#define GMAC_AN_STATUS(x)	(x + 0x4)	/* AN status */
+#define GMAC_ANE_ADV(x)		(x + 0x8)	/* ANE Advertisement */
+#define GMAC_ANE_LPA(x)		(x + 0xc)	/* ANE link partner ability */
+#define GMAC_ANE_EXP(x)		(x + 0x10)	/* ANE expansion */
+#define GMAC_TBI(x)		(x + 0x14)	/* TBI extend status */
+
+/* AN Configuration defines */
+#define GMAC_AN_CTRL_RAN	BIT(9)	/* Restart Auto-Negotiation */
+#define GMAC_AN_CTRL_ANE	BIT(12)	/* Auto-Negotiation Enable */
+#define GMAC_AN_CTRL_ELE	BIT(14)	/* External Loopback Enable */
+#define GMAC_AN_CTRL_ECD	BIT(16)	/* Enable Comma Detect */
+#define GMAC_AN_CTRL_LR		BIT(17)	/* Lock to Reference */
+#define GMAC_AN_CTRL_SGMRAL	BIT(18)	/* SGMII RAL Control */
+
+/* AN Status defines */
+#define GMAC_AN_STATUS_LS	BIT(2)	/* Link Status 0:down 1:up */
+#define GMAC_AN_STATUS_ANA	BIT(3)	/* Auto-Negotiation Ability */
+#define GMAC_AN_STATUS_ANC	BIT(5)	/* Auto-Negotiation Complete */
+#define GMAC_AN_STATUS_ES	BIT(8)	/* Extended Status */
+
+/* ADV and LPA defines */
+#define GMAC_ANE_FD		BIT(5)
+#define GMAC_ANE_HD		BIT(6)
+#define GMAC_ANE_PSE		GENMASK(8, 7)
+#define GMAC_ANE_PSE_SHIFT	7
+#define GMAC_ANE_RFE		GENMASK(13, 12)
+#define GMAC_ANE_RFE_SHIFT	12
+#define GMAC_ANE_ACK		BIT(14)
+
+/**
+ * dwmac_pcs_isr - TBI, RTBI, or SGMII PHY ISR
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @intr_status: GMAC core interrupt status
+ * @x: pointer to log these events as stats
+ * Description: it is the ISR for PCS events: Auto-Negotiation Completed and
+ * Link status.
+ */
+static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg,
+				 unsigned int intr_status,
+				 struct stmmac_extra_stats *x)
+{
+	u32 val = readl(ioaddr + GMAC_AN_STATUS(reg));
+
+	if (intr_status & PCS_ANE_IRQ) {
+		x->irq_pcs_ane_n++;
+		if (val & GMAC_AN_STATUS_ANC)
+			pr_info("stmmac_pcs: ANE process completed\n");
+	}
+
+	if (intr_status & PCS_LINK_IRQ) {
+		x->irq_pcs_link_n++;
+		if (val & GMAC_AN_STATUS_LS)
+			pr_info("stmmac_pcs: Link Up\n");
+		else
+			pr_info("stmmac_pcs: Link Down\n");
+	}
+}
+
+/**
+ * dwmac_rane - To restart ANE
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @restart: to restart ANE
+ * Description: this is to just restart the Auto-Negotiation.
+ */
+static inline void dwmac_rane(void __iomem *ioaddr, u32 reg, bool restart)
+{
+	u32 value = readl(ioaddr + GMAC_AN_CTRL(reg));
+
+	if (restart)
+		value |= GMAC_AN_CTRL_RAN;
+
+	writel(value, ioaddr + GMAC_AN_CTRL(reg));
+}
+
+/**
+ * dwmac_ctrl_ane - To program the AN Control Register.
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @ane: to enable the auto-negotiation
+ * @srgmi_ral: to manage MAC-2-MAC SGMII connections.
+ * @loopback: to cause the PHY to loopback tx data into rx path.
+ * Description: this is the main function to configure the AN control register
+ * and init the ANE, select loopback (usually for debugging purpose) and
+ * configure SGMII RAL.
+ */
+static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane,
+				  bool srgmi_ral, bool loopback)
+{
+	u32 value = readl(ioaddr + GMAC_AN_CTRL(reg));
+
+	/* Enable and restart the Auto-Negotiation */
+	if (ane)
+		value |= GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_RAN;
+
+	/* In case of MAC-2-MAC connection, block is configured to operate
+	 * according to MAC conf register.
+	 */
+	if (srgmi_ral)
+		value |= GMAC_AN_CTRL_SGMRAL;
+
+	if (loopback)
+		value |= GMAC_AN_CTRL_ELE;
+
+	writel(value, ioaddr + GMAC_AN_CTRL(reg));
+}
+
+/**
+ * dwmac_get_adv_lp - Get ADV and LP cap
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @adv_lp: structure to store the adv,lp status
+ * Description: this is to expose the ANE advertisement and Link partner ability
+ * status to ethtool support.
+ */
+static inline void dwmac_get_adv_lp(void __iomem *ioaddr, u32 reg,
+				    struct rgmii_adv *adv_lp)
+{
+	u32 value = readl(ioaddr + GMAC_ANE_ADV(reg));
+
+	if (value & GMAC_ANE_FD)
+		adv_lp->duplex = DUPLEX_FULL;
+	if (value & GMAC_ANE_HD)
+		adv_lp->duplex |= DUPLEX_HALF;
+
+	adv_lp->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
+
+	value = readl(ioaddr + GMAC_ANE_LPA(reg));
+
+	if (value & GMAC_ANE_FD)
+		adv_lp->lp_duplex = DUPLEX_FULL;
+	if (value & GMAC_ANE_HD)
+		adv_lp->lp_duplex = DUPLEX_HALF;
+
+	adv_lp->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
+}
+#endif /* __STMMAC_PCS_H__ */
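The helpers in this new header are meant to be thin wrappers that each dwmac core glues to its own AN register block. Under that assumption, a core-side adapter would look roughly like this (the 0xc0 offset is a stand-in; each core defines its real PCS base):

	#define EXAMPLE_PCS_BASE	0xc0	/* hypothetical per-core AN offset */

	static void example_ctrl_ane(void __iomem *ioaddr, bool ane,
				     bool srgmi_ral, bool loopback)
	{
		dwmac_ctrl_ane(ioaddr, EXAMPLE_PCS_BASE, ane, srgmi_ral, loopback);
	}

	static void example_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
	{
		dwmac_get_adv_lp(ioaddr, EXAMPLE_PCS_BASE, adv);
	}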
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 409db913b117..f7dfc0ae8e9c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -319,6 +319,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
 		pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
 	}
 
+	of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed);
+
 	plat->axi = stmmac_axi_setup(pdev);
 
 	return plat;
@@ -411,7 +413,9 @@ static int stmmac_pltfr_suspend(struct device *dev)
 	struct platform_device *pdev = to_platform_device(dev);
 
 	ret = stmmac_suspend(dev);
-	if (priv->plat->exit)
+	if (priv->plat->suspend)
+		priv->plat->suspend(pdev, priv->plat->bsp_priv);
+	else if (priv->plat->exit)
 		priv->plat->exit(pdev, priv->plat->bsp_priv);
 
 	return ret;
@@ -430,7 +434,9 @@ static int stmmac_pltfr_resume(struct device *dev)
 	struct stmmac_priv *priv = netdev_priv(ndev);
 	struct platform_device *pdev = to_platform_device(dev);
 
-	if (priv->plat->init)
+	if (priv->plat->resume)
+		priv->plat->resume(pdev, priv->plat->bsp_priv);
+	else if (priv->plat->init)
 		priv->plat->init(pdev, priv->plat->bsp_priv);
 
 	return stmmac_resume(dev);
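The new `suspend`/`resume` platform callbacks take precedence over the heavyweight `exit`/`init` pair, letting a board keep its glue logic partially powered across sleep. A sketch of what a BSP can now supply (names below are illustrative):

	static void example_board_suspend(struct platform_device *pdev, void *priv)
	{
		/* e.g. gate the PHY reference clock instead of a full teardown */
	}

	static void example_board_resume(struct platform_device *pdev, void *priv)
	{
		/* undo example_board_suspend() */
	}

	static struct plat_stmmacenet_data example_plat = {
		.suspend = example_board_suspend,
		.resume  = example_board_resume,
		/* .init/.exit are still used when these hooks are absent */
	};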
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 158213cd6cdd..c14fa91c825f 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -598,7 +598,6 @@ struct net_local {
 	struct work_struct txtimeout_reinit;
 
 	phy_interface_t phy_interface;
-	struct phy_device *phy_dev;
 	struct mii_bus *mii_bus;
 
 	unsigned int link;
@@ -816,7 +815,7 @@ static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg,
 static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 {
 	struct net_local *lp = netdev_priv(ndev);
-	struct phy_device *phydev = lp->phy_dev;
+	struct phy_device *phydev = ndev->phydev;
 
 	if (!netif_running(ndev))
 		return -EINVAL;
@@ -850,6 +849,7 @@ static void dwceqos_link_down(struct net_local *lp)
 
 static void dwceqos_link_up(struct net_local *lp)
 {
+	struct net_device *ndev = lp->ndev;
 	u32 regval;
 	unsigned long flags;
 
@@ -860,7 +860,7 @@ static void dwceqos_link_up(struct net_local *lp)
 	dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
 	spin_unlock_irqrestore(&lp->hw_lock, flags);
 
-	lp->eee_active = !phy_init_eee(lp->phy_dev, 0);
+	lp->eee_active = !phy_init_eee(ndev->phydev, 0);
 
 	/* Check for changed EEE capability */
 	if (!lp->eee_active && lp->eee_enabled) {
@@ -876,7 +876,8 @@ static void dwceqos_link_up(struct net_local *lp)
 
 static void dwceqos_set_speed(struct net_local *lp)
 {
-	struct phy_device *phydev = lp->phy_dev;
+	struct net_device *ndev = lp->ndev;
+	struct phy_device *phydev = ndev->phydev;
 	u32 regval;
 
 	regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
@@ -903,7 +904,7 @@ static void dwceqos_set_speed(struct net_local *lp)
 static void dwceqos_adjust_link(struct net_device *ndev)
 {
 	struct net_local *lp = netdev_priv(ndev);
-	struct phy_device *phydev = lp->phy_dev;
+	struct phy_device *phydev = ndev->phydev;
 	int status_change = 0;
 
 	if (lp->phy_defer)
@@ -987,7 +988,6 @@ static int dwceqos_mii_probe(struct net_device *ndev)
 	lp->link    = 0;
 	lp->speed   = 0;
 	lp->duplex  = DUPLEX_UNKNOWN;
-	lp->phy_dev = phydev;
 
 	return 0;
 }
@@ -1531,6 +1531,7 @@ static void dwceqos_configure_bus(struct net_local *lp)
 
 static void dwceqos_init_hw(struct net_local *lp)
 {
+	struct net_device *ndev = lp->ndev;
 	u32 regval;
 	u32 buswidth;
 	u32 dma_skip;
@@ -1645,10 +1646,10 @@ static void dwceqos_init_hw(struct net_local *lp)
 		      regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
 
 	lp->phy_defer = false;
-	mutex_lock(&lp->phy_dev->lock);
-	phy_read_status(lp->phy_dev);
+	mutex_lock(&ndev->phydev->lock);
+	phy_read_status(ndev->phydev);
 	dwceqos_adjust_link(lp->ndev);
-	mutex_unlock(&lp->phy_dev->lock);
+	mutex_unlock(&ndev->phydev->lock);
 }
 
 static void dwceqos_tx_reclaim(unsigned long data)
@@ -1898,7 +1899,7 @@ static int dwceqos_open(struct net_device *ndev)
 	 * hence the unusual init order with phy_start first.
 	 */
 	lp->phy_defer = true;
-	phy_start(lp->phy_dev);
+	phy_start(ndev->phydev);
 	dwceqos_init_hw(lp);
 	napi_enable(&lp->napi);
 
@@ -1943,7 +1944,7 @@ static int dwceqos_stop(struct net_device *ndev)
 
 	dwceqos_drain_dma(lp);
 	dwceqos_reset_hw(lp);
-	phy_stop(lp->phy_dev);
+	phy_stop(ndev->phydev);
 
 	dwceqos_descriptor_free(lp);
 
@@ -2523,30 +2524,6 @@ dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
 	return s;
 }
 
-static int
-dwceqos_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
-{
-	struct net_local *lp = netdev_priv(ndev);
-	struct phy_device *phydev = lp->phy_dev;
-
-	if (!phydev)
-		return -ENODEV;
-
-	return phy_ethtool_gset(phydev, ecmd);
-}
-
-static int
-dwceqos_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
-{
-	struct net_local *lp = netdev_priv(ndev);
-	struct phy_device *phydev = lp->phy_dev;
-
-	if (!phydev)
-		return -ENODEV;
-
-	return phy_ethtool_sset(phydev, ecmd);
-}
-
 static void
 dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed)
 {
@@ -2574,17 +2551,17 @@ static int dwceqos_set_pauseparam(struct net_device *ndev,
 
 	lp->flowcontrol.autoneg = pp->autoneg;
 	if (pp->autoneg) {
-		lp->phy_dev->advertising |= ADVERTISED_Pause;
-		lp->phy_dev->advertising |= ADVERTISED_Asym_Pause;
+		ndev->phydev->advertising |= ADVERTISED_Pause;
+		ndev->phydev->advertising |= ADVERTISED_Asym_Pause;
 	} else {
-		lp->phy_dev->advertising &= ~ADVERTISED_Pause;
-		lp->phy_dev->advertising &= ~ADVERTISED_Asym_Pause;
+		ndev->phydev->advertising &= ~ADVERTISED_Pause;
+		ndev->phydev->advertising &= ~ADVERTISED_Asym_Pause;
 		lp->flowcontrol.rx = pp->rx_pause;
 		lp->flowcontrol.tx = pp->tx_pause;
 	}
 
 	if (netif_running(ndev))
-		ret = phy_start_aneg(lp->phy_dev);
+		ret = phy_start_aneg(ndev->phydev);
 
 	return ret;
 }
@@ -2705,7 +2682,7 @@ static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
 			 dwceqos_get_tx_lpi_state(regval));
 	}
 
-	return phy_ethtool_get_eee(lp->phy_dev, edata);
+	return phy_ethtool_get_eee(ndev->phydev, edata);
 }
 
 static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
@@ -2747,7 +2724,7 @@ static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
 		spin_unlock_irqrestore(&lp->hw_lock, flags);
 	}
 
-	return phy_ethtool_set_eee(lp->phy_dev, edata);
+	return phy_ethtool_set_eee(ndev->phydev, edata);
 }
 
 static u32 dwceqos_get_msglevel(struct net_device *ndev)
@@ -2765,8 +2742,6 @@ static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel)
 }
 
 static struct ethtool_ops dwceqos_ethtool_ops = {
-	.get_settings   = dwceqos_get_settings,
-	.set_settings   = dwceqos_set_settings,
 	.get_drvinfo    = dwceqos_get_drvinfo,
 	.get_link       = ethtool_op_get_link,
 	.get_pauseparam = dwceqos_get_pauseparam,
@@ -2780,6 +2755,8 @@ static struct ethtool_ops dwceqos_ethtool_ops = {
 	.set_eee        = dwceqos_set_eee,
 	.get_msglevel   = dwceqos_get_msglevel,
 	.set_msglevel   = dwceqos_set_msglevel,
+	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
 
 static struct net_device_ops netdev_ops = {
@@ -2981,8 +2958,8 @@ static int dwceqos_remove(struct platform_device *pdev)
 	if (ndev) {
 		lp = netdev_priv(ndev);
 
-		if (lp->phy_dev)
-			phy_disconnect(lp->phy_dev);
+		if (ndev->phydev)
+			phy_disconnect(ndev->phydev);
 		mdiobus_unregister(lp->mii_bus);
 		mdiobus_free(lp->mii_bus);
 
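The dwc_eth_qos conversion follows the standard recipe for dropping a private PHY pointer: after `phy_connect()`, the core already stores the PHY in `net_device::phydev`, so `lp->phy_dev` was redundant, and the legacy `get_settings`/`set_settings` pair collapses into the generic ksettings helpers. The resulting ethtool wiring, in isolation (sketch):

	static const struct ethtool_ops example_ethtool_ops = {
		.get_link	     = ethtool_op_get_link,
		/* phylib operates on ndev->phydev, no driver glue needed */
		.get_link_ksettings  = phy_ethtool_get_link_ksettings,
		.set_link_ksettings  = phy_ethtool_set_link_ksettings,
	};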
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 53190894f17a..1a93a1f28433 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -364,7 +364,6 @@ static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
 }
 
 struct cpsw_priv {
-	spinlock_t			lock;
 	struct platform_device		*pdev;
 	struct net_device		*ndev;
 	struct napi_struct		napi_rx;
@@ -1244,6 +1243,7 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
 	slave->phy = NULL;
 	cpsw_ale_control_set(priv->ale, slave_port,
 			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+	soft_reset_slave(slave);
 }
 
 static int cpsw_ndo_open(struct net_device *ndev)
@@ -1252,7 +1252,11 @@ static int cpsw_ndo_open(struct net_device *ndev)
 	int i, ret;
 	u32 reg;
 
-	pm_runtime_get_sync(&priv->pdev->dev);
+	ret = pm_runtime_get_sync(&priv->pdev->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(&priv->pdev->dev);
+		return ret;
+	}
 
 	if (!cpsw_common_res_usage_state(priv))
 		cpsw_intr_disable(priv);
@@ -1278,6 +1282,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
 
 	if (!cpsw_common_res_usage_state(priv)) {
 		struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
+		int buf_num;
 
 		/* setup tx dma to fixed prio and zero offset */
 		cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
@@ -1305,10 +1310,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
 			enable_irq(priv->irqs_table[0]);
 		}
 
-		if (WARN_ON(!priv->data.rx_descs))
-			priv->data.rx_descs = 128;
-
-		for (i = 0; i < priv->data.rx_descs; i++) {
+		buf_num = cpdma_chan_get_rx_buf_num(priv->dma);
+		for (i = 0; i < buf_num; i++) {
 			struct sk_buff *skb;
 
 			ret = -ENOMEM;
@@ -1611,10 +1614,17 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
 	struct sockaddr *addr = (struct sockaddr *)p;
 	int flags = 0;
 	u16 vid = 0;
+	int ret;
 
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
+	ret = pm_runtime_get_sync(&priv->pdev->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(&priv->pdev->dev);
+		return ret;
+	}
+
 	if (priv->data.dual_emac) {
 		vid = priv->slaves[priv->emac_port].port_vlan;
 		flags = ALE_VLAN;
@@ -1629,6 +1639,8 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
 	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
 	for_each_slave(priv, cpsw_set_slave_mac, priv);
 
+	pm_runtime_put(&priv->pdev->dev);
+
 	return 0;
 }
 
@@ -1693,10 +1705,17 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
 				    __be16 proto, u16 vid)
 {
 	struct cpsw_priv *priv = netdev_priv(ndev);
+	int ret;
 
 	if (vid == priv->data.default_vlan)
 		return 0;
 
+	ret = pm_runtime_get_sync(&priv->pdev->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(&priv->pdev->dev);
+		return ret;
+	}
+
 	if (priv->data.dual_emac) {
 		/* In dual EMAC, reserved VLAN id should not be used for
 		 * creating VLAN interfaces as this can break the dual
@@ -1711,7 +1730,10 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
 	}
 
 	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
-	return cpsw_add_vlan_ale_entry(priv, vid);
+	ret = cpsw_add_vlan_ale_entry(priv, vid);
+
+	pm_runtime_put(&priv->pdev->dev);
+	return ret;
 }
 
 static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
@@ -1723,6 +1745,12 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
 	if (vid == priv->data.default_vlan)
 		return 0;
 
+	ret = pm_runtime_get_sync(&priv->pdev->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(&priv->pdev->dev);
+		return ret;
+	}
+
 	if (priv->data.dual_emac) {
 		int i;
 
@@ -1742,8 +1770,10 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
 	if (ret != 0)
 		return ret;
 
-	return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
-				  0, ALE_VLAN, vid);
+	ret = cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
+				 0, ALE_VLAN, vid);
+	pm_runtime_put(&priv->pdev->dev);
+	return ret;
 }
 
 static const struct net_device_ops cpsw_netdev_ops = {
@@ -1902,10 +1932,33 @@ static int cpsw_set_pauseparam(struct net_device *ndev,
 	priv->tx_pause = pause->tx_pause ? true : false;
 
 	for_each_slave(priv, _cpsw_adjust_link, priv, &link);
-
 	return 0;
 }
 
+static int cpsw_ethtool_op_begin(struct net_device *ndev)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	int ret;
+
+	ret = pm_runtime_get_sync(&priv->pdev->dev);
+	if (ret < 0) {
+		cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
+		pm_runtime_put_noidle(&priv->pdev->dev);
+	}
+
+	return ret;
+}
+
+static void cpsw_ethtool_op_complete(struct net_device *ndev)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	int ret;
+
+	ret = pm_runtime_put(&priv->pdev->dev);
+	if (ret < 0)
+		cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
+}
+
 static const struct ethtool_ops cpsw_ethtool_ops = {
 	.get_drvinfo	= cpsw_get_drvinfo,
 	.get_msglevel	= cpsw_get_msglevel,
@@ -1925,6 +1978,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
 	.set_wol	= cpsw_set_wol,
 	.get_regs_len	= cpsw_get_regs_len,
 	.get_regs	= cpsw_get_regs,
+	.begin		= cpsw_ethtool_op_begin,
+	.complete	= cpsw_ethtool_op_complete,
 };
 
 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
@@ -1999,12 +2054,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 	}
 	data->bd_ram_size = prop;
 
-	if (of_property_read_u32(node, "rx_descs", &prop)) {
-		dev_err(&pdev->dev, "Missing rx_descs property in the DT.\n");
-		return -EINVAL;
-	}
-	data->rx_descs = prop;
-
 	if (of_property_read_u32(node, "mac_control", &prop)) {
 		dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
 		return -EINVAL;
@@ -2022,7 +2071,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 	if (ret)
 		dev_warn(&pdev->dev, "Doesn't have any child node\n");
 
-	for_each_child_of_node(node, slave_node) {
+	for_each_available_child_of_node(node, slave_node) {
 		struct cpsw_slave_data *slave_data = data->slave_data + i;
 		const void *mac_addr = NULL;
 		int lenp;
@@ -2124,7 +2173,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
 	}
 
 	priv_sl2 = netdev_priv(ndev);
-	spin_lock_init(&priv_sl2->lock);
 	priv_sl2->data = *data;
 	priv_sl2->pdev = pdev;
 	priv_sl2->ndev = ndev;
@@ -2243,7 +2291,6 @@ static int cpsw_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, ndev);
 	priv = netdev_priv(ndev);
-	spin_lock_init(&priv->lock);
 	priv->pdev = pdev;
 	priv->ndev = ndev;
 	priv->dev  = &ndev->dev;
@@ -2321,7 +2368,11 @@ static int cpsw_probe(struct platform_device *pdev)
 	/* Need to enable clocks with runtime PM api to access module
 	 * registers
 	 */
-	pm_runtime_get_sync(&pdev->dev);
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(&pdev->dev);
+		goto clean_runtime_disable_ret;
+	}
 	priv->version = readl(&priv->regs->id_ver);
 	pm_runtime_put_sync(&pdev->dev);
 
@@ -2554,16 +2605,12 @@ static int cpsw_suspend(struct device *dev)
 		for (i = 0; i < priv->data.slaves; i++) {
 			if (netif_running(priv->slaves[i].ndev))
 				cpsw_ndo_stop(priv->slaves[i].ndev);
-			soft_reset_slave(priv->slaves + i);
 		}
 	} else {
 		if (netif_running(ndev))
 			cpsw_ndo_stop(ndev);
-		for_each_slave(priv, soft_reset_slave);
 	}
 
-	pm_runtime_put_sync(&pdev->dev);
-
 	/* Select sleep pin state */
 	pinctrl_pm_select_sleep_state(&pdev->dev);
 
@@ -2576,8 +2623,6 @@ static int cpsw_resume(struct device *dev)
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct cpsw_priv *priv = netdev_priv(ndev);
 
-	pm_runtime_get_sync(&pdev->dev);
-
 	/* Select default pin state */
 	pinctrl_pm_select_default_state(&pdev->dev);
 
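Every new call site in cpsw follows the same runtime-PM idiom: `pm_runtime_get_sync()` increments the usage count even when it fails, so the error path must drop that reference with `pm_runtime_put_noidle()`. Distilled (sketch of the repeated pattern):

	static int example_runtime_get(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev);	/* may power up the hardware */
		if (ret < 0) {
			/* failed get still took a reference: drop it, skip idle */
			pm_runtime_put_noidle(dev);
			return ret;
		}

		return 0;	/* balanced later by pm_runtime_put() */
	}

The new ethtool `.begin`/`.complete` hooks reuse exactly this pairing, so every ethtool operation runs with the module clocks guaranteed on.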
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index e50afd1b2eda..16b54c6f32c2 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -35,7 +35,6 @@ struct cpsw_platform_data {
 	u32	cpts_clock_shift; /* convert input clock ticks to nanoseconds */
 	u32	ale_entries;	/* ale table size */
 	u32	bd_ram_size;  /*buffer descriptor ram size */
-	u32	rx_descs;	/* Number of Rx Descriptios */
 	u32	mac_control;	/* Mac control register */
 	u16	default_vlan;	/* Def VLAN for ALE lookup in VLAN aware mode*/
 	bool	dual_emac;	/* Enable Dual EMAC mode */
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 18bf3a8fdc50..1c653ca7c316 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -21,7 +21,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
 #include <linux/delay.h>
-
+#include <linux/genalloc.h>
 #include "davinci_cpdma.h"
 
 /* DMA Registers */
@@ -87,9 +87,8 @@ struct cpdma_desc_pool {
 	void			*cpumap;	/* dma_alloc map */
 	int			desc_size, mem_size;
 	int			num_desc, used_desc;
-	unsigned long		*bitmap;
 	struct device		*dev;
-	spinlock_t		lock;
+	struct gen_pool		*gen_pool;
 };
 
 enum cpdma_state {
@@ -117,6 +116,7 @@ struct cpdma_chan {
 	int				chan_num;
 	spinlock_t			lock;
 	int				count;
+	u32				desc_num;
 	u32				mask;
 	cpdma_handler_fn		handler;
 	enum dma_data_direction		dir;
@@ -145,6 +145,19 @@ struct cpdma_chan {
 				 (directed << CPDMA_TO_PORT_SHIFT));	\
 	} while (0)
 
+static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
+{
+	if (!pool)
+		return;
+
+	WARN_ON(pool->used_desc);
+	if (pool->cpumap)
+		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
+				  pool->phys);
+	else
+		iounmap(pool->iomap);
+}
+
 /*
  * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
  * emac) have dedicated on-chip memory for these descriptors.  Some other
@@ -155,24 +168,25 @@ static struct cpdma_desc_pool *
 cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
 		       int size, int align)
 {
-	int bitmap_size;
 	struct cpdma_desc_pool *pool;
+	int ret;
 
 	pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
 	if (!pool)
-		goto fail;
-
-	spin_lock_init(&pool->lock);
+		goto gen_pool_create_fail;
 
 	pool->dev	= dev;
 	pool->mem_size	= size;
 	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc), align);
 	pool->num_desc	= size / pool->desc_size;
 
-	bitmap_size  = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
-	pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
-	if (!pool->bitmap)
-		goto fail;
+	pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
+					      "cpdma");
+	if (IS_ERR(pool->gen_pool)) {
+		dev_err(dev, "pool create failed %ld\n",
+			PTR_ERR(pool->gen_pool));
+		goto gen_pool_create_fail;
+	}
 
 	if (phys) {
 		pool->phys  = phys;
@@ -185,24 +199,22 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
 		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
 	}
 
-	if (pool->iomap)
-		return pool;
-fail:
-	return NULL;
-}
-
-static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
-{
-	if (!pool)
-		return;
+	if (!pool->iomap)
+		goto gen_pool_create_fail;
 
-	WARN_ON(pool->used_desc);
-	if (pool->cpumap) {
-		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
-				  pool->phys);
-	} else {
-		iounmap(pool->iomap);
+	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
+				pool->phys, pool->mem_size, -1);
+	if (ret < 0) {
+		dev_err(dev, "pool add failed %d\n", ret);
+		goto gen_pool_add_virt_fail;
 	}
+
+	return pool;
+
+gen_pool_add_virt_fail:
+	cpdma_desc_pool_destroy(pool);
+gen_pool_create_fail:
+	return NULL;
 }
 
 static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
@@ -220,47 +232,23 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
 }
 
 static struct cpdma_desc __iomem *
-cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
+cpdma_desc_alloc(struct cpdma_desc_pool *pool)
 {
-	unsigned long flags;
-	int index;
-	int desc_start;
-	int desc_end;
 	struct cpdma_desc __iomem *desc = NULL;
 
-	spin_lock_irqsave(&pool->lock, flags);
-
-	if (is_rx) {
-		desc_start = 0;
-		desc_end = pool->num_desc/2;
-	} else {
-		desc_start = pool->num_desc/2;
-		desc_end = pool->num_desc;
-	}
-
-	index = bitmap_find_next_zero_area(pool->bitmap,
-					   desc_end, desc_start, num_desc, 0);
-	if (index < desc_end) {
-		bitmap_set(pool->bitmap, index, num_desc);
-		desc = pool->iomap + pool->desc_size * index;
+	desc = (struct cpdma_desc __iomem *)gen_pool_alloc(pool->gen_pool,
+							   pool->desc_size);
+	if (desc)
 		pool->used_desc++;
-	}
 
-	spin_unlock_irqrestore(&pool->lock, flags);
 	return desc;
 }
 
 static void cpdma_desc_free(struct cpdma_desc_pool *pool,
 			    struct cpdma_desc __iomem *desc, int num_desc)
 {
-	unsigned long flags, index;
-
-	index = ((unsigned long)desc - (unsigned long)pool->iomap) /
-		pool->desc_size;
-	spin_lock_irqsave(&pool->lock, flags);
-	bitmap_clear(pool->bitmap, index, num_desc);
+	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
 	pool->used_desc--;
-	spin_unlock_irqrestore(&pool->lock, flags);
 }
 
 struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
@@ -516,6 +504,7 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 	chan->state	= CPDMA_STATE_IDLE;
 	chan->chan_num	= chan_num;
 	chan->handler	= handler;
+	chan->desc_num = ctlr->pool->num_desc / 2;
 
 	if (is_rx_chan(chan)) {
 		chan->hdp	= ctlr->params.rxhdp + offset;
@@ -543,6 +532,12 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 }
 EXPORT_SYMBOL_GPL(cpdma_chan_create);
 
+int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr)
+{
+	return ctlr->pool->num_desc / 2;
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);
+
 int cpdma_chan_destroy(struct cpdma_chan *chan)
 {
 	struct cpdma_ctlr *ctlr;
@@ -675,7 +670,13 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
 		goto unlock_ret;
 	}
 
-	desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
+	if (chan->count >= chan->desc_num) {
+		chan->stats.desc_alloc_fail++;
+		ret = -ENOMEM;
+		goto unlock_ret;
+	}
+
+	desc = cpdma_desc_alloc(ctlr->pool);
 	if (!desc) {
 		chan->stats.desc_alloc_fail++;
 		ret = -ENOMEM;
@@ -721,24 +722,16 @@ EXPORT_SYMBOL_GPL(cpdma_chan_submit);
 
 bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
 {
-	unsigned long flags;
-	int index;
-	bool ret;
 	struct cpdma_ctlr	*ctlr = chan->ctlr;
 	struct cpdma_desc_pool	*pool = ctlr->pool;
+	bool			free_tx_desc;
+	unsigned long		flags;
 
-	spin_lock_irqsave(&pool->lock, flags);
-
-	index = bitmap_find_next_zero_area(pool->bitmap,
-					   pool->num_desc, pool->num_desc/2, 1, 0);
-
-	if (index < pool->num_desc)
-		ret = true;
-	else
-		ret = false;
-
-	spin_unlock_irqrestore(&pool->lock, flags);
-	return ret;
+	spin_lock_irqsave(&chan->lock, flags);
+	free_tx_desc = (chan->count < chan->desc_num) &&
+		       gen_pool_avail(pool->gen_pool);
+	spin_unlock_irqrestore(&chan->lock, flags);
+	return free_tx_desc;
 }
 EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
 
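The cpdma rework swaps the hand-rolled bitmap allocator for the kernel's genalloc, which brings its own locking and alignment; the old per-direction descriptor split survives as the `chan->desc_num` soft limit. A self-contained sketch of the same genalloc pattern (illustrative names, error handling trimmed):

	#include <linux/genalloc.h>

	/* Wrap an iomapped descriptor region in a gen_pool and carve one chunk */
	static unsigned long example_pool_alloc(struct device *dev,
						void __iomem *iomap,
						phys_addr_t phys, size_t mem_size,
						size_t desc_size)
	{
		struct gen_pool *pool;

		/* min alloc order = descriptor size, so chunks stay aligned */
		pool = devm_gen_pool_create(dev, ilog2(desc_size), -1, "example");
		if (IS_ERR(pool))
			return 0;

		if (gen_pool_add_virt(pool, (unsigned long)iomap, phys,
				      mem_size, -1) < 0)
			return 0;

		return gen_pool_alloc(pool, desc_size);	/* one descriptor */
	}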
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index 86dee487f2f0..80c015cbbce5 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -81,6 +81,7 @@ int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr);
 
 struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 				     cpdma_handler_fn handler);
+int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr);
 int cpdma_chan_destroy(struct cpdma_chan *chan);
 int cpdma_chan_start(struct cpdma_chan *chan);
 int cpdma_chan_stop(struct cpdma_chan *chan);
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 4e7c9b9b042a..33df340db1f1 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -53,6 +53,10 @@
 
 #define DEF_OUT_FREQ	2200000		/* 2.2 MHz */
 
+struct davinci_mdio_of_param {
+	int autosuspend_delay_ms;
+};
+
 struct davinci_mdio_regs {
 	u32	version;
 	u32	control;
@@ -90,19 +94,19 @@ static const struct mdio_platform_data default_pdata = {
 struct davinci_mdio_data {
 	struct mdio_platform_data pdata;
 	struct davinci_mdio_regs __iomem *regs;
-	spinlock_t	lock;
 	struct clk	*clk;
 	struct device	*dev;
 	struct mii_bus	*bus;
-	bool		suspended;
+	bool		active_in_suspend;
 	unsigned long	access_time; /* jiffies */
 	/* Indicates that driver shouldn't modify phy_mask in case
 	 * if MDIO bus is registered from DT.
 	 */
 	bool		skip_scan;
+	u32		clk_div;
 };
 
-static void __davinci_mdio_reset(struct davinci_mdio_data *data)
+static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
 {
 	u32 mdio_in, div, mdio_out_khz, access_time;
 
@@ -111,9 +115,7 @@ static void __davinci_mdio_reset(struct davinci_mdio_data *data)
 	if (div > CONTROL_MAX_DIV)
 		div = CONTROL_MAX_DIV;
 
-	/* set enable and clock divider */
-	__raw_writel(div | CONTROL_ENABLE, &data->regs->control);
-
+	data->clk_div = div;
 	/*
 	 * One mdio transaction consists of:
 	 *	32 bits of preamble
@@ -134,12 +136,23 @@ static void __davinci_mdio_reset(struct davinci_mdio_data *data)
 		data->access_time = 1;
 }
 
+static void davinci_mdio_enable(struct davinci_mdio_data *data)
+{
+	/* set enable and clock divider */
+	__raw_writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
+}
+
 static int davinci_mdio_reset(struct mii_bus *bus)
 {
 	struct davinci_mdio_data *data = bus->priv;
 	u32 phy_mask, ver;
+	int ret;
 
-	__davinci_mdio_reset(data);
+	ret = pm_runtime_get_sync(data->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(data->dev);
+		return ret;
+	}
 
 	/* wait for scan logic to settle */
 	msleep(PHY_MAX_ADDR * data->access_time);
@@ -150,7 +163,7 @@ static int davinci_mdio_reset(struct mii_bus *bus)
 		 (ver >> 8) & 0xff, ver & 0xff);
 
 	if (data->skip_scan)
-		return 0;
+		goto done;
 
 	/* get phy mask from the alive register */
 	phy_mask = __raw_readl(&data->regs->alive);
@@ -165,6 +178,10 @@ static int davinci_mdio_reset(struct mii_bus *bus)
 	}
 	data->bus->phy_mask = phy_mask;
 
+done:
+	pm_runtime_mark_last_busy(data->dev);
+	pm_runtime_put_autosuspend(data->dev);
+
 	return 0;
 }
 
@@ -190,7 +207,7 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
 		 * operation
		 */
 		dev_warn(data->dev, "resetting idled controller\n");
-		__davinci_mdio_reset(data);
+		davinci_mdio_enable(data);
 		return -EAGAIN;
 	}
 
@@ -225,11 +242,10 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
 	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
 		return -EINVAL;
 
-	spin_lock(&data->lock);
-
-	if (data->suspended) {
-		spin_unlock(&data->lock);
-		return -ENODEV;
+	ret = pm_runtime_get_sync(data->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(data->dev);
+		return ret;
 	}
 
 	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
@@ -255,8 +271,8 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
 		break;
 	}
 
-	spin_unlock(&data->lock);
-
+	pm_runtime_mark_last_busy(data->dev);
+	pm_runtime_put_autosuspend(data->dev);
 	return ret;
 }
 
@@ -270,11 +286,10 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
 	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
 		return -EINVAL;
 
-	spin_lock(&data->lock);
-
-	if (data->suspended) {
-		spin_unlock(&data->lock);
-		return -ENODEV;
+	ret = pm_runtime_get_sync(data->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(data->dev);
+		return ret;
 	}
 
 	reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
@@ -295,9 +310,10 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
 		break;
 	}
 
-	spin_unlock(&data->lock);
+	pm_runtime_mark_last_busy(data->dev);
+	pm_runtime_put_autosuspend(data->dev);
 
-	return 0;
+	return ret;
 }
 
 #if IS_ENABLED(CONFIG_OF)
@@ -320,6 +336,19 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
 }
 #endif
 
+#if IS_ENABLED(CONFIG_OF)
+static const struct davinci_mdio_of_param of_cpsw_mdio_data = {
+	.autosuspend_delay_ms = 100,
+};
+
+static const struct of_device_id davinci_mdio_of_mtable[] = {
+	{ .compatible = "ti,davinci_mdio", },
+	{ .compatible = "ti,cpsw-mdio", .data = &of_cpsw_mdio_data},
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
+#endif
+
 static int davinci_mdio_probe(struct platform_device *pdev)
 {
 	struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -328,6 +357,7 @@ static int davinci_mdio_probe(struct platform_device *pdev)
 	struct resource *res;
 	struct phy_device *phy;
 	int ret, addr;
+	int autosuspend_delay_ms = -1;
 
 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
@@ -340,9 +370,22 @@ static int davinci_mdio_probe(struct platform_device *pdev)
 	}
 
 	if (dev->of_node) {
-		if (davinci_mdio_probe_dt(&data->pdata, pdev))
-			data->pdata = default_pdata;
+		const struct of_device_id	*of_id;
+
+		ret = davinci_mdio_probe_dt(&data->pdata, pdev);
+		if (ret)
+			return ret;
 		snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+
+		of_id = of_match_device(davinci_mdio_of_mtable, &pdev->dev);
+		if (of_id) {
+			const struct davinci_mdio_of_param *of_mdio_data;
+
+			of_mdio_data = of_id->data;
+			if (of_mdio_data)
+				autosuspend_delay_ms =
+					of_mdio_data->autosuspend_delay_ms;
+		}
 	} else {
 		data->pdata = pdata ? (*pdata) : default_pdata;
 		snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
@@ -356,26 +399,25 @@ static int davinci_mdio_probe(struct platform_device *pdev)
 	data->bus->parent	= dev;
 	data->bus->priv		= data;
 
-	pm_runtime_enable(&pdev->dev);
-	pm_runtime_get_sync(&pdev->dev);
 	data->clk = devm_clk_get(dev, "fck");
 	if (IS_ERR(data->clk)) {
 		dev_err(dev, "failed to get device clock\n");
-		ret = PTR_ERR(data->clk);
-		data->clk = NULL;
-		goto bail_out;
+		return PTR_ERR(data->clk);
 	}
 
 	dev_set_drvdata(dev, data);
 	data->dev = dev;
-	spin_lock_init(&data->lock);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	data->regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(data->regs)) {
-		ret = PTR_ERR(data->regs);
-		goto bail_out;
-	}
+	if (IS_ERR(data->regs))
+		return PTR_ERR(data->regs);
+
+	davinci_mdio_init_clk(data);
+
+	pm_runtime_set_autosuspend_delay(&pdev->dev, autosuspend_delay_ms);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
 
 	/* register the mii bus
 	 * Create PHYs from DT only in case if PHY child nodes are explicitly
@@ -404,9 +446,8 @@ static int davinci_mdio_probe(struct platform_device *pdev)
 	return 0;
 
 bail_out:
-	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
-
 	return ret;
 }
 
@@ -417,29 +458,47 @@ static int davinci_mdio_remove(struct platform_device *pdev)
417 if (data->bus) 458 if (data->bus)
418 mdiobus_unregister(data->bus); 459 mdiobus_unregister(data->bus);
419 460
420 pm_runtime_put_sync(&pdev->dev); 461 pm_runtime_dont_use_autosuspend(&pdev->dev);
421 pm_runtime_disable(&pdev->dev); 462 pm_runtime_disable(&pdev->dev);
422 463
423 return 0; 464 return 0;
424} 465}
425 466
426#ifdef CONFIG_PM_SLEEP 467#ifdef CONFIG_PM
427static int davinci_mdio_suspend(struct device *dev) 468static int davinci_mdio_runtime_suspend(struct device *dev)
428{ 469{
429 struct davinci_mdio_data *data = dev_get_drvdata(dev); 470 struct davinci_mdio_data *data = dev_get_drvdata(dev);
430 u32 ctrl; 471 u32 ctrl;
431 472
432 spin_lock(&data->lock);
433
434 /* shutdown the scan state machine */ 473 /* shutdown the scan state machine */
435 ctrl = __raw_readl(&data->regs->control); 474 ctrl = __raw_readl(&data->regs->control);
436 ctrl &= ~CONTROL_ENABLE; 475 ctrl &= ~CONTROL_ENABLE;
437 __raw_writel(ctrl, &data->regs->control); 476 __raw_writel(ctrl, &data->regs->control);
438 wait_for_idle(data); 477 wait_for_idle(data);
439 478
440 data->suspended = true; 479 return 0;
441 spin_unlock(&data->lock); 480}
442 pm_runtime_put_sync(data->dev); 481
482static int davinci_mdio_runtime_resume(struct device *dev)
483{
484 struct davinci_mdio_data *data = dev_get_drvdata(dev);
485
486 davinci_mdio_enable(data);
487 return 0;
488}
489#endif
490
491#ifdef CONFIG_PM_SLEEP
492static int davinci_mdio_suspend(struct device *dev)
493{
494 struct davinci_mdio_data *data = dev_get_drvdata(dev);
495 int ret = 0;
496
497 data->active_in_suspend = !pm_runtime_status_suspended(dev);
498 if (data->active_in_suspend)
499 ret = pm_runtime_force_suspend(dev);
500 if (ret < 0)
501 return ret;
443 502
444 /* Select sleep pin state */ 503 /* Select sleep pin state */
445 pinctrl_pm_select_sleep_state(dev); 504 pinctrl_pm_select_sleep_state(dev);
@@ -454,31 +513,19 @@ static int davinci_mdio_resume(struct device *dev)
454 /* Select default pin state */ 513 /* Select default pin state */
455 pinctrl_pm_select_default_state(dev); 514 pinctrl_pm_select_default_state(dev);
456 515
457 pm_runtime_get_sync(data->dev); 516 if (data->active_in_suspend)
458 517 pm_runtime_force_resume(dev);
459 spin_lock(&data->lock);
460 /* restart the scan state machine */
461 __davinci_mdio_reset(data);
462
463 data->suspended = false;
464 spin_unlock(&data->lock);
465 518
466 return 0; 519 return 0;
467} 520}
468#endif 521#endif
469 522
470static const struct dev_pm_ops davinci_mdio_pm_ops = { 523static const struct dev_pm_ops davinci_mdio_pm_ops = {
524 SET_RUNTIME_PM_OPS(davinci_mdio_runtime_suspend,
525 davinci_mdio_runtime_resume, NULL)
471 SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume) 526 SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
472}; 527};
473 528
474#if IS_ENABLED(CONFIG_OF)
475static const struct of_device_id davinci_mdio_of_mtable[] = {
476 { .compatible = "ti,davinci_mdio", },
477 { /* sentinel */ },
478};
479MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
480#endif
481
482static struct platform_driver davinci_mdio_driver = { 529static struct platform_driver davinci_mdio_driver = {
483 .driver = { 530 .driver = {
484 .name = "davinci_mdio", 531 .name = "davinci_mdio",
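[Editor's note on the davinci_mdio conversion above: the driver moves from keeping the device powered for its whole lifetime to the standard runtime-PM autosuspend idiom, where each MDIO transaction takes a runtime-PM reference and drops it with an autosuspend grace period (100 ms here, from the new of_cpsw_mdio_data). A minimal sketch of that idiom, not the driver's actual code; the foo_* names are hypothetical:

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    /* Probe: opt in to autosuspend before enabling runtime PM. */
    static int foo_probe(struct platform_device *pdev)
    {
            pm_runtime_set_autosuspend_delay(&pdev->dev, 100);  /* ms */
            pm_runtime_use_autosuspend(&pdev->dev);
            pm_runtime_enable(&pdev->dev);
            return 0;
    }

    /* I/O path: hold a runtime-PM reference across the register access. */
    static int foo_xfer(struct device *dev)
    {
            int ret = pm_runtime_get_sync(dev);

            if (ret < 0) {
                    pm_runtime_put_noidle(dev);
                    return ret;
            }

            /* ... hardware access goes here ... */
            ret = 0;

            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev);
            return ret;
    }

On teardown this is paired with pm_runtime_dont_use_autosuspend() and pm_runtime_disable(), as the remove path in the hunk above shows.]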
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 561703317312..ece0ea0f6b38 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -1651,7 +1651,6 @@ static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
1651 dma_addr_t head_list_phys; 1651 dma_addr_t head_list_phys;
1652 u32 ack = 1; 1652 u32 ack = 1;
1653 1653
1654 host_int = 0;
1655 if (priv->tlan_rev < 0x30) { 1654 if (priv->tlan_rev < 0x30) {
1656 TLAN_DBG(TLAN_DEBUG_TX, 1655 TLAN_DBG(TLAN_DEBUG_TX,
1657 "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n", 1656 "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 54874783476a..74e671906ddb 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -280,7 +280,7 @@ struct tc35815_regs {
280 * Descriptors 280 * Descriptors
281 */ 281 */
282 282
283/* Frame descripter */ 283/* Frame descriptor */
284struct FDesc { 284struct FDesc {
285 volatile __u32 FDNext; 285 volatile __u32 FDNext;
286 volatile __u32 FDSystem; 286 volatile __u32 FDSystem;
@@ -288,7 +288,7 @@ struct FDesc {
288 volatile __u32 FDCtl; 288 volatile __u32 FDCtl;
289}; 289};
290 290
291/* Buffer descripter */ 291/* Buffer descriptor */
292struct BDesc { 292struct BDesc {
293 volatile __u32 BuffData; 293 volatile __u32 BuffData;
294 volatile __u32 BDCtl; 294 volatile __u32 BDCtl;
@@ -296,7 +296,7 @@ struct BDesc {
296 296
297#define FD_ALIGN 16 297#define FD_ALIGN 16
298 298
299/* Frame Descripter bit assign ---------------------------------------------- */ 299/* Frame Descriptor bit assign ---------------------------------------------- */
300#define FD_FDLength_MASK 0x0000FFFF /* Length MASK */ 300#define FD_FDLength_MASK 0x0000FFFF /* Length MASK */
301#define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */ 301#define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */
302#define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */ 302#define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */
@@ -309,7 +309,7 @@ struct BDesc {
309#define FD_Next_EOL 0x00000001 /* FD EOL indicator */ 309#define FD_Next_EOL 0x00000001 /* FD EOL indicator */
310#define FD_BDCnt_SHIFT 16 310#define FD_BDCnt_SHIFT 16
311 311
312/* Buffer Descripter bit assign --------------------------------------------- */ 312/* Buffer Descriptor bit assign --------------------------------------------- */
313#define BD_BuffLength_MASK 0x0000FFFF /* Receive Data Size */ 313#define BD_BuffLength_MASK 0x0000FFFF /* Receive Data Size */
314#define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */ 314#define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */
315#define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */ 315#define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */
@@ -1387,7 +1387,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
1387 if (status & Int_IntExBD) { 1387 if (status & Int_IntExBD) {
1388 if (netif_msg_rx_err(lp)) 1388 if (netif_msg_rx_err(lp))
1389 dev_warn(&dev->dev, 1389 dev_warn(&dev->dev,
1390 "Excessive Buffer Descriptiors (%#x).\n", 1390 "Excessive Buffer Descriptors (%#x).\n",
1391 status); 1391 status);
1392 dev->stats.rx_length_errors++; 1392 dev->stats.rx_length_errors++;
1393 ret = 0; 1393 ret = 0;
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 4f6255cf62ce..37ab46cdbec4 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -1154,7 +1154,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
1154 if (err < 0) 1154 if (err < 0)
1155 goto err_register; 1155 goto err_register;
1156 1156
1157 priv->xfer_wq = create_workqueue(netdev_name(ndev)); 1157 priv->xfer_wq = alloc_workqueue(netdev_name(ndev), WQ_MEM_RECLAIM, 0);
1158 if (!priv->xfer_wq) { 1158 if (!priv->xfer_wq) {
1159 err = -ENOMEM; 1159 err = -ENOMEM;
1160 goto err_wq; 1160 goto err_wq;
@@ -1233,7 +1233,6 @@ int w5100_remove(struct device *dev)
1233 1233
1234 flush_work(&priv->setrx_work); 1234 flush_work(&priv->setrx_work);
1235 flush_work(&priv->restart_work); 1235 flush_work(&priv->restart_work);
1236 flush_workqueue(priv->xfer_wq);
1237 destroy_workqueue(priv->xfer_wq); 1236 destroy_workqueue(priv->xfer_wq);
1238 1237
1239 unregister_netdev(ndev); 1238 unregister_netdev(ndev);
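[Editor's note on the w5100 change above (the fjes hunk below makes the same conversion): create_workqueue() is the legacy interface, while alloc_workqueue() with WQ_MEM_RECLAIM guarantees a rescuer thread so queued work can make progress under memory pressure, which matters for workqueues on a NIC's packet path. The dropped flush_workqueue() was redundant because destroy_workqueue() drains the queue itself. A minimal sketch of the idiom; the foo_* names are hypothetical:

    #include <linux/workqueue.h>

    static struct workqueue_struct *foo_wq;

    static int foo_init(void)
    {
            /* Printf-style name, reclaim-safe, default concurrency. */
            foo_wq = alloc_workqueue("foo/txrx", WQ_MEM_RECLAIM, 0);
            if (!foo_wq)
                    return -ENOMEM;
            return 0;
    }

    static void foo_exit(void)
    {
            /* destroy_workqueue() flushes pending work on its own. */
            destroy_workqueue(foo_wq);
    }
]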
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 7b44968e02e6..ddced28e8247 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1144,8 +1144,8 @@ xirc2ps_interrupt(int irq, void *dev_id)
1144 dev->stats.tx_packets += lp->last_ptr_value - n; 1144 dev->stats.tx_packets += lp->last_ptr_value - n;
1145 netif_wake_queue(dev); 1145 netif_wake_queue(dev);
1146 } 1146 }
1147 if (tx_status & 0x0002) { /* Execessive collissions */ 1147 if (tx_status & 0x0002) { /* Excessive collisions */
1148 pr_debug("tx restarted due to execssive collissions\n"); 1148 pr_debug("tx restarted due to excessive collisions\n");
1149 PutByte(XIRCREG_CR, RestartTx); /* restart transmitter process */ 1149 PutByte(XIRCREG_CR, RestartTx); /* restart transmitter process */
1150 } 1150 }
1151 if (tx_status & 0x0040) 1151 if (tx_status & 0x0040)
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 86c331bb5eb3..9006877c53f2 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1187,8 +1187,9 @@ static int fjes_probe(struct platform_device *plat_dev)
1187 adapter->force_reset = false; 1187 adapter->force_reset = false;
1188 adapter->open_guard = false; 1188 adapter->open_guard = false;
1189 1189
1190 adapter->txrx_wq = create_workqueue(DRV_NAME "/txrx"); 1190 adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
1191 adapter->control_wq = create_workqueue(DRV_NAME "/control"); 1191 adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
1192 WQ_MEM_RECLAIM, 0);
1192 1193
1193 INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task); 1194 INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
1194 INIT_WORK(&adapter->raise_intr_rxdata_task, 1195 INIT_WORK(&adapter->raise_intr_rxdata_task,
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index cc39cefeae45..310e0b9c2657 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -12,7 +12,6 @@
12 12
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/netdevice.h>
16#include <linux/etherdevice.h> 15#include <linux/etherdevice.h>
17#include <linux/hash.h> 16#include <linux/hash.h>
18#include <net/dst_metadata.h> 17#include <net/dst_metadata.h>
@@ -397,23 +396,6 @@ static struct socket *geneve_create_sock(struct net *net, bool ipv6,
397 return sock; 396 return sock;
398} 397}
399 398
400static void geneve_notify_add_rx_port(struct geneve_sock *gs)
401{
402 struct net_device *dev;
403 struct sock *sk = gs->sock->sk;
404 struct net *net = sock_net(sk);
405 sa_family_t sa_family = geneve_get_sk_family(gs);
406 __be16 port = inet_sk(sk)->inet_sport;
407
408 rcu_read_lock();
409 for_each_netdev_rcu(net, dev) {
410 if (dev->netdev_ops->ndo_add_geneve_port)
411 dev->netdev_ops->ndo_add_geneve_port(dev, sa_family,
412 port);
413 }
414 rcu_read_unlock();
415}
416
417static int geneve_hlen(struct genevehdr *gh) 399static int geneve_hlen(struct genevehdr *gh)
418{ 400{
419 return sizeof(*gh) + gh->opt_len * 4; 401 return sizeof(*gh) + gh->opt_len * 4;
@@ -533,7 +515,7 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
533 INIT_HLIST_HEAD(&gs->vni_list[h]); 515 INIT_HLIST_HEAD(&gs->vni_list[h]);
534 516
535 /* Initialize the geneve udp offloads structure */ 517 /* Initialize the geneve udp offloads structure */
536 geneve_notify_add_rx_port(gs); 518 udp_tunnel_notify_add_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE);
537 519
538 /* Mark socket as an encapsulation socket */ 520 /* Mark socket as an encapsulation socket */
539 memset(&tunnel_cfg, 0, sizeof(tunnel_cfg)); 521 memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
@@ -548,31 +530,13 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
548 return gs; 530 return gs;
549} 531}
550 532
551static void geneve_notify_del_rx_port(struct geneve_sock *gs)
552{
553 struct net_device *dev;
554 struct sock *sk = gs->sock->sk;
555 struct net *net = sock_net(sk);
556 sa_family_t sa_family = geneve_get_sk_family(gs);
557 __be16 port = inet_sk(sk)->inet_sport;
558
559 rcu_read_lock();
560 for_each_netdev_rcu(net, dev) {
561 if (dev->netdev_ops->ndo_del_geneve_port)
562 dev->netdev_ops->ndo_del_geneve_port(dev, sa_family,
563 port);
564 }
565
566 rcu_read_unlock();
567}
568
569static void __geneve_sock_release(struct geneve_sock *gs) 533static void __geneve_sock_release(struct geneve_sock *gs)
570{ 534{
571 if (!gs || --gs->refcnt) 535 if (!gs || --gs->refcnt)
572 return; 536 return;
573 537
574 list_del(&gs->list); 538 list_del(&gs->list);
575 geneve_notify_del_rx_port(gs); 539 udp_tunnel_notify_del_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE);
576 udp_tunnel_sock_release(gs->sock); 540 udp_tunnel_sock_release(gs->sock);
577 kfree_rcu(gs, rcu); 541 kfree_rcu(gs, rcu);
578} 542}
@@ -1165,29 +1129,20 @@ static struct device_type geneve_type = {
1165 .name = "geneve", 1129 .name = "geneve",
1166}; 1130};
1167 1131
1168/* Calls the ndo_add_geneve_port of the caller in order to 1132/* Calls the ndo_add_udp_enc_port of the caller in order to
1169 * supply the listening GENEVE udp ports. Callers are expected 1133 * supply the listening GENEVE udp ports. Callers are expected
1170 * to implement the ndo_add_geneve_port. 1134 * to implement the ndo_add_udp_enc_port.
1171 */ 1135 */
1172static void geneve_push_rx_ports(struct net_device *dev) 1136static void geneve_push_rx_ports(struct net_device *dev)
1173{ 1137{
1174 struct net *net = dev_net(dev); 1138 struct net *net = dev_net(dev);
1175 struct geneve_net *gn = net_generic(net, geneve_net_id); 1139 struct geneve_net *gn = net_generic(net, geneve_net_id);
1176 struct geneve_sock *gs; 1140 struct geneve_sock *gs;
1177 sa_family_t sa_family;
1178 struct sock *sk;
1179 __be16 port;
1180
1181 if (!dev->netdev_ops->ndo_add_geneve_port)
1182 return;
1183 1141
1184 rcu_read_lock(); 1142 rcu_read_lock();
1185 list_for_each_entry_rcu(gs, &gn->sock_list, list) { 1143 list_for_each_entry_rcu(gs, &gn->sock_list, list)
1186 sk = gs->sock->sk; 1144 udp_tunnel_push_rx_port(dev, gs->sock,
1187 sa_family = sk->sk_family; 1145 UDP_TUNNEL_TYPE_GENEVE);
1188 port = inet_sk(sk)->inet_sport;
1189 dev->netdev_ops->ndo_add_geneve_port(dev, sa_family, port);
1190 }
1191 rcu_read_unlock(); 1146 rcu_read_unlock();
1192} 1147}
1193 1148
@@ -1550,7 +1505,7 @@ static int geneve_netdevice_event(struct notifier_block *unused,
1550{ 1505{
1551 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1506 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1552 1507
1553 if (event == NETDEV_OFFLOAD_PUSH_GENEVE) 1508 if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
1554 geneve_push_rx_ports(dev); 1509 geneve_push_rx_ports(dev);
1555 1510
1556 return NOTIFY_DONE; 1511 return NOTIFY_DONE;
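[Editor's note on the geneve diff above: the driver-private ndo_add_geneve_port/ndo_del_geneve_port walks over every netdev are replaced by the generic UDP tunnel offload helpers, which perform the same iteration once for all tunnel types and invoke the NIC's ndo_add_udp_enc_port with a UDP_TUNNEL_TYPE_* tag instead of a geneve-specific callback. A rough sketch of how a tunnel driver of this era uses them; the foo_* wrappers are illustrative:

    #include <net/udp_tunnel.h>

    /* Socket created: advertise its port to offload-capable NICs. */
    static void foo_sock_added(struct socket *sock)
    {
            udp_tunnel_notify_add_rx_port(sock, UDP_TUNNEL_TYPE_GENEVE);
    }

    /* Socket going away: withdraw the port again. */
    static void foo_sock_released(struct socket *sock)
    {
            udp_tunnel_notify_del_rx_port(sock, UDP_TUNNEL_TYPE_GENEVE);
    }

    /* NETDEV_UDP_TUNNEL_PUSH_INFO: replay all ports to one device. */
    static void foo_push(struct net_device *dev, struct socket *sock)
    {
            udp_tunnel_push_rx_port(dev, sock, UDP_TUNNEL_TYPE_GENEVE);
    }
]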
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 4e976a0d5a76..97e0cbca0a08 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -16,7 +16,6 @@
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/version.h>
20#include <linux/skbuff.h> 19#include <linux/skbuff.h>
21#include <linux/udp.h> 20#include <linux/udp.h>
22#include <linux/rculist.h> 21#include <linux/rculist.h>
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index acb636963e90..072cddce9264 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -156,7 +156,7 @@ struct baycom_state {
156 156
157/* --------------------------------------------------------------------- */ 157/* --------------------------------------------------------------------- */
158 158
159static void __inline__ baycom_int_freq(struct baycom_state *bc) 159static inline void baycom_int_freq(struct baycom_state *bc)
160{ 160{
161#ifdef BAYCOM_DEBUG 161#ifdef BAYCOM_DEBUG
162 unsigned long cur_jiffies = jiffies; 162 unsigned long cur_jiffies = jiffies;
@@ -192,7 +192,7 @@ static void __inline__ baycom_int_freq(struct baycom_state *bc)
192 192
193/* --------------------------------------------------------------------- */ 193/* --------------------------------------------------------------------- */
194 194
195static __inline__ void par96_tx(struct net_device *dev, struct baycom_state *bc) 195static inline void par96_tx(struct net_device *dev, struct baycom_state *bc)
196{ 196{
197 int i; 197 int i;
198 unsigned int data = hdlcdrv_getbits(&bc->hdrv); 198 unsigned int data = hdlcdrv_getbits(&bc->hdrv);
@@ -216,7 +216,7 @@ static __inline__ void par96_tx(struct net_device *dev, struct baycom_state *bc)
216 216
217/* --------------------------------------------------------------------- */ 217/* --------------------------------------------------------------------- */
218 218
219static __inline__ void par96_rx(struct net_device *dev, struct baycom_state *bc) 219static inline void par96_rx(struct net_device *dev, struct baycom_state *bc)
220{ 220{
221 int i; 221 int i;
222 unsigned int data, mask, mask2, descx; 222 unsigned int data, mask, mask2, descx;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index c270c5a54f3a..467fb8b4d083 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -173,6 +173,7 @@ struct rndis_device {
173 173
174/* Interface */ 174/* Interface */
175struct rndis_message; 175struct rndis_message;
176struct netvsc_device;
176int netvsc_device_add(struct hv_device *device, void *additional_info); 177int netvsc_device_add(struct hv_device *device, void *additional_info);
177int netvsc_device_remove(struct hv_device *device); 178int netvsc_device_remove(struct hv_device *device);
178int netvsc_send(struct hv_device *device, 179int netvsc_send(struct hv_device *device,
@@ -189,8 +190,8 @@ int netvsc_recv_callback(struct hv_device *device_obj,
189 struct vmbus_channel *channel, 190 struct vmbus_channel *channel,
190 u16 vlan_tci); 191 u16 vlan_tci);
191void netvsc_channel_cb(void *context); 192void netvsc_channel_cb(void *context);
192int rndis_filter_open(struct hv_device *dev); 193int rndis_filter_open(struct netvsc_device *nvdev);
193int rndis_filter_close(struct hv_device *dev); 194int rndis_filter_close(struct netvsc_device *nvdev);
194int rndis_filter_device_add(struct hv_device *dev, 195int rndis_filter_device_add(struct hv_device *dev,
195 void *additional_info); 196 void *additional_info);
196void rndis_filter_device_remove(struct hv_device *dev); 197void rndis_filter_device_remove(struct hv_device *dev);
@@ -200,7 +201,7 @@ int rndis_filter_receive(struct hv_device *dev,
200 struct vmbus_channel *channel); 201 struct vmbus_channel *channel);
201 202
202int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter); 203int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
203int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac); 204int rndis_filter_set_device_mac(struct net_device *ndev, char *mac);
204 205
205void netvsc_switch_datapath(struct net_device *nv_dev, bool vf); 206void netvsc_switch_datapath(struct net_device *nv_dev, bool vf);
206 207
@@ -743,6 +744,18 @@ struct netvsc_device {
743 atomic_t vf_use_cnt; 744 atomic_t vf_use_cnt;
744}; 745};
745 746
747static inline struct netvsc_device *
748net_device_to_netvsc_device(struct net_device *ndev)
749{
750 return ((struct net_device_context *)netdev_priv(ndev))->nvdev;
751}
752
753static inline struct netvsc_device *
754hv_device_to_netvsc_device(struct hv_device *device)
755{
756 return net_device_to_netvsc_device(hv_get_drvdata(device));
757}
758
746/* NdisInitialize message */ 759/* NdisInitialize message */
747struct rndis_initialize_request { 760struct rndis_initialize_request {
748 u32 req_id; 761 u32 req_id;
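[Editor's note on the two accessors added above: they collapse the repeated hv_device -> net_device -> net_device_context -> netvsc_device chain that call sites throughout this driver used to open-code. An illustrative before/after, assuming the driver-private types from hyperv_net.h:

    /* Before: three steps at every call site. */
    static struct netvsc_device *get_nvdev_old(struct hv_device *device)
    {
            struct net_device *ndev = hv_get_drvdata(device);
            struct net_device_context *ctx = netdev_priv(ndev);

            return ctx->nvdev;
    }

    /* After: one call, same result. */
    static struct netvsc_device *get_nvdev_new(struct hv_device *device)
    {
            return hv_device_to_netvsc_device(device);
    }
]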
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 719cb3578e55..6909c322de4e 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -95,9 +95,7 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
95 95
96static struct netvsc_device *get_outbound_net_device(struct hv_device *device) 96static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
97{ 97{
98 struct net_device *ndev = hv_get_drvdata(device); 98 struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
99 struct net_device_context *net_device_ctx = netdev_priv(ndev);
100 struct netvsc_device *net_device = net_device_ctx->nvdev;
101 99
102 if (net_device && net_device->destroy) 100 if (net_device && net_device->destroy)
103 net_device = NULL; 101 net_device = NULL;
@@ -107,9 +105,7 @@ static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
107 105
108static struct netvsc_device *get_inbound_net_device(struct hv_device *device) 106static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
109{ 107{
110 struct net_device *ndev = hv_get_drvdata(device); 108 struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
111 struct net_device_context *net_device_ctx = netdev_priv(ndev);
112 struct netvsc_device *net_device = net_device_ctx->nvdev;
113 109
114 if (!net_device) 110 if (!net_device)
115 goto get_in_err; 111 goto get_in_err;
@@ -128,8 +124,7 @@ static int netvsc_destroy_buf(struct hv_device *device)
128 struct nvsp_message *revoke_packet; 124 struct nvsp_message *revoke_packet;
129 int ret = 0; 125 int ret = 0;
130 struct net_device *ndev = hv_get_drvdata(device); 126 struct net_device *ndev = hv_get_drvdata(device);
131 struct net_device_context *net_device_ctx = netdev_priv(ndev); 127 struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
132 struct netvsc_device *net_device = net_device_ctx->nvdev;
133 128
134 /* 129 /*
135 * If we got a section count, it means we received a 130 * If we got a section count, it means we received a
@@ -249,7 +244,6 @@ static int netvsc_destroy_buf(struct hv_device *device)
249static int netvsc_init_buf(struct hv_device *device) 244static int netvsc_init_buf(struct hv_device *device)
250{ 245{
251 int ret = 0; 246 int ret = 0;
252 unsigned long t;
253 struct netvsc_device *net_device; 247 struct netvsc_device *net_device;
254 struct nvsp_message *init_packet; 248 struct nvsp_message *init_packet;
255 struct net_device *ndev; 249 struct net_device *ndev;
@@ -310,9 +304,7 @@ static int netvsc_init_buf(struct hv_device *device)
310 goto cleanup; 304 goto cleanup;
311 } 305 }
312 306
313 t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ); 307 wait_for_completion(&net_device->channel_init_wait);
314 BUG_ON(t == 0);
315
316 308
317 /* Check the response */ 309 /* Check the response */
318 if (init_packet->msg.v1_msg. 310 if (init_packet->msg.v1_msg.
@@ -395,8 +387,7 @@ static int netvsc_init_buf(struct hv_device *device)
395 goto cleanup; 387 goto cleanup;
396 } 388 }
397 389
398 t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ); 390 wait_for_completion(&net_device->channel_init_wait);
399 BUG_ON(t == 0);
400 391
401 /* Check the response */ 392 /* Check the response */
402 if (init_packet->msg.v1_msg. 393 if (init_packet->msg.v1_msg.
@@ -450,7 +441,6 @@ static int negotiate_nvsp_ver(struct hv_device *device,
450{ 441{
451 struct net_device *ndev = hv_get_drvdata(device); 442 struct net_device *ndev = hv_get_drvdata(device);
452 int ret; 443 int ret;
453 unsigned long t;
454 444
455 memset(init_packet, 0, sizeof(struct nvsp_message)); 445 memset(init_packet, 0, sizeof(struct nvsp_message));
456 init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT; 446 init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
@@ -467,10 +457,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
467 if (ret != 0) 457 if (ret != 0)
468 return ret; 458 return ret;
469 459
470 t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ); 460 wait_for_completion(&net_device->channel_init_wait);
471
472 if (t == 0)
473 return -ETIMEDOUT;
474 461
475 if (init_packet->msg.init_msg.init_complete.status != 462 if (init_packet->msg.init_msg.init_complete.status !=
476 NVSP_STAT_SUCCESS) 463 NVSP_STAT_SUCCESS)
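[Editor's note on the netvsc.c changes above: each five-second wait_for_completion_timeout() paired with a BUG_ON() or -ETIMEDOUT path is replaced with a plain wait_for_completion(), presumably on the grounds that these completions are signalled by host responses on the vmbus channel and crashing, or failing the device, on a merely slow host was worse than waiting it out. The underlying primitive, for reference; the foo_* names are hypothetical:

    #include <linux/completion.h>

    static DECLARE_COMPLETION(foo_done);

    /* Waiter: sleeps uninterruptibly, with no upper bound. */
    static void foo_wait(void)
    {
            wait_for_completion(&foo_done);
    }

    /* Completer, e.g. a channel callback run on the host's response. */
    static void foo_signal(void)
    {
            complete(&foo_done);
    }
]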
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 6a69b5cc9fe2..787a20235e5c 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -98,16 +98,14 @@ static void netvsc_set_multicast_list(struct net_device *net)
98 98
99static int netvsc_open(struct net_device *net) 99static int netvsc_open(struct net_device *net)
100{ 100{
101 struct net_device_context *net_device_ctx = netdev_priv(net); 101 struct netvsc_device *nvdev = net_device_to_netvsc_device(net);
102 struct hv_device *device_obj = net_device_ctx->device_ctx;
103 struct netvsc_device *nvdev = net_device_ctx->nvdev;
104 struct rndis_device *rdev; 102 struct rndis_device *rdev;
105 int ret = 0; 103 int ret = 0;
106 104
107 netif_carrier_off(net); 105 netif_carrier_off(net);
108 106
109 /* Open up the device */ 107 /* Open up the device */
110 ret = rndis_filter_open(device_obj); 108 ret = rndis_filter_open(nvdev);
111 if (ret != 0) { 109 if (ret != 0) {
112 netdev_err(net, "unable to open device (ret %d).\n", ret); 110 netdev_err(net, "unable to open device (ret %d).\n", ret);
113 return ret; 111 return ret;
@@ -125,7 +123,6 @@ static int netvsc_open(struct net_device *net)
125static int netvsc_close(struct net_device *net) 123static int netvsc_close(struct net_device *net)
126{ 124{
127 struct net_device_context *net_device_ctx = netdev_priv(net); 125 struct net_device_context *net_device_ctx = netdev_priv(net);
128 struct hv_device *device_obj = net_device_ctx->device_ctx;
129 struct netvsc_device *nvdev = net_device_ctx->nvdev; 126 struct netvsc_device *nvdev = net_device_ctx->nvdev;
130 int ret; 127 int ret;
131 u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20; 128 u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
@@ -135,7 +132,7 @@ static int netvsc_close(struct net_device *net)
135 132
136 /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */ 133 /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
137 cancel_work_sync(&net_device_ctx->work); 134 cancel_work_sync(&net_device_ctx->work);
138 ret = rndis_filter_close(device_obj); 135 ret = rndis_filter_close(nvdev);
139 if (ret != 0) { 136 if (ret != 0) {
140 netdev_err(net, "unable to close device (ret %d).\n", ret); 137 netdev_err(net, "unable to close device (ret %d).\n", ret);
141 return ret; 138 return ret;
@@ -701,7 +698,6 @@ int netvsc_recv_callback(struct hv_device *device_obj,
701 } 698 }
702 699
703vf_injection_done: 700vf_injection_done:
704 net_device_ctx = netdev_priv(net);
705 rx_stats = this_cpu_ptr(net_device_ctx->rx_stats); 701 rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
706 702
707 /* Allocate a skb - TODO direct I/O to pages? */ 703 /* Allocate a skb - TODO direct I/O to pages? */
@@ -986,8 +982,6 @@ static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
986 982
987static int netvsc_set_mac_addr(struct net_device *ndev, void *p) 983static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
988{ 984{
989 struct net_device_context *ndevctx = netdev_priv(ndev);
990 struct hv_device *hdev = ndevctx->device_ctx;
991 struct sockaddr *addr = p; 985 struct sockaddr *addr = p;
992 char save_adr[ETH_ALEN]; 986 char save_adr[ETH_ALEN];
993 unsigned char save_aatype; 987 unsigned char save_aatype;
@@ -1000,7 +994,7 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
1000 if (err != 0) 994 if (err != 0)
1001 return err; 995 return err;
1002 996
1003 err = rndis_filter_set_device_mac(hdev, addr->sa_data); 997 err = rndis_filter_set_device_mac(ndev, addr->sa_data);
1004 if (err != 0) { 998 if (err != 0) {
1005 /* roll back to saved MAC */ 999 /* roll back to saved MAC */
1006 memcpy(ndev->dev_addr, save_adr, ETH_ALEN); 1000 memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
@@ -1248,7 +1242,7 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
1248 /* 1242 /*
1249 * Open the device before switching data path. 1243 * Open the device before switching data path.
1250 */ 1244 */
1251 rndis_filter_open(net_device_ctx->device_ctx); 1245 rndis_filter_open(netvsc_dev);
1252 1246
1253 /* 1247 /*
1254 * notify the host to switch the data path. 1248 * notify the host to switch the data path.
@@ -1303,7 +1297,7 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
1303 udelay(50); 1297 udelay(50);
1304 netvsc_switch_datapath(ndev, false); 1298 netvsc_switch_datapath(ndev, false);
1305 netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name); 1299 netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
1306 rndis_filter_close(net_device_ctx->device_ctx); 1300 rndis_filter_close(netvsc_dev);
1307 netif_carrier_on(ndev); 1301 netif_carrier_on(ndev);
1308 /* 1302 /*
1309 * Notify peers. 1303 * Notify peers.
@@ -1500,6 +1494,10 @@ static int netvsc_netdev_event(struct notifier_block *this,
1500{ 1494{
1501 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); 1495 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
1502 1496
1497 /* Avoid Vlan dev with same MAC registering as VF */
1498 if (event_dev->priv_flags & IFF_802_1Q_VLAN)
1499 return NOTIFY_DONE;
1500
1503 switch (event) { 1501 switch (event) {
1504 case NETDEV_REGISTER: 1502 case NETDEV_REGISTER:
1505 return netvsc_register_vf(event_dev); 1503 return netvsc_register_vf(event_dev);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 97c292b7dbea..8e830f741d47 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -466,7 +466,6 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
466 struct rndis_query_request *query; 466 struct rndis_query_request *query;
467 struct rndis_query_complete *query_complete; 467 struct rndis_query_complete *query_complete;
468 int ret = 0; 468 int ret = 0;
469 unsigned long t;
470 469
471 if (!result) 470 if (!result)
472 return -EINVAL; 471 return -EINVAL;
@@ -503,11 +502,7 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
503 if (ret != 0) 502 if (ret != 0)
504 goto cleanup; 503 goto cleanup;
505 504
506 t = wait_for_completion_timeout(&request->wait_event, 5*HZ); 505 wait_for_completion(&request->wait_event);
507 if (t == 0) {
508 ret = -ETIMEDOUT;
509 goto cleanup;
510 }
511 506
512 /* Copy the response back */ 507 /* Copy the response back */
513 query_complete = &request->response_msg.msg.query_complete; 508 query_complete = &request->response_msg.msg.query_complete;
@@ -543,11 +538,9 @@ static int rndis_filter_query_device_mac(struct rndis_device *dev)
543#define NWADR_STR "NetworkAddress" 538#define NWADR_STR "NetworkAddress"
544#define NWADR_STRLEN 14 539#define NWADR_STRLEN 14
545 540
546int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac) 541int rndis_filter_set_device_mac(struct net_device *ndev, char *mac)
547{ 542{
548 struct net_device *ndev = hv_get_drvdata(hdev); 543 struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev);
549 struct net_device_context *net_device_ctx = netdev_priv(ndev);
550 struct netvsc_device *nvdev = net_device_ctx->nvdev;
551 struct rndis_device *rdev = nvdev->extension; 544 struct rndis_device *rdev = nvdev->extension;
552 struct rndis_request *request; 545 struct rndis_request *request;
553 struct rndis_set_request *set; 546 struct rndis_set_request *set;
@@ -558,7 +551,6 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
558 u32 extlen = sizeof(struct rndis_config_parameter_info) + 551 u32 extlen = sizeof(struct rndis_config_parameter_info) +
559 2*NWADR_STRLEN + 4*ETH_ALEN; 552 2*NWADR_STRLEN + 4*ETH_ALEN;
560 int ret; 553 int ret;
561 unsigned long t;
562 554
563 request = get_rndis_request(rdev, RNDIS_MSG_SET, 555 request = get_rndis_request(rdev, RNDIS_MSG_SET,
564 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen); 556 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
@@ -599,21 +591,13 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
599 if (ret != 0) 591 if (ret != 0)
600 goto cleanup; 592 goto cleanup;
601 593
602 t = wait_for_completion_timeout(&request->wait_event, 5*HZ); 594 wait_for_completion(&request->wait_event);
603 if (t == 0) { 595
604 netdev_err(ndev, "timeout before we got a set response...\n"); 596 set_complete = &request->response_msg.msg.set_complete;
605 /* 597 if (set_complete->status != RNDIS_STATUS_SUCCESS) {
606 * can't put_rndis_request, since we may still receive a 598 netdev_err(ndev, "Fail to set MAC on host side:0x%x\n",
607 * send-completion. 599 set_complete->status);
608 */ 600 ret = -EINVAL;
609 return -EBUSY;
610 } else {
611 set_complete = &request->response_msg.msg.set_complete;
612 if (set_complete->status != RNDIS_STATUS_SUCCESS) {
613 netdev_err(ndev, "Fail to set MAC on host side:0x%x\n",
614 set_complete->status);
615 ret = -EINVAL;
616 }
617 } 601 }
618 602
619cleanup: 603cleanup:
@@ -622,12 +606,10 @@ cleanup:
622} 606}
623 607
624static int 608static int
625rndis_filter_set_offload_params(struct hv_device *hdev, 609rndis_filter_set_offload_params(struct net_device *ndev,
626 struct ndis_offload_params *req_offloads) 610 struct ndis_offload_params *req_offloads)
627{ 611{
628 struct net_device *ndev = hv_get_drvdata(hdev); 612 struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev);
629 struct net_device_context *net_device_ctx = netdev_priv(ndev);
630 struct netvsc_device *nvdev = net_device_ctx->nvdev;
631 struct rndis_device *rdev = nvdev->extension; 613 struct rndis_device *rdev = nvdev->extension;
632 struct rndis_request *request; 614 struct rndis_request *request;
633 struct rndis_set_request *set; 615 struct rndis_set_request *set;
@@ -635,7 +617,6 @@ rndis_filter_set_offload_params(struct hv_device *hdev,
635 struct rndis_set_complete *set_complete; 617 struct rndis_set_complete *set_complete;
636 u32 extlen = sizeof(struct ndis_offload_params); 618 u32 extlen = sizeof(struct ndis_offload_params);
637 int ret; 619 int ret;
638 unsigned long t;
639 u32 vsp_version = nvdev->nvsp_version; 620 u32 vsp_version = nvdev->nvsp_version;
640 621
641 if (vsp_version <= NVSP_PROTOCOL_VERSION_4) { 622 if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
@@ -669,20 +650,12 @@ rndis_filter_set_offload_params(struct hv_device *hdev,
669 if (ret != 0) 650 if (ret != 0)
670 goto cleanup; 651 goto cleanup;
671 652
672 t = wait_for_completion_timeout(&request->wait_event, 5*HZ); 653 wait_for_completion(&request->wait_event);
673 if (t == 0) { 654 set_complete = &request->response_msg.msg.set_complete;
674 netdev_err(ndev, "timeout before we got aOFFLOAD set response...\n"); 655 if (set_complete->status != RNDIS_STATUS_SUCCESS) {
675 /* can't put_rndis_request, since we may still receive a 656 netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
676 * send-completion. 657 set_complete->status);
677 */ 658 ret = -EINVAL;
678 return -EBUSY;
679 } else {
680 set_complete = &request->response_msg.msg.set_complete;
681 if (set_complete->status != RNDIS_STATUS_SUCCESS) {
682 netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
683 set_complete->status);
684 ret = -EINVAL;
685 }
686 } 659 }
687 660
688cleanup: 661cleanup:
@@ -710,7 +683,6 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
710 u32 *itab; 683 u32 *itab;
711 u8 *keyp; 684 u8 *keyp;
712 int i, ret; 685 int i, ret;
713 unsigned long t;
714 686
715 request = get_rndis_request( 687 request = get_rndis_request(
716 rdev, RNDIS_MSG_SET, 688 rdev, RNDIS_MSG_SET,
@@ -753,20 +725,12 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
753 if (ret != 0) 725 if (ret != 0)
754 goto cleanup; 726 goto cleanup;
755 727
756 t = wait_for_completion_timeout(&request->wait_event, 5*HZ); 728 wait_for_completion(&request->wait_event);
757 if (t == 0) { 729 set_complete = &request->response_msg.msg.set_complete;
758 netdev_err(ndev, "timeout before we got a set response...\n"); 730 if (set_complete->status != RNDIS_STATUS_SUCCESS) {
759 /* can't put_rndis_request, since we may still receive a 731 netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
760 * send-completion. 732 set_complete->status);
761 */ 733 ret = -EINVAL;
762 return -ETIMEDOUT;
763 } else {
764 set_complete = &request->response_msg.msg.set_complete;
765 if (set_complete->status != RNDIS_STATUS_SUCCESS) {
766 netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
767 set_complete->status);
768 ret = -EINVAL;
769 }
770 } 734 }
771 735
772cleanup: 736cleanup:
@@ -795,8 +759,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
795 struct rndis_set_complete *set_complete; 759 struct rndis_set_complete *set_complete;
796 u32 status; 760 u32 status;
797 int ret; 761 int ret;
798 unsigned long t;
799 struct net_device *ndev = dev->ndev;
800 762
801 request = get_rndis_request(dev, RNDIS_MSG_SET, 763 request = get_rndis_request(dev, RNDIS_MSG_SET,
802 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + 764 RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
@@ -819,26 +781,14 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
819 if (ret != 0) 781 if (ret != 0)
820 goto cleanup; 782 goto cleanup;
821 783
822 t = wait_for_completion_timeout(&request->wait_event, 5*HZ); 784 wait_for_completion(&request->wait_event);
823 785
824 if (t == 0) { 786 set_complete = &request->response_msg.msg.set_complete;
825 netdev_err(ndev, 787 status = set_complete->status;
826 "timeout before we got a set response...\n");
827 ret = -ETIMEDOUT;
828 /*
829 * We can't deallocate the request since we may still receive a
830 * send completion for it.
831 */
832 goto exit;
833 } else {
834 set_complete = &request->response_msg.msg.set_complete;
835 status = set_complete->status;
836 }
837 788
838cleanup: 789cleanup:
839 if (request) 790 if (request)
840 put_rndis_request(dev, request); 791 put_rndis_request(dev, request);
841exit:
842 return ret; 792 return ret;
843} 793}
844 794
@@ -850,9 +800,7 @@ static int rndis_filter_init_device(struct rndis_device *dev)
850 struct rndis_initialize_complete *init_complete; 800 struct rndis_initialize_complete *init_complete;
851 u32 status; 801 u32 status;
852 int ret; 802 int ret;
853 unsigned long t; 803 struct netvsc_device *nvdev = net_device_to_netvsc_device(dev->ndev);
854 struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
855 struct netvsc_device *nvdev = net_device_ctx->nvdev;
856 804
857 request = get_rndis_request(dev, RNDIS_MSG_INIT, 805 request = get_rndis_request(dev, RNDIS_MSG_INIT,
858 RNDIS_MESSAGE_SIZE(struct rndis_initialize_request)); 806 RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
@@ -875,12 +823,7 @@ static int rndis_filter_init_device(struct rndis_device *dev)
875 goto cleanup; 823 goto cleanup;
876 } 824 }
877 825
878 t = wait_for_completion_timeout(&request->wait_event, 5*HZ); 826 wait_for_completion(&request->wait_event);
879
880 if (t == 0) {
881 ret = -ETIMEDOUT;
882 goto cleanup;
883 }
884 827
885 init_complete = &request->response_msg.msg.init_complete; 828 init_complete = &request->response_msg.msg.init_complete;
886 status = init_complete->status; 829 status = init_complete->status;
@@ -977,8 +920,7 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
977{ 920{
978 struct net_device *ndev = 921 struct net_device *ndev =
979 hv_get_drvdata(new_sc->primary_channel->device_obj); 922 hv_get_drvdata(new_sc->primary_channel->device_obj);
980 struct net_device_context *net_device_ctx = netdev_priv(ndev); 923 struct netvsc_device *nvscdev = net_device_to_netvsc_device(ndev);
981 struct netvsc_device *nvscdev = net_device_ctx->nvdev;
982 u16 chn_index = new_sc->offermsg.offer.sub_channel_index; 924 u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
983 int ret; 925 int ret;
984 unsigned long flags; 926 unsigned long flags;
@@ -1014,7 +956,6 @@ int rndis_filter_device_add(struct hv_device *dev,
1014 struct netvsc_device_info *device_info = additional_info; 956 struct netvsc_device_info *device_info = additional_info;
1015 struct ndis_offload_params offloads; 957 struct ndis_offload_params offloads;
1016 struct nvsp_message *init_packet; 958 struct nvsp_message *init_packet;
1017 unsigned long t;
1018 struct ndis_recv_scale_cap rsscap; 959 struct ndis_recv_scale_cap rsscap;
1019 u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); 960 u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
1020 u32 mtu, size; 961 u32 mtu, size;
@@ -1088,7 +1029,7 @@ int rndis_filter_device_add(struct hv_device *dev,
1088 offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED; 1029 offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
1089 1030
1090 1031
1091 ret = rndis_filter_set_offload_params(dev, &offloads); 1032 ret = rndis_filter_set_offload_params(net, &offloads);
1092 if (ret) 1033 if (ret)
1093 goto err_dev_remv; 1034 goto err_dev_remv;
1094 1035
@@ -1157,11 +1098,8 @@ int rndis_filter_device_add(struct hv_device *dev,
1157 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 1098 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1158 if (ret) 1099 if (ret)
1159 goto out; 1100 goto out;
1160 t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ); 1101 wait_for_completion(&net_device->channel_init_wait);
1161 if (t == 0) { 1102
1162 ret = -ETIMEDOUT;
1163 goto out;
1164 }
1165 if (init_packet->msg.v5_msg.subchn_comp.status != 1103 if (init_packet->msg.v5_msg.subchn_comp.status !=
1166 NVSP_STAT_SUCCESS) { 1104 NVSP_STAT_SUCCESS) {
1167 ret = -ENODEV; 1105 ret = -ENODEV;
@@ -1196,21 +1134,14 @@ err_dev_remv:
1196 1134
1197void rndis_filter_device_remove(struct hv_device *dev) 1135void rndis_filter_device_remove(struct hv_device *dev)
1198{ 1136{
1199 struct net_device *ndev = hv_get_drvdata(dev); 1137 struct netvsc_device *net_dev = hv_device_to_netvsc_device(dev);
1200 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1201 struct netvsc_device *net_dev = net_device_ctx->nvdev;
1202 struct rndis_device *rndis_dev = net_dev->extension; 1138 struct rndis_device *rndis_dev = net_dev->extension;
1203 unsigned long t;
1204 1139
1205 /* If not all subchannel offers are complete, wait for them until 1140 /* If not all subchannel offers are complete, wait for them until
1206 * completion to avoid race. 1141 * completion to avoid race.
1207 */ 1142 */
1208 while (net_dev->num_sc_offered > 0) { 1143 if (net_dev->num_sc_offered > 0)
1209 t = wait_for_completion_timeout(&net_dev->channel_init_wait, 1144 wait_for_completion(&net_dev->channel_init_wait);
1210 10 * HZ);
1211 if (t == 0)
1212 WARN(1, "Netvsc: Waiting for sub-channel processing");
1213 }
1214 1145
1215 /* Halt and release the rndis device */ 1146 /* Halt and release the rndis device */
1216 rndis_filter_halt_device(rndis_dev); 1147 rndis_filter_halt_device(rndis_dev);
@@ -1222,27 +1153,19 @@ void rndis_filter_device_remove(struct hv_device *dev)
1222} 1153}
1223 1154
1224 1155
1225int rndis_filter_open(struct hv_device *dev) 1156int rndis_filter_open(struct netvsc_device *nvdev)
1226{ 1157{
1227 struct net_device *ndev = hv_get_drvdata(dev); 1158 if (!nvdev)
1228 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1229 struct netvsc_device *net_device = net_device_ctx->nvdev;
1230
1231 if (!net_device)
1232 return -EINVAL; 1159 return -EINVAL;
1233 1160
1234 if (atomic_inc_return(&net_device->open_cnt) != 1) 1161 if (atomic_inc_return(&nvdev->open_cnt) != 1)
1235 return 0; 1162 return 0;
1236 1163
1237 return rndis_filter_open_device(net_device->extension); 1164 return rndis_filter_open_device(nvdev->extension);
1238} 1165}
1239 1166
1240int rndis_filter_close(struct hv_device *dev) 1167int rndis_filter_close(struct netvsc_device *nvdev)
1241{ 1168{
1242 struct net_device *ndev = hv_get_drvdata(dev);
1243 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1244 struct netvsc_device *nvdev = net_device_ctx->nvdev;
1245
1246 if (!nvdev) 1169 if (!nvdev)
1247 return -EINVAL; 1170 return -EINVAL;
1248 1171
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 1c4d395fbd49..18b4e8c7f68a 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -80,13 +80,6 @@ static void ipvlan_port_destroy(struct net_device *dev)
80 kfree_rcu(port, rcu); 80 kfree_rcu(port, rcu);
81} 81}
82 82
83/* ipvlan network devices have devices nesting below it and are a special
84 * "super class" of normal network devices; split their locks off into a
85 * separate class since they always nest.
86 */
87static struct lock_class_key ipvlan_netdev_xmit_lock_key;
88static struct lock_class_key ipvlan_netdev_addr_lock_key;
89
90#define IPVLAN_FEATURES \ 83#define IPVLAN_FEATURES \
91 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ 84 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
92 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ 85 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
@@ -96,19 +89,6 @@ static struct lock_class_key ipvlan_netdev_addr_lock_key;
96#define IPVLAN_STATE_MASK \ 89#define IPVLAN_STATE_MASK \
97 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) 90 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
98 91
99static void ipvlan_set_lockdep_class_one(struct net_device *dev,
100 struct netdev_queue *txq,
101 void *_unused)
102{
103 lockdep_set_class(&txq->_xmit_lock, &ipvlan_netdev_xmit_lock_key);
104}
105
106static void ipvlan_set_lockdep_class(struct net_device *dev)
107{
108 lockdep_set_class(&dev->addr_list_lock, &ipvlan_netdev_addr_lock_key);
109 netdev_for_each_tx_queue(dev, ipvlan_set_lockdep_class_one, NULL);
110}
111
112static int ipvlan_init(struct net_device *dev) 92static int ipvlan_init(struct net_device *dev)
113{ 93{
114 struct ipvl_dev *ipvlan = netdev_priv(dev); 94 struct ipvl_dev *ipvlan = netdev_priv(dev);
@@ -123,7 +103,7 @@ static int ipvlan_init(struct net_device *dev)
123 dev->gso_max_segs = phy_dev->gso_max_segs; 103 dev->gso_max_segs = phy_dev->gso_max_segs;
124 dev->hard_header_len = phy_dev->hard_header_len; 104 dev->hard_header_len = phy_dev->hard_header_len;
125 105
126 ipvlan_set_lockdep_class(dev); 106 netdev_lockdep_set_classes(dev);
127 107
128 ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats); 108 ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats);
129 if (!ipvlan->pcpu_stats) 109 if (!ipvlan->pcpu_stats)
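[Editor's note on the ipvlan diff above (macvlan gets the same treatment below): stacked network devices need their own lockdep classes for the per-queue _xmit_lock so lockdep does not flag the legitimate upper/lower nesting as a deadlock. The new netdev_lockdep_set_classes() helper centralises what each driver used to do with its own netdev_for_each_tx_queue() loop and static lock_class_key. A minimal sketch of its use, assuming a stacked device's ndo_init:

    #include <linux/netdevice.h>

    static int foo_init(struct net_device *dev)
    {
            /* One call replaces the per-driver lockdep boilerplate. */
            netdev_lockdep_set_classes(dev);
            return 0;
    }
]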
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index a400288cb37b..6255973e3dda 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -169,10 +169,9 @@ static void loopback_setup(struct net_device *dev)
169 dev->flags = IFF_LOOPBACK; 169 dev->flags = IFF_LOOPBACK;
170 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; 170 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
171 netif_keep_dst(dev); 171 netif_keep_dst(dev);
172 dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO; 172 dev->hw_features = NETIF_F_GSO_SOFTWARE;
173 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST 173 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
174 | NETIF_F_ALL_TSO 174 | NETIF_F_GSO_SOFTWARE
175 | NETIF_F_UFO
176 | NETIF_F_HW_CSUM 175 | NETIF_F_HW_CSUM
177 | NETIF_F_RXCSUM 176 | NETIF_F_RXCSUM
178 | NETIF_F_SCTP_CRC 177 | NETIF_F_SCTP_CRC
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index cb01023eab41..cd9b53834bf6 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -49,6 +49,7 @@ struct macvlan_port {
49 bool passthru; 49 bool passthru;
50 int count; 50 int count;
51 struct hlist_head vlan_source_hash[MACVLAN_HASH_SIZE]; 51 struct hlist_head vlan_source_hash[MACVLAN_HASH_SIZE];
52 DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
52}; 53};
53 54
54struct macvlan_source_entry { 55struct macvlan_source_entry {
@@ -305,11 +306,14 @@ static void macvlan_process_broadcast(struct work_struct *w)
305 306
306 rcu_read_unlock(); 307 rcu_read_unlock();
307 308
309 if (src)
310 dev_put(src->dev);
308 kfree_skb(skb); 311 kfree_skb(skb);
309 } 312 }
310} 313}
311 314
312static void macvlan_broadcast_enqueue(struct macvlan_port *port, 315static void macvlan_broadcast_enqueue(struct macvlan_port *port,
316 const struct macvlan_dev *src,
313 struct sk_buff *skb) 317 struct sk_buff *skb)
314{ 318{
315 struct sk_buff *nskb; 319 struct sk_buff *nskb;
@@ -319,8 +323,12 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
319 if (!nskb) 323 if (!nskb)
320 goto err; 324 goto err;
321 325
326 MACVLAN_SKB_CB(nskb)->src = src;
327
322 spin_lock(&port->bc_queue.lock); 328 spin_lock(&port->bc_queue.lock);
323 if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) { 329 if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) {
330 if (src)
331 dev_hold(src->dev);
324 __skb_queue_tail(&port->bc_queue, nskb); 332 __skb_queue_tail(&port->bc_queue, nskb);
325 err = 0; 333 err = 0;
326 } 334 }
@@ -412,6 +420,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
412 420
413 port = macvlan_port_get_rcu(skb->dev); 421 port = macvlan_port_get_rcu(skb->dev);
414 if (is_multicast_ether_addr(eth->h_dest)) { 422 if (is_multicast_ether_addr(eth->h_dest)) {
423 unsigned int hash;
424
415 skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN); 425 skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN);
416 if (!skb) 426 if (!skb)
417 return RX_HANDLER_CONSUMED; 427 return RX_HANDLER_CONSUMED;
@@ -429,8 +439,9 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
429 goto out; 439 goto out;
430 } 440 }
431 441
432 MACVLAN_SKB_CB(skb)->src = src; 442 hash = mc_hash(NULL, eth->h_dest);
433 macvlan_broadcast_enqueue(port, skb); 443 if (test_bit(hash, port->mc_filter))
444 macvlan_broadcast_enqueue(port, src, skb);
434 445
435 return RX_HANDLER_PASS; 446 return RX_HANDLER_PASS;
436 } 447 }
@@ -716,12 +727,12 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
716 } 727 }
717} 728}
718 729
719static void macvlan_set_mac_lists(struct net_device *dev) 730static void macvlan_compute_filter(unsigned long *mc_filter,
731 struct net_device *dev,
732 struct macvlan_dev *vlan)
720{ 733{
721 struct macvlan_dev *vlan = netdev_priv(dev);
722
723 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { 734 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
724 bitmap_fill(vlan->mc_filter, MACVLAN_MC_FILTER_SZ); 735 bitmap_fill(mc_filter, MACVLAN_MC_FILTER_SZ);
725 } else { 736 } else {
726 struct netdev_hw_addr *ha; 737 struct netdev_hw_addr *ha;
727 DECLARE_BITMAP(filter, MACVLAN_MC_FILTER_SZ); 738 DECLARE_BITMAP(filter, MACVLAN_MC_FILTER_SZ);
@@ -733,10 +744,33 @@ static void macvlan_set_mac_lists(struct net_device *dev)
733 744
734 __set_bit(mc_hash(vlan, dev->broadcast), filter); 745 __set_bit(mc_hash(vlan, dev->broadcast), filter);
735 746
736 bitmap_copy(vlan->mc_filter, filter, MACVLAN_MC_FILTER_SZ); 747 bitmap_copy(mc_filter, filter, MACVLAN_MC_FILTER_SZ);
737 } 748 }
749}
750
751static void macvlan_set_mac_lists(struct net_device *dev)
752{
753 struct macvlan_dev *vlan = netdev_priv(dev);
754
755 macvlan_compute_filter(vlan->mc_filter, dev, vlan);
756
738 dev_uc_sync(vlan->lowerdev, dev); 757 dev_uc_sync(vlan->lowerdev, dev);
739 dev_mc_sync(vlan->lowerdev, dev); 758 dev_mc_sync(vlan->lowerdev, dev);
759
760 /* This is slightly inaccurate as we're including the subscription
761 * list of vlan->lowerdev too.
762 *
763 * Bug alert: This only works if everyone has the same broadcast
764 * address as lowerdev. As soon as someone changes theirs this
765 * will break.
766 *
767 * However, this is already broken as when you change your broadcast
768 * address we don't get called.
769 *
770 * The solution is to maintain a list of broadcast addresses like
771 * we do for uc/mc, if you care.
772 */
773 macvlan_compute_filter(vlan->port->mc_filter, vlan->lowerdev, NULL);
740} 774}
741 775
742static int macvlan_change_mtu(struct net_device *dev, int new_mtu) 776static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
@@ -754,7 +788,6 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
754 * "super class" of normal network devices; split their locks off into a 788 * "super class" of normal network devices; split their locks off into a
755 * separate class since they always nest. 789 * separate class since they always nest.
756 */ 790 */
757static struct lock_class_key macvlan_netdev_xmit_lock_key;
758static struct lock_class_key macvlan_netdev_addr_lock_key; 791static struct lock_class_key macvlan_netdev_addr_lock_key;
759 792
760#define ALWAYS_ON_FEATURES \ 793#define ALWAYS_ON_FEATURES \
@@ -775,20 +808,12 @@ static int macvlan_get_nest_level(struct net_device *dev)
775 return ((struct macvlan_dev *)netdev_priv(dev))->nest_level; 808 return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
776} 809}
777 810
778static void macvlan_set_lockdep_class_one(struct net_device *dev,
779 struct netdev_queue *txq,
780 void *_unused)
781{
782 lockdep_set_class(&txq->_xmit_lock,
783 &macvlan_netdev_xmit_lock_key);
784}
785
786static void macvlan_set_lockdep_class(struct net_device *dev) 811static void macvlan_set_lockdep_class(struct net_device *dev)
787{ 812{
813 netdev_lockdep_set_classes(dev);
788 lockdep_set_class_and_subclass(&dev->addr_list_lock, 814 lockdep_set_class_and_subclass(&dev->addr_list_lock,
789 &macvlan_netdev_addr_lock_key, 815 &macvlan_netdev_addr_lock_key,
790 macvlan_get_nest_level(dev)); 816 macvlan_get_nest_level(dev));
791 netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
792} 817}
793 818
794static int macvlan_init(struct net_device *dev) 819static int macvlan_init(struct net_device *dev)
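[Editor's note on the macvlan diff above: the port now keeps its own multicast hash bitmap, computed as a union over the lower device's subscriptions, so unwanted multicast frames are dropped with a single bit test before any skb clone or broadcast-queue work is spent on them. A loose sketch of the hash-bitmap idea only; the constants and the driver's real mc_hash() differ in detail, and the foo_* names are hypothetical:

    #include <linux/bitmap.h>
    #include <linux/crc32.h>
    #include <linux/etherdevice.h>
    #include <linux/hash.h>

    #define FOO_MC_FILTER_BITS      8
    #define FOO_MC_FILTER_SZ        (1 << FOO_MC_FILTER_BITS)

    static DECLARE_BITMAP(foo_mc_filter, FOO_MC_FILTER_SZ);

    /* Illustrative hash: CRC of the MAC folded to the bitmap size. */
    static unsigned int foo_mc_hash(const unsigned char *addr)
    {
            return hash_32(ether_crc(ETH_ALEN, addr), FOO_MC_FILTER_BITS);
    }

    /* RX fast path: cheap test before any cloning or queueing. */
    static bool foo_mc_wanted(const unsigned char *addr)
    {
            return test_bit(foo_mc_hash(addr), foo_mc_filter);
    }
]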
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index bd6720962b1f..95a13321f688 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -627,93 +627,6 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
627 return skb; 627 return skb;
628} 628}
629 629
630/*
631 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
632 * be shared with the tun/tap driver.
633 */
634static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
635 struct sk_buff *skb,
636 struct virtio_net_hdr *vnet_hdr)
637{
638 unsigned short gso_type = 0;
639 if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
640 switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
641 case VIRTIO_NET_HDR_GSO_TCPV4:
642 gso_type = SKB_GSO_TCPV4;
643 break;
644 case VIRTIO_NET_HDR_GSO_TCPV6:
645 gso_type = SKB_GSO_TCPV6;
646 break;
647 case VIRTIO_NET_HDR_GSO_UDP:
648 gso_type = SKB_GSO_UDP;
649 break;
650 default:
651 return -EINVAL;
652 }
653
654 if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
655 gso_type |= SKB_GSO_TCP_ECN;
656
657 if (vnet_hdr->gso_size == 0)
658 return -EINVAL;
659 }
660
661 if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
662 if (!skb_partial_csum_set(skb, macvtap16_to_cpu(q, vnet_hdr->csum_start),
663 macvtap16_to_cpu(q, vnet_hdr->csum_offset)))
664 return -EINVAL;
665 }
666
667 if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
668 skb_shinfo(skb)->gso_size = macvtap16_to_cpu(q, vnet_hdr->gso_size);
669 skb_shinfo(skb)->gso_type = gso_type;
670
671 /* Header must be checked, and gso_segs computed. */
672 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
673 skb_shinfo(skb)->gso_segs = 0;
674 }
675 return 0;
676}
677
678static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
679 const struct sk_buff *skb,
680 struct virtio_net_hdr *vnet_hdr)
681{
682 memset(vnet_hdr, 0, sizeof(*vnet_hdr));
683
684 if (skb_is_gso(skb)) {
685 struct skb_shared_info *sinfo = skb_shinfo(skb);
686
687 /* This is a hint as to how much should be linear. */
688 vnet_hdr->hdr_len = cpu_to_macvtap16(q, skb_headlen(skb));
689 vnet_hdr->gso_size = cpu_to_macvtap16(q, sinfo->gso_size);
690 if (sinfo->gso_type & SKB_GSO_TCPV4)
691 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
692 else if (sinfo->gso_type & SKB_GSO_TCPV6)
693 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
694 else if (sinfo->gso_type & SKB_GSO_UDP)
695 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
696 else
697 BUG();
698 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
699 vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
700 } else
701 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
702
703 if (skb->ip_summed == CHECKSUM_PARTIAL) {
704 vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
705 if (skb_vlan_tag_present(skb))
706 vnet_hdr->csum_start = cpu_to_macvtap16(q,
707 skb_checksum_start_offset(skb) + VLAN_HLEN);
708 else
709 vnet_hdr->csum_start = cpu_to_macvtap16(q,
710 skb_checksum_start_offset(skb));
711 vnet_hdr->csum_offset = cpu_to_macvtap16(q, skb->csum_offset);
712 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
713 vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
714 } /* else everything is zero */
715}
716
717/* Neighbour code has some assumptions on HH_DATA_MOD alignment */ 630/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
718#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN) 631#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
719 632
@@ -812,7 +725,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
812 skb->protocol = eth_hdr(skb)->h_proto; 725 skb->protocol = eth_hdr(skb)->h_proto;
813 726
814 if (vnet_hdr_len) { 727 if (vnet_hdr_len) {
815 err = macvtap_skb_from_vnet_hdr(q, skb, &vnet_hdr); 728 err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
729 macvtap_is_little_endian(q));
816 if (err) 730 if (err)
817 goto err_kfree; 731 goto err_kfree;
818 } 732 }
@@ -880,7 +794,10 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
880 if (iov_iter_count(iter) < vnet_hdr_len) 794 if (iov_iter_count(iter) < vnet_hdr_len)
881 return -EINVAL; 795 return -EINVAL;
882 796
883 macvtap_skb_to_vnet_hdr(q, skb, &vnet_hdr); 797 ret = virtio_net_hdr_from_skb(skb, &vnet_hdr,
798 macvtap_is_little_endian(q));
799 if (ret)
800 BUG();
884 801
885 if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) != 802 if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
886 sizeof(vnet_hdr)) 803 sizeof(vnet_hdr))
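
The macvtap hunks above, together with the tun.c and virtio_net.c hunks later in this diff, replace each driver's open-coded virtio_net_hdr parsing with the shared helpers introduced in <linux/virtio_net.h>. A minimal sketch of the call pattern, assuming the three-argument signatures visible at the call sites in this diff; the wrapper names rx_handle_vnet_hdr/tx_build_vnet_hdr are illustrative, not part of the patch:

#include <linux/virtio_net.h>

/* receive path: fill in skb GSO and checksum state from a
 * guest-supplied header; validates gso_type, gso_size and the
 * csum_start/csum_offset pair before touching the skb.
 */
static int rx_handle_vnet_hdr(struct sk_buff *skb,
			      const struct virtio_net_hdr *hdr,
			      bool little_endian)
{
	return virtio_net_hdr_to_skb(skb, hdr, little_endian);
}

/* transmit path: derive a header from the skb before copying it
 * out; only fails for GSO types the header cannot express.
 */
static int tx_build_vnet_hdr(const struct sk_buff *skb,
			     struct virtio_net_hdr *hdr,
			     bool little_endian)
{
	return virtio_net_hdr_from_skb(skb, hdr, little_endian);
}

Each driver keeps only its own endianness test (macvtap_is_little_endian(), tun_is_little_endian(), virtio_is_little_endian()) and its own error accounting around these calls.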
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 6dad9a9c356c..f96829415ce6 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -12,6 +12,9 @@ menuconfig PHYLIB
12 12
13if PHYLIB 13if PHYLIB
14 14
15config SWPHY
16 bool
17
15comment "MII PHY device drivers" 18comment "MII PHY device drivers"
16 19
17config AQUANTIA_PHY 20config AQUANTIA_PHY
@@ -159,6 +162,7 @@ config MICROCHIP_PHY
159config FIXED_PHY 162config FIXED_PHY
160 tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" 163 tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
161 depends on PHYLIB 164 depends on PHYLIB
165 select SWPHY
162 ---help--- 166 ---help---
163 Adds the platform "fixed" MDIO Bus to cover the boards that use 167 Adds the platform "fixed" MDIO Bus to cover the boards that use
164 PHYs that are not connected to the real MDIO bus. 168 PHYs that are not connected to the real MDIO bus.
@@ -254,6 +258,17 @@ config MDIO_BUS_MUX_MMIOREG
254 258
255 Currently, only 8-bit registers are supported. 259 Currently, only 8-bit registers are supported.
256 260
261config MDIO_BUS_MUX_BCM_IPROC
262 tristate "Support for iProc based MDIO bus multiplexers"
263 depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST)
264 select MDIO_BUS_MUX
265 default ARCH_BCM_IPROC
266 help
267 This module provides a driver for MDIO bus multiplexers found in
268 iProc based Broadcom SoCs. The multiplexer connects one of several
269 child MDIO buses to a parent bus. Buses can be internal as well as
270 external, and the selection logic lives inside the multiplexer itself.
271
257config MDIO_BCM_UNIMAC 272config MDIO_BCM_UNIMAC
258 tristate "Broadcom UniMAC MDIO bus controller" 273 tristate "Broadcom UniMAC MDIO bus controller"
259 depends on HAS_IOMEM 274 depends on HAS_IOMEM
@@ -271,6 +286,14 @@ config MDIO_BCM_IPROC
271 This module provides a driver for the MDIO busses found in the 286 This module provides a driver for the MDIO busses found in the
272 Broadcom iProc SoC's. 287 Broadcom iProc SoC's.
273 288
289config INTEL_XWAY_PHY
290 tristate "Driver for Intel XWAY PHYs"
291 ---help---
292 Supports the Intel XWAY (formerly Lantiq) 11G and 22E PHYs.
293 These PHYs are marked as standalone chips under the names
294 PEF 7061, PEF 7071 and PEF 7072 or integrated into the Intel
295 SoCs xRX200, xRX300, xRX330, xRX350 and xRX550.
296
274endif # PHYLIB 297endif # PHYLIB
275 298
276config MICREL_KS8995MA 299config MICREL_KS8995MA
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index fcdbb9299fab..7158274327d0 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -1,6 +1,7 @@
1# Makefile for Linux PHY drivers 1# Makefile for Linux PHY drivers
2 2
3libphy-objs := phy.o phy_device.o mdio_bus.o mdio_device.o 3libphy-y := phy.o phy_device.o mdio_bus.o mdio_device.o
4libphy-$(CONFIG_SWPHY) += swphy.o
4 5
5obj-$(CONFIG_PHYLIB) += libphy.o 6obj-$(CONFIG_PHYLIB) += libphy.o
6obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o 7obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o
@@ -39,8 +40,10 @@ obj-$(CONFIG_AMD_PHY) += amd.o
39obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o 40obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
40obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o 41obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
41obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o 42obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
43obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
42obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o 44obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
43obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o 45obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
44obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o 46obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
45obj-$(CONFIG_MICROCHIP_PHY) += microchip.o 47obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
46obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o 48obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o
49obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 9ec7f7353434..c649c101bbab 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -23,9 +23,10 @@
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/of.h> 24#include <linux/of.h>
25#include <linux/gpio.h> 25#include <linux/gpio.h>
26#include <linux/seqlock.h>
26#include <linux/idr.h> 27#include <linux/idr.h>
27 28
28#define MII_REGS_NUM 29 29#include "swphy.h"
29 30
30struct fixed_mdio_bus { 31struct fixed_mdio_bus {
31 struct mii_bus *mii_bus; 32 struct mii_bus *mii_bus;
@@ -34,8 +35,8 @@ struct fixed_mdio_bus {
34 35
35struct fixed_phy { 36struct fixed_phy {
36 int addr; 37 int addr;
37 u16 regs[MII_REGS_NUM];
38 struct phy_device *phydev; 38 struct phy_device *phydev;
39 seqcount_t seqcount;
39 struct fixed_phy_status status; 40 struct fixed_phy_status status;
40 int (*link_update)(struct net_device *, struct fixed_phy_status *); 41 int (*link_update)(struct net_device *, struct fixed_phy_status *);
41 struct list_head node; 42 struct list_head node;
@@ -47,103 +48,10 @@ static struct fixed_mdio_bus platform_fmb = {
47 .phys = LIST_HEAD_INIT(platform_fmb.phys), 48 .phys = LIST_HEAD_INIT(platform_fmb.phys),
48}; 49};
49 50
50static int fixed_phy_update_regs(struct fixed_phy *fp) 51static void fixed_phy_update(struct fixed_phy *fp)
51{ 52{
52 u16 bmsr = BMSR_ANEGCAPABLE;
53 u16 bmcr = 0;
54 u16 lpagb = 0;
55 u16 lpa = 0;
56
57 if (gpio_is_valid(fp->link_gpio)) 53 if (gpio_is_valid(fp->link_gpio))
58 fp->status.link = !!gpio_get_value_cansleep(fp->link_gpio); 54 fp->status.link = !!gpio_get_value_cansleep(fp->link_gpio);
59
60 if (fp->status.duplex) {
61 switch (fp->status.speed) {
62 case 1000:
63 bmsr |= BMSR_ESTATEN;
64 break;
65 case 100:
66 bmsr |= BMSR_100FULL;
67 break;
68 case 10:
69 bmsr |= BMSR_10FULL;
70 break;
71 default:
72 break;
73 }
74 } else {
75 switch (fp->status.speed) {
76 case 1000:
77 bmsr |= BMSR_ESTATEN;
78 break;
79 case 100:
80 bmsr |= BMSR_100HALF;
81 break;
82 case 10:
83 bmsr |= BMSR_10HALF;
84 break;
85 default:
86 break;
87 }
88 }
89
90 if (fp->status.link) {
91 bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
92
93 if (fp->status.duplex) {
94 bmcr |= BMCR_FULLDPLX;
95
96 switch (fp->status.speed) {
97 case 1000:
98 bmcr |= BMCR_SPEED1000;
99 lpagb |= LPA_1000FULL;
100 break;
101 case 100:
102 bmcr |= BMCR_SPEED100;
103 lpa |= LPA_100FULL;
104 break;
105 case 10:
106 lpa |= LPA_10FULL;
107 break;
108 default:
109 pr_warn("fixed phy: unknown speed\n");
110 return -EINVAL;
111 }
112 } else {
113 switch (fp->status.speed) {
114 case 1000:
115 bmcr |= BMCR_SPEED1000;
116 lpagb |= LPA_1000HALF;
117 break;
118 case 100:
119 bmcr |= BMCR_SPEED100;
120 lpa |= LPA_100HALF;
121 break;
122 case 10:
123 lpa |= LPA_10HALF;
124 break;
125 default:
126 pr_warn("fixed phy: unknown speed\n");
127 return -EINVAL;
128 }
129 }
130
131 if (fp->status.pause)
132 lpa |= LPA_PAUSE_CAP;
133
134 if (fp->status.asym_pause)
135 lpa |= LPA_PAUSE_ASYM;
136 }
137
138 fp->regs[MII_PHYSID1] = 0;
139 fp->regs[MII_PHYSID2] = 0;
140
141 fp->regs[MII_BMSR] = bmsr;
142 fp->regs[MII_BMCR] = bmcr;
143 fp->regs[MII_LPA] = lpa;
144 fp->regs[MII_STAT1000] = lpagb;
145
146 return 0;
147} 55}
148 56
149static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) 57static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
@@ -151,29 +59,23 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
151 struct fixed_mdio_bus *fmb = bus->priv; 59 struct fixed_mdio_bus *fmb = bus->priv;
152 struct fixed_phy *fp; 60 struct fixed_phy *fp;
153 61
154 if (reg_num >= MII_REGS_NUM)
155 return -1;
156
157 /* We do not support emulating Clause 45 over Clause 22 register reads
158 * return an error instead of bogus data.
159 */
160 switch (reg_num) {
161 case MII_MMD_CTRL:
162 case MII_MMD_DATA:
163 return -1;
164 default:
165 break;
166 }
167
168 list_for_each_entry(fp, &fmb->phys, node) { 62 list_for_each_entry(fp, &fmb->phys, node) {
169 if (fp->addr == phy_addr) { 63 if (fp->addr == phy_addr) {
170 /* Issue callback if user registered it. */ 64 struct fixed_phy_status state;
171 if (fp->link_update) { 65 int s;
172 fp->link_update(fp->phydev->attached_dev, 66
173 &fp->status); 67 do {
174 fixed_phy_update_regs(fp); 68 s = read_seqcount_begin(&fp->seqcount);
175 } 69 /* Issue callback if user registered it. */
176 return fp->regs[reg_num]; 70 if (fp->link_update) {
71 fp->link_update(fp->phydev->attached_dev,
72 &fp->status);
73 fixed_phy_update(fp);
74 }
75 state = fp->status;
76 } while (read_seqcount_retry(&fp->seqcount, s));
77
78 return swphy_read_reg(reg_num, &state);
177 } 79 }
178 } 80 }
179 81
@@ -225,6 +127,7 @@ int fixed_phy_update_state(struct phy_device *phydev,
225 127
226 list_for_each_entry(fp, &fmb->phys, node) { 128 list_for_each_entry(fp, &fmb->phys, node) {
227 if (fp->addr == phydev->mdio.addr) { 129 if (fp->addr == phydev->mdio.addr) {
130 write_seqcount_begin(&fp->seqcount);
228#define _UPD(x) if (changed->x) \ 131#define _UPD(x) if (changed->x) \
229 fp->status.x = status->x 132 fp->status.x = status->x
230 _UPD(link); 133 _UPD(link);
@@ -233,7 +136,8 @@ int fixed_phy_update_state(struct phy_device *phydev,
233 _UPD(pause); 136 _UPD(pause);
234 _UPD(asym_pause); 137 _UPD(asym_pause);
235#undef _UPD 138#undef _UPD
236 fixed_phy_update_regs(fp); 139 fixed_phy_update(fp);
140 write_seqcount_end(&fp->seqcount);
237 return 0; 141 return 0;
238 } 142 }
239 } 143 }
@@ -250,11 +154,15 @@ int fixed_phy_add(unsigned int irq, int phy_addr,
250 struct fixed_mdio_bus *fmb = &platform_fmb; 154 struct fixed_mdio_bus *fmb = &platform_fmb;
251 struct fixed_phy *fp; 155 struct fixed_phy *fp;
252 156
157 ret = swphy_validate_state(status);
158 if (ret < 0)
159 return ret;
160
253 fp = kzalloc(sizeof(*fp), GFP_KERNEL); 161 fp = kzalloc(sizeof(*fp), GFP_KERNEL);
254 if (!fp) 162 if (!fp)
255 return -ENOMEM; 163 return -ENOMEM;
256 164
257 memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM); 165 seqcount_init(&fp->seqcount);
258 166
259 if (irq != PHY_POLL) 167 if (irq != PHY_POLL)
260 fmb->mii_bus->irq[phy_addr] = irq; 168 fmb->mii_bus->irq[phy_addr] = irq;
@@ -270,17 +178,12 @@ int fixed_phy_add(unsigned int irq, int phy_addr,
270 goto err_regs; 178 goto err_regs;
271 } 179 }
272 180
273 ret = fixed_phy_update_regs(fp); 181 fixed_phy_update(fp);
274 if (ret)
275 goto err_gpio;
276 182
277 list_add_tail(&fp->node, &fmb->phys); 183 list_add_tail(&fp->node, &fmb->phys);
278 184
279 return 0; 185 return 0;
280 186
281err_gpio:
282 if (gpio_is_valid(fp->link_gpio))
283 gpio_free(fp->link_gpio);
284err_regs: 187err_regs:
285 kfree(fp); 188 kfree(fp);
286 return ret; 189 return ret;
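
The rewritten fixed_phy read path above swaps per-PHY register shadowing for a seqcount: fixed_mdio_read() now loops until it sees a consistent fixed_phy_status snapshot, while fixed_phy_update_state() publishes changes inside a write section. A minimal sketch of that reader/writer protocol, assuming a single serialized writer; the struct and function names here are illustrative, not the driver's:

#include <linux/seqlock.h>

struct link_state {
	seqcount_t seq;		/* protects speed/duplex below */
	int speed;
	int duplex;
};

/* writer side: callers must already be serialized against each
 * other, e.g. by a lock or by being the only updater.
 */
static void link_state_update(struct link_state *ls, int speed, int duplex)
{
	write_seqcount_begin(&ls->seq);	/* bumps the sequence, readers retry */
	ls->speed = speed;
	ls->duplex = duplex;
	write_seqcount_end(&ls->seq);
}

/* reader side: lockless; retries if a write overlapped the reads */
static void link_state_snapshot(struct link_state *ls, int *speed, int *duplex)
{
	unsigned int s;

	do {
		s = read_seqcount_begin(&ls->seq);
		*speed = ls->speed;
		*duplex = ls->duplex;
	} while (read_seqcount_retry(&ls->seq, s));
}

seqcount_init(&ls->seq) must run before first use, matching the seqcount_init() added to fixed_phy_add() above.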
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
new file mode 100644
index 000000000000..c300ab5587b8
--- /dev/null
+++ b/drivers/net/phy/intel-xway.c
@@ -0,0 +1,376 @@
1/*
2 * Copyright (C) 2012 Daniel Schwierzeck <daniel.schwierzeck@googlemail.com>
3 * Copyright (C) 2016 Hauke Mehrtens <hauke@hauke-m.de>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/mdio.h>
17#include <linux/module.h>
18#include <linux/phy.h>
19#include <linux/of.h>
20
21#define XWAY_MDIO_IMASK 0x19 /* interrupt mask */
22#define XWAY_MDIO_ISTAT 0x1A /* interrupt status */
23
24#define XWAY_MDIO_INIT_WOL BIT(15) /* Wake-On-LAN */
25#define XWAY_MDIO_INIT_MSRE BIT(14)
26#define XWAY_MDIO_INIT_NPRX BIT(13)
27#define XWAY_MDIO_INIT_NPTX BIT(12)
28#define XWAY_MDIO_INIT_ANE BIT(11) /* Auto-Neg error */
29#define XWAY_MDIO_INIT_ANC BIT(10) /* Auto-Neg complete */
30#define XWAY_MDIO_INIT_ADSC BIT(5) /* Link auto-downspeed detect */
31#define XWAY_MDIO_INIT_MPIPC BIT(4)
32#define XWAY_MDIO_INIT_MDIXC BIT(3)
33#define XWAY_MDIO_INIT_DXMC BIT(2) /* Duplex mode change */
34#define XWAY_MDIO_INIT_LSPC BIT(1) /* Link speed change */
35#define XWAY_MDIO_INIT_LSTC BIT(0) /* Link state change */
36#define XWAY_MDIO_INIT_MASK (XWAY_MDIO_INIT_LSTC | \
37 XWAY_MDIO_INIT_ADSC)
38
39#define ADVERTISED_MPD BIT(10) /* Multi-port device */
40
41/* LED Configuration */
42#define XWAY_MMD_LEDCH 0x01E0
43/* Inverse of SCAN Function */
44#define XWAY_MMD_LEDCH_NACS_NONE 0x0000
45#define XWAY_MMD_LEDCH_NACS_LINK 0x0001
46#define XWAY_MMD_LEDCH_NACS_PDOWN 0x0002
47#define XWAY_MMD_LEDCH_NACS_EEE 0x0003
48#define XWAY_MMD_LEDCH_NACS_ANEG 0x0004
49#define XWAY_MMD_LEDCH_NACS_ABIST 0x0005
50#define XWAY_MMD_LEDCH_NACS_CDIAG 0x0006
51#define XWAY_MMD_LEDCH_NACS_TEST 0x0007
52/* Slow Blink Frequency */
53#define XWAY_MMD_LEDCH_SBF_F02HZ 0x0000
54#define XWAY_MMD_LEDCH_SBF_F04HZ 0x0010
55#define XWAY_MMD_LEDCH_SBF_F08HZ 0x0020
56#define XWAY_MMD_LEDCH_SBF_F16HZ 0x0030
57/* Fast Blink Frequency */
58#define XWAY_MMD_LEDCH_FBF_F02HZ 0x0000
59#define XWAY_MMD_LEDCH_FBF_F04HZ 0x0040
60#define XWAY_MMD_LEDCH_FBF_F08HZ 0x0080
61#define XWAY_MMD_LEDCH_FBF_F16HZ 0x00C0
62/* LED Configuration */
63#define XWAY_MMD_LEDCL 0x01E1
64/* Complex Blinking Configuration */
65#define XWAY_MMD_LEDCH_CBLINK_NONE 0x0000
66#define XWAY_MMD_LEDCH_CBLINK_LINK 0x0001
67#define XWAY_MMD_LEDCH_CBLINK_PDOWN 0x0002
68#define XWAY_MMD_LEDCH_CBLINK_EEE 0x0003
69#define XWAY_MMD_LEDCH_CBLINK_ANEG 0x0004
70#define XWAY_MMD_LEDCH_CBLINK_ABIST 0x0005
71#define XWAY_MMD_LEDCH_CBLINK_CDIAG 0x0006
72#define XWAY_MMD_LEDCH_CBLINK_TEST 0x0007
73/* Complex SCAN Configuration */
74#define XWAY_MMD_LEDCH_SCAN_NONE 0x0000
75#define XWAY_MMD_LEDCH_SCAN_LINK 0x0010
76#define XWAY_MMD_LEDCH_SCAN_PDOWN 0x0020
77#define XWAY_MMD_LEDCH_SCAN_EEE 0x0030
78#define XWAY_MMD_LEDCH_SCAN_ANEG 0x0040
79#define XWAY_MMD_LEDCH_SCAN_ABIST 0x0050
80#define XWAY_MMD_LEDCH_SCAN_CDIAG 0x0060
81#define XWAY_MMD_LEDCH_SCAN_TEST 0x0070
82/* Configuration for LED Pin x */
83#define XWAY_MMD_LED0H 0x01E2
84/* Fast Blinking Configuration */
85#define XWAY_MMD_LEDxH_BLINKF_MASK 0x000F
86#define XWAY_MMD_LEDxH_BLINKF_NONE 0x0000
87#define XWAY_MMD_LEDxH_BLINKF_LINK10 0x0001
88#define XWAY_MMD_LEDxH_BLINKF_LINK100 0x0002
89#define XWAY_MMD_LEDxH_BLINKF_LINK10X 0x0003
90#define XWAY_MMD_LEDxH_BLINKF_LINK1000 0x0004
91#define XWAY_MMD_LEDxH_BLINKF_LINK10_0 0x0005
92#define XWAY_MMD_LEDxH_BLINKF_LINK100X 0x0006
93#define XWAY_MMD_LEDxH_BLINKF_LINK10XX 0x0007
94#define XWAY_MMD_LEDxH_BLINKF_PDOWN 0x0008
95#define XWAY_MMD_LEDxH_BLINKF_EEE 0x0009
96#define XWAY_MMD_LEDxH_BLINKF_ANEG 0x000A
97#define XWAY_MMD_LEDxH_BLINKF_ABIST 0x000B
98#define XWAY_MMD_LEDxH_BLINKF_CDIAG 0x000C
99/* Constant On Configuration */
100#define XWAY_MMD_LEDxH_CON_MASK 0x00F0
101#define XWAY_MMD_LEDxH_CON_NONE 0x0000
102#define XWAY_MMD_LEDxH_CON_LINK10 0x0010
103#define XWAY_MMD_LEDxH_CON_LINK100 0x0020
104#define XWAY_MMD_LEDxH_CON_LINK10X 0x0030
105#define XWAY_MMD_LEDxH_CON_LINK1000 0x0040
106#define XWAY_MMD_LEDxH_CON_LINK10_0 0x0050
107#define XWAY_MMD_LEDxH_CON_LINK100X 0x0060
108#define XWAY_MMD_LEDxH_CON_LINK10XX 0x0070
109#define XWAY_MMD_LEDxH_CON_PDOWN 0x0080
110#define XWAY_MMD_LEDxH_CON_EEE 0x0090
111#define XWAY_MMD_LEDxH_CON_ANEG 0x00A0
112#define XWAY_MMD_LEDxH_CON_ABIST 0x00B0
113#define XWAY_MMD_LEDxH_CON_CDIAG 0x00C0
114#define XWAY_MMD_LEDxH_CON_COPPER 0x00D0
115#define XWAY_MMD_LEDxH_CON_FIBER 0x00E0
116/* Configuration for LED Pin x */
117#define XWAY_MMD_LED0L 0x01E3
118/* Pulsing Configuration */
119#define XWAY_MMD_LEDxL_PULSE_MASK 0x000F
120#define XWAY_MMD_LEDxL_PULSE_NONE 0x0000
121#define XWAY_MMD_LEDxL_PULSE_TXACT 0x0001
122#define XWAY_MMD_LEDxL_PULSE_RXACT 0x0002
123#define XWAY_MMD_LEDxL_PULSE_COL 0x0004
124/* Slow Blinking Configuration */
125#define XWAY_MMD_LEDxL_BLINKS_MASK 0x00F0
126#define XWAY_MMD_LEDxL_BLINKS_NONE 0x0000
127#define XWAY_MMD_LEDxL_BLINKS_LINK10 0x0010
128#define XWAY_MMD_LEDxL_BLINKS_LINK100 0x0020
129#define XWAY_MMD_LEDxL_BLINKS_LINK10X 0x0030
130#define XWAY_MMD_LEDxL_BLINKS_LINK1000 0x0040
131#define XWAY_MMD_LEDxL_BLINKS_LINK10_0 0x0050
132#define XWAY_MMD_LEDxL_BLINKS_LINK100X 0x0060
133#define XWAY_MMD_LEDxL_BLINKS_LINK10XX 0x0070
134#define XWAY_MMD_LEDxL_BLINKS_PDOWN 0x0080
135#define XWAY_MMD_LEDxL_BLINKS_EEE 0x0090
136#define XWAY_MMD_LEDxL_BLINKS_ANEG 0x00A0
137#define XWAY_MMD_LEDxL_BLINKS_ABIST 0x00B0
138#define XWAY_MMD_LEDxL_BLINKS_CDIAG 0x00C0
139#define XWAY_MMD_LED1H 0x01E4
140#define XWAY_MMD_LED1L 0x01E5
141#define XWAY_MMD_LED2H 0x01E6
142#define XWAY_MMD_LED2L 0x01E7
143#define XWAY_MMD_LED3H 0x01E8
144#define XWAY_MMD_LED3L 0x01E9
145
146#define PHY_ID_PHY11G_1_3 0x030260D1
147#define PHY_ID_PHY22F_1_3 0x030260E1
148#define PHY_ID_PHY11G_1_4 0xD565A400
149#define PHY_ID_PHY22F_1_4 0xD565A410
150#define PHY_ID_PHY11G_1_5 0xD565A401
151#define PHY_ID_PHY22F_1_5 0xD565A411
152#define PHY_ID_PHY11G_VR9 0xD565A409
153#define PHY_ID_PHY22F_VR9 0xD565A419
154
155static int xway_gphy_config_init(struct phy_device *phydev)
156{
157 int err;
158 u32 ledxh;
159 u32 ledxl;
160
161 /* Mask all interrupts */
162 err = phy_write(phydev, XWAY_MDIO_IMASK, 0);
163 if (err)
164 return err;
165
166 /* Clear all pending interrupts */
167 phy_read(phydev, XWAY_MDIO_ISTAT);
168
169 phy_write_mmd_indirect(phydev, XWAY_MMD_LEDCH, MDIO_MMD_VEND2,
170 XWAY_MMD_LEDCH_NACS_NONE |
171 XWAY_MMD_LEDCH_SBF_F02HZ |
172 XWAY_MMD_LEDCH_FBF_F16HZ);
173 phy_write_mmd_indirect(phydev, XWAY_MMD_LEDCL, MDIO_MMD_VEND2,
174 XWAY_MMD_LEDCH_CBLINK_NONE |
175 XWAY_MMD_LEDCH_SCAN_NONE);
176
177 /*
178 * In most cases only one LED is connected to this PHY, so
179 * configure them all to constant on and pulse mode. LED3 is
180 * only available in some packages, leave it in its reset
181 * configuration.
182 */
183 ledxh = XWAY_MMD_LEDxH_BLINKF_NONE | XWAY_MMD_LEDxH_CON_LINK10XX;
184 ledxl = XWAY_MMD_LEDxL_PULSE_TXACT | XWAY_MMD_LEDxL_PULSE_RXACT |
185 XWAY_MMD_LEDxL_BLINKS_NONE;
186 phy_write_mmd_indirect(phydev, XWAY_MMD_LED0H, MDIO_MMD_VEND2, ledxh);
187 phy_write_mmd_indirect(phydev, XWAY_MMD_LED0L, MDIO_MMD_VEND2, ledxl);
188 phy_write_mmd_indirect(phydev, XWAY_MMD_LED1H, MDIO_MMD_VEND2, ledxh);
189 phy_write_mmd_indirect(phydev, XWAY_MMD_LED1L, MDIO_MMD_VEND2, ledxl);
190 phy_write_mmd_indirect(phydev, XWAY_MMD_LED2H, MDIO_MMD_VEND2, ledxh);
191 phy_write_mmd_indirect(phydev, XWAY_MMD_LED2L, MDIO_MMD_VEND2, ledxl);
192
193 return 0;
194}
195
196static int xway_gphy14_config_aneg(struct phy_device *phydev)
197{
198 int reg, err;
199
200 /* Advertise as multi-port device, see IEEE802.3-2002 40.5.1.1 */
201 /* This is a workaround for an errata in rev < 1.5 devices */
202 reg = phy_read(phydev, MII_CTRL1000);
203 reg |= ADVERTISED_MPD;
204 err = phy_write(phydev, MII_CTRL1000, reg);
205 if (err)
206 return err;
207
208 return genphy_config_aneg(phydev);
209}
210
211static int xway_gphy_ack_interrupt(struct phy_device *phydev)
212{
213 int reg;
214
215 reg = phy_read(phydev, XWAY_MDIO_ISTAT);
216 return (reg < 0) ? reg : 0;
217}
218
219static int xway_gphy_did_interrupt(struct phy_device *phydev)
220{
221 int reg;
222
223 reg = phy_read(phydev, XWAY_MDIO_ISTAT);
224 return reg & XWAY_MDIO_INIT_MASK;
225}
226
227static int xway_gphy_config_intr(struct phy_device *phydev)
228{
229 u16 mask = 0;
230
231 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
232 mask = XWAY_MDIO_INIT_MASK;
233
234 return phy_write(phydev, XWAY_MDIO_IMASK, mask);
235}
236
237static struct phy_driver xway_gphy[] = {
238 {
239 .phy_id = PHY_ID_PHY11G_1_3,
240 .phy_id_mask = 0xffffffff,
241 .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.3",
242 .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause |
243 SUPPORTED_Asym_Pause),
244 .flags = PHY_HAS_INTERRUPT,
245 .config_init = xway_gphy_config_init,
246 .config_aneg = xway_gphy14_config_aneg,
247 .read_status = genphy_read_status,
248 .ack_interrupt = xway_gphy_ack_interrupt,
249 .did_interrupt = xway_gphy_did_interrupt,
250 .config_intr = xway_gphy_config_intr,
251 .suspend = genphy_suspend,
252 .resume = genphy_resume,
253 }, {
254 .phy_id = PHY_ID_PHY22F_1_3,
255 .phy_id_mask = 0xffffffff,
256 .name = "Intel XWAY PHY22F (PEF 7061) v1.3",
257 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
258 SUPPORTED_Asym_Pause),
259 .flags = PHY_HAS_INTERRUPT,
260 .config_init = xway_gphy_config_init,
261 .config_aneg = xway_gphy14_config_aneg,
262 .read_status = genphy_read_status,
263 .ack_interrupt = xway_gphy_ack_interrupt,
264 .did_interrupt = xway_gphy_did_interrupt,
265 .config_intr = xway_gphy_config_intr,
266 .suspend = genphy_suspend,
267 .resume = genphy_resume,
268 }, {
269 .phy_id = PHY_ID_PHY11G_1_4,
270 .phy_id_mask = 0xffffffff,
271 .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.4",
272 .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause |
273 SUPPORTED_Asym_Pause),
274 .flags = PHY_HAS_INTERRUPT,
275 .config_init = xway_gphy_config_init,
276 .config_aneg = xway_gphy14_config_aneg,
277 .read_status = genphy_read_status,
278 .ack_interrupt = xway_gphy_ack_interrupt,
279 .did_interrupt = xway_gphy_did_interrupt,
280 .config_intr = xway_gphy_config_intr,
281 .suspend = genphy_suspend,
282 .resume = genphy_resume,
283 }, {
284 .phy_id = PHY_ID_PHY22F_1_4,
285 .phy_id_mask = 0xffffffff,
286 .name = "Intel XWAY PHY22F (PEF 7061) v1.4",
287 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
288 SUPPORTED_Asym_Pause),
289 .flags = PHY_HAS_INTERRUPT,
290 .config_init = xway_gphy_config_init,
291 .config_aneg = xway_gphy14_config_aneg,
292 .read_status = genphy_read_status,
293 .ack_interrupt = xway_gphy_ack_interrupt,
294 .did_interrupt = xway_gphy_did_interrupt,
295 .config_intr = xway_gphy_config_intr,
296 .suspend = genphy_suspend,
297 .resume = genphy_resume,
298 }, {
299 .phy_id = PHY_ID_PHY11G_1_5,
300 .phy_id_mask = 0xffffffff,
301 .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.5 / v1.6",
302 .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause |
303 SUPPORTED_Asym_Pause),
304 .flags = PHY_HAS_INTERRUPT,
305 .config_init = xway_gphy_config_init,
306 .config_aneg = genphy_config_aneg,
307 .read_status = genphy_read_status,
308 .ack_interrupt = xway_gphy_ack_interrupt,
309 .did_interrupt = xway_gphy_did_interrupt,
310 .config_intr = xway_gphy_config_intr,
311 .suspend = genphy_suspend,
312 .resume = genphy_resume,
313 }, {
314 .phy_id = PHY_ID_PHY22F_1_5,
315 .phy_id_mask = 0xffffffff,
316 .name = "Intel XWAY PHY22F (PEF 7061) v1.5 / v1.6",
317 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
318 SUPPORTED_Asym_Pause),
319 .flags = PHY_HAS_INTERRUPT,
320 .config_init = xway_gphy_config_init,
321 .config_aneg = genphy_config_aneg,
322 .read_status = genphy_read_status,
323 .ack_interrupt = xway_gphy_ack_interrupt,
324 .did_interrupt = xway_gphy_did_interrupt,
325 .config_intr = xway_gphy_config_intr,
326 .suspend = genphy_suspend,
327 .resume = genphy_resume,
328 }, {
329 .phy_id = PHY_ID_PHY11G_VR9,
330 .phy_id_mask = 0xffffffff,
331 .name = "Intel XWAY PHY11G (xRX integrated)",
332 .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause |
333 SUPPORTED_Asym_Pause),
334 .flags = PHY_HAS_INTERRUPT,
335 .config_init = xway_gphy_config_init,
336 .config_aneg = genphy_config_aneg,
337 .read_status = genphy_read_status,
338 .ack_interrupt = xway_gphy_ack_interrupt,
339 .did_interrupt = xway_gphy_did_interrupt,
340 .config_intr = xway_gphy_config_intr,
341 .suspend = genphy_suspend,
342 .resume = genphy_resume,
343 }, {
344 .phy_id = PHY_ID_PHY22F_VR9,
345 .phy_id_mask = 0xffffffff,
346 .name = "Intel XWAY PHY22F (xRX integrated)",
347 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
348 SUPPORTED_Asym_Pause),
349 .flags = PHY_HAS_INTERRUPT,
350 .config_init = xway_gphy_config_init,
351 .config_aneg = genphy_config_aneg,
352 .read_status = genphy_read_status,
353 .ack_interrupt = xway_gphy_ack_interrupt,
354 .did_interrupt = xway_gphy_did_interrupt,
355 .config_intr = xway_gphy_config_intr,
356 .suspend = genphy_suspend,
357 .resume = genphy_resume,
358 },
359};
360module_phy_driver(xway_gphy);
361
362static struct mdio_device_id __maybe_unused xway_gphy_tbl[] = {
363 { PHY_ID_PHY11G_1_3, 0xffffffff },
364 { PHY_ID_PHY22F_1_3, 0xffffffff },
365 { PHY_ID_PHY11G_1_4, 0xffffffff },
366 { PHY_ID_PHY22F_1_4, 0xffffffff },
367 { PHY_ID_PHY11G_1_5, 0xffffffff },
368 { PHY_ID_PHY22F_1_5, 0xffffffff },
369 { PHY_ID_PHY11G_VR9, 0xffffffff },
370 { PHY_ID_PHY22F_VR9, 0xffffffff },
371 { }
372};
373MODULE_DEVICE_TABLE(mdio, xway_gphy_tbl);
374
375MODULE_DESCRIPTION("Intel XWAY PHY driver");
376MODULE_LICENSE("GPL");
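
For each LED pin the driver programs a register pair: LEDxH holds the constant-on source in its high nibble and the fast-blink source in its low nibble, while LEDxL holds the slow-blink and pulse sources. A hypothetical alternative to the driver's default, composed purely from the defines above (this is not part of the patch):

/* LED solid on for any gigabit link, pulsing on RX activity only */
u16 ledxh = XWAY_MMD_LEDxH_BLINKF_NONE | XWAY_MMD_LEDxH_CON_LINK1000;
u16 ledxl = XWAY_MMD_LEDxL_PULSE_RXACT | XWAY_MMD_LEDxL_BLINKS_NONE;

phy_write_mmd_indirect(phydev, XWAY_MMD_LED0H, MDIO_MMD_VEND2, ledxh);
phy_write_mmd_indirect(phydev, XWAY_MMD_LED0L, MDIO_MMD_VEND2, ledxl);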
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
new file mode 100644
index 000000000000..0a0412524cec
--- /dev/null
+++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
@@ -0,0 +1,248 @@
1/*
2 * Copyright 2016 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2, as
6 * published by the Free Software Foundation (the "GPL").
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License version 2 (GPLv2) for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * version 2 (GPLv2) along with this source code.
15 */
16
17#include <linux/platform_device.h>
18#include <linux/device.h>
19#include <linux/of_mdio.h>
20#include <linux/module.h>
21#include <linux/phy.h>
22#include <linux/mdio-mux.h>
23#include <linux/delay.h>
24
25#define MDIO_PARAM_OFFSET 0x00
26#define MDIO_PARAM_MIIM_CYCLE 29
27#define MDIO_PARAM_INTERNAL_SEL 25
28#define MDIO_PARAM_BUS_ID 22
29#define MDIO_PARAM_C45_SEL 21
30#define MDIO_PARAM_PHY_ID 16
31#define MDIO_PARAM_PHY_DATA 0
32
33#define MDIO_READ_OFFSET 0x04
34#define MDIO_READ_DATA_MASK 0xffff
35#define MDIO_ADDR_OFFSET 0x08
36
37#define MDIO_CTRL_OFFSET 0x0C
38#define MDIO_CTRL_WRITE_OP 0x1
39#define MDIO_CTRL_READ_OP 0x2
40
41#define MDIO_STAT_OFFSET 0x10
42#define MDIO_STAT_DONE 1
43
44#define BUS_MAX_ADDR 32
45#define EXT_BUS_START_ADDR 16
46
47struct iproc_mdiomux_desc {
48 void *mux_handle;
49 void __iomem *base;
50 struct device *dev;
51 struct mii_bus *mii_bus;
52};
53
54static int iproc_mdio_wait_for_idle(void __iomem *base, bool result)
55{
56 unsigned int timeout = 1000; /* poll for roughly 1-2s */
57 u32 val;
58
59 do {
60 val = readl(base + MDIO_STAT_OFFSET);
61 if ((val & MDIO_STAT_DONE) == result)
62 return 0;
63
64 usleep_range(1000, 2000);
65 } while (timeout--);
66
67 return -ETIMEDOUT;
68}
69
70/* start_miim_ops - program and start an MDIO transaction on the bus.
71 * @base: base address of the MDIO block.
72 * @phyid: PHY id on the selected bus.
73 * @reg: register offset to be read/written.
74 * @val: 0 for a read op, else the value to be written to @reg.
75 * @op: operation to be carried out:
76 * MDIO_CTRL_READ_OP: read transaction.
77 * MDIO_CTRL_WRITE_OP: write transaction.
78 *
79 * Return value: a successful read returns the register value and a
80 * successful write returns 0; failures return a negative error code.
81 */
82static int start_miim_ops(void __iomem *base,
83 u16 phyid, u32 reg, u16 val, u32 op)
84{
85 u32 param;
86 int ret;
87
88 writel(0, base + MDIO_CTRL_OFFSET);
89 ret = iproc_mdio_wait_for_idle(base, 0);
90 if (ret)
91 goto err;
92
93 param = readl(base + MDIO_PARAM_OFFSET);
94 param |= phyid << MDIO_PARAM_PHY_ID;
95 param |= val << MDIO_PARAM_PHY_DATA;
96 if (reg & MII_ADDR_C45)
97 param |= BIT(MDIO_PARAM_C45_SEL);
98
99 writel(param, base + MDIO_PARAM_OFFSET);
100
101 writel(reg, base + MDIO_ADDR_OFFSET);
102
103 writel(op, base + MDIO_CTRL_OFFSET);
104
105 ret = iproc_mdio_wait_for_idle(base, 1);
106 if (ret)
107 goto err;
108
109 if (op == MDIO_CTRL_READ_OP)
110 ret = readl(base + MDIO_READ_OFFSET) & MDIO_READ_DATA_MASK;
111err:
112 return ret;
113}
114
115static int iproc_mdiomux_read(struct mii_bus *bus, int phyid, int reg)
116{
117 struct iproc_mdiomux_desc *md = bus->priv;
118 int ret;
119
120 ret = start_miim_ops(md->base, phyid, reg, 0, MDIO_CTRL_READ_OP);
121 if (ret < 0)
122 dev_err(&bus->dev, "mdiomux read operation failed\n");
123
124 return ret;
125}
126
127static int iproc_mdiomux_write(struct mii_bus *bus,
128 int phyid, int reg, u16 val)
129{
130 struct iproc_mdiomux_desc *md = bus->priv;
131 int ret;
132
133 /* Write val at reg offset */
134 ret = start_miim_ops(md->base, phyid, reg, val, MDIO_CTRL_WRITE_OP);
135 if (ret < 0)
136 dev_err(&bus->dev, "mdiomux write operation failed\n");
137
138 return ret;
139}
140
141static int mdio_mux_iproc_switch_fn(int current_child, int desired_child,
142 void *data)
143{
144 struct iproc_mdiomux_desc *md = data;
145 u32 param, bus_id;
146 bool bus_dir;
147
148 /* select bus and its properties */
149 bus_dir = (desired_child < EXT_BUS_START_ADDR);
150 bus_id = bus_dir ? desired_child : (desired_child - EXT_BUS_START_ADDR);
151
152 param = (bus_dir ? 1 : 0) << MDIO_PARAM_INTERNAL_SEL;
153 param |= (bus_id << MDIO_PARAM_BUS_ID);
154
155 writel(param, md->base + MDIO_PARAM_OFFSET);
156 return 0;
157}
158
159static int mdio_mux_iproc_probe(struct platform_device *pdev)
160{
161 struct iproc_mdiomux_desc *md;
162 struct mii_bus *bus;
163 struct resource *res;
164 int rc;
165
166 md = devm_kzalloc(&pdev->dev, sizeof(*md), GFP_KERNEL);
167 if (!md)
168 return -ENOMEM;
169 md->dev = &pdev->dev;
170
171 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
172 md->base = devm_ioremap_resource(&pdev->dev, res);
173 if (IS_ERR(md->base)) {
174 dev_err(&pdev->dev, "failed to ioremap register\n");
175 return PTR_ERR(md->base);
176 }
177
178 md->mii_bus = mdiobus_alloc();
179 if (!md->mii_bus) {
180 dev_err(&pdev->dev, "mdiomux bus alloc failed\n");
181 return -ENOMEM;
182 }
183
184 bus = md->mii_bus;
185 bus->priv = md;
186 bus->name = "iProc MDIO mux bus";
187 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id);
188 bus->parent = &pdev->dev;
189 bus->read = iproc_mdiomux_read;
190 bus->write = iproc_mdiomux_write;
191
192 bus->phy_mask = ~0;
193 bus->dev.of_node = pdev->dev.of_node;
194 rc = mdiobus_register(bus);
195 if (rc) {
196 dev_err(&pdev->dev, "mdiomux registration failed\n");
197 goto out;
198 }
199
200 platform_set_drvdata(pdev, md);
201
202 rc = mdio_mux_init(md->dev, mdio_mux_iproc_switch_fn,
203 &md->mux_handle, md, md->mii_bus);
204 if (rc) {
205 dev_err(md->dev, "mdiomux initialization failed\n");
206 goto out;
207 }
208
209 dev_info(md->dev, "iProc mdiomux registered\n");
210 return 0;
211out:
212 mdiobus_free(bus);
213 return rc;
214}
215
216static int mdio_mux_iproc_remove(struct platform_device *pdev)
217{
218 struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev);
219
220 mdio_mux_uninit(md->mux_handle);
221 mdiobus_unregister(md->mii_bus);
222 mdiobus_free(md->mii_bus);
223
224 return 0;
225}
226
227static const struct of_device_id mdio_mux_iproc_match[] = {
228 {
229 .compatible = "brcm,mdio-mux-iproc",
230 },
231 {},
232};
233MODULE_DEVICE_TABLE(of, mdio_mux_iproc_match);
234
235static struct platform_driver mdiomux_iproc_driver = {
236 .driver = {
237 .name = "mdio-mux-iproc",
238 .of_match_table = mdio_mux_iproc_match,
239 },
240 .probe = mdio_mux_iproc_probe,
241 .remove = mdio_mux_iproc_remove,
242};
243
244module_platform_driver(mdiomux_iproc_driver);
245
246MODULE_DESCRIPTION("iProc MDIO Mux Bus Driver");
247MODULE_AUTHOR("Pramod Kumar <pramod.kumar@broadcom.com>");
248MODULE_LICENSE("GPL v2");
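
The switch function maps the logical child bus number onto the hardware's internal/external select: children 0 to 15 address the internal buses directly, children 16 to 31 map to external buses 0 to 15. A worked example of the resulting PARAM register value, using the bit positions defined at the top of the file:

/* desired_child = 18: external bus, hardware bus_id = 18 - 16 = 2 */
bool internal = 18 < EXT_BUS_START_ADDR;			/* false */
u32 param = (internal ? 1 : 0) << MDIO_PARAM_INTERNAL_SEL;	/* bit 25 clear */

param |= (18 - EXT_BUS_START_ADDR) << MDIO_PARAM_BUS_ID;
/* param == 2 << 22 == 0x00800000 */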
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index 7ddb1ab70891..919949960a10 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -55,7 +55,7 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev)
55 return PTR_ERR(s->gpios); 55 return PTR_ERR(s->gpios);
56 56
57 r = mdio_mux_init(&pdev->dev, 57 r = mdio_mux_init(&pdev->dev,
58 mdio_mux_gpio_switch_fn, &s->mux_handle, s); 58 mdio_mux_gpio_switch_fn, &s->mux_handle, s, NULL);
59 59
60 if (r != 0) { 60 if (r != 0) {
61 gpiod_put_array(s->gpios); 61 gpiod_put_array(s->gpios);
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
index 7fde454fbc4f..d0bed52c8d16 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -126,7 +126,7 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev)
126 } 126 }
127 127
128 ret = mdio_mux_init(&pdev->dev, mdio_mux_mmioreg_switch_fn, 128 ret = mdio_mux_init(&pdev->dev, mdio_mux_mmioreg_switch_fn,
129 &s->mux_handle, s); 129 &s->mux_handle, s, NULL);
130 if (ret) { 130 if (ret) {
131 dev_err(&pdev->dev, "failed to register mdio-mux bus %s\n", 131 dev_err(&pdev->dev, "failed to register mdio-mux bus %s\n",
132 np->full_name); 132 np->full_name);
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 5c81d6faf304..963838d4fac1 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -89,7 +89,8 @@ static int parent_count;
89int mdio_mux_init(struct device *dev, 89int mdio_mux_init(struct device *dev,
90 int (*switch_fn)(int cur, int desired, void *data), 90 int (*switch_fn)(int cur, int desired, void *data),
91 void **mux_handle, 91 void **mux_handle,
92 void *data) 92 void *data,
93 struct mii_bus *mux_bus)
93{ 94{
94 struct device_node *parent_bus_node; 95 struct device_node *parent_bus_node;
95 struct device_node *child_bus_node; 96 struct device_node *child_bus_node;
@@ -101,10 +102,22 @@ int mdio_mux_init(struct device *dev,
101 if (!dev->of_node) 102 if (!dev->of_node)
102 return -ENODEV; 103 return -ENODEV;
103 104
104 parent_bus_node = of_parse_phandle(dev->of_node, "mdio-parent-bus", 0); 105 if (!mux_bus) {
106 parent_bus_node = of_parse_phandle(dev->of_node,
107 "mdio-parent-bus", 0);
105 108
106 if (!parent_bus_node) 109 if (!parent_bus_node)
107 return -ENODEV; 110 return -ENODEV;
111
112 parent_bus = of_mdio_find_bus(parent_bus_node);
113 if (!parent_bus) {
114 ret_val = -EPROBE_DEFER;
115 goto err_parent_bus;
116 }
117 } else {
118 parent_bus_node = NULL;
119 parent_bus = mux_bus;
120 }
108 121
109 pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL); 122 pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL);
110 if (pb == NULL) { 123 if (pb == NULL) {
@@ -112,11 +125,6 @@ int mdio_mux_init(struct device *dev,
112 goto err_parent_bus; 125 goto err_parent_bus;
113 } 126 }
114 127
115 parent_bus = of_mdio_find_bus(parent_bus_node);
116 if (parent_bus == NULL) {
117 ret_val = -EPROBE_DEFER;
118 goto err_parent_bus;
119 }
120 128
121 pb->switch_data = data; 129 pb->switch_data = data;
122 pb->switch_fn = switch_fn; 130 pb->switch_fn = switch_fn;
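
With this change mdio_mux_init() supports two call modes: passing NULL as the new last argument keeps the old behaviour of resolving the "mdio-parent-bus" phandle from the device tree, while passing a mii_bus pointer skips the lookup entirely, which is what lets mdio-mux-bcm-iproc hand over the bus it has just registered. Both modes as they appear at the call sites in this diff:

/* DT phandle lookup, as mdio-mux-gpio and mdio-mux-mmioreg do */
r = mdio_mux_init(&pdev->dev,
		  mdio_mux_gpio_switch_fn, &s->mux_handle, s, NULL);

/* explicit parent bus, as mdio-mux-bcm-iproc does */
rc = mdio_mux_init(md->dev, mdio_mux_iproc_switch_fn,
		   &md->mux_handle, md, md->mii_bus);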
diff --git a/drivers/net/phy/swphy.c b/drivers/net/phy/swphy.c
new file mode 100644
index 000000000000..34f58f2349e9
--- /dev/null
+++ b/drivers/net/phy/swphy.c
@@ -0,0 +1,179 @@
1/*
2 * Software PHY emulation
3 *
4 * Code taken from fixed_phy.c by Russell King <rmk+kernel@arm.linux.org.uk>
5 *
6 * Author: Vitaly Bordug <vbordug@ru.mvista.com>
7 * Anton Vorontsov <avorontsov@ru.mvista.com>
8 *
9 * Copyright (c) 2006-2007 MontaVista Software, Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16#include <linux/export.h>
17#include <linux/mii.h>
18#include <linux/phy.h>
19#include <linux/phy_fixed.h>
20
21#include "swphy.h"
22
23#define MII_REGS_NUM 29
24
25struct swmii_regs {
26 u16 bmcr;
27 u16 bmsr;
28 u16 lpa;
29 u16 lpagb;
30};
31
32enum {
33 SWMII_SPEED_10 = 0,
34 SWMII_SPEED_100,
35 SWMII_SPEED_1000,
36 SWMII_DUPLEX_HALF = 0,
37 SWMII_DUPLEX_FULL,
38};
39
40/*
41 * These two tables get bitwise-anded together to produce the final result.
42 * This means the speed table must contain both duplex settings, and the
43 * duplex table must contain all speed settings.
44 */
45static const struct swmii_regs speed[] = {
46 [SWMII_SPEED_10] = {
47 .bmcr = BMCR_FULLDPLX,
48 .lpa = LPA_10FULL | LPA_10HALF,
49 },
50 [SWMII_SPEED_100] = {
51 .bmcr = BMCR_FULLDPLX | BMCR_SPEED100,
52 .bmsr = BMSR_100FULL | BMSR_100HALF,
53 .lpa = LPA_100FULL | LPA_100HALF,
54 },
55 [SWMII_SPEED_1000] = {
56 .bmcr = BMCR_FULLDPLX | BMCR_SPEED1000,
57 .bmsr = BMSR_ESTATEN,
58 .lpagb = LPA_1000FULL | LPA_1000HALF,
59 },
60};
61
62static const struct swmii_regs duplex[] = {
63 [SWMII_DUPLEX_HALF] = {
64 .bmcr = ~BMCR_FULLDPLX,
65 .bmsr = BMSR_ESTATEN | BMSR_100HALF,
66 .lpa = LPA_10HALF | LPA_100HALF,
67 .lpagb = LPA_1000HALF,
68 },
69 [SWMII_DUPLEX_FULL] = {
70 .bmcr = ~0,
71 .bmsr = BMSR_ESTATEN | BMSR_100FULL,
72 .lpa = LPA_10FULL | LPA_100FULL,
73 .lpagb = LPA_1000FULL,
74 },
75};
76
77static int swphy_decode_speed(int speed)
78{
79 switch (speed) {
80 case 1000:
81 return SWMII_SPEED_1000;
82 case 100:
83 return SWMII_SPEED_100;
84 case 10:
85 return SWMII_SPEED_10;
86 default:
87 return -EINVAL;
88 }
89}
90
91/**
92 * swphy_validate_state - validate the software phy status
93 * @state: software phy status
94 *
95 * This checks that the state stored in @state can be
96 * represented in the emulated MII registers. Returns 0 if it can,
97 * otherwise returns -EINVAL.
98 */
99int swphy_validate_state(const struct fixed_phy_status *state)
100{
101 int err;
102
103 if (state->link) {
104 err = swphy_decode_speed(state->speed);
105 if (err < 0) {
106 pr_warn("swphy: unknown speed\n");
107 return -EINVAL;
108 }
109 }
110 return 0;
111}
112EXPORT_SYMBOL_GPL(swphy_validate_state);
113
114/**
115 * swphy_read_reg - return a MII register from the fixed phy state
116 * @reg: MII register
117 * @state: fixed phy status
118 *
119 * Return the MII @reg register generated from the fixed phy state @state.
120 */
121int swphy_read_reg(int reg, const struct fixed_phy_status *state)
122{
123 int speed_index, duplex_index;
124 u16 bmsr = BMSR_ANEGCAPABLE;
125 u16 bmcr = 0;
126 u16 lpagb = 0;
127 u16 lpa = 0;
128
129 if (reg > MII_REGS_NUM)
130 return -1;
131
132 speed_index = swphy_decode_speed(state->speed);
133 if (WARN_ON(speed_index < 0))
134 return 0;
135
136 duplex_index = state->duplex ? SWMII_DUPLEX_FULL : SWMII_DUPLEX_HALF;
137
138 bmsr |= speed[speed_index].bmsr & duplex[duplex_index].bmsr;
139
140 if (state->link) {
141 bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
142
143 bmcr |= speed[speed_index].bmcr & duplex[duplex_index].bmcr;
144 lpa |= speed[speed_index].lpa & duplex[duplex_index].lpa;
145 lpagb |= speed[speed_index].lpagb & duplex[duplex_index].lpagb;
146
147 if (state->pause)
148 lpa |= LPA_PAUSE_CAP;
149
150 if (state->asym_pause)
151 lpa |= LPA_PAUSE_ASYM;
152 }
153
154 switch (reg) {
155 case MII_BMCR:
156 return bmcr;
157 case MII_BMSR:
158 return bmsr;
159 case MII_PHYSID1:
160 case MII_PHYSID2:
161 return 0;
162 case MII_LPA:
163 return lpa;
164 case MII_STAT1000:
165 return lpagb;
166
167 /*
168 * We do not support emulating Clause 45 over Clause 22 register
169 * reads. Return an error instead of bogus data.
170 */
171 case MII_MMD_CTRL:
172 case MII_MMD_DATA:
173 return -1;
174
175 default:
176 return 0xffff;
177 }
178}
179EXPORT_SYMBOL_GPL(swphy_read_reg);
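
The two-table scheme above keeps the per-speed and per-duplex register fragments separate and combines them with a bitwise AND, so each table only has to enumerate one dimension. A worked example for a 100 Mbit/s full-duplex link, expanded from the table initializers:

/* speed[SWMII_SPEED_100].lpa    = LPA_100FULL | LPA_100HALF
 * duplex[SWMII_DUPLEX_FULL].lpa = LPA_10FULL | LPA_100FULL
 * AND keeps only the common bit:
 */
u16 lpa = (LPA_100FULL | LPA_100HALF) & (LPA_10FULL | LPA_100FULL);
/* lpa == LPA_100FULL */

/* duplex[SWMII_DUPLEX_FULL].bmcr is ~0, so the full-duplex entry
 * passes every speed bit through unchanged:
 */
u16 bmcr = (BMCR_FULLDPLX | BMCR_SPEED100) & (u16)~0;
/* bmcr == BMCR_FULLDPLX | BMCR_SPEED100 */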
diff --git a/drivers/net/phy/swphy.h b/drivers/net/phy/swphy.h
new file mode 100644
index 000000000000..2f09ac324e18
--- /dev/null
+++ b/drivers/net/phy/swphy.h
@@ -0,0 +1,9 @@
1#ifndef SWPHY_H
2#define SWPHY_H
3
4struct fixed_phy_status;
5
6int swphy_validate_state(const struct fixed_phy_status *state);
7int swphy_read_reg(int reg, const struct fixed_phy_status *state);
8
9#endif
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 8dedafa1a95d..17953ab15000 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1312,10 +1312,9 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
1312 return stats64; 1312 return stats64;
1313} 1313}
1314 1314
1315static struct lock_class_key ppp_tx_busylock;
1316static int ppp_dev_init(struct net_device *dev) 1315static int ppp_dev_init(struct net_device *dev)
1317{ 1316{
1318 dev->qdisc_tx_busylock = &ppp_tx_busylock; 1317 netdev_lockdep_set_classes(dev);
1319 return 0; 1318 return 0;
1320} 1319}
1321 1320
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index fdee77207323..f9eebea83516 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1576,23 +1576,6 @@ static const struct team_option team_options[] = {
1576 }, 1576 },
1577}; 1577};
1578 1578
1579static struct lock_class_key team_netdev_xmit_lock_key;
1580static struct lock_class_key team_netdev_addr_lock_key;
1581static struct lock_class_key team_tx_busylock_key;
1582
1583static void team_set_lockdep_class_one(struct net_device *dev,
1584 struct netdev_queue *txq,
1585 void *unused)
1586{
1587 lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key);
1588}
1589
1590static void team_set_lockdep_class(struct net_device *dev)
1591{
1592 lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key);
1593 netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL);
1594 dev->qdisc_tx_busylock = &team_tx_busylock_key;
1595}
1596 1579
1597static int team_init(struct net_device *dev) 1580static int team_init(struct net_device *dev)
1598{ 1581{
@@ -1628,7 +1611,7 @@ static int team_init(struct net_device *dev)
1628 goto err_options_register; 1611 goto err_options_register;
1629 netif_carrier_off(dev); 1612 netif_carrier_off(dev);
1630 1613
1631 team_set_lockdep_class(dev); 1614 netdev_lockdep_set_classes(dev);
1632 1615
1633 return 0; 1616 return 0;
1634 1617
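
macvlan, ppp and team were all open-coding the same three steps that netdev_lockdep_set_classes() now centralizes: one lockdep class for every TX queue's _xmit_lock, one for addr_list_lock, and a driver-local qdisc_tx_busylock key. A condensed sketch of the pattern being deleted, using the same kernel helpers as the removed team code (the static key names are illustrative):

static struct lock_class_key xmit_lock_key;
static struct lock_class_key addr_lock_key;
static struct lock_class_key busylock_key;

static void set_one_xmit_class(struct net_device *dev,
			       struct netdev_queue *txq, void *unused)
{
	lockdep_set_class(&txq->_xmit_lock, &xmit_lock_key);
}

static void open_coded_lockdep_classes(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &addr_lock_key);
	netdev_for_each_tx_queue(dev, set_one_xmit_class, NULL);
	dev->qdisc_tx_busylock = &busylock_key;
}

Stacked devices need these separate classes because their locks legitimately nest inside the corresponding locks of the lower device, which lockdep would otherwise flag as recursion.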
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e16487cc6a9a..4884802e0af1 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1254,13 +1254,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1254 return -EFAULT; 1254 return -EFAULT;
1255 } 1255 }
1256 1256
1257 if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 1257 err = virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun));
1258 if (!skb_partial_csum_set(skb, tun16_to_cpu(tun, gso.csum_start), 1258 if (err) {
1259 tun16_to_cpu(tun, gso.csum_offset))) { 1259 this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
1260 this_cpu_inc(tun->pcpu_stats->rx_frame_errors); 1260 kfree_skb(skb);
1261 kfree_skb(skb); 1261 return -EINVAL;
1262 return -EINVAL;
1263 }
1264 } 1262 }
1265 1263
1266 switch (tun->flags & TUN_TYPE_MASK) { 1264 switch (tun->flags & TUN_TYPE_MASK) {
@@ -1289,39 +1287,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1289 break; 1287 break;
1290 } 1288 }
1291 1289
1292 if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1293 pr_debug("GSO!\n");
1294 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1295 case VIRTIO_NET_HDR_GSO_TCPV4:
1296 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1297 break;
1298 case VIRTIO_NET_HDR_GSO_TCPV6:
1299 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1300 break;
1301 case VIRTIO_NET_HDR_GSO_UDP:
1302 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1303 break;
1304 default:
1305 this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
1306 kfree_skb(skb);
1307 return -EINVAL;
1308 }
1309
1310 if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
1311 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1312
1313 skb_shinfo(skb)->gso_size = tun16_to_cpu(tun, gso.gso_size);
1314 if (skb_shinfo(skb)->gso_size == 0) {
1315 this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
1316 kfree_skb(skb);
1317 return -EINVAL;
1318 }
1319
1320 /* Header must be checked, and gso_segs computed. */
1321 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1322 skb_shinfo(skb)->gso_segs = 0;
1323 }
1324
1325 /* copy skb_ubuf_info for callback when skb has no error */ 1290 /* copy skb_ubuf_info for callback when skb has no error */
1326 if (zerocopy) { 1291 if (zerocopy) {
1327 skb_shinfo(skb)->destructor_arg = msg_control; 1292 skb_shinfo(skb)->destructor_arg = msg_control;
@@ -1399,46 +1364,26 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1399 1364
1400 if (vnet_hdr_sz) { 1365 if (vnet_hdr_sz) {
1401 struct virtio_net_hdr gso = { 0 }; /* no info leak */ 1366 struct virtio_net_hdr gso = { 0 }; /* no info leak */
1367 int ret;
1368
1402 if (iov_iter_count(iter) < vnet_hdr_sz) 1369 if (iov_iter_count(iter) < vnet_hdr_sz)
1403 return -EINVAL; 1370 return -EINVAL;
1404 1371
1405 if (skb_is_gso(skb)) { 1372 ret = virtio_net_hdr_from_skb(skb, &gso,
1373 tun_is_little_endian(tun));
1374 if (ret) {
1406 struct skb_shared_info *sinfo = skb_shinfo(skb); 1375 struct skb_shared_info *sinfo = skb_shinfo(skb);
1407 1376 pr_err("unexpected GSO type: "
1408 /* This is a hint as to how much should be linear. */ 1377 "0x%x, gso_size %d, hdr_len %d\n",
1409 gso.hdr_len = cpu_to_tun16(tun, skb_headlen(skb)); 1378 sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
1410 gso.gso_size = cpu_to_tun16(tun, sinfo->gso_size); 1379 tun16_to_cpu(tun, gso.hdr_len));
1411 if (sinfo->gso_type & SKB_GSO_TCPV4) 1380 print_hex_dump(KERN_ERR, "tun: ",
1412 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 1381 DUMP_PREFIX_NONE,
1413 else if (sinfo->gso_type & SKB_GSO_TCPV6) 1382 16, 1, skb->head,
1414 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 1383 min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
1415 else if (sinfo->gso_type & SKB_GSO_UDP) 1384 WARN_ON_ONCE(1);
1416 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; 1385 return -EINVAL;
1417 else { 1386 }
1418 pr_err("unexpected GSO type: "
1419 "0x%x, gso_size %d, hdr_len %d\n",
1420 sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
1421 tun16_to_cpu(tun, gso.hdr_len));
1422 print_hex_dump(KERN_ERR, "tun: ",
1423 DUMP_PREFIX_NONE,
1424 16, 1, skb->head,
1425 min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
1426 WARN_ON_ONCE(1);
1427 return -EINVAL;
1428 }
1429 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
1430 gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1431 } else
1432 gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;
1433
1434 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1435 gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1436 gso.csum_start = cpu_to_tun16(tun, skb_checksum_start_offset(skb) +
1437 vlan_hlen);
1438 gso.csum_offset = cpu_to_tun16(tun, skb->csum_offset);
1439 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1440 gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
1441 } /* else everything is zero */
1442 1387
1443 if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) 1388 if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
1444 return -EFAULT; 1389 return -EFAULT;
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 4e257b8d8f3e..24d367280ecf 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -607,7 +607,7 @@ struct r8152 {
607 struct list_head rx_done, tx_free; 607 struct list_head rx_done, tx_free;
608 struct sk_buff_head tx_queue, rx_queue; 608 struct sk_buff_head tx_queue, rx_queue;
609 spinlock_t rx_lock, tx_lock; 609 spinlock_t rx_lock, tx_lock;
610 struct delayed_work schedule; 610 struct delayed_work schedule, hw_phy_work;
611 struct mii_if_info mii; 611 struct mii_if_info mii;
612 struct mutex control; /* use for hw setting */ 612 struct mutex control; /* use for hw setting */
613#ifdef CONFIG_PM_SLEEP 613#ifdef CONFIG_PM_SLEEP
@@ -624,6 +624,7 @@ struct r8152 {
624 int (*eee_get)(struct r8152 *, struct ethtool_eee *); 624 int (*eee_get)(struct r8152 *, struct ethtool_eee *);
625 int (*eee_set)(struct r8152 *, struct ethtool_eee *); 625 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
626 bool (*in_nway)(struct r8152 *); 626 bool (*in_nway)(struct r8152 *);
627 void (*hw_phy_cfg)(struct r8152 *);
627 } rtl_ops; 628 } rtl_ops;
628 629
629 int intr_interval; 630 int intr_interval;
@@ -632,8 +633,11 @@ struct r8152 {
632 u32 tx_qlen; 633 u32 tx_qlen;
633 u32 coalesce; 634 u32 coalesce;
634 u16 ocp_base; 635 u16 ocp_base;
636 u16 speed;
635 u8 *intr_buff; 637 u8 *intr_buff;
636 u8 version; 638 u8 version;
639 u8 duplex;
640 u8 autoneg;
637}; 641};
638 642
639enum rtl_version { 643enum rtl_version {
@@ -1747,7 +1751,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
1747 pkt_len -= CRC_SIZE; 1751 pkt_len -= CRC_SIZE;
1748 rx_data += sizeof(struct rx_desc); 1752 rx_data += sizeof(struct rx_desc);
1749 1753
1750 skb = netdev_alloc_skb_ip_align(netdev, pkt_len); 1754 skb = napi_alloc_skb(&tp->napi, pkt_len);
1751 if (!skb) { 1755 if (!skb) {
1752 stats->rx_dropped++; 1756 stats->rx_dropped++;
1753 goto find_next_rx; 1757 goto find_next_rx;
@@ -2515,8 +2519,6 @@ static void r8152b_exit_oob(struct r8152 *tp)
2515 2519
2516 rxdy_gated_en(tp, true); 2520 rxdy_gated_en(tp, true);
2517 r8153_teredo_off(tp); 2521 r8153_teredo_off(tp);
2518 r8152b_hw_phy_cfg(tp);
2519
2520 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); 2522 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
2521 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00); 2523 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00);
2522 2524
@@ -2694,8 +2696,6 @@ static void r8153_first_init(struct r8152 *tp)
2694 ocp_data &= ~RCR_ACPT_ALL; 2696 ocp_data &= ~RCR_ACPT_ALL;
2695 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); 2697 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
2696 2698
2697 r8153_hw_phy_cfg(tp);
2698
2699 rtl8152_nic_reset(tp); 2699 rtl8152_nic_reset(tp);
2700 rtl_reset_bmu(tp); 2700 rtl_reset_bmu(tp);
2701 2701
@@ -2891,7 +2891,7 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
2891 bmcr = BMCR_ANENABLE | BMCR_ANRESTART; 2891 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
2892 } 2892 }
2893 2893
2894 if (test_bit(PHY_RESET, &tp->flags)) 2894 if (test_and_clear_bit(PHY_RESET, &tp->flags))
2895 bmcr |= BMCR_RESET; 2895 bmcr |= BMCR_RESET;
2896 2896
2897 if (tp->mii.supports_gmii) 2897 if (tp->mii.supports_gmii)
@@ -2900,7 +2900,7 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
2900 r8152_mdio_write(tp, MII_ADVERTISE, anar); 2900 r8152_mdio_write(tp, MII_ADVERTISE, anar);
2901 r8152_mdio_write(tp, MII_BMCR, bmcr); 2901 r8152_mdio_write(tp, MII_BMCR, bmcr);
2902 2902
2903 if (test_and_clear_bit(PHY_RESET, &tp->flags)) { 2903 if (bmcr & BMCR_RESET) {
2904 int i; 2904 int i;
2905 2905
2906 for (i = 0; i < 50; i++) { 2906 for (i = 0; i < 50; i++) {
@@ -3059,6 +3059,27 @@ out1:
3059 usb_autopm_put_interface(tp->intf); 3059 usb_autopm_put_interface(tp->intf);
3060} 3060}
3061 3061
3062static void rtl_hw_phy_work_func_t(struct work_struct *work)
3063{
3064 struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work);
3065
3066 if (test_bit(RTL8152_UNPLUG, &tp->flags))
3067 return;
3068
3069 if (usb_autopm_get_interface(tp->intf) < 0)
3070 return;
3071
3072 mutex_lock(&tp->control);
3073
3074 tp->rtl_ops.hw_phy_cfg(tp);
3075
3076 rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex);
3077
3078 mutex_unlock(&tp->control);
3079
3080 usb_autopm_put_interface(tp->intf);
3081}
3082
3062#ifdef CONFIG_PM_SLEEP 3083#ifdef CONFIG_PM_SLEEP
3063static int rtl_notifier(struct notifier_block *nb, unsigned long action, 3084static int rtl_notifier(struct notifier_block *nb, unsigned long action,
3064 void *data) 3085 void *data)
@@ -3107,9 +3128,6 @@ static int rtl8152_open(struct net_device *netdev)
3107 3128
3108 tp->rtl_ops.up(tp); 3129 tp->rtl_ops.up(tp);
3109 3130
3110 rtl8152_set_speed(tp, AUTONEG_ENABLE,
3111 tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
3112 DUPLEX_FULL);
3113 netif_carrier_off(netdev); 3131 netif_carrier_off(netdev);
3114 netif_start_queue(netdev); 3132 netif_start_queue(netdev);
3115 set_bit(WORK_ENABLE, &tp->flags); 3133 set_bit(WORK_ENABLE, &tp->flags);
@@ -3533,6 +3551,7 @@ static int rtl8152_resume(struct usb_interface *intf)
3533 3551
3534 if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3552 if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3535 tp->rtl_ops.init(tp); 3553 tp->rtl_ops.init(tp);
3554 queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
3536 netif_device_attach(tp->netdev); 3555 netif_device_attach(tp->netdev);
3537 } 3556 }
3538 3557
@@ -3547,10 +3566,6 @@ static int rtl8152_resume(struct usb_interface *intf)
3547 napi_enable(&tp->napi); 3566 napi_enable(&tp->napi);
3548 } else { 3567 } else {
3549 tp->rtl_ops.up(tp); 3568 tp->rtl_ops.up(tp);
3550 rtl8152_set_speed(tp, AUTONEG_ENABLE,
3551 tp->mii.supports_gmii ?
3552 SPEED_1000 : SPEED_100,
3553 DUPLEX_FULL);
3554 netif_carrier_off(tp->netdev); 3569 netif_carrier_off(tp->netdev);
3555 set_bit(WORK_ENABLE, &tp->flags); 3570 set_bit(WORK_ENABLE, &tp->flags);
3556 } 3571 }
@@ -3680,6 +3695,11 @@ static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3680 mutex_lock(&tp->control); 3695 mutex_lock(&tp->control);
3681 3696
3682 ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex); 3697 ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
3698 if (!ret) {
3699 tp->autoneg = cmd->autoneg;
3700 tp->speed = cmd->speed;
3701 tp->duplex = cmd->duplex;
3702 }
3683 3703
3684 mutex_unlock(&tp->control); 3704 mutex_unlock(&tp->control);
3685 3705
@@ -4137,6 +4157,7 @@ static int rtl_ops_init(struct r8152 *tp)
4137 ops->eee_get = r8152_get_eee; 4157 ops->eee_get = r8152_get_eee;
4138 ops->eee_set = r8152_set_eee; 4158 ops->eee_set = r8152_set_eee;
4139 ops->in_nway = rtl8152_in_nway; 4159 ops->in_nway = rtl8152_in_nway;
4160 ops->hw_phy_cfg = r8152b_hw_phy_cfg;
4140 break; 4161 break;
4141 4162
4142 case RTL_VER_03: 4163 case RTL_VER_03:
@@ -4152,6 +4173,7 @@ static int rtl_ops_init(struct r8152 *tp)
4152 ops->eee_get = r8153_get_eee; 4173 ops->eee_get = r8153_get_eee;
4153 ops->eee_set = r8153_set_eee; 4174 ops->eee_set = r8153_set_eee;
4154 ops->in_nway = rtl8153_in_nway; 4175 ops->in_nway = rtl8153_in_nway;
4176 ops->hw_phy_cfg = r8153_hw_phy_cfg;
4155 break; 4177 break;
4156 4178
4157 default: 4179 default:
@@ -4198,6 +4220,7 @@ static int rtl8152_probe(struct usb_interface *intf,
4198 4220
4199 mutex_init(&tp->control); 4221 mutex_init(&tp->control);
4200 INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t); 4222 INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
4223 INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t);
4201 4224
4202 netdev->netdev_ops = &rtl8152_netdev_ops; 4225 netdev->netdev_ops = &rtl8152_netdev_ops;
4203 netdev->watchdog_timeo = RTL8152_TX_TIMEOUT; 4226 netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
@@ -4237,9 +4260,14 @@ static int rtl8152_probe(struct usb_interface *intf,
4237 break; 4260 break;
4238 } 4261 }
4239 4262
4263 tp->autoneg = AUTONEG_ENABLE;
4264 tp->speed = tp->mii.supports_gmii ? SPEED_1000 : SPEED_100;
4265 tp->duplex = DUPLEX_FULL;
4266
4240 intf->needs_remote_wakeup = 1; 4267 intf->needs_remote_wakeup = 1;
4241 4268
4242 tp->rtl_ops.init(tp); 4269 tp->rtl_ops.init(tp);
4270 queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
4243 set_ethernet_addr(tp); 4271 set_ethernet_addr(tp);
4244 4272
4245 usb_set_intfdata(intf, tp); 4273 usb_set_intfdata(intf, tp);
@@ -4285,6 +4313,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
4285 4313
4286 netif_napi_del(&tp->napi); 4314 netif_napi_del(&tp->napi);
4287 unregister_netdev(tp->netdev); 4315 unregister_netdev(tp->netdev);
4316 cancel_delayed_work_sync(&tp->hw_phy_work);
4288 tp->rtl_ops.unload(tp); 4317 tp->rtl_ops.unload(tp);
4289 free_netdev(tp->netdev); 4318 free_netdev(tp->netdev);
4290 } 4319 }
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e0638e556fe7..1dd08d4b9c31 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -479,53 +479,21 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
479 stats->rx_packets++; 479 stats->rx_packets++;
480 u64_stats_update_end(&stats->rx_syncp); 480 u64_stats_update_end(&stats->rx_syncp);
481 481
482 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 482 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
483 pr_debug("Needs csum!\n");
484 if (!skb_partial_csum_set(skb,
485 virtio16_to_cpu(vi->vdev, hdr->hdr.csum_start),
486 virtio16_to_cpu(vi->vdev, hdr->hdr.csum_offset)))
487 goto frame_err;
488 } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
489 skb->ip_summed = CHECKSUM_UNNECESSARY; 483 skb->ip_summed = CHECKSUM_UNNECESSARY;
484
485 if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
486 virtio_is_little_endian(vi->vdev))) {
487 net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
488 dev->name, hdr->hdr.gso_type,
489 hdr->hdr.gso_size);
490 goto frame_err;
490 } 491 }
491 492
492 skb->protocol = eth_type_trans(skb, dev); 493 skb->protocol = eth_type_trans(skb, dev);
493 pr_debug("Receiving skb proto 0x%04x len %i type %i\n", 494 pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
494 ntohs(skb->protocol), skb->len, skb->pkt_type); 495 ntohs(skb->protocol), skb->len, skb->pkt_type);
495 496
496 if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
497 pr_debug("GSO!\n");
498 switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
499 case VIRTIO_NET_HDR_GSO_TCPV4:
500 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
501 break;
502 case VIRTIO_NET_HDR_GSO_UDP:
503 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
504 break;
505 case VIRTIO_NET_HDR_GSO_TCPV6:
506 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
507 break;
508 default:
509 net_warn_ratelimited("%s: bad gso type %u.\n",
510 dev->name, hdr->hdr.gso_type);
511 goto frame_err;
512 }
513
514 if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
515 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
516
517 skb_shinfo(skb)->gso_size = virtio16_to_cpu(vi->vdev,
518 hdr->hdr.gso_size);
519 if (skb_shinfo(skb)->gso_size == 0) {
520 net_warn_ratelimited("%s: zero gso size.\n", dev->name);
521 goto frame_err;
522 }
523
524 /* Header must be checked, and gso_segs computed. */
525 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
526 skb_shinfo(skb)->gso_segs = 0;
527 }
528
529 napi_gro_receive(&rq->napi, skb); 497 napi_gro_receive(&rq->napi, skb);
530 return; 498 return;
531 499
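The receive-path rewrite drops roughly forty open-coded lines in favour of virtio_net_hdr_to_skb(), the helper shared through include/linux/virtio_net.h; it performs the same work the removed branches did: set up partial checksums for NEEDS_CSUM headers, translate gso_type into skb_shinfo() state, and reject a zero gso_size. A hedged sketch of a caller (the helper's internals are elided; my_receive_fixup() is not a real function):

    static int my_receive_fixup(struct virtio_device *vdev,
                                struct sk_buff *skb,
                                const struct virtio_net_hdr *hdr)
    {
            if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID)
                    skb->ip_summed = CHECKSUM_UNNECESSARY;

            /* fills csum_start/csum_offset and the gso fields,
             * validating gso_type and gso_size on the way */
            if (virtio_net_hdr_to_skb(skb, hdr,
                                      virtio_is_little_endian(vdev)))
                    return -EINVAL;   /* caller drops the frame */

            return 0;
    }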
@@ -868,35 +836,9 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
868 else 836 else
869 hdr = skb_vnet_hdr(skb); 837 hdr = skb_vnet_hdr(skb);
870 838
871 if (skb->ip_summed == CHECKSUM_PARTIAL) { 839 if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
872 hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 840 virtio_is_little_endian(vi->vdev)))
873 hdr->hdr.csum_start = cpu_to_virtio16(vi->vdev, 841 BUG();
874 skb_checksum_start_offset(skb));
875 hdr->hdr.csum_offset = cpu_to_virtio16(vi->vdev,
876 skb->csum_offset);
877 } else {
878 hdr->hdr.flags = 0;
879 hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
880 }
881
882 if (skb_is_gso(skb)) {
883 hdr->hdr.hdr_len = cpu_to_virtio16(vi->vdev, skb_headlen(skb));
884 hdr->hdr.gso_size = cpu_to_virtio16(vi->vdev,
885 skb_shinfo(skb)->gso_size);
886 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
887 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
888 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
889 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
890 else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
891 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
892 else
893 BUG();
894 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
895 hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
896 } else {
897 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
898 hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
899 }
900 842
901 if (vi->mergeable_rx_bufs) 843 if (vi->mergeable_rx_bufs)
902 hdr->num_buffers = 0; 844 hdr->num_buffers = 0;
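virtio_net_hdr_from_skb() is the mirror helper on transmit; a non-zero return means the stack produced a GSO type the virtio header cannot describe, which this driver treats as a programming error (BUG()) rather than a runtime condition. The mapping the helper centralizes is the one the removed branch spelled out, roughly:

    /* illustrative summary of the removed branch, not the helper's
     * source: CHECKSUM_PARTIAL becomes VIRTIO_NET_HDR_F_NEEDS_CSUM
     * plus csum_start/csum_offset, and gso_type maps as follows */
    if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
            hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
    else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
            hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
    else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
            hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
    else
            return -EINVAL;       /* surfaces as the BUG() above */

with SKB_GSO_TCP_ECN OR-ing in VIRTIO_NET_HDR_GSO_ECN on top.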
@@ -1780,6 +1722,7 @@ static int virtnet_probe(struct virtio_device *vdev)
1780 struct net_device *dev; 1722 struct net_device *dev;
1781 struct virtnet_info *vi; 1723 struct virtnet_info *vi;
1782 u16 max_queue_pairs; 1724 u16 max_queue_pairs;
1725 int mtu;
1783 1726
1784 if (!vdev->config->get) { 1727 if (!vdev->config->get) {
1785 dev_err(&vdev->dev, "%s failure: config access disabled\n", 1728 dev_err(&vdev->dev, "%s failure: config access disabled\n",
@@ -1896,6 +1839,14 @@ static int virtnet_probe(struct virtio_device *vdev)
1896 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 1839 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
1897 vi->has_cvq = true; 1840 vi->has_cvq = true;
1898 1841
1842 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
1843 mtu = virtio_cread16(vdev,
1844 offsetof(struct virtio_net_config,
1845 mtu));
1846 if (virtnet_change_mtu(dev, mtu))
1847 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
1848 }
1849
1899 if (vi->any_header_sg) 1850 if (vi->any_header_sg)
1900 dev->needed_headroom = vi->hdr_len; 1851 dev->needed_headroom = vi->hdr_len;
1901 1852
@@ -2067,6 +2018,7 @@ static unsigned int features[] = {
2067 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, 2018 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
2068 VIRTIO_NET_F_CTRL_MAC_ADDR, 2019 VIRTIO_NET_F_CTRL_MAC_ADDR,
2069 VIRTIO_F_ANY_LAYOUT, 2020 VIRTIO_F_ANY_LAYOUT,
2021 VIRTIO_NET_F_MTU,
2070}; 2022};
2071 2023
2072static struct virtio_driver virtio_net_driver = { 2024static struct virtio_driver virtio_net_driver = {
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index 880f5098eac9..8cdbb63d1bb0 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -2,7 +2,7 @@
2# 2#
3# Linux driver for VMware's vmxnet3 ethernet NIC. 3# Linux driver for VMware's vmxnet3 ethernet NIC.
4# 4#
5# Copyright (C) 2007-2009, VMware, Inc. All Rights Reserved. 5# Copyright (C) 2007-2016, VMware, Inc. All Rights Reserved.
6# 6#
7# This program is free software; you can redistribute it and/or modify it 7# This program is free software; you can redistribute it and/or modify it
8# under the terms of the GNU General Public License as published by the 8# under the terms of the GNU General Public License as published by the
@@ -21,7 +21,7 @@
21# The full GNU General Public License is included in this distribution in 21# The full GNU General Public License is included in this distribution in
22# the file called "COPYING". 22# the file called "COPYING".
23# 23#
24# Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> 24# Maintained by: pv-drivers@vmware.com
25# 25#
26# 26#
27################################################################################ 27################################################################################
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h
index 969c751ee404..db9f1fde3aac 100644
--- a/drivers/net/vmxnet3/upt1_defs.h
+++ b/drivers/net/vmxnet3/upt1_defs.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Linux driver for VMware's vmxnet3 ethernet NIC. 2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 * 3 *
4 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. 4 * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 7 * under the terms of the GNU General Public License as published by the
@@ -20,7 +20,7 @@
20 * The full GNU General Public License is included in this distribution in 20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING". 21 * the file called "COPYING".
22 * 22 *
23 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> 23 * Maintained by: pv-drivers@vmware.com
24 * 24 *
25 */ 25 */
26 26
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index 72ba8ae7f09a..c3a31646189f 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Linux driver for VMware's vmxnet3 ethernet NIC. 2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 * 3 *
4 * Copyright (C) 2008-2015, VMware, Inc. All Rights Reserved. 4 * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 7 * under the terms of the GNU General Public License as published by the
@@ -20,7 +20,7 @@
20 * The full GNU General Public License is included in this distribution in 20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING". 21 * the file called "COPYING".
22 * 22 *
23 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> 23 * Maintained by: pv-drivers@vmware.com
24 * 24 *
25 */ 25 */
26 26
@@ -76,7 +76,12 @@ enum {
76 VMXNET3_CMD_UPDATE_IML, 76 VMXNET3_CMD_UPDATE_IML,
77 VMXNET3_CMD_UPDATE_PMCFG, 77 VMXNET3_CMD_UPDATE_PMCFG,
78 VMXNET3_CMD_UPDATE_FEATURE, 78 VMXNET3_CMD_UPDATE_FEATURE,
79 VMXNET3_CMD_RESERVED1,
79 VMXNET3_CMD_LOAD_PLUGIN, 80 VMXNET3_CMD_LOAD_PLUGIN,
81 VMXNET3_CMD_RESERVED2,
82 VMXNET3_CMD_RESERVED3,
83 VMXNET3_CMD_SET_COALESCE,
84 VMXNET3_CMD_REGISTER_MEMREGS,
80 85
81 VMXNET3_CMD_FIRST_GET = 0xF00D0000, 86 VMXNET3_CMD_FIRST_GET = 0xF00D0000,
82 VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET, 87 VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET,
@@ -87,7 +92,10 @@ enum {
87 VMXNET3_CMD_GET_DID_LO, 92 VMXNET3_CMD_GET_DID_LO,
88 VMXNET3_CMD_GET_DID_HI, 93 VMXNET3_CMD_GET_DID_HI,
89 VMXNET3_CMD_GET_DEV_EXTRA_INFO, 94 VMXNET3_CMD_GET_DEV_EXTRA_INFO,
90 VMXNET3_CMD_GET_CONF_INTR 95 VMXNET3_CMD_GET_CONF_INTR,
96 VMXNET3_CMD_GET_RESERVED1,
97 VMXNET3_CMD_GET_TXDATA_DESC_SIZE,
98 VMXNET3_CMD_GET_COALESCE,
91}; 99};
92 100
93/* 101/*
@@ -169,6 +177,8 @@ struct Vmxnet3_TxDataDesc {
169 u8 data[VMXNET3_HDR_COPY_SIZE]; 177 u8 data[VMXNET3_HDR_COPY_SIZE];
170}; 178};
171 179
180typedef u8 Vmxnet3_RxDataDesc;
181
172#define VMXNET3_TCD_GEN_SHIFT 31 182#define VMXNET3_TCD_GEN_SHIFT 31
173#define VMXNET3_TCD_GEN_SIZE 1 183#define VMXNET3_TCD_GEN_SIZE 1
174#define VMXNET3_TCD_TXIDX_SHIFT 0 184#define VMXNET3_TCD_TXIDX_SHIFT 0
@@ -373,6 +383,14 @@ union Vmxnet3_GenericDesc {
373#define VMXNET3_RING_SIZE_ALIGN 32 383#define VMXNET3_RING_SIZE_ALIGN 32
374#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1) 384#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1)
375 385
386/* Tx Data Ring buffer size must be a multiple of 64 */
387#define VMXNET3_TXDATA_DESC_SIZE_ALIGN 64
388#define VMXNET3_TXDATA_DESC_SIZE_MASK (VMXNET3_TXDATA_DESC_SIZE_ALIGN - 1)
389
390/* Rx Data Ring buffer size must be a multiple of 64 */
391#define VMXNET3_RXDATA_DESC_SIZE_ALIGN 64
392#define VMXNET3_RXDATA_DESC_SIZE_MASK (VMXNET3_RXDATA_DESC_SIZE_ALIGN - 1)
393
376/* Max ring size */ 394/* Max ring size */
377#define VMXNET3_TX_RING_MAX_SIZE 4096 395#define VMXNET3_TX_RING_MAX_SIZE 4096
378#define VMXNET3_TC_RING_MAX_SIZE 4096 396#define VMXNET3_TC_RING_MAX_SIZE 4096
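The new _ALIGN/_MASK pairs drive the kernel's usual power-of-two round-up idiom, (x + mask) & ~mask; vmxnet3_set_ringparam later applies exactly this arithmetic to the requested rx data-ring buffer size. A standalone, compilable illustration:

    #include <assert.h>
    #include <stdint.h>

    #define SIZE_ALIGN 64u             /* VMXNET3_*DATA_DESC_SIZE_ALIGN */
    #define SIZE_MASK  (SIZE_ALIGN - 1)

    static uint16_t round_up_desc_size(uint16_t x)
    {
            return (uint16_t)((x + SIZE_MASK) & ~SIZE_MASK);
    }

    int main(void)
    {
            assert(round_up_desc_size(1)  == 64);
            assert(round_up_desc_size(64) == 64);
            assert(round_up_desc_size(65) == 128);
            return 0;
    }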
@@ -380,6 +398,11 @@ union Vmxnet3_GenericDesc {
380#define VMXNET3_RX_RING2_MAX_SIZE 4096 398#define VMXNET3_RX_RING2_MAX_SIZE 4096
381#define VMXNET3_RC_RING_MAX_SIZE 8192 399#define VMXNET3_RC_RING_MAX_SIZE 8192
382 400
401#define VMXNET3_TXDATA_DESC_MIN_SIZE 128
402#define VMXNET3_TXDATA_DESC_MAX_SIZE 2048
403
404#define VMXNET3_RXDATA_DESC_MAX_SIZE 2048
405
383/* a list of reasons for queue stop */ 406/* a list of reasons for queue stop */
384 407
385enum { 408enum {
@@ -466,7 +489,9 @@ struct Vmxnet3_TxQueueConf {
466 __le32 compRingSize; /* # of comp desc */ 489 __le32 compRingSize; /* # of comp desc */
467 __le32 ddLen; /* size of driver data */ 490 __le32 ddLen; /* size of driver data */
468 u8 intrIdx; 491 u8 intrIdx;
469 u8 _pad[7]; 492 u8 _pad1[1];
493 __le16 txDataRingDescSize;
494 u8 _pad2[4];
470}; 495};
471 496
472 497
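Note how the 16-bit descriptor-size field is carved out of existing padding in both queue-configuration structs: u8 _pad[7] becomes _pad1[1] + __le16 + _pad2[4], still 7 bytes, so offsets and total size are unchanged and devices that predate the field see the same layout. A standalone check of that invariant, with plain C11 stand-ins for the kernel types:

    #include <stdint.h>

    struct old_tail { uint8_t intrIdx; uint8_t _pad[7]; };
    struct new_tail {
            uint8_t  intrIdx;
            uint8_t  _pad1[1];
            uint16_t txDataRingDescSize;  /* lands on a 2-byte boundary */
            uint8_t  _pad2[4];
    };

    _Static_assert(sizeof(struct old_tail) == sizeof(struct new_tail),
                   "repacked padding must not change the ABI size");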
@@ -474,12 +499,14 @@ struct Vmxnet3_RxQueueConf {
474 __le64 rxRingBasePA[2]; 499 __le64 rxRingBasePA[2];
475 __le64 compRingBasePA; 500 __le64 compRingBasePA;
476 __le64 ddPA; /* driver data */ 501 __le64 ddPA; /* driver data */
477 __le64 reserved; 502 __le64 rxDataRingBasePA;
478 __le32 rxRingSize[2]; /* # of rx desc */ 503 __le32 rxRingSize[2]; /* # of rx desc */
479 __le32 compRingSize; /* # of rx comp desc */ 504 __le32 compRingSize; /* # of rx comp desc */
480 __le32 ddLen; /* size of driver data */ 505 __le32 ddLen; /* size of driver data */
481 u8 intrIdx; 506 u8 intrIdx;
482 u8 _pad[7]; 507 u8 _pad1[1];
508 __le16 rxDataRingDescSize; /* size of rx data ring buffer */
509 u8 _pad2[4];
483}; 510};
484 511
485 512
@@ -609,6 +636,63 @@ struct Vmxnet3_RxQueueDesc {
609 u8 __pad[88]; /* 128 aligned */ 636 u8 __pad[88]; /* 128 aligned */
610}; 637};
611 638
639struct Vmxnet3_SetPolling {
640 u8 enablePolling;
641};
642
643#define VMXNET3_COAL_STATIC_MAX_DEPTH 128
644#define VMXNET3_COAL_RBC_MIN_RATE 100
645#define VMXNET3_COAL_RBC_MAX_RATE 100000
646
647enum Vmxnet3_CoalesceMode {
648 VMXNET3_COALESCE_DISABLED = 0,
649 VMXNET3_COALESCE_ADAPT = 1,
650 VMXNET3_COALESCE_STATIC = 2,
651 VMXNET3_COALESCE_RBC = 3
652};
653
654struct Vmxnet3_CoalesceRbc {
655 u32 rbc_rate;
656};
657
658struct Vmxnet3_CoalesceStatic {
659 u32 tx_depth;
660 u32 tx_comp_depth;
661 u32 rx_depth;
662};
663
664struct Vmxnet3_CoalesceScheme {
665 enum Vmxnet3_CoalesceMode coalMode;
666 union {
667 struct Vmxnet3_CoalesceRbc coalRbc;
668 struct Vmxnet3_CoalesceStatic coalStatic;
669 } coalPara;
670};
671
672struct Vmxnet3_MemoryRegion {
673 __le64 startPA;
674 __le32 length;
675 __le16 txQueueBits;
676 __le16 rxQueueBits;
677};
678
679#define MAX_MEMORY_REGION_PER_QUEUE 16
680#define MAX_MEMORY_REGION_PER_DEVICE 256
681
682struct Vmxnet3_MemRegs {
683 __le16 numRegs;
684 __le16 pad[3];
685 struct Vmxnet3_MemoryRegion memRegs[1];
686};
687
688/* If the command data <= 16 bytes, use the shared memory directly.
689 * otherwise, use variable length configuration descriptor.
690 */
691union Vmxnet3_CmdInfo {
692 struct Vmxnet3_VariableLenConfDesc varConf;
693 struct Vmxnet3_SetPolling setPolling;
694 __le64 data[2];
695};
612 696
613struct Vmxnet3_DSDevRead { 697struct Vmxnet3_DSDevRead {
614 /* read-only region for device, read by dev in response to a SET cmd */ 698 /* read-only region for device, read by dev in response to a SET cmd */
@@ -627,7 +711,14 @@ struct Vmxnet3_DriverShared {
627 __le32 pad; 711 __le32 pad;
628 struct Vmxnet3_DSDevRead devRead; 712 struct Vmxnet3_DSDevRead devRead;
629 __le32 ecr; 713 __le32 ecr;
630 __le32 reserved[5]; 714 __le32 reserved;
715 union {
716 __le32 reserved1[4];
717 union Vmxnet3_CmdInfo cmdInfo; /* only valid in the context of
718 * executing the relevant
719 * command
720 */
721 } cu;
631}; 722};
632 723
633 724
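Vmxnet3_CmdInfo formalizes the command ABI spelled out in the comment above it: payloads of at most 16 bytes travel inline in data[2], anything larger goes through the variable-length descriptor, whose confVer/confLen/confPA triple points at DMA memory. That indirect form is how the coalescing code below ships struct Vmxnet3_CoalesceScheme. A hedged sketch of the split; my_fill_cmd() is illustrative, not a driver function:

    static void my_fill_cmd(union Vmxnet3_CmdInfo *ci, const void *buf,
                            u32 len, u64 dma_pa)
    {
            if (len <= sizeof(ci->data)) {
                    memcpy(ci->data, buf, len); /* small: inline */
            } else {
                    ci->varConf.confVer = 1;    /* large: indirect */
                    ci->varConf.confLen = cpu_to_le32(len);
                    ci->varConf.confPA  = cpu_to_le64(dma_pa);
            }
    }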
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 08885bc8d6db..c68fe495d3f9 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Linux driver for VMware's vmxnet3 ethernet NIC. 2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 * 3 *
4 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. 4 * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 7 * under the terms of the GNU General Public License as published by the
@@ -20,7 +20,7 @@
20 * The full GNU General Public License is included in this distribution in 20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING". 21 * the file called "COPYING".
22 * 22 *
23 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> 23 * Maintained by: pv-drivers@vmware.com
24 * 24 *
25 */ 25 */
26 26
@@ -435,8 +435,8 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
435 tq->tx_ring.base = NULL; 435 tq->tx_ring.base = NULL;
436 } 436 }
437 if (tq->data_ring.base) { 437 if (tq->data_ring.base) {
438 dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size * 438 dma_free_coherent(&adapter->pdev->dev,
439 sizeof(struct Vmxnet3_TxDataDesc), 439 tq->data_ring.size * tq->txdata_desc_size,
440 tq->data_ring.base, tq->data_ring.basePA); 440 tq->data_ring.base, tq->data_ring.basePA);
441 tq->data_ring.base = NULL; 441 tq->data_ring.base = NULL;
442 } 442 }
@@ -478,8 +478,8 @@ vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
478 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; 478 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
479 tq->tx_ring.gen = VMXNET3_INIT_GEN; 479 tq->tx_ring.gen = VMXNET3_INIT_GEN;
480 480
481 memset(tq->data_ring.base, 0, tq->data_ring.size * 481 memset(tq->data_ring.base, 0,
482 sizeof(struct Vmxnet3_TxDataDesc)); 482 tq->data_ring.size * tq->txdata_desc_size);
483 483
484 /* reset the tx comp ring contents to 0 and reset comp ring states */ 484 /* reset the tx comp ring contents to 0 and reset comp ring states */
485 memset(tq->comp_ring.base, 0, tq->comp_ring.size * 485 memset(tq->comp_ring.base, 0, tq->comp_ring.size *
@@ -514,10 +514,10 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
514 } 514 }
515 515
516 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev, 516 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
517 tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc), 517 tq->data_ring.size * tq->txdata_desc_size,
518 &tq->data_ring.basePA, GFP_KERNEL); 518 &tq->data_ring.basePA, GFP_KERNEL);
519 if (!tq->data_ring.base) { 519 if (!tq->data_ring.base) {
520 netdev_err(adapter->netdev, "failed to allocate data ring\n"); 520 netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
521 goto err; 521 goto err;
522 } 522 }
523 523
@@ -689,7 +689,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
689 if (ctx->copy_size) { 689 if (ctx->copy_size) {
690 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA + 690 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
691 tq->tx_ring.next2fill * 691 tq->tx_ring.next2fill *
692 sizeof(struct Vmxnet3_TxDataDesc)); 692 tq->txdata_desc_size);
693 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size); 693 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
694 ctx->sop_txd->dword[3] = 0; 694 ctx->sop_txd->dword[3] = 0;
695 695
@@ -873,8 +873,9 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
873 ctx->eth_ip_hdr_size = 0; 873 ctx->eth_ip_hdr_size = 0;
874 ctx->l4_hdr_size = 0; 874 ctx->l4_hdr_size = 0;
875 /* copy as much as allowed */ 875 /* copy as much as allowed */
876 ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE 876 ctx->copy_size = min_t(unsigned int,
877 , skb_headlen(skb)); 877 tq->txdata_desc_size,
878 skb_headlen(skb));
878 } 879 }
879 880
880 if (skb->len <= VMXNET3_HDR_COPY_SIZE) 881 if (skb->len <= VMXNET3_HDR_COPY_SIZE)
@@ -885,7 +886,7 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
885 goto err; 886 goto err;
886 } 887 }
887 888
888 if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) { 889 if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
889 tq->stats.oversized_hdr++; 890 tq->stats.oversized_hdr++;
890 ctx->copy_size = 0; 891 ctx->copy_size = 0;
891 return 0; 892 return 0;
@@ -1283,9 +1284,10 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1283 */ 1284 */
1284 break; 1285 break;
1285 } 1286 }
1286 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2); 1287 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
1288 rcd->rqID != rq->dataRingQid);
1287 idx = rcd->rxdIdx; 1289 idx = rcd->rxdIdx;
1288 ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1; 1290 ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
1289 ring = rq->rx_ring + ring_idx; 1291 ring = rq->rx_ring + ring_idx;
1290 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd, 1292 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1291 &rxCmdDesc); 1293 &rxCmdDesc);
@@ -1300,8 +1302,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1300 } 1302 }
1301 1303
1302 if (rcd->sop) { /* first buf of the pkt */ 1304 if (rcd->sop) { /* first buf of the pkt */
1305 bool rxDataRingUsed;
1306 u16 len;
1307
1303 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD || 1308 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
1304 rcd->rqID != rq->qid); 1309 (rcd->rqID != rq->qid &&
1310 rcd->rqID != rq->dataRingQid));
1305 1311
1306 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB); 1312 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
1307 BUG_ON(ctx->skb != NULL || rbi->skb == NULL); 1313 BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
@@ -1317,8 +1323,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1317 1323
1318 skip_page_frags = false; 1324 skip_page_frags = false;
1319 ctx->skb = rbi->skb; 1325 ctx->skb = rbi->skb;
1326
1327 rxDataRingUsed =
1328 VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
1329 len = rxDataRingUsed ? rcd->len : rbi->len;
1320 new_skb = netdev_alloc_skb_ip_align(adapter->netdev, 1330 new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
1321 rbi->len); 1331 len);
1322 if (new_skb == NULL) { 1332 if (new_skb == NULL) {
1323 /* Skb allocation failed, do not handover this 1333 /* Skb allocation failed, do not handover this
1324 * skb to stack. Reuse it. Drop the existing pkt 1334 * skb to stack. Reuse it. Drop the existing pkt
@@ -1329,25 +1339,48 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1329 skip_page_frags = true; 1339 skip_page_frags = true;
1330 goto rcd_done; 1340 goto rcd_done;
1331 } 1341 }
1332 new_dma_addr = dma_map_single(&adapter->pdev->dev,
1333 new_skb->data, rbi->len,
1334 PCI_DMA_FROMDEVICE);
1335 if (dma_mapping_error(&adapter->pdev->dev,
1336 new_dma_addr)) {
1337 dev_kfree_skb(new_skb);
1338 /* Skb allocation failed, do not handover this
1339 * skb to stack. Reuse it. Drop the existing pkt
1340 */
1341 rq->stats.rx_buf_alloc_failure++;
1342 ctx->skb = NULL;
1343 rq->stats.drop_total++;
1344 skip_page_frags = true;
1345 goto rcd_done;
1346 }
1347 1342
1348 dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr, 1343 if (rxDataRingUsed) {
1349 rbi->len, 1344 size_t sz;
1350 PCI_DMA_FROMDEVICE); 1345
1346 BUG_ON(rcd->len > rq->data_ring.desc_size);
1347
1348 ctx->skb = new_skb;
1349 sz = rcd->rxdIdx * rq->data_ring.desc_size;
1350 memcpy(new_skb->data,
1351 &rq->data_ring.base[sz], rcd->len);
1352 } else {
1353 ctx->skb = rbi->skb;
1354
1355 new_dma_addr =
1356 dma_map_single(&adapter->pdev->dev,
1357 new_skb->data, rbi->len,
1358 PCI_DMA_FROMDEVICE);
1359 if (dma_mapping_error(&adapter->pdev->dev,
1360 new_dma_addr)) {
1361 dev_kfree_skb(new_skb);
1362 /* Skb allocation failed, do not
1363 * handover this skb to stack. Reuse
1364 * it. Drop the existing pkt.
1365 */
1366 rq->stats.rx_buf_alloc_failure++;
1367 ctx->skb = NULL;
1368 rq->stats.drop_total++;
1369 skip_page_frags = true;
1370 goto rcd_done;
1371 }
1372
1373 dma_unmap_single(&adapter->pdev->dev,
1374 rbi->dma_addr,
1375 rbi->len,
1376 PCI_DMA_FROMDEVICE);
1377
1378 /* Immediate refill */
1379 rbi->skb = new_skb;
1380 rbi->dma_addr = new_dma_addr;
1381 rxd->addr = cpu_to_le64(rbi->dma_addr);
1382 rxd->len = rbi->len;
1383 }
1351 1384
1352#ifdef VMXNET3_RSS 1385#ifdef VMXNET3_RSS
1353 if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE && 1386 if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
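The sop branch now forks on where the packet body lives: completions whose rqID falls in the data-ring band carry small packets that already sit in the coherent rx data ring and are simply copied into the fresh skb, while everything else keeps the old swap-and-remap path. A condensed sketch of the fork, with the unmap of the old buffer and all error handling elided:

    len = rxDataRingUsed ? rcd->len : rbi->len;
    new_skb = netdev_alloc_skb_ip_align(adapter->netdev, len);

    if (rxDataRingUsed) {
            /* desc_size-spaced slots, indexed by the completion idx */
            size_t off = rcd->rxdIdx * rq->data_ring.desc_size;

            ctx->skb = new_skb;
            memcpy(new_skb->data, &rq->data_ring.base[off], rcd->len);
    } else {
            ctx->skb = rbi->skb;    /* hand the mapped skb up the stack */
            rbi->skb = new_skb;     /* and refill the slot immediately */
            rbi->dma_addr = dma_map_single(&adapter->pdev->dev,
                                           new_skb->data, rbi->len,
                                           PCI_DMA_FROMDEVICE);
            rxd->addr = cpu_to_le64(rbi->dma_addr);
            rxd->len = rbi->len;
    }

The copy path never consumes the descriptor's DMA buffer, which is why the immediate-refill block moved inside the else arm.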
@@ -1358,12 +1391,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1358#endif 1391#endif
1359 skb_put(ctx->skb, rcd->len); 1392 skb_put(ctx->skb, rcd->len);
1360 1393
1361 /* Immediate refill */ 1394 if (VMXNET3_VERSION_GE_2(adapter) &&
1362 rbi->skb = new_skb;
1363 rbi->dma_addr = new_dma_addr;
1364 rxd->addr = cpu_to_le64(rbi->dma_addr);
1365 rxd->len = rbi->len;
1366 if (adapter->version == 2 &&
1367 rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) { 1395 rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
1368 struct Vmxnet3_RxCompDescExt *rcdlro; 1396 struct Vmxnet3_RxCompDescExt *rcdlro;
1369 rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd; 1397 rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
@@ -1589,6 +1617,13 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1589 rq->buf_info[i] = NULL; 1617 rq->buf_info[i] = NULL;
1590 } 1618 }
1591 1619
1620 if (rq->data_ring.base) {
1621 dma_free_coherent(&adapter->pdev->dev,
1622 rq->rx_ring[0].size * rq->data_ring.desc_size,
1623 rq->data_ring.base, rq->data_ring.basePA);
1624 rq->data_ring.base = NULL;
1625 }
1626
1592 if (rq->comp_ring.base) { 1627 if (rq->comp_ring.base) {
1593 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size 1628 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
1594 * sizeof(struct Vmxnet3_RxCompDesc), 1629 * sizeof(struct Vmxnet3_RxCompDesc),
@@ -1604,6 +1639,25 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1604 } 1639 }
1605} 1640}
1606 1641
1642void
1643vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
1644{
1645 int i;
1646
1647 for (i = 0; i < adapter->num_rx_queues; i++) {
1648 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1649
1650 if (rq->data_ring.base) {
1651 dma_free_coherent(&adapter->pdev->dev,
1652 (rq->rx_ring[0].size *
1653 rq->data_ring.desc_size),
1654 rq->data_ring.base,
1655 rq->data_ring.basePA);
1656 rq->data_ring.base = NULL;
1657 rq->data_ring.desc_size = 0;
1658 }
1659 }
1660}
1607 1661
1608static int 1662static int
1609vmxnet3_rq_init(struct vmxnet3_rx_queue *rq, 1663vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
@@ -1697,6 +1751,22 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1697 } 1751 }
1698 } 1752 }
1699 1753
1754 if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
1755 sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
1756 rq->data_ring.base =
1757 dma_alloc_coherent(&adapter->pdev->dev, sz,
1758 &rq->data_ring.basePA,
1759 GFP_KERNEL);
1760 if (!rq->data_ring.base) {
1761 netdev_err(adapter->netdev,
1762 "rx data ring will be disabled\n");
1763 adapter->rxdataring_enabled = false;
1764 }
1765 } else {
1766 rq->data_ring.base = NULL;
1767 rq->data_ring.desc_size = 0;
1768 }
1769
1700 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc); 1770 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1701 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz, 1771 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
1702 &rq->comp_ring.basePA, 1772 &rq->comp_ring.basePA,
@@ -1729,6 +1799,8 @@ vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
1729{ 1799{
1730 int i, err = 0; 1800 int i, err = 0;
1731 1801
1802 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
1803
1732 for (i = 0; i < adapter->num_rx_queues; i++) { 1804 for (i = 0; i < adapter->num_rx_queues; i++) {
1733 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter); 1805 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
1734 if (unlikely(err)) { 1806 if (unlikely(err)) {
@@ -1738,6 +1810,10 @@ vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
1738 goto err_out; 1810 goto err_out;
1739 } 1811 }
1740 } 1812 }
1813
1814 if (!adapter->rxdataring_enabled)
1815 vmxnet3_rq_destroy_all_rxdataring(adapter);
1816
1741 return err; 1817 return err;
1742err_out: 1818err_out:
1743 vmxnet3_rq_destroy_all(adapter); 1819 vmxnet3_rq_destroy_all(adapter);
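Data-ring support is tentative but all-or-nothing: every v3 adapter starts with rxdataring_enabled = true, any queue that fails its allocation flips the flag ("rx data ring will be disabled"), and the final pass strips the data rings from the queues that did succeed so all queues stay symmetric. The same three-step shape recurs in vmxnet3_create_queues() below; in outline:

    adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);

    for (i = 0; i < adapter->num_rx_queues; i++) {
            err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
            if (unlikely(err))
                    goto err_out;               /* hard failure */
    }

    if (!adapter->rxdataring_enabled)           /* soft opt-out */
            vmxnet3_rq_destroy_all_rxdataring(adapter);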
@@ -2045,10 +2121,9 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2045 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; 2121 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2046 rq->qid = i; 2122 rq->qid = i;
2047 rq->qid2 = i + adapter->num_rx_queues; 2123 rq->qid2 = i + adapter->num_rx_queues;
2124 rq->dataRingQid = i + 2 * adapter->num_rx_queues;
2048 } 2125 }
2049 2126
2050
2051
2052 /* init our intr settings */ 2127 /* init our intr settings */
2053 for (i = 0; i < intr->num_intrs; i++) 2128 for (i = 0; i < intr->num_intrs; i++)
2054 intr->mod_levels[i] = UPT1_IML_ADAPTIVE; 2129 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
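Each rx queue now owns three completion IDs, handed out in bands of num_rx_queues: qid = i, qid2 = i + N, dataRingQid = i + 2N. The VMXNET3_GET_RING_IDX and VMXNET3_RX_DATA_RING macros added in vmxnet3_int.h decode a completion's rqID back into a band; a standalone rendering of that decode:

    #include <stdbool.h>
    #include <stdint.h>

    static int ring_idx(uint32_t rqid, uint32_t nqueues)
    {
            /* band 0 -> rx_ring[0], band 1 -> rx_ring[1] */
            return (rqid >= nqueues && rqid < 2 * nqueues) ? 1 : 0;
    }

    static bool uses_rx_data_ring(uint32_t rqid, uint32_t nqueues)
    {
            return rqid >= 2 * nqueues && rqid < 3 * nqueues;
    }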
@@ -2336,6 +2411,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2336 tqc->ddPA = cpu_to_le64(tq->buf_info_pa); 2411 tqc->ddPA = cpu_to_le64(tq->buf_info_pa);
2337 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size); 2412 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2338 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size); 2413 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2414 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
2339 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size); 2415 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2340 tqc->ddLen = cpu_to_le32( 2416 tqc->ddLen = cpu_to_le32(
2341 sizeof(struct vmxnet3_tx_buf_info) * 2417 sizeof(struct vmxnet3_tx_buf_info) *
@@ -2360,6 +2436,12 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2360 (rqc->rxRingSize[0] + 2436 (rqc->rxRingSize[0] +
2361 rqc->rxRingSize[1])); 2437 rqc->rxRingSize[1]));
2362 rqc->intrIdx = rq->comp_ring.intr_idx; 2438 rqc->intrIdx = rq->comp_ring.intr_idx;
2439 if (VMXNET3_VERSION_GE_3(adapter)) {
2440 rqc->rxDataRingBasePA =
2441 cpu_to_le64(rq->data_ring.basePA);
2442 rqc->rxDataRingDescSize =
2443 cpu_to_le16(rq->data_ring.desc_size);
2444 }
2363 } 2445 }
2364 2446
2365#ifdef VMXNET3_RSS 2447#ifdef VMXNET3_RSS
@@ -2409,6 +2491,32 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2409 /* the rest are already zeroed */ 2491 /* the rest are already zeroed */
2410} 2492}
2411 2493
2494static void
2495vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
2496{
2497 struct Vmxnet3_DriverShared *shared = adapter->shared;
2498 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2499 unsigned long flags;
2500
2501 if (!VMXNET3_VERSION_GE_3(adapter))
2502 return;
2503
2504 spin_lock_irqsave(&adapter->cmd_lock, flags);
2505 cmdInfo->varConf.confVer = 1;
2506 cmdInfo->varConf.confLen =
2507 cpu_to_le32(sizeof(*adapter->coal_conf));
2508 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
2509
2510 if (adapter->default_coal_mode) {
2511 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2512 VMXNET3_CMD_GET_COALESCE);
2513 } else {
2514 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2515 VMXNET3_CMD_SET_COALESCE);
2516 }
2517
2518 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2519}
2412 2520
2413int 2521int
2414vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) 2522vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
@@ -2458,6 +2566,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2458 goto activate_err; 2566 goto activate_err;
2459 } 2567 }
2460 2568
2569 vmxnet3_init_coalesce(adapter);
2570
2461 for (i = 0; i < adapter->num_rx_queues; i++) { 2571 for (i = 0; i < adapter->num_rx_queues; i++) {
2462 VMXNET3_WRITE_BAR0_REG(adapter, 2572 VMXNET3_WRITE_BAR0_REG(adapter,
2463 VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN, 2573 VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
@@ -2689,7 +2799,8 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2689 2799
2690int 2800int
2691vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, 2801vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2692 u32 rx_ring_size, u32 rx_ring2_size) 2802 u32 rx_ring_size, u32 rx_ring2_size,
2803 u16 txdata_desc_size, u16 rxdata_desc_size)
2693{ 2804{
2694 int err = 0, i; 2805 int err = 0, i;
2695 2806
@@ -2698,6 +2809,7 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2698 tq->tx_ring.size = tx_ring_size; 2809 tq->tx_ring.size = tx_ring_size;
2699 tq->data_ring.size = tx_ring_size; 2810 tq->data_ring.size = tx_ring_size;
2700 tq->comp_ring.size = tx_ring_size; 2811 tq->comp_ring.size = tx_ring_size;
2812 tq->txdata_desc_size = txdata_desc_size;
2701 tq->shared = &adapter->tqd_start[i].ctrl; 2813 tq->shared = &adapter->tqd_start[i].ctrl;
2702 tq->stopped = true; 2814 tq->stopped = true;
2703 tq->adapter = adapter; 2815 tq->adapter = adapter;
@@ -2714,12 +2826,15 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2714 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size; 2826 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
2715 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size; 2827 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
2716 vmxnet3_adjust_rx_ring_size(adapter); 2828 vmxnet3_adjust_rx_ring_size(adapter);
2829
2830 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
2717 for (i = 0; i < adapter->num_rx_queues; i++) { 2831 for (i = 0; i < adapter->num_rx_queues; i++) {
2718 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; 2832 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2719 /* qid and qid2 for rx queues will be assigned later when num 2833 /* qid and qid2 for rx queues will be assigned later when num
2720 * of rx queues is finalized after allocating intrs */ 2834 * of rx queues is finalized after allocating intrs */
2721 rq->shared = &adapter->rqd_start[i].ctrl; 2835 rq->shared = &adapter->rqd_start[i].ctrl;
2722 rq->adapter = adapter; 2836 rq->adapter = adapter;
2837 rq->data_ring.desc_size = rxdata_desc_size;
2723 err = vmxnet3_rq_create(rq, adapter); 2838 err = vmxnet3_rq_create(rq, adapter);
2724 if (err) { 2839 if (err) {
2725 if (i == 0) { 2840 if (i == 0) {
@@ -2737,6 +2852,10 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2737 } 2852 }
2738 } 2853 }
2739 } 2854 }
2855
2856 if (!adapter->rxdataring_enabled)
2857 vmxnet3_rq_destroy_all_rxdataring(adapter);
2858
2740 return err; 2859 return err;
2741queue_err: 2860queue_err:
2742 vmxnet3_tq_destroy_all(adapter); 2861 vmxnet3_tq_destroy_all(adapter);
@@ -2754,9 +2873,35 @@ vmxnet3_open(struct net_device *netdev)
2754 for (i = 0; i < adapter->num_tx_queues; i++) 2873 for (i = 0; i < adapter->num_tx_queues; i++)
2755 spin_lock_init(&adapter->tx_queue[i].tx_lock); 2874 spin_lock_init(&adapter->tx_queue[i].tx_lock);
2756 2875
2757 err = vmxnet3_create_queues(adapter, adapter->tx_ring_size, 2876 if (VMXNET3_VERSION_GE_3(adapter)) {
2877 unsigned long flags;
2878 u16 txdata_desc_size;
2879
2880 spin_lock_irqsave(&adapter->cmd_lock, flags);
2881 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2882 VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
2883 txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
2884 VMXNET3_REG_CMD);
2885 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2886
2887 if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
2888 (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
2889 (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
2890 adapter->txdata_desc_size =
2891 sizeof(struct Vmxnet3_TxDataDesc);
2892 } else {
2893 adapter->txdata_desc_size = txdata_desc_size;
2894 }
2895 } else {
2896 adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
2897 }
2898
2899 err = vmxnet3_create_queues(adapter,
2900 adapter->tx_ring_size,
2758 adapter->rx_ring_size, 2901 adapter->rx_ring_size,
2759 adapter->rx_ring2_size); 2902 adapter->rx_ring2_size,
2903 adapter->txdata_desc_size,
2904 adapter->rxdata_desc_size);
2760 if (err) 2905 if (err)
2761 goto queue_err; 2906 goto queue_err;
2762 2907
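vmxnet3_open() treats the size reported by VMXNET3_CMD_GET_TXDATA_DESC_SIZE as untrusted input: anything under 128, over 2048, or not a multiple of 64 falls back to sizeof(struct Vmxnet3_TxDataDesc). The same check, standalone:

    #include <stdint.h>

    #define TXDATA_DESC_MIN_SIZE  128u
    #define TXDATA_DESC_MAX_SIZE  2048u
    #define TXDATA_DESC_SIZE_MASK 63u    /* multiple-of-64 check */

    static uint16_t sanitize_txdata_desc_size(uint16_t dev_size,
                                              uint16_t fallback)
    {
            if (dev_size < TXDATA_DESC_MIN_SIZE ||
                dev_size > TXDATA_DESC_MAX_SIZE ||
                (dev_size & TXDATA_DESC_SIZE_MASK))
                    return fallback;
            return dev_size;
    }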
@@ -3200,12 +3345,21 @@ vmxnet3_probe_device(struct pci_dev *pdev,
3200 goto err_alloc_pci; 3345 goto err_alloc_pci;
3201 3346
3202 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS); 3347 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
3203 if (ver & 2) { 3348 if (ver & (1 << VMXNET3_REV_3)) {
3204 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 2); 3349 VMXNET3_WRITE_BAR1_REG(adapter,
3205 adapter->version = 2; 3350 VMXNET3_REG_VRRS,
3206 } else if (ver & 1) { 3351 1 << VMXNET3_REV_3);
3207 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1); 3352 adapter->version = VMXNET3_REV_3 + 1;
3208 adapter->version = 1; 3353 } else if (ver & (1 << VMXNET3_REV_2)) {
3354 VMXNET3_WRITE_BAR1_REG(adapter,
3355 VMXNET3_REG_VRRS,
3356 1 << VMXNET3_REV_2);
3357 adapter->version = VMXNET3_REV_2 + 1;
3358 } else if (ver & (1 << VMXNET3_REV_1)) {
3359 VMXNET3_WRITE_BAR1_REG(adapter,
3360 VMXNET3_REG_VRRS,
3361 1 << VMXNET3_REV_1);
3362 adapter->version = VMXNET3_REV_1 + 1;
3209 } else { 3363 } else {
3210 dev_err(&pdev->dev, 3364 dev_err(&pdev->dev,
3211 "Incompatible h/w version (0x%x) for adapter\n", ver); 3365 "Incompatible h/w version (0x%x) for adapter\n", ver);
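Version negotiation reads VRRS as a bitmask of revisions the device offers and acks the highest one the driver understands by writing that single bit back; the old magic 1/2 constants become 1 << VMXNET3_REV_n and adapter->version is stored 1-based. The selection logic, standalone:

    #include <stdint.h>

    /* REV_1..REV_3 are bit positions 0..2, as in vmxnet3_int.h */
    static int negotiate_version(uint32_t vrrs)
    {
            int rev;

            for (rev = 2; rev >= 0; rev--)
                    if (vrrs & (1u << rev))
                            return rev + 1;   /* 1-based, as stored */
            return 0;                         /* incompatible device */
    }

The caller would then write 1u << (version - 1) back to VRRS to commit the choice, as each branch of the hunk does.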
@@ -3224,9 +3378,28 @@ vmxnet3_probe_device(struct pci_dev *pdev,
3224 goto err_ver; 3378 goto err_ver;
3225 } 3379 }
3226 3380
3381 if (VMXNET3_VERSION_GE_3(adapter)) {
3382 adapter->coal_conf =
3383 dma_alloc_coherent(&adapter->pdev->dev,
3384 sizeof(struct Vmxnet3_CoalesceScheme)
3385 ,
3386 &adapter->coal_conf_pa,
3387 GFP_KERNEL);
3388 if (!adapter->coal_conf) {
3389 err = -ENOMEM;
3390 goto err_ver;
3391 }
3392 memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
3393 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
3394 adapter->default_coal_mode = true;
3395 }
3396
3227 SET_NETDEV_DEV(netdev, &pdev->dev); 3397 SET_NETDEV_DEV(netdev, &pdev->dev);
3228 vmxnet3_declare_features(adapter, dma64); 3398 vmxnet3_declare_features(adapter, dma64);
3229 3399
3400 adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
3401 VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
3402
3230 if (adapter->num_tx_queues == adapter->num_rx_queues) 3403 if (adapter->num_tx_queues == adapter->num_rx_queues)
3231 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE; 3404 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
3232 else 3405 else
@@ -3283,6 +3456,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
3283 return 0; 3456 return 0;
3284 3457
3285err_register: 3458err_register:
3459 if (VMXNET3_VERSION_GE_3(adapter)) {
3460 dma_free_coherent(&adapter->pdev->dev,
3461 sizeof(struct Vmxnet3_CoalesceScheme),
3462 adapter->coal_conf, adapter->coal_conf_pa);
3463 }
3286 vmxnet3_free_intr_resources(adapter); 3464 vmxnet3_free_intr_resources(adapter);
3287err_ver: 3465err_ver:
3288 vmxnet3_free_pci_resources(adapter); 3466 vmxnet3_free_pci_resources(adapter);
@@ -3333,6 +3511,11 @@ vmxnet3_remove_device(struct pci_dev *pdev)
3333 3511
3334 vmxnet3_free_intr_resources(adapter); 3512 vmxnet3_free_intr_resources(adapter);
3335 vmxnet3_free_pci_resources(adapter); 3513 vmxnet3_free_pci_resources(adapter);
3514 if (VMXNET3_VERSION_GE_3(adapter)) {
3515 dma_free_coherent(&adapter->pdev->dev,
3516 sizeof(struct Vmxnet3_CoalesceScheme),
3517 adapter->coal_conf, adapter->coal_conf_pa);
3518 }
3336#ifdef VMXNET3_RSS 3519#ifdef VMXNET3_RSS
3337 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf), 3520 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3338 adapter->rss_conf, adapter->rss_conf_pa); 3521 adapter->rss_conf, adapter->rss_conf_pa);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 9ba11d737753..aabc6ef366b4 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Linux driver for VMware's vmxnet3 ethernet NIC. 2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 * 3 *
4 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. 4 * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 7 * under the terms of the GNU General Public License as published by the
@@ -20,7 +20,7 @@
20 * The full GNU General Public License is included in this distribution in 20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING". 21 * the file called "COPYING".
22 * 22 *
23 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> 23 * Maintained by: pv-drivers@vmware.com
24 * 24 *
25 */ 25 */
26 26
@@ -396,8 +396,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
396 buf[j++] = VMXNET3_GET_ADDR_LO(tq->data_ring.basePA); 396 buf[j++] = VMXNET3_GET_ADDR_LO(tq->data_ring.basePA);
397 buf[j++] = VMXNET3_GET_ADDR_HI(tq->data_ring.basePA); 397 buf[j++] = VMXNET3_GET_ADDR_HI(tq->data_ring.basePA);
398 buf[j++] = tq->data_ring.size; 398 buf[j++] = tq->data_ring.size;
399 /* transmit data ring buffer size */ 399 buf[j++] = tq->txdata_desc_size;
400 buf[j++] = VMXNET3_HDR_COPY_SIZE;
401 400
402 buf[j++] = VMXNET3_GET_ADDR_LO(tq->comp_ring.basePA); 401 buf[j++] = VMXNET3_GET_ADDR_LO(tq->comp_ring.basePA);
403 buf[j++] = VMXNET3_GET_ADDR_HI(tq->comp_ring.basePA); 402 buf[j++] = VMXNET3_GET_ADDR_HI(tq->comp_ring.basePA);
@@ -431,11 +430,10 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
431 buf[j++] = rq->rx_ring[1].next2comp; 430 buf[j++] = rq->rx_ring[1].next2comp;
432 buf[j++] = rq->rx_ring[1].gen; 431 buf[j++] = rq->rx_ring[1].gen;
433 432
434 /* receive data ring */ 433 buf[j++] = VMXNET3_GET_ADDR_LO(rq->data_ring.basePA);
435 buf[j++] = 0; 434 buf[j++] = VMXNET3_GET_ADDR_HI(rq->data_ring.basePA);
436 buf[j++] = 0; 435 buf[j++] = rq->rx_ring[0].size;
437 buf[j++] = 0; 436 buf[j++] = rq->data_ring.desc_size;
438 buf[j++] = 0;
439 437
440 buf[j++] = VMXNET3_GET_ADDR_LO(rq->comp_ring.basePA); 438 buf[j++] = VMXNET3_GET_ADDR_LO(rq->comp_ring.basePA);
441 buf[j++] = VMXNET3_GET_ADDR_HI(rq->comp_ring.basePA); 439 buf[j++] = VMXNET3_GET_ADDR_HI(rq->comp_ring.basePA);
@@ -504,12 +502,14 @@ vmxnet3_get_ringparam(struct net_device *netdev,
504 502
505 param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE; 503 param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
506 param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE; 504 param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
507 param->rx_mini_max_pending = 0; 505 param->rx_mini_max_pending = VMXNET3_VERSION_GE_3(adapter) ?
506 VMXNET3_RXDATA_DESC_MAX_SIZE : 0;
508 param->rx_jumbo_max_pending = VMXNET3_RX_RING2_MAX_SIZE; 507 param->rx_jumbo_max_pending = VMXNET3_RX_RING2_MAX_SIZE;
509 508
510 param->rx_pending = adapter->rx_ring_size; 509 param->rx_pending = adapter->rx_ring_size;
511 param->tx_pending = adapter->tx_ring_size; 510 param->tx_pending = adapter->tx_ring_size;
512 param->rx_mini_pending = 0; 511 param->rx_mini_pending = VMXNET3_VERSION_GE_3(adapter) ?
512 adapter->rxdata_desc_size : 0;
513 param->rx_jumbo_pending = adapter->rx_ring2_size; 513 param->rx_jumbo_pending = adapter->rx_ring2_size;
514} 514}
515 515
@@ -520,6 +520,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
520{ 520{
521 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 521 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
522 u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size; 522 u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size;
523 u16 new_rxdata_desc_size;
523 u32 sz; 524 u32 sz;
524 int err = 0; 525 int err = 0;
525 526
@@ -542,6 +543,15 @@ vmxnet3_set_ringparam(struct net_device *netdev,
542 return -EOPNOTSUPP; 543 return -EOPNOTSUPP;
543 } 544 }
544 545
546 if (VMXNET3_VERSION_GE_3(adapter)) {
547 if (param->rx_mini_pending < 0 ||
548 param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE) {
549 return -EINVAL;
550 }
551 } else if (param->rx_mini_pending != 0) {
552 return -EINVAL;
553 }
554
545 /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */ 555 /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
546 new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) & 556 new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
547 ~VMXNET3_RING_SIZE_MASK; 557 ~VMXNET3_RING_SIZE_MASK;
@@ -568,9 +578,19 @@ vmxnet3_set_ringparam(struct net_device *netdev,
568 new_rx_ring2_size = min_t(u32, new_rx_ring2_size, 578 new_rx_ring2_size = min_t(u32, new_rx_ring2_size,
569 VMXNET3_RX_RING2_MAX_SIZE); 579 VMXNET3_RX_RING2_MAX_SIZE);
570 580
581 /* rx data ring buffer size has to be a multiple of
582 * VMXNET3_RXDATA_DESC_SIZE_ALIGN
583 */
584 new_rxdata_desc_size =
585 (param->rx_mini_pending + VMXNET3_RXDATA_DESC_SIZE_MASK) &
586 ~VMXNET3_RXDATA_DESC_SIZE_MASK;
587 new_rxdata_desc_size = min_t(u16, new_rxdata_desc_size,
588 VMXNET3_RXDATA_DESC_MAX_SIZE);
589
571 if (new_tx_ring_size == adapter->tx_ring_size && 590 if (new_tx_ring_size == adapter->tx_ring_size &&
572 new_rx_ring_size == adapter->rx_ring_size && 591 new_rx_ring_size == adapter->rx_ring_size &&
573 new_rx_ring2_size == adapter->rx_ring2_size) { 592 new_rx_ring2_size == adapter->rx_ring2_size &&
593 new_rxdata_desc_size == adapter->rxdata_desc_size) {
574 return 0; 594 return 0;
575 } 595 }
576 596
@@ -591,8 +611,9 @@ vmxnet3_set_ringparam(struct net_device *netdev,
591 vmxnet3_rq_destroy_all(adapter); 611 vmxnet3_rq_destroy_all(adapter);
592 612
593 err = vmxnet3_create_queues(adapter, new_tx_ring_size, 613 err = vmxnet3_create_queues(adapter, new_tx_ring_size,
594 new_rx_ring_size, new_rx_ring2_size); 614 new_rx_ring_size, new_rx_ring2_size,
595 615 adapter->txdata_desc_size,
616 new_rxdata_desc_size);
596 if (err) { 617 if (err) {
597 /* failed, most likely because of OOM, try default 618 /* failed, most likely because of OOM, try default
598 * size */ 619 * size */
@@ -601,10 +622,15 @@ vmxnet3_set_ringparam(struct net_device *netdev,
601 new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; 622 new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
602 new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE; 623 new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
603 new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE; 624 new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
625 new_rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
626 VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
627
604 err = vmxnet3_create_queues(adapter, 628 err = vmxnet3_create_queues(adapter,
605 new_tx_ring_size, 629 new_tx_ring_size,
606 new_rx_ring_size, 630 new_rx_ring_size,
607 new_rx_ring2_size); 631 new_rx_ring2_size,
632 adapter->txdata_desc_size,
633 new_rxdata_desc_size);
608 if (err) { 634 if (err) {
609 netdev_err(netdev, "failed to create queues " 635 netdev_err(netdev, "failed to create queues "
610 "with default sizes. Closing it\n"); 636 "with default sizes. Closing it\n");
@@ -620,6 +646,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
620 adapter->tx_ring_size = new_tx_ring_size; 646 adapter->tx_ring_size = new_tx_ring_size;
621 adapter->rx_ring_size = new_rx_ring_size; 647 adapter->rx_ring_size = new_rx_ring_size;
622 adapter->rx_ring2_size = new_rx_ring2_size; 648 adapter->rx_ring2_size = new_rx_ring2_size;
649 adapter->rxdata_desc_size = new_rxdata_desc_size;
623 650
624out: 651out:
625 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 652 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
@@ -698,6 +725,162 @@ vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key,
698} 725}
699#endif 726#endif
700 727
728static int
729vmxnet3_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
730{
731 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
732
733 if (!VMXNET3_VERSION_GE_3(adapter))
734 return -EOPNOTSUPP;
735
736 switch (adapter->coal_conf->coalMode) {
737 case VMXNET3_COALESCE_DISABLED:
738 /* struct ethtool_coalesce is already initialized to 0 */
739 break;
740 case VMXNET3_COALESCE_ADAPT:
741 ec->use_adaptive_rx_coalesce = true;
742 break;
743 case VMXNET3_COALESCE_STATIC:
744 ec->tx_max_coalesced_frames =
745 adapter->coal_conf->coalPara.coalStatic.tx_comp_depth;
746 ec->rx_max_coalesced_frames =
747 adapter->coal_conf->coalPara.coalStatic.rx_depth;
748 break;
749 case VMXNET3_COALESCE_RBC: {
750 u32 rbc_rate;
751
752 rbc_rate = adapter->coal_conf->coalPara.coalRbc.rbc_rate;
753 ec->rx_coalesce_usecs = VMXNET3_COAL_RBC_USECS(rbc_rate);
754 }
755 break;
756 default:
757 return -EOPNOTSUPP;
758 }
759
760 return 0;
761}
762
763static int
764vmxnet3_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
765{
766 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
767 struct Vmxnet3_DriverShared *shared = adapter->shared;
768 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
769 unsigned long flags;
770
771 if (!VMXNET3_VERSION_GE_3(adapter))
772 return -EOPNOTSUPP;
773
774 if (ec->rx_coalesce_usecs_irq ||
775 ec->rx_max_coalesced_frames_irq ||
776 ec->tx_coalesce_usecs ||
777 ec->tx_coalesce_usecs_irq ||
778 ec->tx_max_coalesced_frames_irq ||
779 ec->stats_block_coalesce_usecs ||
780 ec->use_adaptive_tx_coalesce ||
781 ec->pkt_rate_low ||
782 ec->rx_coalesce_usecs_low ||
783 ec->rx_max_coalesced_frames_low ||
784 ec->tx_coalesce_usecs_low ||
785 ec->tx_max_coalesced_frames_low ||
786 ec->pkt_rate_high ||
787 ec->rx_coalesce_usecs_high ||
788 ec->rx_max_coalesced_frames_high ||
789 ec->tx_coalesce_usecs_high ||
790 ec->tx_max_coalesced_frames_high ||
791 ec->rate_sample_interval) {
792 return -EINVAL;
793 }
794
795 if ((ec->rx_coalesce_usecs == 0) &&
796 (ec->use_adaptive_rx_coalesce == 0) &&
797 (ec->tx_max_coalesced_frames == 0) &&
798 (ec->rx_max_coalesced_frames == 0)) {
799 memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
800 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
801 goto done;
802 }
803
804 if (ec->rx_coalesce_usecs != 0) {
805 u32 rbc_rate;
806
807 if ((ec->use_adaptive_rx_coalesce != 0) ||
808 (ec->tx_max_coalesced_frames != 0) ||
809 (ec->rx_max_coalesced_frames != 0)) {
810 return -EINVAL;
811 }
812
813 rbc_rate = VMXNET3_COAL_RBC_RATE(ec->rx_coalesce_usecs);
814 if (rbc_rate < VMXNET3_COAL_RBC_MIN_RATE ||
815 rbc_rate > VMXNET3_COAL_RBC_MAX_RATE) {
816 return -EINVAL;
817 }
818
819 memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
820 adapter->coal_conf->coalMode = VMXNET3_COALESCE_RBC;
821 adapter->coal_conf->coalPara.coalRbc.rbc_rate = rbc_rate;
822 goto done;
823 }
824
825 if (ec->use_adaptive_rx_coalesce != 0) {
826 if ((ec->rx_coalesce_usecs != 0) ||
827 (ec->tx_max_coalesced_frames != 0) ||
828 (ec->rx_max_coalesced_frames != 0)) {
829 return -EINVAL;
830 }
831 memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
832 adapter->coal_conf->coalMode = VMXNET3_COALESCE_ADAPT;
833 goto done;
834 }
835
836 if ((ec->tx_max_coalesced_frames != 0) ||
837 (ec->rx_max_coalesced_frames != 0)) {
838 if ((ec->rx_coalesce_usecs != 0) ||
839 (ec->use_adaptive_rx_coalesce != 0)) {
840 return -EINVAL;
841 }
842
843 if ((ec->tx_max_coalesced_frames >
844 VMXNET3_COAL_STATIC_MAX_DEPTH) ||
845 (ec->rx_max_coalesced_frames >
846 VMXNET3_COAL_STATIC_MAX_DEPTH)) {
847 return -EINVAL;
848 }
849
850 memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
851 adapter->coal_conf->coalMode = VMXNET3_COALESCE_STATIC;
852
853 adapter->coal_conf->coalPara.coalStatic.tx_comp_depth =
854 (ec->tx_max_coalesced_frames ?
855 ec->tx_max_coalesced_frames :
856 VMXNET3_COAL_STATIC_DEFAULT_DEPTH);
857
858 adapter->coal_conf->coalPara.coalStatic.rx_depth =
859 (ec->rx_max_coalesced_frames ?
860 ec->rx_max_coalesced_frames :
861 VMXNET3_COAL_STATIC_DEFAULT_DEPTH);
862
863 adapter->coal_conf->coalPara.coalStatic.tx_depth =
864 VMXNET3_COAL_STATIC_DEFAULT_DEPTH;
865 goto done;
866 }
867
868done:
869 adapter->default_coal_mode = false;
870 if (netif_running(netdev)) {
871 spin_lock_irqsave(&adapter->cmd_lock, flags);
872 cmdInfo->varConf.confVer = 1;
873 cmdInfo->varConf.confLen =
874 cpu_to_le32(sizeof(*adapter->coal_conf));
875 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
876 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
877 VMXNET3_CMD_SET_COALESCE);
878 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
879 }
880
881 return 0;
882}
883
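vmxnet3_set_coalesce() maps the generic ethtool knobs onto the four firmware modes, and the knobs are mutually exclusive: all zeros means DISABLED, rx_coalesce_usecs alone selects rate-based coalescing (RBC), use_adaptive_rx_coalesce alone selects ADAPT, and the *_max_coalesced_frames pair alone selects STATIC; mixing them returns -EINVAL. The RBC conversion is integer division both ways, so with the 100..100000 events/s bounds only rx_coalesce_usecs values from 10 to 10000 pass, and round-trips are lossy. A standalone illustration of the math behind VMXNET3_COAL_RBC_RATE()/VMXNET3_COAL_RBC_USECS():

    #include <assert.h>
    #include <stdint.h>

    #define RBC_MIN_RATE 100u        /* events per second */
    #define RBC_MAX_RATE 100000u

    static uint32_t usecs_to_rate(uint32_t us) { return 1000000u / us; }
    static uint32_t rate_to_usecs(uint32_t r)  { return 1000000u / r; }

    int main(void)
    {
            assert(usecs_to_rate(10)    == RBC_MAX_RATE);  /* floor */
            assert(usecs_to_rate(10000) == RBC_MIN_RATE);  /* ceiling */
            assert(usecs_to_rate(10001) <  RBC_MIN_RATE);  /* rejected */
            /* lossy round-trip: 3000us -> 333/s -> 3003us */
            assert(rate_to_usecs(usecs_to_rate(3000)) == 3003);
            return 0;
    }

From userspace this surfaces through ethtool -C; for example, rx-usecs 100 would program a rate of 10000 events per second.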
701static const struct ethtool_ops vmxnet3_ethtool_ops = { 884static const struct ethtool_ops vmxnet3_ethtool_ops = {
702 .get_settings = vmxnet3_get_settings, 885 .get_settings = vmxnet3_get_settings,
703 .get_drvinfo = vmxnet3_get_drvinfo, 886 .get_drvinfo = vmxnet3_get_drvinfo,
@@ -706,6 +889,8 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = {
706 .get_wol = vmxnet3_get_wol, 889 .get_wol = vmxnet3_get_wol,
707 .set_wol = vmxnet3_set_wol, 890 .set_wol = vmxnet3_set_wol,
708 .get_link = ethtool_op_get_link, 891 .get_link = ethtool_op_get_link,
892 .get_coalesce = vmxnet3_get_coalesce,
893 .set_coalesce = vmxnet3_set_coalesce,
709 .get_strings = vmxnet3_get_strings, 894 .get_strings = vmxnet3_get_strings,
710 .get_sset_count = vmxnet3_get_sset_count, 895 .get_sset_count = vmxnet3_get_sset_count,
711 .get_ethtool_stats = vmxnet3_get_ethtool_stats, 896 .get_ethtool_stats = vmxnet3_get_ethtool_stats,
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 3d2b64e63408..74fc03072b87 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Linux driver for VMware's vmxnet3 ethernet NIC. 2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 * 3 *
4 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. 4 * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 7 * under the terms of the GNU General Public License as published by the
@@ -20,7 +20,7 @@
20 * The full GNU General Public License is included in this distribution in 20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING". 21 * the file called "COPYING".
22 * 22 *
23 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> 23 * Maintained by: pv-drivers@vmware.com
24 * 24 *
25 */ 25 */
26 26
@@ -69,16 +69,20 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.8.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.9.0-k"
73 73
74/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040800 75#define VMXNET3_DRIVER_VERSION_NUM 0x01040900
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
79 #define VMXNET3_RSS 79 #define VMXNET3_RSS
80#endif 80#endif
81 81
82#define VMXNET3_REV_3 2 /* Vmxnet3 Rev. 3 */
83#define VMXNET3_REV_2 1 /* Vmxnet3 Rev. 2 */
84#define VMXNET3_REV_1 0 /* Vmxnet3 Rev. 1 */
85
82/* 86/*
83 * Capabilities 87 * Capabilities
84 */ 88 */
@@ -237,6 +241,7 @@ struct vmxnet3_tx_queue {
237 int num_stop; /* # of times the queue is 241 int num_stop; /* # of times the queue is
238 * stopped */ 242 * stopped */
239 int qid; 243 int qid;
244 u16 txdata_desc_size;
240} __attribute__((__aligned__(SMP_CACHE_BYTES))); 245} __attribute__((__aligned__(SMP_CACHE_BYTES)));
241 246
242enum vmxnet3_rx_buf_type { 247enum vmxnet3_rx_buf_type {
@@ -267,15 +272,23 @@ struct vmxnet3_rq_driver_stats {
267 u64 rx_buf_alloc_failure; 272 u64 rx_buf_alloc_failure;
268}; 273};
269 274
275struct vmxnet3_rx_data_ring {
276 Vmxnet3_RxDataDesc *base;
277 dma_addr_t basePA;
278 u16 desc_size;
279};
280
270struct vmxnet3_rx_queue { 281struct vmxnet3_rx_queue {
271 char name[IFNAMSIZ + 8]; /* To identify interrupt */ 282 char name[IFNAMSIZ + 8]; /* To identify interrupt */
272 struct vmxnet3_adapter *adapter; 283 struct vmxnet3_adapter *adapter;
273 struct napi_struct napi; 284 struct napi_struct napi;
274 struct vmxnet3_cmd_ring rx_ring[2]; 285 struct vmxnet3_cmd_ring rx_ring[2];
286 struct vmxnet3_rx_data_ring data_ring;
275 struct vmxnet3_comp_ring comp_ring; 287 struct vmxnet3_comp_ring comp_ring;
276 struct vmxnet3_rx_ctx rx_ctx; 288 struct vmxnet3_rx_ctx rx_ctx;
277 u32 qid; /* rqID in RCD for buffer from 1st ring */ 289 u32 qid; /* rqID in RCD for buffer from 1st ring */
278 u32 qid2; /* rqID in RCD for buffer from 2nd ring */ 290 u32 qid2; /* rqID in RCD for buffer from 2nd ring */
291 u32 dataRingQid; /* rqID in RCD for buffer from data ring */
279 struct vmxnet3_rx_buf_info *buf_info[2]; 292 struct vmxnet3_rx_buf_info *buf_info[2];
280 dma_addr_t buf_info_pa; 293 dma_addr_t buf_info_pa;
281 struct Vmxnet3_RxQueueCtrl *shared; 294 struct Vmxnet3_RxQueueCtrl *shared;
@@ -345,6 +358,7 @@ struct vmxnet3_adapter {
345 int rx_buf_per_pkt; /* only apply to the 1st ring */ 358 int rx_buf_per_pkt; /* only apply to the 1st ring */
346 dma_addr_t shared_pa; 359 dma_addr_t shared_pa;
347 dma_addr_t queue_desc_pa; 360 dma_addr_t queue_desc_pa;
361 dma_addr_t coal_conf_pa;
348 362
349 /* Wake-on-LAN */ 363 /* Wake-on-LAN */
350 u32 wol; 364 u32 wol;
@@ -359,12 +373,21 @@ struct vmxnet3_adapter {
359 u32 rx_ring_size; 373 u32 rx_ring_size;
360 u32 rx_ring2_size; 374 u32 rx_ring2_size;
361 375
376 /* Size of buffer in the data ring */
377 u16 txdata_desc_size;
378 u16 rxdata_desc_size;
379
380 bool rxdataring_enabled;
381
362 struct work_struct work; 382 struct work_struct work;
363 383
364 unsigned long state; /* VMXNET3_STATE_BIT_xxx */ 384 unsigned long state; /* VMXNET3_STATE_BIT_xxx */
365 385
366 int share_intr; 386 int share_intr;
367 387
388 struct Vmxnet3_CoalesceScheme *coal_conf;
389 bool default_coal_mode;
390
368 dma_addr_t adapter_pa; 391 dma_addr_t adapter_pa;
369 dma_addr_t pm_conf_pa; 392 dma_addr_t pm_conf_pa;
370 dma_addr_t rss_conf_pa; 393 dma_addr_t rss_conf_pa;
@@ -387,14 +410,34 @@ struct vmxnet3_adapter {
387#define VMXNET3_GET_ADDR_LO(dma) ((u32)(dma)) 410#define VMXNET3_GET_ADDR_LO(dma) ((u32)(dma))
388#define VMXNET3_GET_ADDR_HI(dma) ((u32)(((u64)(dma)) >> 32)) 411#define VMXNET3_GET_ADDR_HI(dma) ((u32)(((u64)(dma)) >> 32))
389 412
413#define VMXNET3_VERSION_GE_2(adapter) \
414 (adapter->version >= VMXNET3_REV_2 + 1)
415#define VMXNET3_VERSION_GE_3(adapter) \
416 (adapter->version >= VMXNET3_REV_3 + 1)
417
390/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */ 418/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
391#define VMXNET3_DEF_TX_RING_SIZE 512 419#define VMXNET3_DEF_TX_RING_SIZE 512
392#define VMXNET3_DEF_RX_RING_SIZE 256 420#define VMXNET3_DEF_RX_RING_SIZE 256
393#define VMXNET3_DEF_RX_RING2_SIZE 128 421#define VMXNET3_DEF_RX_RING2_SIZE 128
394 422
423#define VMXNET3_DEF_RXDATA_DESC_SIZE 128
424
395#define VMXNET3_MAX_ETH_HDR_SIZE 22 425#define VMXNET3_MAX_ETH_HDR_SIZE 22
396#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024) 426#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024)
397 427
428#define VMXNET3_GET_RING_IDX(adapter, rqID) \
429 ((rqID >= adapter->num_rx_queues && \
430 rqID < 2 * adapter->num_rx_queues) ? 1 : 0) \
431
432#define VMXNET3_RX_DATA_RING(adapter, rqID) \
433 (rqID >= 2 * adapter->num_rx_queues && \
434 rqID < 3 * adapter->num_rx_queues) \
435
436#define VMXNET3_COAL_STATIC_DEFAULT_DEPTH 64
437
438#define VMXNET3_COAL_RBC_RATE(usecs) (1000000 / usecs)
439#define VMXNET3_COAL_RBC_USECS(rbc_rate) (1000000 / rbc_rate)
440
398int 441int
399vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter); 442vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
400 443
@@ -418,7 +461,8 @@ vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);
418 461
419int 462int
420vmxnet3_create_queues(struct vmxnet3_adapter *adapter, 463vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
421 u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size); 464 u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size,
465 u16 txdata_desc_size, u16 rxdata_desc_size);
422 466
423void vmxnet3_set_ethtool_ops(struct net_device *netdev); 467void vmxnet3_set_ethtool_ops(struct net_device *netdev);
424 468
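The two RBC macros at the bottom of the header are plain reciprocal conversions between an interrupt rate (events per second) and a per-event delay in microseconds; because they use integer division, a round trip is not always exact. A minimal stand-alone sketch of the arithmetic (the neutral macro names, added argument parentheses and sample values are mine, not the driver's):

#include <stdio.h>

#define COAL_RBC_RATE(usecs)  (1000000 / (usecs))	/* usecs -> intr/s */
#define COAL_RBC_USECS(rate)  (1000000 / (rate))	/* intr/s -> usecs */

int main(void)
{
	unsigned int usecs = 48;
	unsigned int rate  = COAL_RBC_RATE(usecs);	/* 20833 intr/s */
	unsigned int back  = COAL_RBC_USECS(rate);	/* 48 usecs again */

	printf("%u usecs -> %u intr/s -> %u usecs\n", usecs, rate, back);
	return 0;
}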
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 8bd8c7e1ee87..b3762822b653 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -35,6 +35,7 @@
 #include <net/route.h>
 #include <net/addrconf.h>
 #include <net/l3mdev.h>
+#include <net/fib_rules.h>
 
 #define RT_FL_TOS(oldflp4) \
 	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
@@ -42,9 +43,14 @@
 #define DRV_NAME	"vrf"
 #define DRV_VERSION	"1.0"
 
+#define FIB_RULE_PREF  1000       /* default preference for FIB rules */
+static bool add_fib_rules = true;
+
 struct net_vrf {
 	struct rtable __rcu	*rth;
+	struct rtable __rcu	*rth_local;
 	struct rt6_info	__rcu	*rt6;
+	struct rt6_info	__rcu	*rt6_local;
 	u32                     tb_id;
 };
 
@@ -54,9 +60,20 @@ struct pcpu_dstats {
 	u64			tx_drps;
 	u64			rx_pkts;
 	u64			rx_bytes;
+	u64			rx_drps;
 	struct u64_stats_sync	syncp;
 };
 
+static void vrf_rx_stats(struct net_device *dev, int len)
+{
+	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+	u64_stats_update_begin(&dstats->syncp);
+	dstats->rx_pkts++;
+	dstats->rx_bytes += len;
+	u64_stats_update_end(&dstats->syncp);
+}
+
 static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
 {
 	vrf_dev->stats.tx_errors++;
@@ -91,6 +108,34 @@ static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
 	return stats;
 }
 
+/* Local traffic destined to local address. Reinsert the packet to rx
+ * path, similar to loopback handling.
+ */
+static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
+			  struct dst_entry *dst)
+{
+	int len = skb->len;
+
+	skb_orphan(skb);
+
+	skb_dst_set(skb, dst);
+	skb_dst_force(skb);
+
+	/* set pkt_type to avoid skb hitting packet taps twice -
+	 * once on Tx and again in Rx processing
+	 */
+	skb->pkt_type = PACKET_LOOPBACK;
+
+	skb->protocol = eth_type_trans(skb, dev);
+
+	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
+		vrf_rx_stats(dev, len);
+	else
+		this_cpu_inc(dev->dstats->rx_drps);
+
+	return NETDEV_TX_OK;
+}
+
 #if IS_ENABLED(CONFIG_IPV6)
 static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
 					   struct net_device *dev)
@@ -117,8 +162,51 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
 		goto err;
 
 	skb_dst_drop(skb);
+
+	/* if dst.dev is loopback or the VRF device again this is locally
+	 * originated traffic destined to a local address. Short circuit
+	 * to Rx path using our local dst
+	 */
+	if (dst->dev == net->loopback_dev || dst->dev == dev) {
+		struct net_vrf *vrf = netdev_priv(dev);
+		struct rt6_info *rt6_local;
+
+		/* release looked up dst and use cached local dst */
+		dst_release(dst);
+
+		rcu_read_lock();
+
+		rt6_local = rcu_dereference(vrf->rt6_local);
+		if (unlikely(!rt6_local)) {
+			rcu_read_unlock();
+			goto err;
+		}
+
+		/* Ordering issue: cached local dst is created on newlink
+		 * before the IPv6 initialization. Using the local dst
+		 * requires rt6i_idev to be set so make sure it is.
+		 */
+		if (unlikely(!rt6_local->rt6i_idev)) {
+			rt6_local->rt6i_idev = in6_dev_get(dev);
+			if (!rt6_local->rt6i_idev) {
+				rcu_read_unlock();
+				goto err;
+			}
+		}
+
+		dst = &rt6_local->dst;
+		dst_hold(dst);
+
+		rcu_read_unlock();
+
+		return vrf_local_xmit(skb, dev, &rt6_local->dst);
+	}
+
 	skb_dst_set(skb, dst);
 
+	/* strip the ethernet header added for pass through VRF device */
+	__skb_pull(skb, skb_network_offset(skb));
+
 	ret = ip6_local_out(net, skb->sk, skb);
 	if (unlikely(net_xmit_eval(ret)))
 		dev->stats.tx_errors++;
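The short-circuit above is the heart of this change: when the FIB says a locally generated packet is destined to a local address (the lookup returns the loopback or the VRF device itself), the freshly looked-up dst is dropped in favour of the per-VRF cached local dst and the skb is fed straight back into the stack via vrf_local_xmit(). Marking it PACKET_LOOPBACK before re-injection is what the receive hooks later in this patch key off to avoid running the packet taps a second time.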
@@ -139,29 +227,6 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
 }
 #endif
 
-static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4,
-			    struct net_device *vrf_dev)
-{
-	struct rtable *rt;
-	int err = 1;
-
-	rt = ip_route_output_flow(dev_net(vrf_dev), fl4, NULL);
-	if (IS_ERR(rt))
-		goto out;
-
-	/* TO-DO: what about broadcast ? */
-	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
-		ip_rt_put(rt);
-		goto out;
-	}
-
-	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->dst);
-	err = 0;
-out:
-	return err;
-}
-
 static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
 					   struct net_device *vrf_dev)
 {
@@ -176,9 +241,51 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
 				FLOWI_FLAG_SKIP_NH_OIF,
 		.daddr = ip4h->daddr,
 	};
+	struct net *net = dev_net(vrf_dev);
+	struct rtable *rt;
+
+	rt = ip_route_output_flow(net, &fl4, NULL);
+	if (IS_ERR(rt))
+		goto err;
 
-	if (vrf_send_v4_prep(skb, &fl4, vrf_dev))
+	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
+		ip_rt_put(rt);
 		goto err;
+	}
+
+	skb_dst_drop(skb);
+
+	/* if dst.dev is loopback or the VRF device again this is locally
+	 * originated traffic destined to a local address. Short circuit
+	 * to Rx path using our local dst
+	 */
+	if (rt->dst.dev == net->loopback_dev || rt->dst.dev == vrf_dev) {
+		struct net_vrf *vrf = netdev_priv(vrf_dev);
+		struct rtable *rth_local;
+		struct dst_entry *dst = NULL;
+
+		ip_rt_put(rt);
+
+		rcu_read_lock();
+
+		rth_local = rcu_dereference(vrf->rth_local);
+		if (likely(rth_local)) {
+			dst = &rth_local->dst;
+			dst_hold(dst);
+		}
+
+		rcu_read_unlock();
+
+		if (unlikely(!dst))
+			goto err;
+
+		return vrf_local_xmit(skb, vrf_dev, dst);
+	}
+
+	skb_dst_set(skb, &rt->dst);
+
+	/* strip the ethernet header added for pass through VRF device */
+	__skb_pull(skb, skb_network_offset(skb));
 
 	if (!ip4h->saddr) {
 		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
@@ -200,9 +307,6 @@ err:
 
 static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
 {
-	/* strip the ethernet header added for pass through VRF device */
-	__skb_pull(skb, skb_network_offset(skb));
-
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
 		return vrf_process_v4_outbound(skb, dev);
@@ -274,45 +378,92 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
 }
 
 /* holding rtnl */
-static void vrf_rt6_release(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
 {
 	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
+	struct rt6_info *rt6_local = rtnl_dereference(vrf->rt6_local);
+	struct net *net = dev_net(dev);
+	struct dst_entry *dst;
 
-	rcu_assign_pointer(vrf->rt6, NULL);
+	RCU_INIT_POINTER(vrf->rt6, NULL);
+	RCU_INIT_POINTER(vrf->rt6_local, NULL);
+	synchronize_rcu();
+
+	/* move dev in dst's to loopback so this VRF device can be deleted
+	 * - based on dst_ifdown
+	 */
+	if (rt6) {
+		dst = &rt6->dst;
+		dev_put(dst->dev);
+		dst->dev = net->loopback_dev;
+		dev_hold(dst->dev);
+		dst_release(dst);
+	}
 
-	if (rt6)
-		dst_release(&rt6->dst);
+	if (rt6_local) {
+		if (rt6_local->rt6i_idev)
+			in6_dev_put(rt6_local->rt6i_idev);
+
+		dst = &rt6_local->dst;
+		dev_put(dst->dev);
+		dst->dev = net->loopback_dev;
+		dev_hold(dst->dev);
+		dst_release(dst);
+	}
 }
 
 static int vrf_rt6_create(struct net_device *dev)
 {
+	int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE;
 	struct net_vrf *vrf = netdev_priv(dev);
 	struct net *net = dev_net(dev);
 	struct fib6_table *rt6i_table;
-	struct rt6_info *rt6;
+	struct rt6_info *rt6, *rt6_local;
 	int rc = -ENOMEM;
 
+	/* IPv6 can be CONFIG enabled and then disabled runtime */
+	if (!ipv6_mod_enabled())
+		return 0;
+
 	rt6i_table = fib6_new_table(net, vrf->tb_id);
 	if (!rt6i_table)
 		goto out;
 
-	rt6 = ip6_dst_alloc(net, dev,
-			    DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
+	/* create a dst for routing packets out a VRF device */
+	rt6 = ip6_dst_alloc(net, dev, flags);
 	if (!rt6)
 		goto out;
 
 	dst_hold(&rt6->dst);
 
 	rt6->rt6i_table = rt6i_table;
 	rt6->dst.output	= vrf_output6;
+
+	/* create a dst for local routing - packets sent locally
+	 * to local address via the VRF device as a loopback
+	 */
+	rt6_local = ip6_dst_alloc(net, dev, flags);
+	if (!rt6_local) {
+		dst_release(&rt6->dst);
+		goto out;
+	}
+
+	dst_hold(&rt6_local->dst);
+
+	rt6_local->rt6i_idev  = in6_dev_get(dev);
+	rt6_local->rt6i_flags = RTF_UP | RTF_NONEXTHOP | RTF_LOCAL;
+	rt6_local->rt6i_table = rt6i_table;
+	rt6_local->dst.input  = ip6_input;
+
 	rcu_assign_pointer(vrf->rt6, rt6);
+	rcu_assign_pointer(vrf->rt6_local, rt6_local);
 
 	rc = 0;
 out:
 	return rc;
 }
 #else
-static void vrf_rt6_release(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
 {
 }
 
@@ -381,32 +532,66 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 }
 
 /* holding rtnl */
-static void vrf_rtable_release(struct net_vrf *vrf)
+static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
 {
 	struct rtable *rth = rtnl_dereference(vrf->rth);
+	struct rtable *rth_local = rtnl_dereference(vrf->rth_local);
+	struct net *net = dev_net(dev);
+	struct dst_entry *dst;
 
-	rcu_assign_pointer(vrf->rth, NULL);
+	RCU_INIT_POINTER(vrf->rth, NULL);
+	RCU_INIT_POINTER(vrf->rth_local, NULL);
+	synchronize_rcu();
+
+	/* move dev in dst's to loopback so this VRF device can be deleted
+	 * - based on dst_ifdown
+	 */
+	if (rth) {
+		dst = &rth->dst;
+		dev_put(dst->dev);
+		dst->dev = net->loopback_dev;
+		dev_hold(dst->dev);
+		dst_release(dst);
+	}
 
-	if (rth)
-		dst_release(&rth->dst);
+	if (rth_local) {
+		dst = &rth_local->dst;
+		dev_put(dst->dev);
+		dst->dev = net->loopback_dev;
+		dev_hold(dst->dev);
+		dst_release(dst);
+	}
 }
 
 static int vrf_rtable_create(struct net_device *dev)
 {
 	struct net_vrf *vrf = netdev_priv(dev);
-	struct rtable *rth;
+	struct rtable *rth, *rth_local;
 
 	if (!fib_new_table(dev_net(dev), vrf->tb_id))
 		return -ENOMEM;
 
+	/* create a dst for routing packets out through a VRF device */
 	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
 	if (!rth)
 		return -ENOMEM;
 
-	rth->dst.output	= vrf_output;
+	/* create a dst for local ingress routing - packets sent locally
+	 * to local address via the VRF device as a loopback
+	 */
+	rth_local = rt_dst_alloc(dev, RTCF_LOCAL, RTN_LOCAL, 1, 1, 0);
+	if (!rth_local) {
+		dst_release(&rth->dst);
		return -ENOMEM;
+	}
+
+	rth->dst.output	= vrf_output;
 	rth->rt_table_id = vrf->tb_id;
 
+	rth_local->rt_table_id = vrf->tb_id;
+
 	rcu_assign_pointer(vrf->rth, rth);
+	rcu_assign_pointer(vrf->rth_local, rth_local);
 
 	return 0;
 }
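Both release helpers switch from rcu_assign_pointer(..., NULL) to RCU_INIT_POINTER() plus an explicit synchronize_rcu(): publishing NULL needs no ordering guarantee, and the grace period ensures no reader that picked up the old rth/rt6 pointers under rcu_read_lock() is still running before the dsts are torn down. Re-pointing dst->dev at the loopback device (the dst_ifdown trick the comment refers to) drops the dst's reference on the VRF netdevice so the device itself can be unregistered even if a cached dst outlives it.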
@@ -477,8 +662,8 @@ static void vrf_dev_uninit(struct net_device *dev)
 	struct net_device *port_dev;
 	struct list_head *iter;
 
-	vrf_rtable_release(vrf);
-	vrf_rt6_release(vrf);
+	vrf_rtable_release(dev, vrf);
+	vrf_rt6_release(dev, vrf);
 
 	netdev_for_each_lower_dev(dev, port_dev, iter)
 		vrf_del_slave(dev, port_dev);
@@ -504,10 +689,16 @@ static int vrf_dev_init(struct net_device *dev)
 
 	dev->flags = IFF_MASTER | IFF_NOARP;
 
+	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
+	dev->mtu = 64 * 1024;
+
+	/* similarly, oper state is irrelevant; set to up to avoid confusion */
+	dev->operstate = IF_OPER_UP;
+	netdev_lockdep_set_classes(dev);
 	return 0;
 
 out_rth:
-	vrf_rtable_release(vrf);
+	vrf_rtable_release(dev, vrf);
 out_stats:
 	free_percpu(dev->dstats);
 	dev->dstats = NULL;
@@ -623,11 +814,78 @@ out:
 	return rc;
 }
 
+static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
+					     const struct net_device *dev,
+					     struct flowi6 *fl6,
+					     int ifindex,
+					     int flags)
+{
+	struct net_vrf *vrf = netdev_priv(dev);
+	struct fib6_table *table = NULL;
+	struct rt6_info *rt6;
+
+	rcu_read_lock();
+
+	/* fib6_table does not have a refcnt and can not be freed */
+	rt6 = rcu_dereference(vrf->rt6);
+	if (likely(rt6))
+		table = rt6->rt6i_table;
+
+	rcu_read_unlock();
+
+	if (!table)
+		return NULL;
+
+	return ip6_pol_route(net, table, ifindex, fl6, flags);
+}
+
+static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
+			      int ifindex)
+{
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
+	struct flowi6 fl6 = {
+		.daddr          = iph->daddr,
+		.saddr          = iph->saddr,
+		.flowlabel      = ip6_flowinfo(iph),
+		.flowi6_mark    = skb->mark,
+		.flowi6_proto   = iph->nexthdr,
+		.flowi6_iif     = ifindex,
+	};
+	struct net *net = dev_net(vrf_dev);
+	struct rt6_info *rt6;
+
+	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex,
+				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
+	if (unlikely(!rt6))
+		return;
+
+	if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
+		return;
+
+	skb_dst_set(skb, &rt6->dst);
+}
+
 static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
 				   struct sk_buff *skb)
 {
-	/* if packet is NDISC keep the ingress interface */
-	if (!ipv6_ndisc_frame(skb)) {
+	int orig_iif = skb->skb_iif;
+	bool need_strict;
+
+	/* loopback traffic; do not push through packet taps again.
+	 * Reset pkt_type for upper layers to process skb
+	 */
+	if (skb->pkt_type == PACKET_LOOPBACK) {
+		skb->dev = vrf_dev;
+		skb->skb_iif = vrf_dev->ifindex;
+		skb->pkt_type = PACKET_HOST;
+		goto out;
+	}
+
+	/* if packet is NDISC or addressed to multicast or link-local
+	 * then keep the ingress interface
+	 */
+	need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
+	if (!ipv6_ndisc_frame(skb) && !need_strict) {
 		skb->dev = vrf_dev;
 		skb->skb_iif = vrf_dev->ifindex;
 
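rt6_need_strict() covers destinations whose meaning is tied to the arrival device (link-local, multicast and loopback scopes). For those, the ingress ifindex is preserved and vrf_ip6_input_dst() repeats the FIB lookup in the VRF's table with RT6_LOOKUP_F_IFACE, so the dst attached to the skb reflects the original interface rather than the VRF master.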
@@ -638,6 +896,10 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
 		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
 	}
 
+	if (need_strict)
+		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);
+
+out:
 	return skb;
 }
 
@@ -655,10 +917,19 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
 	skb->dev = vrf_dev;
 	skb->skb_iif = vrf_dev->ifindex;
 
+	/* loopback traffic; do not push through packet taps again.
+	 * Reset pkt_type for upper layers to process skb
+	 */
+	if (skb->pkt_type == PACKET_LOOPBACK) {
+		skb->pkt_type = PACKET_HOST;
+		goto out;
+	}
+
 	skb_push(skb, skb->mac_len);
 	dev_queue_xmit_nit(skb, vrf_dev);
 	skb_pull(skb, skb->mac_len);
 
+out:
 	return skb;
 }
 
@@ -679,13 +950,37 @@ static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
 
 #if IS_ENABLED(CONFIG_IPV6)
 static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
-					 const struct flowi6 *fl6)
+					 struct flowi6 *fl6)
 {
+	bool need_strict = rt6_need_strict(&fl6->daddr);
+	struct net_vrf *vrf = netdev_priv(dev);
+	struct net *net = dev_net(dev);
 	struct dst_entry *dst = NULL;
+	struct rt6_info *rt;
 
-	if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
-		struct net_vrf *vrf = netdev_priv(dev);
-		struct rt6_info *rt;
+	/* send to link-local or multicast address */
+	if (need_strict) {
+		int flags = RT6_LOOKUP_F_IFACE;
+
+		/* VRF device does not have a link-local address and
+		 * sending packets to link-local or mcast addresses over
+		 * a VRF device does not make sense
+		 */
+		if (fl6->flowi6_oif == dev->ifindex) {
+			struct dst_entry *dst = &net->ipv6.ip6_null_entry->dst;
+
+			dst_hold(dst);
+			return dst;
+		}
+
+		if (!ipv6_addr_any(&fl6->saddr))
+			flags |= RT6_LOOKUP_F_HAS_SADDR;
+
+		rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags);
+		if (rt)
+			dst = &rt->dst;
+
+	} else if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
 
 		rcu_read_lock();
 
@@ -698,8 +993,52 @@ static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
 		rcu_read_unlock();
 	}
 
+	/* make sure oif is set to VRF device for lookup */
+	if (!need_strict)
+		fl6->flowi6_oif = dev->ifindex;
+
 	return dst;
 }
+
+/* called under rcu_read_lock */
+static int vrf_get_saddr6(struct net_device *dev, const struct sock *sk,
+			  struct flowi6 *fl6)
+{
+	struct net *net = dev_net(dev);
+	struct dst_entry *dst;
+	struct rt6_info *rt;
+	int err;
+
+	if (rt6_need_strict(&fl6->daddr)) {
+		rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif,
+					  RT6_LOOKUP_F_IFACE);
+		if (unlikely(!rt))
+			return 0;
+
+		dst = &rt->dst;
+	} else {
+		__u8 flags = fl6->flowi6_flags;
+
+		fl6->flowi6_flags |= FLOWI_FLAG_L3MDEV_SRC;
+		fl6->flowi6_flags |= FLOWI_FLAG_SKIP_NH_OIF;
+
+		dst = ip6_route_output(net, sk, fl6);
+		rt = (struct rt6_info *)dst;
+
+		fl6->flowi6_flags = flags;
+	}
+
+	err = dst->error;
+	if (!err) {
+		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
+					  sk ? inet6_sk(sk)->srcprefs : 0,
+					  &fl6->saddr);
+	}
+
+	dst_release(dst);
+
+	return err;
+}
 #endif
 
 static const struct l3mdev_ops vrf_l3mdev_ops = {
@@ -709,6 +1048,7 @@ static const struct l3mdev_ops vrf_l3mdev_ops = {
 	.l3mdev_l3_rcv		= vrf_l3_rcv,
 #if IS_ENABLED(CONFIG_IPV6)
 	.l3mdev_get_rt6_dst	= vrf_get_rt6_dst,
+	.l3mdev_get_saddr6	= vrf_get_saddr6,
 #endif
 };
 
@@ -723,6 +1063,94 @@ static const struct ethtool_ops vrf_ethtool_ops = {
 	.get_drvinfo	= vrf_get_drvinfo,
 };
 
+static inline size_t vrf_fib_rule_nl_size(void)
+{
+	size_t sz;
+
+	sz  = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
+	sz += nla_total_size(sizeof(u8));	/* FRA_L3MDEV */
+	sz += nla_total_size(sizeof(u32));	/* FRA_PRIORITY */
+
+	return sz;
+}
+
+static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
+{
+	struct fib_rule_hdr *frh;
+	struct nlmsghdr *nlh;
+	struct sk_buff *skb;
+	int err;
+
+	if (family == AF_INET6 && !ipv6_mod_enabled())
+		return 0;
+
+	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
+	if (!nlh)
+		goto nla_put_failure;
+
+	/* rule only needs to appear once */
+	nlh->nlmsg_flags &= NLM_F_EXCL;
+
+	frh = nlmsg_data(nlh);
+	memset(frh, 0, sizeof(*frh));
+	frh->family = family;
+	frh->action = FR_ACT_TO_TBL;
+
+	if (nla_put_u32(skb, FRA_L3MDEV, 1))
+		goto nla_put_failure;
+
+	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
+		goto nla_put_failure;
+
+	nlmsg_end(skb, nlh);
+
+	/* fib_nl_{new,del}rule handling looks for net from skb->sk */
+	skb->sk = dev_net(dev)->rtnl;
+	if (add_it) {
+		err = fib_nl_newrule(skb, nlh);
+		if (err == -EEXIST)
+			err = 0;
+	} else {
+		err = fib_nl_delrule(skb, nlh);
+		if (err == -ENOENT)
+			err = 0;
+	}
+	nlmsg_free(skb);
+
+	return err;
+
+nla_put_failure:
+	nlmsg_free(skb);
+
+	return -EMSGSIZE;
+}
+
+static int vrf_add_fib_rules(const struct net_device *dev)
+{
+	int err;
+
+	err = vrf_fib_rule(dev, AF_INET, true);
+	if (err < 0)
+		goto out_err;
+
+	err = vrf_fib_rule(dev, AF_INET6, true);
+	if (err < 0)
+		goto ipv6_err;
+
+	return 0;
+
+ipv6_err:
+	vrf_fib_rule(dev, AF_INET, false);
+
+out_err:
+	netdev_err(dev, "Failed to add FIB rules.\n");
+	return err;
+}
+
 static void vrf_setup(struct net_device *dev)
 {
 	ether_setup(dev);
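vrf_fib_rule() builds the same netlink message that userspace would send and hands it directly to fib_nl_newrule()/fib_nl_delrule(), so the l3mdev rule is installed through the normal FIB-rules machinery and stays visible to (and deletable by) userspace; treating -EEXIST and -ENOENT as success makes the operation idempotent. On a kernel with this patch, `ip rule` should list the rule at preference 1000, rendered by new enough iproute2 as something like `1000: from all lookup [l3mdev-table]` (the exact wording depends on the iproute2 version). Note that vrf_fib_rule_nl_size() reserves space for a u8 FRA_L3MDEV while the code emits it with nla_put_u32(); netlink attribute padding makes both reservations come out to the same aligned size, but the mismatch is worth flagging.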
@@ -741,6 +1169,20 @@ static void vrf_setup(struct net_device *dev)
 
 	/* don't allow vrf devices to change network namespaces. */
 	dev->features |= NETIF_F_NETNS_LOCAL;
+
+	/* does not make sense for a VLAN to be added to a vrf device */
+	dev->features   |= NETIF_F_VLAN_CHALLENGED;
+
+	/* enable offload features */
+	dev->features   |= NETIF_F_GSO_SOFTWARE;
+	dev->features   |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
+	dev->features   |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+
+	dev->hw_features = dev->features;
+	dev->hw_enc_features = dev->features;
+
+	/* default to no qdisc; user can add if desired */
+	dev->priv_flags |= IFF_NO_QUEUE;
 }
 
 static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -763,6 +1205,7 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
 		       struct nlattr *tb[], struct nlattr *data[])
 {
 	struct net_vrf *vrf = netdev_priv(dev);
+	int err;
 
 	if (!data || !data[IFLA_VRF_TABLE])
 		return -EINVAL;
@@ -771,7 +1214,21 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
 
 	dev->priv_flags |= IFF_L3MDEV_MASTER;
 
-	return register_netdevice(dev);
+	err = register_netdevice(dev);
+	if (err)
+		goto out;
+
+	if (add_fib_rules) {
+		err = vrf_add_fib_rules(dev);
+		if (err) {
+			unregister_netdevice(dev);
+			goto out;
+		}
+		add_fib_rules = false;
+	}
+
+out:
+	return err;
 }
 
 static size_t vrf_nl_getsize(const struct net_device *dev)
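Note the add_fib_rules latch in vrf_newlink(): the FIB rules are installed when the first VRF device is created and never again, because a single l3mdev rule redirects lookups for all VRF ports to their per-device tables. The flag is file-scoped rather than per network namespace here, which looks like a deliberate simplification at this point in the series.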
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index b3b9db68f758..ae7455da1687 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -11,32 +11,18 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/kernel.h>
-#include <linux/types.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/rculist.h>
-#include <linux/netdevice.h>
-#include <linux/in.h>
-#include <linux/ip.h>
 #include <linux/udp.h>
 #include <linux/igmp.h>
-#include <linux/etherdevice.h>
 #include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-#include <linux/hash.h>
 #include <linux/ethtool.h>
 #include <net/arp.h>
 #include <net/ndisc.h>
 #include <net/ip.h>
-#include <net/ip_tunnels.h>
 #include <net/icmp.h>
-#include <net/udp.h>
-#include <net/udp_tunnel.h>
 #include <net/rtnetlink.h>
-#include <net/route.h>
-#include <net/dsfield.h>
 #include <net/inet_ecn.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
@@ -44,12 +30,9 @@
 #include <net/protocol.h>
 
 #if IS_ENABLED(CONFIG_IPV6)
-#include <net/ipv6.h>
-#include <net/addrconf.h>
 #include <net/ip6_tunnel.h>
 #include <net/ip6_checksum.h>
 #endif
-#include <net/dst_metadata.h>
 
 #define VXLAN_VERSION	"0.1"
 
@@ -619,42 +602,6 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
 	return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
 }
 
-/* Notify netdevs that UDP port started listening */
-static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
-{
-	struct net_device *dev;
-	struct sock *sk = vs->sock->sk;
-	struct net *net = sock_net(sk);
-	sa_family_t sa_family = vxlan_get_sk_family(vs);
-	__be16 port = inet_sk(sk)->inet_sport;
-
-	rcu_read_lock();
-	for_each_netdev_rcu(net, dev) {
-		if (dev->netdev_ops->ndo_add_vxlan_port)
-			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
-							    port);
-	}
-	rcu_read_unlock();
-}
-
-/* Notify netdevs that UDP port is no more listening */
-static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
-{
-	struct net_device *dev;
-	struct sock *sk = vs->sock->sk;
-	struct net *net = sock_net(sk);
-	sa_family_t sa_family = vxlan_get_sk_family(vs);
-	__be16 port = inet_sk(sk)->inet_sport;
-
-	rcu_read_lock();
-	for_each_netdev_rcu(net, dev) {
-		if (dev->netdev_ops->ndo_del_vxlan_port)
-			dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,
-							    port);
-	}
-	rcu_read_unlock();
-}
-
 /* Add new entry to forwarding table -- assumes lock held */
 static int vxlan_fdb_create(struct vxlan_dev *vxlan,
 			    const u8 *mac, union vxlan_addr *ip,
@@ -1050,7 +997,10 @@ static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
 	vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
 	spin_lock(&vn->sock_lock);
 	hlist_del_rcu(&vs->hlist);
-	vxlan_notify_del_rx_port(vs);
+	udp_tunnel_notify_del_rx_port(vs->sock,
+				      (vs->flags & VXLAN_F_GPE) ?
+				      UDP_TUNNEL_TYPE_VXLAN_GPE :
+				      UDP_TUNNEL_TYPE_VXLAN);
 	spin_unlock(&vn->sock_lock);
 
 	return true;
@@ -2525,30 +2475,24 @@ static struct device_type vxlan_type = {
 	.name = "vxlan",
 };
 
-/* Calls the ndo_add_vxlan_port of the caller in order to
+/* Calls the ndo_add_udp_enc_port of the caller in order to
  * supply the listening VXLAN udp ports. Callers are expected
- * to implement the ndo_add_vxlan_port.
+ * to implement the ndo_add_udp_enc_port.
  */
 static void vxlan_push_rx_ports(struct net_device *dev)
 {
 	struct vxlan_sock *vs;
 	struct net *net = dev_net(dev);
 	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
-	sa_family_t sa_family;
-	__be16 port;
 	unsigned int i;
 
-	if (!dev->netdev_ops->ndo_add_vxlan_port)
-		return;
-
 	spin_lock(&vn->sock_lock);
 	for (i = 0; i < PORT_HASH_SIZE; ++i) {
-		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
-			port = inet_sk(vs->sock->sk)->inet_sport;
-			sa_family = vxlan_get_sk_family(vs);
-			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
-							    port);
-		}
+		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist)
+			udp_tunnel_push_rx_port(dev, vs->sock,
+						(vs->flags & VXLAN_F_GPE) ?
+						UDP_TUNNEL_TYPE_VXLAN_GPE :
+						UDP_TUNNEL_TYPE_VXLAN);
 	}
 	spin_unlock(&vn->sock_lock);
 }
@@ -2750,7 +2694,10 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
 
 	spin_lock(&vn->sock_lock);
 	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
-	vxlan_notify_add_rx_port(vs);
+	udp_tunnel_notify_add_rx_port(sock,
+				      (vs->flags & VXLAN_F_GPE) ?
+				      UDP_TUNNEL_TYPE_VXLAN_GPE :
+				      UDP_TUNNEL_TYPE_VXLAN);
 	spin_unlock(&vn->sock_lock);
 
 	/* Mark socket as an encapsulation socket. */
@@ -3308,7 +3255,7 @@ static int vxlan_netdevice_event(struct notifier_block *unused,
 
 	if (event == NETDEV_UNREGISTER)
 		vxlan_handle_lowerdev_unregister(vn, dev);
-	else if (event == NETDEV_OFFLOAD_PUSH_VXLAN)
+	else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
 		vxlan_push_rx_ports(dev);
 
 	return NOTIFY_DONE;
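With this conversion the vxlan driver no longer walks the netdev list itself; the generic udp_tunnel helpers do the iteration and hand each driver a struct udp_tunnel_info instead of a bare (family, port) pair, which is how the GPE/non-GPE distinction reaches an offloading NIC. As a rough sketch of what pushing one socket's port to one device amounts to (an illustration, not the in-tree helper; see include/net/udp_tunnel.h for the real API):

static void push_one_port(struct net_device *dev, struct socket *sock,
			  unsigned short type)
{
	struct udp_tunnel_info ti = {
		.type      = type,	/* e.g. UDP_TUNNEL_TYPE_VXLAN */
		.sa_family = sock->sk->sk_family,
		.port      = inet_sk(sock->sk)->inet_sport,
	};

	if (dev->netdev_ops->ndo_udp_tunnel_add)
		dev->netdev_ops->ndo_udp_tunnel_add(dev, &ti);
}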
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index a2fdd15f285a..33ab3345d333 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -280,6 +280,28 @@ config DSCC4
 	  To compile this driver as a module, choose M here: the
 	  module will be called dscc4.
 
+config FSL_UCC_HDLC
+	tristate "Freescale QUICC Engine HDLC support"
+	depends on HDLC
+	depends on QUICC_ENGINE
+	help
+	  Driver for Freescale QUICC Engine HDLC controller. The driver
+	  supports HDLC in NMSI and TDM mode.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called fsl_ucc_hdlc.
+
+config SLIC_DS26522
+	tristate "Slic Maxim ds26522 card support"
+	depends on SPI
+	depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
+	help
+	  This module initializes and configures the Slic Maxim card
+	  in T1 or E1 mode.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called slic_ds26522.
+
 config DSCC4_PCISYNC
 	bool "Etinc PCISYNC features"
 	depends on DSCC4
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index c135ef47cbca..73c2326603fc 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -32,6 +32,8 @@ obj-$(CONFIG_WANXL) += wanxl.o
 obj-$(CONFIG_PCI200SYN)		+= pci200syn.o
 obj-$(CONFIG_PC300TOO)		+= pc300too.o
 obj-$(CONFIG_IXP4XX_HSS)	+= ixp4xx_hss.o
+obj-$(CONFIG_FSL_UCC_HDLC)	+= fsl_ucc_hdlc.o
+obj-$(CONFIG_SLIC_DS26522)	+= slic_ds26522.o
 
 clean-files := wanxlfw.inc
 $(obj)/wanxl.o:	$(obj)/wanxlfw.inc
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
new file mode 100644
index 000000000000..19174ac1e338
--- /dev/null
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -0,0 +1,1192 @@
+/* Freescale QUICC Engine HDLC Device Driver
+ *
+ * Copyright 2016 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/hdlc.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <soc/fsl/qe/qe_tdm.h>
+#include <uapi/linux/if_arp.h>
+
+#include "fsl_ucc_hdlc.h"
+
+#define DRV_DESC "Freescale QE UCC HDLC Driver"
+#define DRV_NAME "ucc_hdlc"
+
+#define TDM_PPPOHT_SLIC_MAXIN
+#define BROKEN_FRAME_INFO
+
+static struct ucc_tdm_info utdm_primary_info = {
+	.uf_info = {
+		.tsa = 0,
+		.cdp = 0,
+		.cds = 1,
+		.ctsp = 1,
+		.ctss = 1,
+		.revd = 0,
+		.urfs = 256,
+		.utfs = 256,
+		.urfet = 128,
+		.urfset = 192,
+		.utfet = 128,
+		.utftt = 0x40,
+		.ufpt = 256,
+		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
+		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
+		.tenc = UCC_FAST_TX_ENCODING_NRZ,
+		.renc = UCC_FAST_RX_ENCODING_NRZ,
+		.tcrc = UCC_FAST_16_BIT_CRC,
+		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
+	},
+
+	.si_info = {
+#ifdef TDM_PPPOHT_SLIC_MAXIN
+		.simr_rfsd = 1,
+		.simr_tfsd = 2,
+#else
+		.simr_rfsd = 0,
+		.simr_tfsd = 0,
+#endif
+		.simr_crt = 0,
+		.simr_sl = 0,
+		.simr_ce = 1,
+		.simr_fe = 1,
+		.simr_gm = 0,
+	},
+};
+
+static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];
+
+static int uhdlc_init(struct ucc_hdlc_private *priv)
+{
+	struct ucc_tdm_info *ut_info;
+	struct ucc_fast_info *uf_info;
+	u32 cecr_subblock;
+	u16 bd_status;
+	int ret, i;
+	void *bd_buffer;
+	dma_addr_t bd_dma_addr;
+	u32 riptr;
+	u32 tiptr;
+	u32 gumr;
+
+	ut_info = priv->ut_info;
+	uf_info = &ut_info->uf_info;
+
+	if (priv->tsa) {
+		uf_info->tsa = 1;
+		uf_info->ctsp = 1;
+	}
+	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
+				UCC_HDLC_UCCE_TXB) << 16);
+
+	ret = ucc_fast_init(uf_info, &priv->uccf);
+	if (ret) {
+		dev_err(priv->dev, "Failed to init uccf.");
+		return ret;
+	}
+
+	priv->uf_regs = priv->uccf->uf_regs;
+	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+	/* Loopback mode */
+	if (priv->loopback) {
+		dev_info(priv->dev, "Loopback Mode\n");
+		gumr = ioread32be(&priv->uf_regs->gumr);
+		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
+			 UCC_FAST_GUMR_TCI);
+		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
+		iowrite32be(gumr, &priv->uf_regs->gumr);
+	}
+
+	/* Initialize SI */
+	if (priv->tsa)
+		ucc_tdm_init(priv->utdm, priv->ut_info);
+
+	/* Write to QE CECR, UCCx channel to Stop Transmission */
+	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
+	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
+			   QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+	/* Set UPSMR normal mode (needs fixing) */
+	iowrite32be(0, &priv->uf_regs->upsmr);
+
+	priv->rx_ring_size = RX_BD_RING_LEN;
+	priv->tx_ring_size = TX_BD_RING_LEN;
+	/* Alloc Rx BD */
+	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
+			RX_BD_RING_LEN * sizeof(struct qe_bd *),
+			&priv->dma_rx_bd, GFP_KERNEL);
+
+	if (!priv->rx_bd_base) {
+		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
+		ret = -ENOMEM;
+		goto rxbd_alloc_error;
+	}
+
+	/* Alloc Tx BD */
+	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
+			TX_BD_RING_LEN * sizeof(struct qe_bd *),
+			&priv->dma_tx_bd, GFP_KERNEL);
+
+	if (!priv->tx_bd_base) {
+		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
+		ret = -ENOMEM;
+		goto txbd_alloc_error;
+	}
+
+	/* Alloc parameter ram for ucc hdlc */
+	priv->ucc_pram_offset = qe_muram_alloc(sizeof(priv->ucc_pram),
+				ALIGNMENT_OF_UCC_HDLC_PRAM);
+
+	if (priv->ucc_pram_offset < 0) {
+		dev_err(priv->dev, "Cannot allocate MURAM for hdlc parameter.\n");
+		ret = -ENOMEM;
+		goto pram_alloc_error;
+	}
+
+	priv->rx_skbuff = kzalloc(priv->rx_ring_size * sizeof(*priv->rx_skbuff),
+				  GFP_KERNEL);
+	if (!priv->rx_skbuff)
+		goto rx_skb_alloc_error;
+
+	priv->tx_skbuff = kzalloc(priv->tx_ring_size * sizeof(*priv->tx_skbuff),
+				  GFP_KERNEL);
+	if (!priv->tx_skbuff)
+		goto tx_skb_alloc_error;
+
+	priv->skb_curtx = 0;
+	priv->skb_dirtytx = 0;
+	priv->curtx_bd = priv->tx_bd_base;
+	priv->dirty_tx = priv->tx_bd_base;
+	priv->currx_bd = priv->rx_bd_base;
+	priv->currx_bdnum = 0;
+
+	/* init parameter base */
+	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
+	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
+			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
+
+	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
+					qe_muram_addr(priv->ucc_pram_offset);
+
+	/* Zero out parameter ram */
+	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));
+
+	/* Alloc riptr, tiptr */
+	riptr = qe_muram_alloc(32, 32);
+	if (riptr < 0) {
+		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
+		ret = -ENOMEM;
+		goto riptr_alloc_error;
+	}
+
+	tiptr = qe_muram_alloc(32, 32);
+	if (tiptr < 0) {
+		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
+		ret = -ENOMEM;
+		goto tiptr_alloc_error;
+	}
+
+	/* Set RIPTR, TIPTR */
+	iowrite16be(riptr, &priv->ucc_pram->riptr);
+	iowrite16be(tiptr, &priv->ucc_pram->tiptr);
+
+	/* Set MRBLR */
+	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);
+
+	/* Set RBASE, TBASE */
+	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
+	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);
+
+	/* Set RSTATE, TSTATE */
+	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
+	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);
+
+	/* Set C_MASK, C_PRES for 16bit CRC */
+	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
+	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);
+
+	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
+	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
+	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
+	iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask);
+	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
+	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
+	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
+	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
+
+	/* Get BD buffer */
+	bd_buffer = dma_alloc_coherent(priv->dev,
+				       (RX_BD_RING_LEN + TX_BD_RING_LEN) *
+				       MAX_RX_BUF_LENGTH,
+				       &bd_dma_addr, GFP_KERNEL);
+
+	if (!bd_buffer) {
+		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
+		ret = -ENOMEM;
+		goto bd_alloc_error;
+	}
+
+	memset(bd_buffer, 0, (RX_BD_RING_LEN + TX_BD_RING_LEN)
+			* MAX_RX_BUF_LENGTH);
+
+	priv->rx_buffer = bd_buffer;
+	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
+
+	priv->dma_rx_addr = bd_dma_addr;
+	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
+
+	for (i = 0; i < RX_BD_RING_LEN; i++) {
+		if (i < (RX_BD_RING_LEN - 1))
+			bd_status = R_E_S | R_I_S;
+		else
+			bd_status = R_E_S | R_I_S | R_W_S;
+
+		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
+		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
+			    &priv->rx_bd_base[i].buf);
+	}
+
+	for (i = 0; i < TX_BD_RING_LEN; i++) {
+		if (i < (TX_BD_RING_LEN - 1))
+			bd_status = T_I_S | T_TC_S;
+		else
+			bd_status = T_I_S | T_TC_S | T_W_S;
+
+		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
+		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
+			    &priv->tx_bd_base[i].buf);
+	}
+
+	return 0;
+
+bd_alloc_error:
+	qe_muram_free(tiptr);
+tiptr_alloc_error:
+	qe_muram_free(riptr);
+riptr_alloc_error:
+	kfree(priv->tx_skbuff);
+tx_skb_alloc_error:
+	kfree(priv->rx_skbuff);
+rx_skb_alloc_error:
+	qe_muram_free(priv->ucc_pram_offset);
+pram_alloc_error:
+	dma_free_coherent(priv->dev,
+			  TX_BD_RING_LEN * sizeof(struct qe_bd),
+			  priv->tx_bd_base, priv->dma_tx_bd);
+txbd_alloc_error:
+	dma_free_coherent(priv->dev,
+			  RX_BD_RING_LEN * sizeof(struct qe_bd),
+			  priv->rx_bd_base, priv->dma_rx_bd);
+rxbd_alloc_error:
+	ucc_fast_free(priv->uccf);
+
+	return ret;
+}
+
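uhdlc_init() lays both rings out QE-style: the buffers come from one coherent allocation, each BD points at a fixed MAX_RX_BUF_LENGTH slice of it, and only the last BD carries the wrap bit (R_W_S/T_W_S) that sends the consumer back to the ring base. A tiny user-space sketch of that wrap-bit walk (names and sizes are illustrative, not the driver's):

#include <stdio.h>

#define RING_LEN 8
#define WRAP     0x2000			/* stands in for R_W_S/T_W_S */

int main(void)
{
	unsigned short status[RING_LEN] = { 0 };
	int i, cur = 0;

	status[RING_LEN - 1] |= WRAP;	/* last BD wraps to the base */

	for (i = 0; i < 2 * RING_LEN; i++) {
		printf("bd %d\n", cur);
		cur = (status[cur] & WRAP) ? 0 : cur + 1;
	}
	return 0;
}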
+static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
+	struct qe_bd __iomem *bd;
+	u16 bd_status;
+	unsigned long flags;
+	u8 *send_buf;
+	int i;
+	u16 *proto_head;
+
+	switch (dev->type) {
+	case ARPHRD_RAWHDLC:
+		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
+			dev->stats.tx_dropped++;
+			dev_kfree_skb(skb);
+			netdev_err(dev, "Not enough space for hdlc head\n");
+			return -ENOMEM;
+		}
+
+		skb_push(skb, HDLC_HEAD_LEN);
+
+		proto_head = (u16 *)skb->data;
+		*proto_head = htons(DEFAULT_HDLC_HEAD);
+
+		dev->stats.tx_bytes += skb->len;
+		break;
+
+	case ARPHRD_PPP:
+		proto_head = (u16 *)skb->data;
+		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
+			dev->stats.tx_dropped++;
+			dev_kfree_skb(skb);
+			netdev_err(dev, "Wrong ppp header\n");
+			return -ENOMEM;
+		}
+
+		dev->stats.tx_bytes += skb->len;
+		break;
+
+	default:
+		dev->stats.tx_dropped++;
+		dev_kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	pr_info("Tx data skb->len:%d ", skb->len);
+	send_buf = (u8 *)skb->data;
+	pr_info("\nTransmitted data:\n");
+	for (i = 0; i < 16; i++) {
+		if (i == skb->len)
+			pr_info("++++");
+		else
+			pr_info("%02x\n", send_buf[i]);
+	}
+	spin_lock_irqsave(&priv->lock, flags);
+
+	/* Start from the next BD that should be filled */
+	bd = priv->curtx_bd;
+	bd_status = ioread16be(&bd->status);
+	/* Save the skb pointer so we can free it later */
+	priv->tx_skbuff[priv->skb_curtx] = skb;
+
+	/* Update the current skb pointer (wrapping if this was the last) */
+	priv->skb_curtx =
+	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
+
+	/* copy skb data to tx buffer for sdma processing */
+	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
+	       skb->data, skb->len);
+
+	/* set bd status and length */
+	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
+
+	iowrite16be(bd_status, &bd->status);
+	iowrite16be(skb->len, &bd->length);
+
+	/* Move to next BD in the ring */
+	if (!(bd_status & T_W_S))
+		bd += 1;
+	else
+		bd = priv->tx_bd_base;
+
+	if (bd == priv->dirty_tx) {
+		if (!netif_queue_stopped(dev))
+			netif_stop_queue(dev);
+	}
+
+	priv->curtx_bd = bd;
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return NETDEV_TX_OK;
+}
+
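The queue-full test in ucc_hdlc_tx() is the classic producer/consumer check on a BD ring: after advancing, if the producer (curtx_bd) has caught up with the oldest unreclaimed descriptor (dirty_tx), the queue is stopped, and hdlc_tx_done() below wakes it again as soon as a descriptor is reclaimed. Note that the transmit path copies each skb into the coherent BD buffer rather than DMA-mapping it, which keeps the hardware interface simple at the cost of a memcpy per frame.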
+static int hdlc_tx_done(struct ucc_hdlc_private *priv)
+{
+	/* Start from the next BD that should be filled */
+	struct net_device *dev = priv->ndev;
+	struct qe_bd *bd;		/* BD pointer */
+	u16 bd_status;
+
+	bd = priv->dirty_tx;
+	bd_status = ioread16be(&bd->status);
+
+	/* Normal processing. */
+	while ((bd_status & T_R_S) == 0) {
+		struct sk_buff *skb;
+
+		/* BD contains already transmitted buffer.   */
+		/* Handle the transmitted buffer and release */
+		/* the BD to be used with the current frame  */
+
+		skb = priv->tx_skbuff[priv->skb_dirtytx];
+		if (!skb)
+			break;
+		pr_info("TxBD: %x\n", bd_status);
+		dev->stats.tx_packets++;
+		memset(priv->tx_buffer +
+		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
+		       0, skb->len);
+		dev_kfree_skb_irq(skb);
+
+		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
+		priv->skb_dirtytx =
+		    (priv->skb_dirtytx +
+		     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
+
+		/* We freed a buffer, so now we can restart transmission */
+		if (netif_queue_stopped(dev))
+			netif_wake_queue(dev);
+
+		/* Advance the confirmation BD pointer */
+		if (!(bd_status & T_W_S))
+			bd += 1;
+		else
+			bd = priv->tx_bd_base;
+		bd_status = ioread16be(&bd->status);
+	}
+	priv->dirty_tx = bd;
+
+	return 0;
+}
+
+static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
+{
+	struct net_device *dev = priv->ndev;
+	struct sk_buff *skb;
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	struct qe_bd *bd;
+	u32 bd_status;
+	u16 length, howmany = 0;
+	u8 *bdbuffer;
+	int i;
+	static int entry;
+
+	bd = priv->currx_bd;
+	bd_status = ioread16be(&bd->status);
+
+	/* while there are received buffers and BD is full (~R_E) */
+	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
+		if (bd_status & R_OV_S)
+			dev->stats.rx_over_errors++;
+		if (bd_status & R_CR_S) {
+#ifdef BROKEN_FRAME_INFO
+			pr_info("Broken Frame with RxBD: %x\n", bd_status);
+#endif
+			dev->stats.rx_crc_errors++;
+			dev->stats.rx_dropped++;
+			goto recycle;
+		}
+		bdbuffer = priv->rx_buffer +
+			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
+		length = ioread16be(&bd->length);
+
+		pr_info("Received data length:%d", length);
+		pr_info("while entry times:%d", entry++);
+
+		pr_info("\nReceived data:\n");
+		for (i = 0; (i < 16); i++) {
+			if (i == length)
+				pr_info("++++");
+			else
+				pr_info("%02x\n", bdbuffer[i]);
+		}
+
+		switch (dev->type) {
+		case ARPHRD_RAWHDLC:
+			bdbuffer += HDLC_HEAD_LEN;
+			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);
+
+			skb = dev_alloc_skb(length);
+			if (!skb) {
+				dev->stats.rx_dropped++;
+				return -ENOMEM;
+			}
+
+			skb_put(skb, length);
+			skb->len = length;
+			skb->dev = dev;
+			memcpy(skb->data, bdbuffer, length);
+			break;
+
+		case ARPHRD_PPP:
+			length -= HDLC_CRC_SIZE;
+
+			skb = dev_alloc_skb(length);
+			if (!skb) {
+				dev->stats.rx_dropped++;
+				return -ENOMEM;
+			}
+
+			skb_put(skb, length);
+			skb->len = length;
+			skb->dev = dev;
+			memcpy(skb->data, bdbuffer, length);
+			break;
+		}
+
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += skb->len;
+		howmany++;
+		if (hdlc->proto)
+			skb->protocol = hdlc_type_trans(skb, dev);
+		pr_info("skb->protocol:%x\n", skb->protocol);
+		netif_receive_skb(skb);
+
+recycle:
+		iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);
+
+		/* update to point at the next bd */
+		if (bd_status & R_W_S) {
+			priv->currx_bdnum = 0;
+			bd = priv->rx_bd_base;
+		} else {
+			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
+				priv->currx_bdnum += 1;
+			else
+				priv->currx_bdnum = RX_BD_RING_LEN - 1;
+
+			bd += 1;
+		}
+
+		bd_status = ioread16be(&bd->status);
+	}
+
+	priv->currx_bd = bd;
+	return howmany;
+}
+
+static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
+{
+	struct ucc_hdlc_private *priv = container_of(napi,
+						     struct ucc_hdlc_private,
+						     napi);
+	int howmany;
+
+	/* Tx event processing */
+	spin_lock(&priv->lock);
+	hdlc_tx_done(priv);
+	spin_unlock(&priv->lock);
+
+	howmany = 0;
+	howmany += hdlc_rx_done(priv, budget - howmany);
+
+	if (howmany < budget) {
+		napi_complete(napi);
+		qe_setbits32(priv->uccf->p_uccm,
+			     (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
+	}
+
+	return howmany;
+}
+
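ucc_hdlc_poll() pairs with the interrupt handler below in the usual NAPI fashion: the handler masks the RX/TX event bits in UCCM before scheduling the poll, and the poll re-enables them through qe_setbits32() only when it finished under budget, so interrupts stay off while there is backlog to process.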
+static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
+{
+	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
+	struct net_device *dev = priv->ndev;
+	struct ucc_fast_private *uccf;
+	struct ucc_tdm_info *ut_info;
+	u32 ucce;
+	u32 uccm;
+
+	ut_info = priv->ut_info;
+	uccf = priv->uccf;
+
+	ucce = ioread32be(uccf->p_ucce);
+	uccm = ioread32be(uccf->p_uccm);
+	ucce &= uccm;
+	iowrite32be(ucce, uccf->p_ucce);
+	pr_info("irq ucce:%x\n", ucce);
+	if (!ucce)
+		return IRQ_NONE;
+
+	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
+		if (napi_schedule_prep(&priv->napi)) {
+			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
+				  << 16);
+			iowrite32be(uccm, uccf->p_uccm);
+			__napi_schedule(&priv->napi);
+		}
+	}
+
+	/* Errors and other events */
+	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
+		dev->stats.rx_errors++;
+	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
+		dev->stats.tx_errors++;
+
+	return IRQ_HANDLED;
+}
+
+static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	const size_t size = sizeof(te1_settings);
+	te1_settings line;
+	struct ucc_hdlc_private *priv = netdev_priv(dev);
+
+	if (cmd != SIOCWANDEV)
+		return hdlc_ioctl(dev, ifr, cmd);
+
+	switch (ifr->ifr_settings.type) {
+	case IF_GET_IFACE:
+		ifr->ifr_settings.type = IF_IFACE_E1;
+		if (ifr->ifr_settings.size < size) {
+			ifr->ifr_settings.size = size; /* data size wanted */
+			return -ENOBUFS;
+		}
+		line.clock_type = priv->clocking;
+		line.clock_rate = 0;
+		line.loopback = 0;
+
+		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
+			return -EFAULT;
+		return 0;
+
+	default:
+		return hdlc_ioctl(dev, ifr, cmd);
+	}
+}
+
+static int uhdlc_open(struct net_device *dev)
+{
+	u32 cecr_subblock;
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	struct ucc_hdlc_private *priv = hdlc->priv;
+	struct ucc_tdm *utdm = priv->utdm;
+
+	if (priv->hdlc_busy != 1) {
+		if (request_irq(priv->ut_info->uf_info.irq,
+				ucc_hdlc_irq_handler, 0, "hdlc", priv))
+			return -ENODEV;
+
+		cecr_subblock = ucc_fast_get_qe_cr_subblock(
+					priv->ut_info->uf_info.ucc_num);
+
+		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
+			     QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+		/* Enable the TDM port */
+		if (priv->tsa)
+			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
+
+		priv->hdlc_busy = 1;
+		netif_device_attach(priv->ndev);
+		napi_enable(&priv->napi);
+		netif_start_queue(dev);
+		hdlc_open(dev);
+	}
+
+	return 0;
+}
+
685static void uhdlc_memclean(struct ucc_hdlc_private *priv)
686{
687	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
688	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));
689
690 if (priv->rx_bd_base) {
691 dma_free_coherent(priv->dev,
692 RX_BD_RING_LEN * sizeof(struct qe_bd),
693 priv->rx_bd_base, priv->dma_rx_bd);
694
695 priv->rx_bd_base = NULL;
696 priv->dma_rx_bd = 0;
697 }
698
699 if (priv->tx_bd_base) {
700 dma_free_coherent(priv->dev,
701 TX_BD_RING_LEN * sizeof(struct qe_bd),
702 priv->tx_bd_base, priv->dma_tx_bd);
703
704 priv->tx_bd_base = NULL;
705 priv->dma_tx_bd = 0;
706 }
707
708 if (priv->ucc_pram) {
709 qe_muram_free(priv->ucc_pram_offset);
710 priv->ucc_pram = NULL;
711 priv->ucc_pram_offset = 0;
712 }
713
714 kfree(priv->rx_skbuff);
715 priv->rx_skbuff = NULL;
716
717 kfree(priv->tx_skbuff);
718 priv->tx_skbuff = NULL;
719
720 if (priv->uf_regs) {
721 iounmap(priv->uf_regs);
722 priv->uf_regs = NULL;
723 }
724
725 if (priv->uccf) {
726 ucc_fast_free(priv->uccf);
727 priv->uccf = NULL;
728 }
729
730 if (priv->rx_buffer) {
731 dma_free_coherent(priv->dev,
732 RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
733 priv->rx_buffer, priv->dma_rx_addr);
734 priv->rx_buffer = NULL;
735 priv->dma_rx_addr = 0;
736 }
737
738 if (priv->tx_buffer) {
739 dma_free_coherent(priv->dev,
740 TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
741 priv->tx_buffer, priv->dma_tx_addr);
742 priv->tx_buffer = NULL;
743 priv->dma_tx_addr = 0;
744 }
745}
746
747static int uhdlc_close(struct net_device *dev)
748{
749 struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
750 struct ucc_tdm *utdm = priv->utdm;
751 u32 cecr_subblock;
752
753 napi_disable(&priv->napi);
754 cecr_subblock = ucc_fast_get_qe_cr_subblock(
755 priv->ut_info->uf_info.ucc_num);
756
757 qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
758 (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
759 qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
760 (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
761
762 if (priv->tsa)
763 utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);
764
765 ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
766
767 free_irq(priv->ut_info->uf_info.irq, priv);
768 netif_stop_queue(dev);
769 priv->hdlc_busy = 0;
770
771 return 0;
772}
773
774static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
775 unsigned short parity)
776{
777 struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
778
779 if (encoding != ENCODING_NRZ &&
780 encoding != ENCODING_NRZI)
781 return -EINVAL;
782
783 if (parity != PARITY_NONE &&
784 parity != PARITY_CRC32_PR1_CCITT &&
785 parity != PARITY_CRC16_PR1_CCITT)
786 return -EINVAL;
787
788 priv->encoding = encoding;
789 priv->parity = parity;
790
791 return 0;
792}
793
794#ifdef CONFIG_PM
795static void store_clk_config(struct ucc_hdlc_private *priv)
796{
797 struct qe_mux *qe_mux_reg = &qe_immr->qmx;
798
799 /* store si clk */
800 priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
801 priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);
802
803 /* store si sync */
804 priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);
805
806 /* store ucc clk */
807 memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
808}
809
810static void resume_clk_config(struct ucc_hdlc_private *priv)
811{
812 struct qe_mux *qe_mux_reg = &qe_immr->qmx;
813
814 memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));
815
816 iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
817 iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);
818
819 iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
820}
821
822static int uhdlc_suspend(struct device *dev)
823{
824 struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
825 struct ucc_tdm_info *ut_info;
826 struct ucc_fast __iomem *uf_regs;
827
828 if (!priv)
829 return -EINVAL;
830
831 if (!netif_running(priv->ndev))
832 return 0;
833
834 netif_device_detach(priv->ndev);
835 napi_disable(&priv->napi);
836
837 ut_info = priv->ut_info;
838 uf_regs = priv->uf_regs;
839
840	/* back up gumr and guemr */
841 priv->gumr = ioread32be(&uf_regs->gumr);
842 priv->guemr = ioread8(&uf_regs->guemr);
843
844 priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
845 GFP_KERNEL);
846 if (!priv->ucc_pram_bak)
847 return -ENOMEM;
848
849 /* backup HDLC parameter */
850 memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
851 sizeof(struct ucc_hdlc_param));
852
853 /* store the clk configuration */
854 store_clk_config(priv);
855
856 /* save power */
857 ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
858
859 dev_dbg(dev, "ucc hdlc suspend\n");
860 return 0;
861}
862
863static int uhdlc_resume(struct device *dev)
864{
865 struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
866 struct ucc_tdm *utdm = priv->utdm;
867 struct ucc_tdm_info *ut_info;
868 struct ucc_fast __iomem *uf_regs;
869 struct ucc_fast_private *uccf;
870 struct ucc_fast_info *uf_info;
871 int ret, i;
872 u32 cecr_subblock;
873 u16 bd_status;
874
875 if (!priv)
876 return -EINVAL;
877
878 if (!netif_running(priv->ndev))
879 return 0;
880
881 ut_info = priv->ut_info;
882 uf_info = &ut_info->uf_info;
883 uf_regs = priv->uf_regs;
884 uccf = priv->uccf;
885
886 /* restore gumr guemr */
887 iowrite8(priv->guemr, &uf_regs->guemr);
888 iowrite32be(priv->gumr, &uf_regs->gumr);
889
890 /* Set Virtual Fifo registers */
891 iowrite16be(uf_info->urfs, &uf_regs->urfs);
892 iowrite16be(uf_info->urfet, &uf_regs->urfet);
893 iowrite16be(uf_info->urfset, &uf_regs->urfset);
894 iowrite16be(uf_info->utfs, &uf_regs->utfs);
895 iowrite16be(uf_info->utfet, &uf_regs->utfet);
896 iowrite16be(uf_info->utftt, &uf_regs->utftt);
897 /* utfb, urfb are offsets from MURAM base */
898 iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
899 iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
900
901 /* Rx Tx and sync clock routing */
902 resume_clk_config(priv);
903
904 iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
905 iowrite32be(0xffffffff, &uf_regs->ucce);
906
907 ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
908
909 /* rebuild SIRAM */
910 if (priv->tsa)
911 ucc_tdm_init(priv->utdm, priv->ut_info);
912
913 /* Write to QE CECR, UCCx channel to Stop Transmission */
914 cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
915 ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
916 (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
917
918 /* Set UPSMR normal mode */
919 iowrite32be(0, &uf_regs->upsmr);
920
921 /* init parameter base */
922 cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
923 ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
924 QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
925
926 priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
927 qe_muram_addr(priv->ucc_pram_offset);
928
929 /* restore ucc parameter */
930 memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
931 sizeof(struct ucc_hdlc_param));
932 kfree(priv->ucc_pram_bak);
933
934 /* rebuild BD entry */
935 for (i = 0; i < RX_BD_RING_LEN; i++) {
936 if (i < (RX_BD_RING_LEN - 1))
937 bd_status = R_E_S | R_I_S;
938 else
939 bd_status = R_E_S | R_I_S | R_W_S;
940
941 iowrite16be(bd_status, &priv->rx_bd_base[i].status);
942 iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
943 &priv->rx_bd_base[i].buf);
944 }
945
946 for (i = 0; i < TX_BD_RING_LEN; i++) {
947 if (i < (TX_BD_RING_LEN - 1))
948 bd_status = T_I_S | T_TC_S;
949 else
950 bd_status = T_I_S | T_TC_S | T_W_S;
951
952 iowrite16be(bd_status, &priv->tx_bd_base[i].status);
953 iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
954 &priv->tx_bd_base[i].buf);
955 }
956
957 /* if hdlc is busy enable TX and RX */
958 if (priv->hdlc_busy == 1) {
959 cecr_subblock = ucc_fast_get_qe_cr_subblock(
960 priv->ut_info->uf_info.ucc_num);
961
962 qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
963 (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
964
965 ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
966
967 /* Enable the TDM port */
968 if (priv->tsa)
969 utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
970 }
971
972 napi_enable(&priv->napi);
973 netif_device_attach(priv->ndev);
974
975 return 0;
976}
977
978static const struct dev_pm_ops uhdlc_pm_ops = {
979 .suspend = uhdlc_suspend,
980 .resume = uhdlc_resume,
981 .freeze = uhdlc_suspend,
982 .thaw = uhdlc_resume,
983};
984
985#define HDLC_PM_OPS (&uhdlc_pm_ops)
986
987#else
988
989#define HDLC_PM_OPS NULL
990
991#endif
992static const struct net_device_ops uhdlc_ops = {
993 .ndo_open = uhdlc_open,
994 .ndo_stop = uhdlc_close,
995 .ndo_change_mtu = hdlc_change_mtu,
996 .ndo_start_xmit = hdlc_start_xmit,
997 .ndo_do_ioctl = uhdlc_ioctl,
998};
999
1000static int ucc_hdlc_probe(struct platform_device *pdev)
1001{
1002 struct device_node *np = pdev->dev.of_node;
1003 struct ucc_hdlc_private *uhdlc_priv = NULL;
1004 struct ucc_tdm_info *ut_info;
1005 struct ucc_tdm *utdm;
1006 struct resource res;
1007 struct net_device *dev;
1008 hdlc_device *hdlc;
1009 int ucc_num;
1010 const char *sprop;
1011 int ret;
1012 u32 val;
1013
1014 ret = of_property_read_u32_index(np, "cell-index", 0, &val);
1015 if (ret) {
1016 dev_err(&pdev->dev, "Invalid ucc property\n");
1017 return -ENODEV;
1018 }
1019
1020 ucc_num = val - 1;
1021 if ((ucc_num > 3) || (ucc_num < 0)) {
1022		dev_err(&pdev->dev, "Invalid UCC num\n");
1023 return -EINVAL;
1024 }
1025
1026 memcpy(&utdm_info[ucc_num], &utdm_primary_info,
1027 sizeof(utdm_primary_info));
1028
1029 ut_info = &utdm_info[ucc_num];
1030 ut_info->uf_info.ucc_num = ucc_num;
1031
1032 sprop = of_get_property(np, "rx-clock-name", NULL);
1033 if (sprop) {
1034 ut_info->uf_info.rx_clock = qe_clock_source(sprop);
1035 if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
1036 (ut_info->uf_info.rx_clock > QE_CLK24)) {
1037 dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
1038 return -EINVAL;
1039 }
1040 } else {
1041 dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
1042 return -EINVAL;
1043 }
1044
1045 sprop = of_get_property(np, "tx-clock-name", NULL);
1046 if (sprop) {
1047 ut_info->uf_info.tx_clock = qe_clock_source(sprop);
1048 if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
1049 (ut_info->uf_info.tx_clock > QE_CLK24)) {
1050 dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
1051 return -EINVAL;
1052 }
1053 } else {
1054 dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
1055 return -EINVAL;
1056 }
1057
1058	/* use the same clock when working in loopback */
1059 if (ut_info->uf_info.rx_clock == ut_info->uf_info.tx_clock)
1060 qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);
1061
1062 ret = of_address_to_resource(np, 0, &res);
1063 if (ret)
1064 return -EINVAL;
1065
1066 ut_info->uf_info.regs = res.start;
1067 ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);
1068
1069 uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
1070 if (!uhdlc_priv) {
1071 ret = -ENOMEM;
1072 dev_err(&pdev->dev, "No mem to alloc hdlc private data\n");
1073 goto err_alloc_priv;
1074 }
1075
1076 dev_set_drvdata(&pdev->dev, uhdlc_priv);
1077 uhdlc_priv->dev = &pdev->dev;
1078 uhdlc_priv->ut_info = ut_info;
1079
1080 if (of_get_property(np, "fsl,tdm-interface", NULL))
1081 uhdlc_priv->tsa = 1;
1082
1083 if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
1084 uhdlc_priv->loopback = 1;
1085
1086 if (uhdlc_priv->tsa == 1) {
1087 utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
1088 if (!utdm) {
1089 ret = -ENOMEM;
1090 dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
1091 goto err_alloc_utdm;
1092 }
1093 uhdlc_priv->utdm = utdm;
1094 ret = ucc_of_parse_tdm(np, utdm, ut_info);
1095 if (ret)
1096 goto err_miss_tsa_property;
1097 }
1098
1099 ret = uhdlc_init(uhdlc_priv);
1100 if (ret) {
1101 dev_err(&pdev->dev, "Failed to init uhdlc\n");
1102 goto err_hdlc_init;
1103 }
1104
1105 dev = alloc_hdlcdev(uhdlc_priv);
1106 if (!dev) {
1107 ret = -ENOMEM;
1108 pr_err("ucc_hdlc: unable to allocate memory\n");
1109 goto err_hdlc_init;
1110 }
1111
1112 uhdlc_priv->ndev = dev;
1113 hdlc = dev_to_hdlc(dev);
1114 dev->tx_queue_len = 16;
1115 dev->netdev_ops = &uhdlc_ops;
1116 hdlc->attach = ucc_hdlc_attach;
1117 hdlc->xmit = ucc_hdlc_tx;
1118 netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
1119 if (register_hdlc_device(dev)) {
1120 ret = -ENOBUFS;
1121 pr_err("ucc_hdlc: unable to register hdlc device\n");
1122 free_netdev(dev);
1123 goto err_hdlc_init;
1124 }
1125
1126 return 0;
1127
1128err_hdlc_init:
1129err_miss_tsa_property:
1130	/* utdm must be freed while uhdlc_priv is still live */
1131	if (uhdlc_priv->tsa)
1132		kfree(utdm);
1133err_alloc_utdm:
1134	kfree(uhdlc_priv);
1135err_alloc_priv:
1136	return ret;
1137}
1138
1139static int ucc_hdlc_remove(struct platform_device *pdev)
1140{
1141 struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);
1142
1143 uhdlc_memclean(priv);
1144
1145 if (priv->utdm->si_regs) {
1146 iounmap(priv->utdm->si_regs);
1147 priv->utdm->si_regs = NULL;
1148 }
1149
1150 if (priv->utdm->siram) {
1151 iounmap(priv->utdm->siram);
1152 priv->utdm->siram = NULL;
1153 }
1154 kfree(priv);
1155
1156 dev_info(&pdev->dev, "UCC based hdlc module removed\n");
1157
1158 return 0;
1159}
1160
1161static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
1162 {
1163 .compatible = "fsl,ucc-hdlc",
1164 },
1165 {},
1166};
1167
1168MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);
1169
1170static struct platform_driver ucc_hdlc_driver = {
1171 .probe = ucc_hdlc_probe,
1172 .remove = ucc_hdlc_remove,
1173 .driver = {
1174 .owner = THIS_MODULE,
1175 .name = DRV_NAME,
1176 .pm = HDLC_PM_OPS,
1177 .of_match_table = fsl_ucc_hdlc_of_match,
1178 },
1179};
1180
1181static int __init ucc_hdlc_init(void)
1182{
1183 return platform_driver_register(&ucc_hdlc_driver);
1184}
1185
1186static void __exit ucc_hdlc_exit(void)
1187{
1188 platform_driver_unregister(&ucc_hdlc_driver);
1189}
1190
1191module_init(ucc_hdlc_init);
1192module_exit(ucc_hdlc_exit);
diff --git a/drivers/net/wan/fsl_ucc_hdlc.h b/drivers/net/wan/fsl_ucc_hdlc.h
new file mode 100644
index 000000000000..881ecdeef076
--- /dev/null
+++ b/drivers/net/wan/fsl_ucc_hdlc.h
@@ -0,0 +1,147 @@
1/* Freescale QUICC Engine HDLC Device Driver
2 *
3 * Copyright 2014 Freescale Semiconductor Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
11#ifndef _UCC_HDLC_H_
12#define _UCC_HDLC_H_
13
14#include <linux/kernel.h>
15#include <linux/list.h>
16
17#include <soc/fsl/qe/immap_qe.h>
18#include <soc/fsl/qe/qe.h>
19
20#include <soc/fsl/qe/ucc.h>
21#include <soc/fsl/qe/ucc_fast.h>
22
23/* UCC HDLC event register */
24#define UCCE_HDLC_RX_EVENTS \
25(UCC_HDLC_UCCE_RXF | UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_BSY)
26#define UCCE_HDLC_TX_EVENTS (UCC_HDLC_UCCE_TXB | UCC_HDLC_UCCE_TXE)
27
28struct ucc_hdlc_param {
29 __be16 riptr;
30 __be16 tiptr;
31 __be16 res0;
32 __be16 mrblr;
33 __be32 rstate;
34 __be32 rbase;
35 __be16 rbdstat;
36 __be16 rbdlen;
37 __be32 rdptr;
38 __be32 tstate;
39 __be32 tbase;
40 __be16 tbdstat;
41 __be16 tbdlen;
42 __be32 tdptr;
43 __be32 rbptr;
44 __be32 tbptr;
45 __be32 rcrc;
46 __be32 res1;
47 __be32 tcrc;
48 __be32 res2;
49 __be32 res3;
50 __be32 c_mask;
51 __be32 c_pres;
52 __be16 disfc;
53 __be16 crcec;
54 __be16 abtsc;
55 __be16 nmarc;
56 __be32 max_cnt;
57 __be16 mflr;
58 __be16 rfthr;
59 __be16 rfcnt;
60 __be16 hmask;
61 __be16 haddr1;
62 __be16 haddr2;
63 __be16 haddr3;
64 __be16 haddr4;
65 __be16 ts_tmp;
66 __be16 tmp_mb;
67};
68
69struct ucc_hdlc_private {
70 struct ucc_tdm *utdm;
71 struct ucc_tdm_info *ut_info;
72 struct ucc_fast_private *uccf;
73 struct device *dev;
74 struct net_device *ndev;
75 struct napi_struct napi;
76 struct ucc_fast __iomem *uf_regs; /* UCC Fast registers */
77 struct ucc_hdlc_param __iomem *ucc_pram;
78 u16 tsa;
79 bool hdlc_busy;
80 bool loopback;
81
82 u8 *tx_buffer;
83 u8 *rx_buffer;
84 dma_addr_t dma_tx_addr;
85 dma_addr_t dma_rx_addr;
86
87 struct qe_bd *tx_bd_base;
88 struct qe_bd *rx_bd_base;
89 dma_addr_t dma_tx_bd;
90 dma_addr_t dma_rx_bd;
91 struct qe_bd *curtx_bd;
92 struct qe_bd *currx_bd;
93 struct qe_bd *dirty_tx;
94 u16 currx_bdnum;
95
96 struct sk_buff **tx_skbuff;
97 struct sk_buff **rx_skbuff;
98 u16 skb_curtx;
99 u16 skb_currx;
100 unsigned short skb_dirtytx;
101
102 unsigned short tx_ring_size;
103 unsigned short rx_ring_size;
104 u32 ucc_pram_offset;
105
106 unsigned short encoding;
107 unsigned short parity;
108 u32 clocking;
109 spinlock_t lock; /* lock for Tx BD and Tx buffer */
110#ifdef CONFIG_PM
111 struct ucc_hdlc_param *ucc_pram_bak;
112 u32 gumr;
113 u8 guemr;
114 u32 cmxsi1cr_l, cmxsi1cr_h;
115 u32 cmxsi1syr;
116 u32 cmxucr[4];
117#endif
118};
119
120#define TX_BD_RING_LEN 0x10
121#define RX_BD_RING_LEN 0x20
122#define RX_CLEAN_MAX 0x10
123#define NUM_OF_BUF 4
124#define MAX_RX_BUF_LENGTH (48 * 0x20)
125#define MAX_FRAME_LENGTH (MAX_RX_BUF_LENGTH + 8)
126#define ALIGNMENT_OF_UCC_HDLC_PRAM 64
127#define SI_BANK_SIZE 128
128#define MAX_HDLC_NUM 4
129#define HDLC_HEAD_LEN 2
130#define HDLC_CRC_SIZE 2
131#define TX_RING_MOD_MASK(size) (size - 1)
132#define RX_RING_MOD_MASK(size) (size - 1)
133
134#define HDLC_HEAD_MASK 0x0000
135#define DEFAULT_HDLC_HEAD 0xff44
136#define DEFAULT_ADDR_MASK 0x00ff
137#define DEFAULT_HDLC_ADDR 0x00ff
138
139#define BMR_GBL 0x20000000
140#define BMR_BIG_ENDIAN 0x10000000
141#define CRC_16BIT_MASK 0x0000F0B8
142#define CRC_16BIT_PRES 0x0000FFFF
143#define DEFAULT_RFTHR 1
144
145#define DEFAULT_PPP_HEAD 0xff03
146
147#endif
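
TX_RING_MOD_MASK()/RX_RING_MOD_MASK() only work because the ring lengths above are powers of two; masking with (size - 1) then makes the index advance wrap for free. A small stand-alone sketch of that arithmetic:

	#include <stdio.h>

	#define TX_BD_RING_LEN		0x10
	#define TX_RING_MOD_MASK(size)	(size - 1)

	int main(void)
	{
		unsigned short idx = TX_BD_RING_LEN - 1;	/* last BD */

		/* 0x0f + 1 = 0x10, masked with 0x0f wraps back to 0 */
		idx = (idx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
		printf("next index: %u\n", idx);
		return 0;
	}
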
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
new file mode 100644
index 000000000000..d06a887a2352
--- /dev/null
+++ b/drivers/net/wan/slic_ds26522.c
@@ -0,0 +1,255 @@
1/*
2 * drivers/net/wan/slic_ds26522.c
3 *
4 * Copyright (C) 2016 Freescale Semiconductor, Inc.
5 *
 6 * Author: Zhao Qiang <qiang.zhao@nxp.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/bitrev.h>
15#include <linux/module.h>
16#include <linux/device.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/kthread.h>
20#include <linux/spi/spi.h>
21#include <linux/wait.h>
22#include <linux/param.h>
23#include <linux/delay.h>
24#include <linux/of.h>
25#include <linux/of_address.h>
26#include <linux/io.h>
27#include "slic_ds26522.h"
28
29#define DRV_NAME "ds26522"
30
31#define SLIC_TRANS_LEN 1
32#define SLIC_TWO_LEN 2
33#define SLIC_THREE_LEN 3
34
35static struct spi_device *g_spi;
36
37MODULE_LICENSE("GPL");
38MODULE_AUTHOR("Zhao Qiang<B45475@freescale.com>");
39
40/* the read/write format of address is
41 * w/r|A13|A12|A11|A10|A9|A8|A7|A6|A5|A4|A3|A2|A1|A0|x
42 */
43static void slic_write(struct spi_device *spi, u16 addr,
44 u8 data)
45{
46 u8 temp[3];
47
48 addr = bitrev16(addr) >> 1;
49 data = bitrev8(data);
50 temp[0] = (u8)((addr >> 8) & 0x7f);
51 temp[1] = (u8)(addr & 0xfe);
52 temp[2] = data;
53
54 /* write spi addr and value */
55 spi_write(spi, &temp[0], SLIC_THREE_LEN);
56}
57
58static u8 slic_read(struct spi_device *spi, u16 addr)
59{
60 u8 temp[2];
61 u8 data;
62
63 addr = bitrev16(addr) >> 1;
64 temp[0] = (u8)(((addr >> 8) & 0x7f) | 0x80);
65 temp[1] = (u8)(addr & 0xfe);
66
67 spi_write_then_read(spi, &temp[0], SLIC_TWO_LEN, &data,
68 SLIC_TRANS_LEN);
69
70 data = bitrev8(data);
71 return data;
72}
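
slic_write()/slic_read() pack the 14-bit register address into the first two SPI bytes exactly as the comment above lays out: bit-reverse the address, drop the trailing don't-care bit, and put the read/write flag in the top bit. A user-space sketch of that packing; the bitrev helpers are minimal stand-ins for linux/bitrev.h, and the IDR address is used only as an example:

	#include <stdio.h>
	#include <stdint.h>

	static uint8_t bitrev8(uint8_t x)
	{
		uint8_t r = 0;
		int i;

		for (i = 0; i < 8; i++)
			r |= ((x >> i) & 1) << (7 - i);
		return r;
	}

	static uint16_t bitrev16(uint16_t x)
	{
		return (uint16_t)((bitrev8(x & 0xff) << 8) | bitrev8(x >> 8));
	}

	int main(void)
	{
		uint16_t addr = 0x00f8;	/* DS26522_IDR_ADDR */
		uint8_t b0, b1;

		/* same steps as slic_read() */
		addr = bitrev16(addr) >> 1;
		b0 = (uint8_t)(((addr >> 8) & 0x7f) | 0x80);	/* read flag */
		b1 = (uint8_t)(addr & 0xfe);

		printf("read frame: %02x %02x\n", b0, b1);	/* 8f 80 */
		return 0;
	}
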
73
74static bool get_slic_product_code(struct spi_device *spi)
75{
76 u8 device_id;
77
78 device_id = slic_read(spi, DS26522_IDR_ADDR);
79 if ((device_id & 0xf8) == 0x68)
80 return true;
81 else
82 return false;
83}
84
85static void ds26522_e1_spec_config(struct spi_device *spi)
86{
87 /* Receive E1 Mode, Framer Disabled */
88 slic_write(spi, DS26522_RMMR_ADDR, DS26522_RMMR_E1);
89
90	/* Transmit E1 Mode, Framer Disabled */
91 slic_write(spi, DS26522_TMMR_ADDR, DS26522_TMMR_E1);
92
93 /* Receive E1 Mode Framer Enable */
94 slic_write(spi, DS26522_RMMR_ADDR,
95 slic_read(spi, DS26522_RMMR_ADDR) | DS26522_RMMR_FRM_EN);
96
97 /* Transmit E1 Mode Framer Enable */
98 slic_write(spi, DS26522_TMMR_ADDR,
99 slic_read(spi, DS26522_TMMR_ADDR) | DS26522_TMMR_FRM_EN);
100
101	/* RCR1, receive E1 HDB3 & CCS */
102 slic_write(spi, DS26522_RCR1_ADDR,
103 DS26522_RCR1_E1_HDB3 | DS26522_RCR1_E1_CCS);
104
105 /* RSYSCLK=2.048MHz, RSYNC-Output */
106 slic_write(spi, DS26522_RIOCR_ADDR,
107 DS26522_RIOCR_2048KHZ | DS26522_RIOCR_RSIO_OUT);
108
109	/* TCR1, transmit E1 HDB3 */
110 slic_write(spi, DS26522_TCR1_ADDR, DS26522_TCR1_TB8ZS);
111
112 /* TSYSCLK=2.048MHz, TSYNC-Output */
113 slic_write(spi, DS26522_TIOCR_ADDR,
114 DS26522_TIOCR_2048KHZ | DS26522_TIOCR_TSIO_OUT);
115
116 /* Set E1TAF */
117 slic_write(spi, DS26522_E1TAF_ADDR, DS26522_E1TAF_DEFAULT);
118
119 /* Set E1TNAF register */
120 slic_write(spi, DS26522_E1TNAF_ADDR, DS26522_E1TNAF_DEFAULT);
121
122 /* Receive E1 Mode Framer Enable & init Done */
123 slic_write(spi, DS26522_RMMR_ADDR, slic_read(spi, DS26522_RMMR_ADDR) |
124 DS26522_RMMR_INIT_DONE);
125
126 /* Transmit E1 Mode Framer Enable & init Done */
127 slic_write(spi, DS26522_TMMR_ADDR, slic_read(spi, DS26522_TMMR_ADDR) |
128 DS26522_TMMR_INIT_DONE);
129
130 /* Configure LIU E1 mode */
131 slic_write(spi, DS26522_LTRCR_ADDR, DS26522_LTRCR_E1);
132
133	/* E1 Mode default 75 ohm w/Transmit Impedance Matching */
134 slic_write(spi, DS26522_LTITSR_ADDR,
135 DS26522_LTITSR_TLIS_75OHM | DS26522_LTITSR_LBOS_75OHM);
136
137	/* E1 Mode default 75 ohm Long Haul w/Receive Impedance Matching */
138 slic_write(spi, DS26522_LRISMR_ADDR,
139 DS26522_LRISMR_75OHM | DS26522_LRISMR_MAX);
140
141 /* Enable Transmit output */
142 slic_write(spi, DS26522_LMCR_ADDR, DS26522_LMCR_TE);
143}
144
145static int slic_ds26522_init_configure(struct spi_device *spi)
146{
147 u16 addr;
148
149 /* set clock */
150 slic_write(spi, DS26522_GTCCR_ADDR, DS26522_GTCCR_BPREFSEL_REFCLKIN |
151 DS26522_GTCCR_BFREQSEL_2048KHZ |
152 DS26522_GTCCR_FREQSEL_2048KHZ);
153 slic_write(spi, DS26522_GTCR2_ADDR, DS26522_GTCR2_TSSYNCOUT);
154 slic_write(spi, DS26522_GFCR_ADDR, DS26522_GFCR_BPCLK_2048KHZ);
155
156 /* set gtcr */
157 slic_write(spi, DS26522_GTCR1_ADDR, DS26522_GTCR1);
158
159 /* Global LIU Software Reset Register */
160 slic_write(spi, DS26522_GLSRR_ADDR, DS26522_GLSRR_RESET);
161
162 /* Global Framer and BERT Software Reset Register */
163 slic_write(spi, DS26522_GFSRR_ADDR, DS26522_GFSRR_RESET);
164
165 usleep_range(100, 120);
166
167 slic_write(spi, DS26522_GLSRR_ADDR, DS26522_GLSRR_NORMAL);
168 slic_write(spi, DS26522_GFSRR_ADDR, DS26522_GFSRR_NORMAL);
169
170	/* Perform RX/TX SRESET, reset receiver */
171	slic_write(spi, DS26522_RMMR_ADDR, DS26522_RMMR_SFTRST);
172
173	/* Reset transceiver */
174	slic_write(spi, DS26522_TMMR_ADDR, DS26522_TMMR_SFTRST);
175
176 usleep_range(100, 120);
177
178 /* Zero all Framer Registers */
179 for (addr = DS26522_RF_ADDR_START; addr <= DS26522_RF_ADDR_END;
180 addr++)
181 slic_write(spi, addr, 0);
182
183 for (addr = DS26522_TF_ADDR_START; addr <= DS26522_TF_ADDR_END;
184 addr++)
185 slic_write(spi, addr, 0);
186
187 for (addr = DS26522_LIU_ADDR_START; addr <= DS26522_LIU_ADDR_END;
188 addr++)
189 slic_write(spi, addr, 0);
190
191 for (addr = DS26522_BERT_ADDR_START; addr <= DS26522_BERT_ADDR_END;
192 addr++)
193 slic_write(spi, addr, 0);
194
195 /* setup ds26522 for E1 specification */
196 ds26522_e1_spec_config(spi);
197
198 slic_write(spi, DS26522_GTCR1_ADDR, 0x00);
199
200 return 0;
201}
202
203static int slic_ds26522_remove(struct spi_device *spi)
204{
205 pr_info("DS26522 module uninstalled\n");
206 return 0;
207}
208
209static int slic_ds26522_probe(struct spi_device *spi)
210{
211 int ret = 0;
212
213 g_spi = spi;
214 spi->bits_per_word = 8;
215
216 if (!get_slic_product_code(spi))
217		return -ENODEV;
218
219 ret = slic_ds26522_init_configure(spi);
220 if (ret == 0)
221		pr_info("DS26522 cs%d configured\n", spi->chip_select);
222
223 return ret;
224}
225
226static const struct of_device_id slic_ds26522_match[] = {
227 {
228 .compatible = "maxim,ds26522",
229 },
230 {},
231};
232
233static struct spi_driver slic_ds26522_driver = {
234 .driver = {
235 .name = "ds26522",
236 .bus = &spi_bus_type,
237 .owner = THIS_MODULE,
238 .of_match_table = slic_ds26522_match,
239 },
240 .probe = slic_ds26522_probe,
241 .remove = slic_ds26522_remove,
242};
243
244static int __init slic_ds26522_init(void)
245{
246 return spi_register_driver(&slic_ds26522_driver);
247}
248
249static void __exit slic_ds26522_exit(void)
250{
251 spi_unregister_driver(&slic_ds26522_driver);
252}
253
254module_init(slic_ds26522_init);
255module_exit(slic_ds26522_exit);
diff --git a/drivers/net/wan/slic_ds26522.h b/drivers/net/wan/slic_ds26522.h
new file mode 100644
index 000000000000..22aa0ecbd9fd
--- /dev/null
+++ b/drivers/net/wan/slic_ds26522.h
@@ -0,0 +1,134 @@
1/*
2 * drivers/tdm/line_ctrl/slic_ds26522.h
3 *
4 * Copyright 2016 Freescale Semiconductor, Inc.
5 *
6 * Author: Zhao Qiang <B45475@freescale.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#define DS26522_RF_ADDR_START 0x00
15#define DS26522_RF_ADDR_END 0xef
16#define DS26522_GLB_ADDR_START 0xf0
17#define DS26522_GLB_ADDR_END 0xff
18#define DS26522_TF_ADDR_START 0x100
19#define DS26522_TF_ADDR_END 0x1ef
20#define DS26522_LIU_ADDR_START 0x1000
21#define DS26522_LIU_ADDR_END 0x101f
22#define DS26522_TEST_ADDR_START 0x1008
23#define DS26522_TEST_ADDR_END 0x101f
24#define DS26522_BERT_ADDR_START 0x1100
25#define DS26522_BERT_ADDR_END 0x110f
26
27#define DS26522_RMMR_ADDR 0x80
28#define DS26522_RCR1_ADDR 0x81
29#define DS26522_RCR3_ADDR 0x83
30#define DS26522_RIOCR_ADDR 0x84
31
32#define DS26522_GTCR1_ADDR 0xf0
33#define DS26522_GFCR_ADDR 0xf1
34#define DS26522_GTCR2_ADDR 0xf2
35#define DS26522_GTCCR_ADDR 0xf3
36#define DS26522_GLSRR_ADDR 0xf5
37#define DS26522_GFSRR_ADDR 0xf6
38#define DS26522_IDR_ADDR 0xf8
39
40#define DS26522_E1TAF_ADDR 0x164
41#define DS26522_E1TNAF_ADDR 0x165
42#define DS26522_TMMR_ADDR 0x180
43#define DS26522_TCR1_ADDR 0x181
44#define DS26522_TIOCR_ADDR 0x184
45
46#define DS26522_LTRCR_ADDR 0x1000
47#define DS26522_LTITSR_ADDR 0x1001
48#define DS26522_LMCR_ADDR 0x1002
49#define DS26522_LRISMR_ADDR 0x1007
50
51#define MAX_NUM_OF_CHANNELS 8
52#define PQ_MDS_8E1T1_BRD_REV 0x00
53#define PQ_MDS_8E1T1_PLD_REV 0x00
54
55#define DS26522_GTCCR_BPREFSEL_REFCLKIN 0xa0
56#define DS26522_GTCCR_BFREQSEL_1544KHZ 0x08
57#define DS26522_GTCCR_FREQSEL_1544KHZ 0x04
58#define DS26522_GTCCR_BFREQSEL_2048KHZ 0x00
59#define DS26522_GTCCR_FREQSEL_2048KHZ 0x00
60
61#define DS26522_GFCR_BPCLK_2048KHZ 0x00
62
63#define DS26522_GTCR2_TSSYNCOUT 0x02
64#define DS26522_GTCR1 0x00
65
66#define DS26522_GFSRR_RESET 0x01
67#define DS26522_GFSRR_NORMAL 0x00
68
69#define DS26522_GLSRR_RESET 0x01
70#define DS26522_GLSRR_NORMAL 0x00
71
72#define DS26522_RMMR_SFTRST 0x02
73#define DS26522_RMMR_FRM_EN 0x80
74#define DS26522_RMMR_INIT_DONE 0x40
75#define DS26522_RMMR_T1 0x00
76#define DS26522_RMMR_E1 0x01
77
78#define DS26522_E1TAF_DEFAULT 0x1b
79#define DS26522_E1TNAF_DEFAULT 0x40
80
81#define DS26522_TMMR_SFTRST 0x02
82#define DS26522_TMMR_FRM_EN 0x80
83#define DS26522_TMMR_INIT_DONE 0x40
84#define DS26522_TMMR_T1 0x00
85#define DS26522_TMMR_E1 0x01
86
87#define DS26522_RCR1_T1_SYNCT 0x80
88#define DS26522_RCR1_T1_RB8ZS 0x40
89#define DS26522_RCR1_T1_SYNCC 0x08
90
91#define DS26522_RCR1_E1_HDB3 0x40
92#define DS26522_RCR1_E1_CCS 0x20
93
94#define DS26522_RIOCR_1544KHZ 0x00
95#define DS26522_RIOCR_2048KHZ 0x10
96#define DS26522_RIOCR_RSIO_OUT 0x00
97
98#define DS26522_RCR3_FLB 0x01
99
100#define DS26522_TIOCR_1544KHZ 0x00
101#define DS26522_TIOCR_2048KHZ 0x10
102#define DS26522_TIOCR_TSIO_OUT 0x04
103
104#define DS26522_TCR1_TB8ZS 0x04
105
106#define DS26522_LTRCR_T1 0x02
107#define DS26522_LTRCR_E1 0x00
108
109#define DS26522_LTITSR_TLIS_75OHM 0x00
110#define DS26522_LTITSR_LBOS_75OHM 0x00
111#define DS26522_LTITSR_TLIS_100OHM 0x10
112#define DS26522_LTITSR_TLIS_0DB_CSU 0x00
113
114#define DS26522_LRISMR_75OHM 0x00
115#define DS26522_LRISMR_100OHM 0x10
116#define DS26522_LRISMR_MAX 0x03
117
118#define DS26522_LMCR_TE 0x01
119
120enum line_rate {
121 LINE_RATE_T1, /* T1 line rate (1.544 Mbps) */
122 LINE_RATE_E1 /* E1 line rate (2.048 Mbps) */
123};
124
125enum tdm_trans_mode {
126 NORMAL = 0,
127 FRAMER_LB
128};
129
130enum card_support_type {
131 LM_CARD = 0,
132 DS26522_CARD,
133 NO_CARD
134};
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index bd62bc19e758..acec16b9cf49 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -25,10 +25,9 @@
25#include "ahb.h" 25#include "ahb.h"
26 26
27static const struct of_device_id ath10k_ahb_of_match[] = { 27static const struct of_device_id ath10k_ahb_of_match[] = {
28 /* TODO: enable this entry once everything in place. 28 { .compatible = "qcom,ipq4019-wifi",
29 * { .compatible = "qcom,ipq4019-wifi", 29 .data = (void *)ATH10K_HW_QCA4019
30 * .data = (void *)ATH10K_HW_QCA4019 }, 30 },
31 */
32 { } 31 { }
33}; 32};
34 33
@@ -476,6 +475,7 @@ static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg)
476 475
477static int ath10k_ahb_request_irq_legacy(struct ath10k *ar) 476static int ath10k_ahb_request_irq_legacy(struct ath10k *ar)
478{ 477{
478 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
479 struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); 479 struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
480 int ret; 480 int ret;
481 481
@@ -487,6 +487,7 @@ static int ath10k_ahb_request_irq_legacy(struct ath10k *ar)
487 ar_ahb->irq, ret); 487 ar_ahb->irq, ret);
488 return ret; 488 return ret;
489 } 489 }
490 ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
490 491
491 return 0; 492 return 0;
492} 493}
@@ -918,8 +919,6 @@ int ath10k_ahb_init(void)
918{ 919{
919 int ret; 920 int ret;
920 921
921 printk(KERN_ERR "AHB support is still work in progress\n");
922
923 ret = platform_driver_register(&ath10k_ahb_driver); 922 ret = platform_driver_register(&ath10k_ahb_driver);
924 if (ret) 923 if (ret)
925 printk(KERN_ERR "failed to register ath10k ahb driver: %d\n", 924 printk(KERN_ERR "failed to register ath10k ahb driver: %d\n",
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index a92a0ba829f5..dfb3db0ee5d1 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -18,6 +18,7 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/firmware.h> 19#include <linux/firmware.h>
20#include <linux/of.h> 20#include <linux/of.h>
21#include <asm/byteorder.h>
21 22
22#include "core.h" 23#include "core.h"
23#include "mac.h" 24#include "mac.h"
@@ -55,7 +56,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
55 .name = "qca988x hw2.0", 56 .name = "qca988x hw2.0",
56 .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, 57 .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
57 .uart_pin = 7, 58 .uart_pin = 7,
58 .has_shifted_cc_wraparound = true, 59 .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
59 .otp_exe_param = 0, 60 .otp_exe_param = 0,
60 .channel_counters_freq_hz = 88000, 61 .channel_counters_freq_hz = 88000,
61 .max_probe_resp_desc_thres = 0, 62 .max_probe_resp_desc_thres = 0,
@@ -69,6 +70,25 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
69 }, 70 },
70 }, 71 },
71 { 72 {
73 .id = QCA9887_HW_1_0_VERSION,
74 .dev_id = QCA9887_1_0_DEVICE_ID,
75 .name = "qca9887 hw1.0",
76 .patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR,
77 .uart_pin = 7,
78 .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
79 .otp_exe_param = 0,
80 .channel_counters_freq_hz = 88000,
81 .max_probe_resp_desc_thres = 0,
82 .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
83 .cal_data_len = 2116,
84 .fw = {
85 .dir = QCA9887_HW_1_0_FW_DIR,
86 .board = QCA9887_HW_1_0_BOARD_DATA_FILE,
87 .board_size = QCA9887_BOARD_DATA_SZ,
88 .board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
89 },
90 },
91 {
72 .id = QCA6174_HW_2_1_VERSION, 92 .id = QCA6174_HW_2_1_VERSION,
73 .dev_id = QCA6164_2_1_DEVICE_ID, 93 .dev_id = QCA6164_2_1_DEVICE_ID,
74 .name = "qca6164 hw2.1", 94 .name = "qca6164 hw2.1",
@@ -148,6 +168,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
148 .uart_pin = 7, 168 .uart_pin = 7,
149 .otp_exe_param = 0x00000700, 169 .otp_exe_param = 0x00000700,
150 .continuous_frag_desc = true, 170 .continuous_frag_desc = true,
171 .cck_rate_map_rev2 = true,
151 .channel_counters_freq_hz = 150000, 172 .channel_counters_freq_hz = 150000,
152 .max_probe_resp_desc_thres = 24, 173 .max_probe_resp_desc_thres = 24,
153 .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, 174 .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
@@ -163,6 +184,29 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
163 }, 184 },
164 }, 185 },
165 { 186 {
187 .id = QCA9984_HW_1_0_DEV_VERSION,
188 .dev_id = QCA9984_1_0_DEVICE_ID,
189 .name = "qca9984/qca9994 hw1.0",
190 .patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR,
191 .uart_pin = 7,
192 .otp_exe_param = 0x00000700,
193 .continuous_frag_desc = true,
194 .cck_rate_map_rev2 = true,
195 .channel_counters_freq_hz = 150000,
196 .max_probe_resp_desc_thres = 24,
197 .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
198 .tx_chain_mask = 0xf,
199 .rx_chain_mask = 0xf,
200 .max_spatial_stream = 4,
201 .cal_data_len = 12064,
202 .fw = {
203 .dir = QCA9984_HW_1_0_FW_DIR,
204 .board = QCA9984_HW_1_0_BOARD_DATA_FILE,
205 .board_size = QCA99X0_BOARD_DATA_SZ,
206 .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
207 },
208 },
209 {
166 .id = QCA9377_HW_1_0_DEV_VERSION, 210 .id = QCA9377_HW_1_0_DEV_VERSION,
167 .dev_id = QCA9377_1_0_DEVICE_ID, 211 .dev_id = QCA9377_1_0_DEVICE_ID,
168 .name = "qca9377 hw1.0", 212 .name = "qca9377 hw1.0",
@@ -202,9 +246,10 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
202 .name = "qca4019 hw1.0", 246 .name = "qca4019 hw1.0",
203 .patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR, 247 .patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR,
204 .uart_pin = 7, 248 .uart_pin = 7,
205 .has_shifted_cc_wraparound = true, 249 .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
206 .otp_exe_param = 0x0010000, 250 .otp_exe_param = 0x0010000,
207 .continuous_frag_desc = true, 251 .continuous_frag_desc = true,
252 .cck_rate_map_rev2 = true,
208 .channel_counters_freq_hz = 125000, 253 .channel_counters_freq_hz = 125000,
209 .max_probe_resp_desc_thres = 24, 254 .max_probe_resp_desc_thres = 24,
210 .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, 255 .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
@@ -236,6 +281,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
236 [ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca", 281 [ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca",
237 [ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp", 282 [ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp",
238 [ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl", 283 [ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl",
284 [ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param",
239}; 285};
240 286
241static unsigned int ath10k_core_get_fw_feature_str(char *buf, 287static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -531,6 +577,35 @@ out:
531 return ret; 577 return ret;
532} 578}
533 579
580static int ath10k_download_cal_eeprom(struct ath10k *ar)
581{
582 size_t data_len;
583 void *data = NULL;
584 int ret;
585
586 ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len);
587 if (ret) {
588 if (ret != -EOPNOTSUPP)
589 ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n",
590 ret);
591 goto out_free;
592 }
593
594 ret = ath10k_download_board_data(ar, data, data_len);
595 if (ret) {
596 ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n",
597 ret);
598 goto out_free;
599 }
600
601 ret = 0;
602
603out_free:
604 kfree(data);
605
606 return ret;
607}
608
534static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) 609static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
535{ 610{
536 u32 result, address; 611 u32 result, address;
@@ -1293,7 +1368,17 @@ static int ath10k_download_cal_data(struct ath10k *ar)
1293 } 1368 }
1294 1369
1295 ath10k_dbg(ar, ATH10K_DBG_BOOT, 1370 ath10k_dbg(ar, ATH10K_DBG_BOOT,
1296 "boot did not find DT entry, try OTP next: %d\n", 1371 "boot did not find DT entry, try target EEPROM next: %d\n",
1372 ret);
1373
1374 ret = ath10k_download_cal_eeprom(ar);
1375 if (ret == 0) {
1376 ar->cal_mode = ATH10K_CAL_MODE_EEPROM;
1377 goto done;
1378 }
1379
1380 ath10k_dbg(ar, ATH10K_DBG_BOOT,
1381 "boot did not find target EEPROM entry, try OTP next: %d\n",
1297 ret); 1382 ret);
1298 1383
1299 ret = ath10k_download_and_run_otp(ar); 1384 ret = ath10k_download_and_run_otp(ar);
@@ -1733,6 +1818,16 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
1733 if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map)) 1818 if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
1734 val |= WMI_10_4_BSS_CHANNEL_INFO_64; 1819 val |= WMI_10_4_BSS_CHANNEL_INFO_64;
1735 1820
1821 /* 10.4 firmware supports BT-Coex without reloading firmware
1822 * via pdev param. To support Bluetooth coexistence pdev param,
1823 * WMI_COEX_GPIO_SUPPORT of extended resource config should be
1824 * enabled always.
1825 */
1826 if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
1827 test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
1828 ar->running_fw->fw_file.fw_features))
1829 val |= WMI_10_4_COEX_GPIO_SUPPORT;
1830
1736 status = ath10k_mac_ext_resource_config(ar, val); 1831 status = ath10k_mac_ext_resource_config(ar, val);
1737 if (status) { 1832 if (status) {
1738 ath10k_err(ar, 1833 ath10k_err(ar,
@@ -2062,6 +2157,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
2062 2157
2063 switch (hw_rev) { 2158 switch (hw_rev) {
2064 case ATH10K_HW_QCA988X: 2159 case ATH10K_HW_QCA988X:
2160 case ATH10K_HW_QCA9887:
2065 ar->regs = &qca988x_regs; 2161 ar->regs = &qca988x_regs;
2066 ar->hw_values = &qca988x_values; 2162 ar->hw_values = &qca988x_values;
2067 break; 2163 break;
@@ -2071,6 +2167,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
2071 ar->hw_values = &qca6174_values; 2167 ar->hw_values = &qca6174_values;
2072 break; 2168 break;
2073 case ATH10K_HW_QCA99X0: 2169 case ATH10K_HW_QCA99X0:
2170 case ATH10K_HW_QCA9984:
2074 ar->regs = &qca99x0_regs; 2171 ar->regs = &qca99x0_regs;
2075 ar->hw_values = &qca99x0_values; 2172 ar->hw_values = &qca99x0_values;
2076 break; 2173 break;
@@ -2159,5 +2256,5 @@ void ath10k_core_destroy(struct ath10k *ar)
2159EXPORT_SYMBOL(ath10k_core_destroy); 2256EXPORT_SYMBOL(ath10k_core_destroy);
2160 2257
2161MODULE_AUTHOR("Qualcomm Atheros"); 2258MODULE_AUTHOR("Qualcomm Atheros");
2162MODULE_DESCRIPTION("Core module for QCA988X PCIe devices."); 2259MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ac wireless LAN cards.");
2163MODULE_LICENSE("Dual BSD/GPL"); 2260MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 1852e0ee3fa1..3da18c9dbd7a 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -535,6 +535,13 @@ enum ath10k_fw_features {
535 */ 535 */
536 ATH10K_FW_FEATURE_PEER_FLOW_CONTROL = 13, 536 ATH10K_FW_FEATURE_PEER_FLOW_CONTROL = 13,
537 537
538 /* Firmware supports BT-Coex without reloading firmware via pdev param.
539 * To support Bluetooth coexistence pdev param, WMI_COEX_GPIO_SUPPORT of
540 * extended resource config should be enabled always. This firmware IE
541 * is used to configure WMI_COEX_GPIO_SUPPORT.
542 */
543 ATH10K_FW_FEATURE_BTCOEX_PARAM = 14,
544
538 /* keep last */ 545 /* keep last */
539 ATH10K_FW_FEATURE_COUNT, 546 ATH10K_FW_FEATURE_COUNT,
540}; 547};
@@ -571,6 +578,7 @@ enum ath10k_cal_mode {
571 ATH10K_CAL_MODE_DT, 578 ATH10K_CAL_MODE_DT,
572 ATH10K_PRE_CAL_MODE_FILE, 579 ATH10K_PRE_CAL_MODE_FILE,
573 ATH10K_PRE_CAL_MODE_DT, 580 ATH10K_PRE_CAL_MODE_DT,
581 ATH10K_CAL_MODE_EEPROM,
574}; 582};
575 583
576enum ath10k_crypt_mode { 584enum ath10k_crypt_mode {
@@ -593,6 +601,8 @@ static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
593 return "pre-cal-file"; 601 return "pre-cal-file";
594 case ATH10K_PRE_CAL_MODE_DT: 602 case ATH10K_PRE_CAL_MODE_DT:
595 return "pre-cal-dt"; 603 return "pre-cal-dt";
604 case ATH10K_CAL_MODE_EEPROM:
605 return "eeprom";
596 } 606 }
597 607
598 return "unknown"; 608 return "unknown";
@@ -703,12 +713,10 @@ struct ath10k {
703 int uart_pin; 713 int uart_pin;
704 u32 otp_exe_param; 714 u32 otp_exe_param;
705 715
706 /* This is true if given HW chip has a quirky Cycle Counter 716 /* Type of hw cycle counter wraparound logic, for more info
707 * wraparound which resets to 0x7fffffff instead of 0. All 717 * refer enum ath10k_hw_cc_wraparound_type.
708 * other CC related counters (e.g. Rx Clear Count) are divided
709 * by 2 so they never wraparound themselves.
710 */ 718 */
711 bool has_shifted_cc_wraparound; 719 enum ath10k_hw_cc_wraparound_type cc_wraparound_type;
712 720
713 /* Some of chip expects fragment descriptor to be continuous 721 /* Some of chip expects fragment descriptor to be continuous
714 * memory for any TX operation. Set continuous_frag_desc flag 722 * memory for any TX operation. Set continuous_frag_desc flag
@@ -716,6 +724,12 @@ struct ath10k {
716 */ 724 */
717 bool continuous_frag_desc; 725 bool continuous_frag_desc;
718 726
727 /* CCK hardware rate table mapping for the newer chipsets
728 * like QCA99X0, QCA4019 got revised. The CCK h/w rate values
729 * are in a proper order with respect to the rate/preamble
730 */
731 bool cck_rate_map_rev2;
732
719 u32 channel_counters_freq_hz; 733 u32 channel_counters_freq_hz;
720 734
721 /* Mgmt tx descriptors threshold for limiting probe response 735 /* Mgmt tx descriptors threshold for limiting probe response
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index e2511550fbb8..8fbb8f2c7828 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -609,25 +609,23 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
609 char buf[32]; 609 char buf[32];
610 int ret; 610 int ret;
611 611
612 mutex_lock(&ar->conf_mutex);
613
614 simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); 612 simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
615 613
616 /* make sure that buf is null terminated */ 614 /* make sure that buf is null terminated */
617 buf[sizeof(buf) - 1] = 0; 615 buf[sizeof(buf) - 1] = 0;
618 616
617 /* drop the possible '\n' from the end */
618 if (buf[count - 1] == '\n')
619 buf[count - 1] = 0;
620
621 mutex_lock(&ar->conf_mutex);
622
619 if (ar->state != ATH10K_STATE_ON && 623 if (ar->state != ATH10K_STATE_ON &&
620 ar->state != ATH10K_STATE_RESTARTED) { 624 ar->state != ATH10K_STATE_RESTARTED) {
621 ret = -ENETDOWN; 625 ret = -ENETDOWN;
622 goto exit; 626 goto exit;
623 } 627 }
624 628
625 /* drop the possible '\n' from the end */
626 if (buf[count - 1] == '\n') {
627 buf[count - 1] = 0;
628 count--;
629 }
630
631 if (!strcmp(buf, "soft")) { 629 if (!strcmp(buf, "soft")) {
632 ath10k_info(ar, "simulating soft firmware crash\n"); 630 ath10k_info(ar, "simulating soft firmware crash\n");
633 ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0); 631 ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
@@ -2127,6 +2125,7 @@ static ssize_t ath10k_write_btcoex(struct file *file,
2127 size_t buf_size; 2125 size_t buf_size;
2128 int ret; 2126 int ret;
2129 bool val; 2127 bool val;
2128 u32 pdev_param;
2130 2129
2131 buf_size = min(count, (sizeof(buf) - 1)); 2130 buf_size = min(count, (sizeof(buf) - 1));
2132 if (copy_from_user(buf, ubuf, buf_size)) 2131 if (copy_from_user(buf, ubuf, buf_size))
@@ -2150,14 +2149,25 @@ static ssize_t ath10k_write_btcoex(struct file *file,
2150 goto exit; 2149 goto exit;
2151 } 2150 }
2152 2151
2152 pdev_param = ar->wmi.pdev_param->enable_btcoex;
2153 if (test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
2154 ar->running_fw->fw_file.fw_features)) {
2155 ret = ath10k_wmi_pdev_set_param(ar, pdev_param, val);
2156 if (ret) {
2157 ath10k_warn(ar, "failed to enable btcoex: %d\n", ret);
2158 ret = count;
2159 goto exit;
2160 }
2161 } else {
2162 ath10k_info(ar, "restarting firmware due to btcoex change");
2163 queue_work(ar->workqueue, &ar->restart_work);
2164 }
2165
2153 if (val) 2166 if (val)
2154 set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); 2167 set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
2155 else 2168 else
2156 clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); 2169 clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
2157 2170
2158 ath10k_info(ar, "restarting firmware due to btcoex change");
2159
2160 queue_work(ar->workqueue, &ar->restart_work);
2161 ret = count; 2171 ret = count;
2162 2172
2163exit: 2173exit:
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index 89e7076c919f..b2566b06e1e1 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -87,6 +87,10 @@ struct ath10k_hif_ops {
87 87
88 int (*suspend)(struct ath10k *ar); 88 int (*suspend)(struct ath10k *ar);
89 int (*resume)(struct ath10k *ar); 89 int (*resume)(struct ath10k *ar);
90
91 /* fetch calibration data from target eeprom */
92 int (*fetch_cal_eeprom)(struct ath10k *ar, void **data,
93 size_t *data_len);
90}; 94};
91 95
92static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id, 96static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
@@ -202,4 +206,14 @@ static inline void ath10k_hif_write32(struct ath10k *ar,
202 ar->hif.ops->write32(ar, address, data); 206 ar->hif.ops->write32(ar, address, data);
203} 207}
204 208
209static inline int ath10k_hif_fetch_cal_eeprom(struct ath10k *ar,
210 void **data,
211 size_t *data_len)
212{
213 if (!ar->hif.ops->fetch_cal_eeprom)
214 return -EOPNOTSUPP;
215
216 return ar->hif.ops->fetch_cal_eeprom(ar, data, data_len);
217}
218
205#endif /* _HIF_H_ */ 219#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 911c535d0863..430a83e142aa 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -485,10 +485,10 @@ struct htt_mgmt_tx_completion {
485 __le32 status; 485 __le32 status;
486} __packed; 486} __packed;
487 487
488#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x3F) 488#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x1F)
489#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0) 489#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0)
490#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 6) 490#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 5)
491#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 7) 491#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6)
492 492
493#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F 493#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F
494#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0 494#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0
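
The revised values put ext_tid in bits 0-4, with the flush-valid and release-valid flags packed directly above it. A stand-alone sketch of decoding an info0 byte with these masks (the sample value is made up):

	#include <stdio.h>
	#include <stdint.h>

	#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK	(0x1F)
	#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB	(0)
	#define HTT_RX_INDICATION_INFO0_FLUSH_VALID	(1 << 5)
	#define HTT_RX_INDICATION_INFO0_RELEASE_VALID	(1 << 6)

	int main(void)
	{
		uint8_t info0 = 0x2d;	/* hypothetical rx indication byte */
		unsigned int tid;

		tid = (info0 & HTT_RX_INDICATION_INFO0_EXT_TID_MASK) >>
		      HTT_RX_INDICATION_INFO0_EXT_TID_LSB;

		printf("ext_tid=%u flush=%d release=%d\n", tid,
		       !!(info0 & HTT_RX_INDICATION_INFO0_FLUSH_VALID),
		       !!(info0 & HTT_RX_INDICATION_INFO0_RELEASE_VALID));
		return 0;
	}
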
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 813cdd2621a1..80e645302b54 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -748,7 +748,7 @@ ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
748 if (WARN_ON_ONCE(!arvif)) 748 if (WARN_ON_ONCE(!arvif))
749 return NULL; 749 return NULL;
750 750
751 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) 751 if (WARN_ON_ONCE(ath10k_mac_vif_chan(arvif->vif, &def)))
752 return NULL; 752 return NULL;
753 753
754 return def.chan; 754 return def.chan;
@@ -939,7 +939,8 @@ static void ath10k_process_rx(struct ath10k *ar,
939 is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? 939 is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
940 "mcast" : "ucast", 940 "mcast" : "ucast",
941 (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, 941 (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
942 status->flag == 0 ? "legacy" : "", 942 (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ?
943 "legacy" : "",
943 status->flag & RX_FLAG_HT ? "ht" : "", 944 status->flag & RX_FLAG_HT ? "ht" : "",
944 status->flag & RX_FLAG_VHT ? "vht" : "", 945 status->flag & RX_FLAG_VHT ? "vht" : "",
945 status->flag & RX_FLAG_40MHZ ? "40" : "", 946 status->flag & RX_FLAG_40MHZ ? "40" : "",
@@ -2181,34 +2182,6 @@ static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
2181 ath10k_mac_tx_push_pending(ar); 2182 ath10k_mac_tx_push_pending(ar);
2182} 2183}
2183 2184
2184static inline enum nl80211_band phy_mode_to_band(u32 phy_mode)
2185{
2186 enum nl80211_band band;
2187
2188 switch (phy_mode) {
2189 case MODE_11A:
2190 case MODE_11NA_HT20:
2191 case MODE_11NA_HT40:
2192 case MODE_11AC_VHT20:
2193 case MODE_11AC_VHT40:
2194 case MODE_11AC_VHT80:
2195 band = NL80211_BAND_5GHZ;
2196 break;
2197 case MODE_11G:
2198 case MODE_11B:
2199 case MODE_11GONLY:
2200 case MODE_11NG_HT20:
2201 case MODE_11NG_HT40:
2202 case MODE_11AC_VHT20_2G:
2203 case MODE_11AC_VHT40_2G:
2204 case MODE_11AC_VHT80_2G:
2205 default:
2206 band = NL80211_BAND_2GHZ;
2207 }
2208
2209 return band;
2210}
2211
2212void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) 2185void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2213{ 2186{
2214 bool release; 2187 bool release;
@@ -2290,7 +2263,6 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2290 ath10k_htt_tx_mgmt_dec_pending(htt); 2263 ath10k_htt_tx_mgmt_dec_pending(htt);
2291 spin_unlock_bh(&htt->tx_lock); 2264 spin_unlock_bh(&htt->tx_lock);
2292 } 2265 }
2293 ath10k_mac_tx_push_pending(ar);
2294 break; 2266 break;
2295 } 2267 }
2296 case HTT_T2H_MSG_TYPE_TX_COMPL_IND: 2268 case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
@@ -2441,8 +2413,6 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
2441 dev_kfree_skb_any(skb); 2413 dev_kfree_skb_any(skb);
2442 } 2414 }
2443 2415
2444 ath10k_mac_tx_push_pending(ar);
2445
2446 num_mpdus = atomic_read(&htt->num_mpdus_ready); 2416 num_mpdus = atomic_read(&htt->num_mpdus_ready);
2447 2417
2448 while (num_mpdus) { 2418 while (num_mpdus) {
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index f544d48518c3..bd86e7a38db9 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -179,17 +179,35 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
179 u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev) 179 u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev)
180{ 180{
181 u32 cc_fix = 0; 181 u32 cc_fix = 0;
182 u32 rcc_fix = 0;
183 enum ath10k_hw_cc_wraparound_type wraparound_type;
182 184
183 survey->filled |= SURVEY_INFO_TIME | 185 survey->filled |= SURVEY_INFO_TIME |
184 SURVEY_INFO_TIME_BUSY; 186 SURVEY_INFO_TIME_BUSY;
185 187
186 if (ar->hw_params.has_shifted_cc_wraparound && cc < cc_prev) { 188 wraparound_type = ar->hw_params.cc_wraparound_type;
187 cc_fix = 0x7fffffff; 189
188 survey->filled &= ~SURVEY_INFO_TIME_BUSY; 190 if (cc < cc_prev || rcc < rcc_prev) {
191 switch (wraparound_type) {
192 case ATH10K_HW_CC_WRAP_SHIFTED_ALL:
193 if (cc < cc_prev) {
194 cc_fix = 0x7fffffff;
195 survey->filled &= ~SURVEY_INFO_TIME_BUSY;
196 }
197 break;
198 case ATH10K_HW_CC_WRAP_SHIFTED_EACH:
199 if (cc < cc_prev)
200 cc_fix = 0x7fffffff;
201 else
202 rcc_fix = 0x7fffffff;
203 break;
204 case ATH10K_HW_CC_WRAP_DISABLED:
205 break;
206 }
189 } 207 }
190 208
191 cc -= cc_prev - cc_fix; 209 cc -= cc_prev - cc_fix;
192 rcc -= rcc_prev; 210 rcc -= rcc_prev - rcc_fix;
193 211
194 survey->time = CCNT_TO_MSEC(ar, cc); 212 survey->time = CCNT_TO_MSEC(ar, cc);
195 survey->time_busy = CCNT_TO_MSEC(ar, rcc); 213 survey->time_busy = CCNT_TO_MSEC(ar, rcc);
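
The correction above reduces to: when a counter has wrapped, it restarted from 0x7fffffff rather than 0, so the previous sample is rebased by that amount before the delta is taken. A minimal sketch of the per-counter (SHIFTED_EACH) case, with made-up sample values:

	#include <stdio.h>
	#include <stdint.h>
	#include <inttypes.h>

	/* delta between two samples of a counter that restarts at
	 * 0x7fffffff on overflow instead of 0 */
	static uint32_t wrapped_delta(uint32_t cur, uint32_t prev)
	{
		uint32_t fix = (cur < prev) ? 0x7fffffff : 0;

		return cur - (prev - fix);
	}

	int main(void)
	{
		/* the counter wrapped between the two reads */
		printf("delta: 0x%" PRIx32 "\n",
		       wrapped_delta(0x80000010u, 0xfffffff0u));
		return 0;
	}
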
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index aedd8987040b..f31d3ce42470 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -26,7 +26,9 @@
26#define QCA6164_2_1_DEVICE_ID (0x0041) 26#define QCA6164_2_1_DEVICE_ID (0x0041)
27#define QCA6174_2_1_DEVICE_ID (0x003e) 27#define QCA6174_2_1_DEVICE_ID (0x003e)
28#define QCA99X0_2_0_DEVICE_ID (0x0040) 28#define QCA99X0_2_0_DEVICE_ID (0x0040)
29#define QCA9984_1_0_DEVICE_ID (0x0046)
29#define QCA9377_1_0_DEVICE_ID (0x0042) 30#define QCA9377_1_0_DEVICE_ID (0x0042)
31#define QCA9887_1_0_DEVICE_ID (0x0050)
30 32
31/* QCA988X 1.0 definitions (unsupported) */ 33/* QCA988X 1.0 definitions (unsupported) */
32#define QCA988X_HW_1_0_CHIP_ID_REV 0x0 34#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
@@ -38,6 +40,13 @@
38#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" 40#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
39#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 41#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
40 42
43/* QCA9887 1.0 definitions */
44#define QCA9887_HW_1_0_VERSION 0x4100016d
45#define QCA9887_HW_1_0_CHIP_ID_REV 0
46#define QCA9887_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9887/hw1.0"
47#define QCA9887_HW_1_0_BOARD_DATA_FILE "board.bin"
48#define QCA9887_HW_1_0_PATCH_LOAD_ADDR 0x1234
49
41/* QCA6174 target BMI version signatures */ 50/* QCA6174 target BMI version signatures */
42#define QCA6174_HW_1_0_VERSION 0x05000000 51#define QCA6174_HW_1_0_VERSION 0x05000000
43#define QCA6174_HW_1_1_VERSION 0x05000001 52#define QCA6174_HW_1_1_VERSION 0x05000001
@@ -91,6 +100,14 @@ enum qca9377_chip_id_rev {
91#define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin" 100#define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin"
92#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234 101#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
93 102
103/* QCA9984 1.0 defines */
104#define QCA9984_HW_1_0_DEV_VERSION 0x1000000
105#define QCA9984_HW_DEV_TYPE 0xa
106#define QCA9984_HW_1_0_CHIP_ID_REV 0x0
107#define QCA9984_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9984/hw1.0"
108#define QCA9984_HW_1_0_BOARD_DATA_FILE "board.bin"
109#define QCA9984_HW_1_0_PATCH_LOAD_ADDR 0x1234
110
94/* QCA9377 1.0 definitions */ 111/* QCA9377 1.0 definitions */
95#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0" 112#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0"
96#define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin" 113#define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin"
@@ -193,8 +210,10 @@ enum ath10k_hw_rev {
193 ATH10K_HW_QCA988X, 210 ATH10K_HW_QCA988X,
194 ATH10K_HW_QCA6174, 211 ATH10K_HW_QCA6174,
195 ATH10K_HW_QCA99X0, 212 ATH10K_HW_QCA99X0,
213 ATH10K_HW_QCA9984,
196 ATH10K_HW_QCA9377, 214 ATH10K_HW_QCA9377,
197 ATH10K_HW_QCA4019, 215 ATH10K_HW_QCA4019,
216 ATH10K_HW_QCA9887,
198}; 217};
199 218
200struct ath10k_hw_regs { 219struct ath10k_hw_regs {
@@ -247,8 +266,10 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
247 u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev); 266 u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
248 267
249#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X) 268#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
269#define QCA_REV_9887(ar) ((ar)->hw_rev == ATH10K_HW_QCA9887)
250#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174) 270#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
251#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0) 271#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
272#define QCA_REV_9984(ar) ((ar)->hw_rev == ATH10K_HW_QCA9984)
252#define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377) 273#define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377)
253#define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019) 274#define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019)
254 275
@@ -315,11 +336,41 @@ enum ath10k_hw_rate_cck {
315 ATH10K_HW_RATE_CCK_SP_2M, 336 ATH10K_HW_RATE_CCK_SP_2M,
316}; 337};
317 338
339enum ath10k_hw_rate_rev2_cck {
340 ATH10K_HW_RATE_REV2_CCK_LP_1M = 1,
341 ATH10K_HW_RATE_REV2_CCK_LP_2M,
342 ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
343 ATH10K_HW_RATE_REV2_CCK_LP_11M,
344 ATH10K_HW_RATE_REV2_CCK_SP_2M,
345 ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
346 ATH10K_HW_RATE_REV2_CCK_SP_11M,
347};
348
318enum ath10k_hw_4addr_pad { 349enum ath10k_hw_4addr_pad {
319 ATH10K_HW_4ADDR_PAD_AFTER, 350 ATH10K_HW_4ADDR_PAD_AFTER,
320 ATH10K_HW_4ADDR_PAD_BEFORE, 351 ATH10K_HW_4ADDR_PAD_BEFORE,
321}; 352};
322 353
354enum ath10k_hw_cc_wraparound_type {
355 ATH10K_HW_CC_WRAP_DISABLED = 0,
356
357 /* This type is used when the HW chip has a quirky Cycle Counter
358 * wraparound which resets to 0x7fffffff instead of 0. All
359 * other CC related counters (e.g. Rx Clear Count) are divided
360 * by 2 so they never wrap around themselves.
361 */
362 ATH10K_HW_CC_WRAP_SHIFTED_ALL = 1,
363
364 /* Each hw counter wraps around independently. When the
365 * counter overflows, the respective counter is right shifted
366 * by 1, i.e. reset to 0x7fffffff, and other counters will keep
367 * running unaffected. In this type of wraparound, it should
368 * be possible to report accurate Rx busy time, unlike the
369 * first type.
370 */
371 ATH10K_HW_CC_WRAP_SHIFTED_EACH = 2,
372};
373
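
The two wraparound schemes above matter when computing survey deltas across a sampling interval. A minimal standalone sketch of the SHIFTED_ALL correction, with semantics read off the comments (the helper and its names are illustrative, not the in-tree code):

	#include <stdbool.h>
	#include <stdint.h>

	static uint32_t cc_elapsed_shifted_all(uint32_t cc, uint32_t cc_prev,
					       bool *busy_valid)
	{
		uint32_t fix = 0;

		*busy_valid = true;
		if (cc < cc_prev) {		/* counter wrapped this interval */
			fix = 0x7fffffff;	/* it restarted from 0x7fffffff */
			*busy_valid = false;	/* Rx busy time unreliable now */
		}
		return cc - cc_prev + fix;
	}

Under SHIFTED_EACH every counter corrects itself the same way independently, so busy time stays reportable across a wrap, which is exactly the distinction the comment draws.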
323/* Target specific defines for MAIN firmware */ 374/* Target specific defines for MAIN firmware */
324#define TARGET_NUM_VDEVS 8 375#define TARGET_NUM_VDEVS 8
325#define TARGET_NUM_PEER_AST 2 376#define TARGET_NUM_PEER_AST 2
@@ -547,7 +598,10 @@ enum ath10k_hw_4addr_pad {
547#define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001 598#define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001
548 599
549#define WLAN_GPIO_PIN0_ADDRESS 0x00000028 600#define WLAN_GPIO_PIN0_ADDRESS 0x00000028
601#define WLAN_GPIO_PIN0_CONFIG_LSB 11
550#define WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800 602#define WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800
603#define WLAN_GPIO_PIN0_PAD_PULL_LSB 5
604#define WLAN_GPIO_PIN0_PAD_PULL_MASK 0x00000060
551#define WLAN_GPIO_PIN1_ADDRESS 0x0000002c 605#define WLAN_GPIO_PIN1_ADDRESS 0x0000002c
552#define WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800 606#define WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800
553#define WLAN_GPIO_PIN10_ADDRESS 0x00000050 607#define WLAN_GPIO_PIN10_ADDRESS 0x00000050
@@ -560,6 +614,8 @@ enum ath10k_hw_4addr_pad {
560#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0 614#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0
561 615
562#define SI_CONFIG_OFFSET 0x00000000 616#define SI_CONFIG_OFFSET 0x00000000
617#define SI_CONFIG_ERR_INT_LSB 19
618#define SI_CONFIG_ERR_INT_MASK 0x00080000
563#define SI_CONFIG_BIDIR_OD_DATA_LSB 18 619#define SI_CONFIG_BIDIR_OD_DATA_LSB 18
564#define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000 620#define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000
565#define SI_CONFIG_I2C_LSB 16 621#define SI_CONFIG_I2C_LSB 16
@@ -573,7 +629,9 @@ enum ath10k_hw_4addr_pad {
573#define SI_CONFIG_DIVIDER_LSB 0 629#define SI_CONFIG_DIVIDER_LSB 0
574#define SI_CONFIG_DIVIDER_MASK 0x0000000f 630#define SI_CONFIG_DIVIDER_MASK 0x0000000f
575#define SI_CS_OFFSET 0x00000004 631#define SI_CS_OFFSET 0x00000004
632#define SI_CS_DONE_ERR_LSB 10
576#define SI_CS_DONE_ERR_MASK 0x00000400 633#define SI_CS_DONE_ERR_MASK 0x00000400
634#define SI_CS_DONE_INT_LSB 9
577#define SI_CS_DONE_INT_MASK 0x00000200 635#define SI_CS_DONE_INT_MASK 0x00000200
578#define SI_CS_START_LSB 8 636#define SI_CS_START_LSB 8
579#define SI_CS_START_MASK 0x00000100 637#define SI_CS_START_MASK 0x00000100
@@ -624,7 +682,10 @@ enum ath10k_hw_4addr_pad {
624#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS 682#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS
625#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS 683#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS
626#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS 684#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS
685#define GPIO_PIN0_CONFIG_LSB WLAN_GPIO_PIN0_CONFIG_LSB
627#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK 686#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK
687#define GPIO_PIN0_PAD_PULL_LSB WLAN_GPIO_PIN0_PAD_PULL_LSB
688#define GPIO_PIN0_PAD_PULL_MASK WLAN_GPIO_PIN0_PAD_PULL_MASK
628#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK 689#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK
629#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS 690#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS
630#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS 691#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS
@@ -679,6 +740,18 @@ enum ath10k_hw_4addr_pad {
679#define WINDOW_READ_ADDR_ADDRESS MISSING 740#define WINDOW_READ_ADDR_ADDRESS MISSING
680#define WINDOW_WRITE_ADDR_ADDRESS MISSING 741#define WINDOW_WRITE_ADDR_ADDRESS MISSING
681 742
743#define QCA9887_1_0_I2C_SDA_GPIO_PIN 5
744#define QCA9887_1_0_I2C_SDA_PIN_CONFIG 3
745#define QCA9887_1_0_SI_CLK_GPIO_PIN 17
746#define QCA9887_1_0_SI_CLK_PIN_CONFIG 3
747#define QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS 0x00000010
748
749#define QCA9887_EEPROM_SELECT_READ 0xa10000a0
750#define QCA9887_EEPROM_ADDR_HI_MASK 0x0000ff00
751#define QCA9887_EEPROM_ADDR_HI_LSB 8
752#define QCA9887_EEPROM_ADDR_LO_MASK 0x00ff0000
753#define QCA9887_EEPROM_ADDR_LO_LSB 16
754
682#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB) 755#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
683 756
684#endif /* _HW_H_ */ 757#endif /* _HW_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 4040f9413e86..d4b7a168f7c0 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -62,6 +62,32 @@ static struct ieee80211_rate ath10k_rates[] = {
62 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, 62 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
63}; 63};
64 64
65static struct ieee80211_rate ath10k_rates_rev2[] = {
66 { .bitrate = 10,
67 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
68 { .bitrate = 20,
69 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
70 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
71 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
72 { .bitrate = 55,
73 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
74 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
75 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
76 { .bitrate = 110,
77 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
78 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
79 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
80
81 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
82 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
83 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
84 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
85 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
86 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
87 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
88 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
89};
90
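
mac80211 stores .bitrate in units of 100 kbit/s, so the table above covers 1-11 Mbit/s CCK plus 6-54 Mbit/s OFDM. A hypothetical lookup helper, not part of the patch, that resolves a long-preamble bitrate to its rev2 hw value (assuming the usual kernel ARRAY_SIZE macro):

	/* Hypothetical helper: map a bitrate (100 kbit/s units) to the rev2
	 * hw value; returns -1 for rates the table does not carry.
	 */
	static int ath10k_rev2_hw_value(int bitrate)
	{
		size_t i;

		for (i = 0; i < ARRAY_SIZE(ath10k_rates_rev2); i++)
			if (ath10k_rates_rev2[i].bitrate == bitrate)
				return ath10k_rates_rev2[i].hw_value;
		return -1;
	}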
65#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4 91#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
66 92
67#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX) 93#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
@@ -70,6 +96,9 @@ static struct ieee80211_rate ath10k_rates[] = {
70#define ath10k_g_rates (ath10k_rates + 0) 96#define ath10k_g_rates (ath10k_rates + 0)
71#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates)) 97#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
72 98
99#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
100#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
101
73static bool ath10k_mac_bitrate_is_cck(int bitrate) 102static bool ath10k_mac_bitrate_is_cck(int bitrate)
74{ 103{
75 switch (bitrate) { 104 switch (bitrate) {
@@ -3781,6 +3810,9 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
3781 int ret; 3810 int ret;
3782 int max; 3811 int max;
3783 3812
3813 if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
3814 return;
3815
3784 spin_lock_bh(&ar->txqs_lock); 3816 spin_lock_bh(&ar->txqs_lock);
3785 rcu_read_lock(); 3817 rcu_read_lock();
3786 3818
@@ -4051,9 +4083,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
4051 list_add_tail(&artxq->list, &ar->txqs); 4083 list_add_tail(&artxq->list, &ar->txqs);
4052 spin_unlock_bh(&ar->txqs_lock); 4084 spin_unlock_bh(&ar->txqs_lock);
4053 4085
4054 if (ath10k_mac_tx_can_push(hw, txq)) 4086 ath10k_mac_tx_push_pending(ar);
4055 tasklet_schedule(&ar->htt.txrx_compl_task);
4056
4057 ath10k_htt_tx_txq_update(hw, txq); 4087 ath10k_htt_tx_txq_update(hw, txq);
4058} 4088}
4059 4089
@@ -4467,6 +4497,19 @@ static int ath10k_start(struct ieee80211_hw *hw)
4467 } 4497 }
4468 } 4498 }
4469 4499
4500 param = ar->wmi.pdev_param->enable_btcoex;
4501 if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
4502 test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
4503 ar->running_fw->fw_file.fw_features)) {
4504 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4505 if (ret) {
4506 ath10k_warn(ar,
4507 "failed to set btcoex param: %d\n", ret);
4508 goto err_core_stop;
4509 }
4510 clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
4511 }
4512
4470 ar->num_started_vdevs = 0; 4513 ar->num_started_vdevs = 0;
4471 ath10k_regd_update(ar); 4514 ath10k_regd_update(ar);
4472 4515
@@ -7695,8 +7738,14 @@ int ath10k_mac_register(struct ath10k *ar)
7695 band = &ar->mac.sbands[NL80211_BAND_2GHZ]; 7738 band = &ar->mac.sbands[NL80211_BAND_2GHZ];
7696 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels); 7739 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
7697 band->channels = channels; 7740 band->channels = channels;
7698 band->n_bitrates = ath10k_g_rates_size; 7741
7699 band->bitrates = ath10k_g_rates; 7742 if (ar->hw_params.cck_rate_map_rev2) {
7743 band->n_bitrates = ath10k_g_rates_rev2_size;
7744 band->bitrates = ath10k_g_rates_rev2;
7745 } else {
7746 band->n_bitrates = ath10k_g_rates_size;
7747 band->bitrates = ath10k_g_rates;
7748 }
7700 7749
7701 ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band; 7750 ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
7702 } 7751 }
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 8133d7b5b956..f06dd3941bac 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -56,7 +56,9 @@ static const struct pci_device_id ath10k_pci_id_table[] = {
56 { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */ 56 { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
57 { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */ 57 { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
58 { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */ 58 { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
59 { PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
59 { PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */ 60 { PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
61 { PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
60 {0} 62 {0}
61}; 63};
62 64
@@ -81,8 +83,12 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
81 83
82 { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, 84 { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
83 85
86 { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },
87
84 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV }, 88 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
85 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV }, 89 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
90
91 { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
86}; 92};
87 93
88static void ath10k_pci_buffer_cleanup(struct ath10k *ar); 94static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
@@ -837,6 +843,7 @@ static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
837 843
838 switch (ar->hw_rev) { 844 switch (ar->hw_rev) {
839 case ATH10K_HW_QCA988X: 845 case ATH10K_HW_QCA988X:
846 case ATH10K_HW_QCA9887:
840 case ATH10K_HW_QCA6174: 847 case ATH10K_HW_QCA6174:
841 case ATH10K_HW_QCA9377: 848 case ATH10K_HW_QCA9377:
842 val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 849 val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
@@ -844,6 +851,7 @@ static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
844 0x7ff) << 21; 851 0x7ff) << 21;
845 break; 852 break;
846 case ATH10K_HW_QCA99X0: 853 case ATH10K_HW_QCA99X0:
854 case ATH10K_HW_QCA9984:
847 case ATH10K_HW_QCA4019: 855 case ATH10K_HW_QCA4019:
848 val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS); 856 val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
849 break; 857 break;
@@ -864,7 +872,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
864 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 872 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
865 int ret = 0; 873 int ret = 0;
866 u32 *buf; 874 u32 *buf;
867 unsigned int completed_nbytes, orig_nbytes, remaining_bytes; 875 unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
868 struct ath10k_ce_pipe *ce_diag; 876 struct ath10k_ce_pipe *ce_diag;
869 /* Host buffer address in CE space */ 877 /* Host buffer address in CE space */
870 u32 ce_data; 878 u32 ce_data;
@@ -882,9 +890,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
882 * 1) 4-byte alignment 890 * 1) 4-byte alignment
883 * 2) Buffer in DMA-able space 891 * 2) Buffer in DMA-able space
884 */ 892 */
885 orig_nbytes = nbytes; 893 alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
894
886 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, 895 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
887 orig_nbytes, 896 alloc_nbytes,
888 &ce_data_base, 897 &ce_data_base,
889 GFP_ATOMIC); 898 GFP_ATOMIC);
890 899
@@ -892,9 +901,9 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
892 ret = -ENOMEM; 901 ret = -ENOMEM;
893 goto done; 902 goto done;
894 } 903 }
895 memset(data_buf, 0, orig_nbytes); 904 memset(data_buf, 0, alloc_nbytes);
896 905
897 remaining_bytes = orig_nbytes; 906 remaining_bytes = nbytes;
898 ce_data = ce_data_base; 907 ce_data = ce_data_base;
899 while (remaining_bytes) { 908 while (remaining_bytes) {
900 nbytes = min_t(unsigned int, remaining_bytes, 909 nbytes = min_t(unsigned int, remaining_bytes,
@@ -954,19 +963,22 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
954 } 963 }
955 964
956 remaining_bytes -= nbytes; 965 remaining_bytes -= nbytes;
966
967 if (ret) {
968 ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
969 address, ret);
970 break;
971 }
972 memcpy(data, data_buf, nbytes);
973
957 address += nbytes; 974 address += nbytes;
958 ce_data += nbytes; 975 data += nbytes;
959 } 976 }
960 977
961done: 978done:
962 if (ret == 0)
963 memcpy(data, data_buf, orig_nbytes);
964 else
965 ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
966 address, ret);
967 979
968 if (data_buf) 980 if (data_buf)
969 dma_free_coherent(ar->dev, orig_nbytes, data_buf, 981 dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
970 ce_data_base); 982 ce_data_base);
971 983
972 spin_unlock_bh(&ar_pci->ce_lock); 984 spin_unlock_bh(&ar_pci->ce_lock);
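
The hunks above replace a full-request-size bounce buffer with one capped at DIAG_TRANSFER_LIMIT that is drained chunk by chunk. The resulting shape, reduced to a standalone sketch (do_transfer() stands in for the whole CE send/receive sequence, and the limit value is assumed for illustration):

	#include <stddef.h>
	#include <string.h>

	#define DIAG_TRANSFER_LIMIT 2048	/* assumed value, illustration only */

	extern int do_transfer(unsigned int address, void *buf, size_t n);

	static int diag_read(unsigned int address, unsigned char *data,
			     size_t nbytes)
	{
		static unsigned char bounce[DIAG_TRANSFER_LIMIT];
		int ret = 0;

		while (nbytes) {
			size_t n = nbytes < sizeof(bounce) ? nbytes : sizeof(bounce);

			ret = do_transfer(address, bounce, n);
			if (ret)
				break;
			memcpy(data, bounce, n);	/* drain before next chunk */
			address += n;
			data += n;
			nbytes -= n;
		}
		return ret;
	}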
@@ -1560,6 +1572,7 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
1560 1572
1561 switch (ar->hw_rev) { 1573 switch (ar->hw_rev) {
1562 case ATH10K_HW_QCA988X: 1574 case ATH10K_HW_QCA988X:
1575 case ATH10K_HW_QCA9887:
1563 case ATH10K_HW_QCA6174: 1576 case ATH10K_HW_QCA6174:
1564 case ATH10K_HW_QCA9377: 1577 case ATH10K_HW_QCA9377:
1565 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 1578 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
@@ -1569,6 +1582,7 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
1569 CORE_CTRL_ADDRESS, val); 1582 CORE_CTRL_ADDRESS, val);
1570 break; 1583 break;
1571 case ATH10K_HW_QCA99X0: 1584 case ATH10K_HW_QCA99X0:
1585 case ATH10K_HW_QCA9984:
1572 case ATH10K_HW_QCA4019: 1586 case ATH10K_HW_QCA4019:
1573 /* TODO: Find appropriate register configuration for QCA99X0 1587 /* TODO: Find appropriate register configuration for QCA99X0
1574 * to mask irq/MSI. 1588 * to mask irq/MSI.
@@ -1583,6 +1597,7 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1583 1597
1584 switch (ar->hw_rev) { 1598 switch (ar->hw_rev) {
1585 case ATH10K_HW_QCA988X: 1599 case ATH10K_HW_QCA988X:
1600 case ATH10K_HW_QCA9887:
1586 case ATH10K_HW_QCA6174: 1601 case ATH10K_HW_QCA6174:
1587 case ATH10K_HW_QCA9377: 1602 case ATH10K_HW_QCA9377:
1588 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 1603 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
@@ -1592,6 +1607,7 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1592 CORE_CTRL_ADDRESS, val); 1607 CORE_CTRL_ADDRESS, val);
1593 break; 1608 break;
1594 case ATH10K_HW_QCA99X0: 1609 case ATH10K_HW_QCA99X0:
1610 case ATH10K_HW_QCA9984:
1595 case ATH10K_HW_QCA4019: 1611 case ATH10K_HW_QCA4019:
1596 /* TODO: Find appropriate register configuration for QCA99X0 1612 /* TODO: Find appropriate register configuration for QCA99X0
1597 * to unmask irq/MSI. 1613 * to unmask irq/MSI.
@@ -1932,6 +1948,8 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
1932 switch (ar_pci->pdev->device) { 1948 switch (ar_pci->pdev->device) {
1933 case QCA988X_2_0_DEVICE_ID: 1949 case QCA988X_2_0_DEVICE_ID:
1934 case QCA99X0_2_0_DEVICE_ID: 1950 case QCA99X0_2_0_DEVICE_ID:
1951 case QCA9984_1_0_DEVICE_ID:
1952 case QCA9887_1_0_DEVICE_ID:
1935 return 1; 1953 return 1;
1936 case QCA6164_2_1_DEVICE_ID: 1954 case QCA6164_2_1_DEVICE_ID:
1937 case QCA6174_2_1_DEVICE_ID: 1955 case QCA6174_2_1_DEVICE_ID:
@@ -2293,16 +2311,20 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
2293 return 0; 2311 return 0;
2294} 2312}
2295 2313
2314static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
2315{
2316 ath10k_pci_irq_disable(ar);
2317 return ath10k_pci_qca99x0_chip_reset(ar);
2318}
2319
2296static int ath10k_pci_safe_chip_reset(struct ath10k *ar) 2320static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
2297{ 2321{
2298 if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) { 2322 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2299 return ath10k_pci_warm_reset(ar); 2323
2300 } else if (QCA_REV_99X0(ar)) { 2324 if (!ar_pci->pci_soft_reset)
2301 ath10k_pci_irq_disable(ar);
2302 return ath10k_pci_qca99x0_chip_reset(ar);
2303 } else {
2304 return -ENOTSUPP; 2325 return -ENOTSUPP;
2305 } 2326
2327 return ar_pci->pci_soft_reset(ar);
2306} 2328}
2307 2329
2308static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar) 2330static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
@@ -2437,16 +2459,12 @@ static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
2437 2459
2438static int ath10k_pci_chip_reset(struct ath10k *ar) 2460static int ath10k_pci_chip_reset(struct ath10k *ar)
2439{ 2461{
2440 if (QCA_REV_988X(ar)) 2462 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2441 return ath10k_pci_qca988x_chip_reset(ar); 2463
2442 else if (QCA_REV_6174(ar)) 2464 if (WARN_ON(!ar_pci->pci_hard_reset))
2443 return ath10k_pci_qca6174_chip_reset(ar);
2444 else if (QCA_REV_9377(ar))
2445 return ath10k_pci_qca6174_chip_reset(ar);
2446 else if (QCA_REV_99X0(ar))
2447 return ath10k_pci_qca99x0_chip_reset(ar);
2448 else
2449 return -ENOTSUPP; 2465 return -ENOTSUPP;
2466
2467 return ar_pci->pci_hard_reset(ar);
2450} 2468}
2451 2469
2452static int ath10k_pci_hif_power_up(struct ath10k *ar) 2470static int ath10k_pci_hif_power_up(struct ath10k *ar)
@@ -2559,6 +2577,144 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
2559} 2577}
2560#endif 2578#endif
2561 2579
2580static bool ath10k_pci_validate_cal(void *data, size_t size)
2581{
2582 __le16 *cal_words = data;
2583 u16 checksum = 0;
2584 size_t i;
2585
2586 if (size % 2 != 0)
2587 return false;
2588
2589 for (i = 0; i < size / 2; i++)
2590 checksum ^= le16_to_cpu(cal_words[i]);
2591
2592 return checksum == 0xffff;
2593}
2594
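
ath10k_pci_validate_cal() accepts a blob whose 16-bit little-endian words XOR to 0xffff. On the generator side that means one reserved word is set to the complement of the XOR of all the others; a standalone sketch, using host byte order purely for illustration:

	#include <stddef.h>
	#include <stdint.h>

	/* XOR all words (with the checksum slot still zero), then complement:
	 * the finished blob XORs to 0xffff exactly as the validator expects.
	 */
	static uint16_t cal_checksum_word(const uint16_t *words, size_t nwords)
	{
		uint16_t x = 0;
		size_t i;

		for (i = 0; i < nwords; i++)
			x ^= words[i];
		return x ^ 0xffff;
	}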
2595static void ath10k_pci_enable_eeprom(struct ath10k *ar)
2596{
2597 /* Enable SI clock */
2598 ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);
2599
2600 /* Configure GPIOs for I2C operation */
2601 ath10k_pci_write32(ar,
2602 GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
2603 4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
2604 SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
2605 GPIO_PIN0_CONFIG) |
2606 SM(1, GPIO_PIN0_PAD_PULL));
2607
2608 ath10k_pci_write32(ar,
2609 GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
2610 4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
2611 SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
2612 SM(1, GPIO_PIN0_PAD_PULL));
2613
2614 ath10k_pci_write32(ar,
2615 GPIO_BASE_ADDRESS +
2616 QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
2617 1u << QCA9887_1_0_SI_CLK_GPIO_PIN);
2618
2619 /* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214 kHz */
2620 ath10k_pci_write32(ar,
2621 SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
2622 SM(1, SI_CONFIG_ERR_INT) |
2623 SM(1, SI_CONFIG_BIDIR_OD_DATA) |
2624 SM(1, SI_CONFIG_I2C) |
2625 SM(1, SI_CONFIG_POS_SAMPLE) |
2626 SM(1, SI_CONFIG_INACTIVE_DATA) |
2627 SM(1, SI_CONFIG_INACTIVE_CLK) |
2628 SM(8, SI_CONFIG_DIVIDER));
2629}
2630
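
The SM()/MS() helpers used above pack and unpack fields described by the *_LSB/*_MASK pairs this patch adds to hw.h. Their shift-and-mask pattern, demonstrated standalone with the GPIO_PIN0_CONFIG field (macro bodies written here to match ath10k's convention):

	#include <assert.h>
	#include <stdint.h>

	#define SM(v, f) (((v) << f##_LSB) & f##_MASK)	/* set field */
	#define MS(v, f) (((v) & f##_MASK) >> f##_LSB)	/* get field */

	#define GPIO_PIN0_CONFIG_LSB	11
	#define GPIO_PIN0_CONFIG_MASK	0x00007800

	int main(void)
	{
		uint32_t reg = SM(3, GPIO_PIN0_CONFIG);	/* pack config value 3 */

		assert(reg == (3u << 11));
		assert(MS(reg, GPIO_PIN0_CONFIG) == 3);	/* and it round-trips */
		return 0;
	}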
2631static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
2632{
2633 u32 reg;
2634 int wait_limit;
2635
2636 /* set device select byte and for the read operation */
2637 reg = QCA9887_EEPROM_SELECT_READ |
2638 SM(addr, QCA9887_EEPROM_ADDR_LO) |
2639 SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
2640 ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);
2641
2642 /* write transmit data, transfer length, and START bit */
2643 ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
2644 SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
2645 SM(4, SI_CS_TX_CNT));
2646
2647 /* wait max 1 sec */
2648 wait_limit = 100000;
2649
2650 /* wait for SI_CS_DONE_INT */
2651 do {
2652 reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
2653 if (MS(reg, SI_CS_DONE_INT))
2654 break;
2655
2656 wait_limit--;
2657 udelay(10);
2658 } while (wait_limit > 0);
2659
2660 if (!MS(reg, SI_CS_DONE_INT)) {
2661 ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
2662 addr);
2663 return -ETIMEDOUT;
2664 }
2665
2666 /* clear SI_CS_DONE_INT */
2667 ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);
2668
2669 if (MS(reg, SI_CS_DONE_ERR)) {
2670 ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
2671 return -EIO;
2672 }
2673
2674 /* extract receive data */
2675 reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
2676 *out = reg;
2677
2678 return 0;
2679}
2680
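
Worked example of the select-word packing at the top of ath10k_pci_read_eeprom(): for EEPROM address 0x1234, the low byte lands in bits 23:16 and the high byte in bits 15:8, OR'ed into the fixed select/read pattern (constants copied from this patch; SM() as sketched earlier):

	#include <assert.h>
	#include <stdint.h>

	#define QCA9887_EEPROM_SELECT_READ	0xa10000a0u
	#define QCA9887_EEPROM_ADDR_HI_MASK	0x0000ff00u
	#define QCA9887_EEPROM_ADDR_HI_LSB	8
	#define QCA9887_EEPROM_ADDR_LO_MASK	0x00ff0000u
	#define QCA9887_EEPROM_ADDR_LO_LSB	16
	#define SM(v, f) (((v) << f##_LSB) & f##_MASK)

	int main(void)
	{
		uint16_t addr = 0x1234;
		uint32_t reg = QCA9887_EEPROM_SELECT_READ |
			       SM(addr, QCA9887_EEPROM_ADDR_LO) |
			       SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);

		assert(reg == 0xa13412a0u);	/* 0x34 -> 23:16, 0x12 -> 15:8 */
		return 0;
	}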
2681static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
2682 size_t *data_len)
2683{
2684 u8 *caldata = NULL;
2685 size_t calsize, i;
2686 int ret;
2687
2688 if (!QCA_REV_9887(ar))
2689 return -EOPNOTSUPP;
2690
2691 calsize = ar->hw_params.cal_data_len;
2692 caldata = kmalloc(calsize, GFP_KERNEL);
2693 if (!caldata)
2694 return -ENOMEM;
2695
2696 ath10k_pci_enable_eeprom(ar);
2697
2698 for (i = 0; i < calsize; i++) {
2699 ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
2700 if (ret)
2701 goto err_free;
2702 }
2703
2704 if (!ath10k_pci_validate_cal(caldata, calsize))
2705 goto err_free;
2706
2707 *data = caldata;
2708 *data_len = calsize;
2709
2710 return 0;
2711
2712err_free:
2713 kfree(caldata);
2714
2715 return -EINVAL;
2716}
2717
2562static const struct ath10k_hif_ops ath10k_pci_hif_ops = { 2718static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2563 .tx_sg = ath10k_pci_hif_tx_sg, 2719 .tx_sg = ath10k_pci_hif_tx_sg,
2564 .diag_read = ath10k_pci_hif_diag_read, 2720 .diag_read = ath10k_pci_hif_diag_read,
@@ -2578,6 +2734,7 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2578 .suspend = ath10k_pci_hif_suspend, 2734 .suspend = ath10k_pci_hif_suspend,
2579 .resume = ath10k_pci_hif_resume, 2735 .resume = ath10k_pci_hif_resume,
2580#endif 2736#endif
2737 .fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom,
2581}; 2738};
2582 2739
2583/* 2740/*
@@ -2976,24 +3133,47 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2976 enum ath10k_hw_rev hw_rev; 3133 enum ath10k_hw_rev hw_rev;
2977 u32 chip_id; 3134 u32 chip_id;
2978 bool pci_ps; 3135 bool pci_ps;
3136 int (*pci_soft_reset)(struct ath10k *ar);
3137 int (*pci_hard_reset)(struct ath10k *ar);
2979 3138
2980 switch (pci_dev->device) { 3139 switch (pci_dev->device) {
2981 case QCA988X_2_0_DEVICE_ID: 3140 case QCA988X_2_0_DEVICE_ID:
2982 hw_rev = ATH10K_HW_QCA988X; 3141 hw_rev = ATH10K_HW_QCA988X;
2983 pci_ps = false; 3142 pci_ps = false;
3143 pci_soft_reset = ath10k_pci_warm_reset;
3144 pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3145 break;
3146 case QCA9887_1_0_DEVICE_ID:
3147 dev_warn(&pdev->dev, "QCA9887 support is still experimental; there are likely bugs. You have been warned.\n");
3148 hw_rev = ATH10K_HW_QCA9887;
3149 pci_ps = false;
3150 pci_soft_reset = ath10k_pci_warm_reset;
3151 pci_hard_reset = ath10k_pci_qca988x_chip_reset;
2984 break; 3152 break;
2985 case QCA6164_2_1_DEVICE_ID: 3153 case QCA6164_2_1_DEVICE_ID:
2986 case QCA6174_2_1_DEVICE_ID: 3154 case QCA6174_2_1_DEVICE_ID:
2987 hw_rev = ATH10K_HW_QCA6174; 3155 hw_rev = ATH10K_HW_QCA6174;
2988 pci_ps = true; 3156 pci_ps = true;
3157 pci_soft_reset = ath10k_pci_warm_reset;
3158 pci_hard_reset = ath10k_pci_qca6174_chip_reset;
2989 break; 3159 break;
2990 case QCA99X0_2_0_DEVICE_ID: 3160 case QCA99X0_2_0_DEVICE_ID:
2991 hw_rev = ATH10K_HW_QCA99X0; 3161 hw_rev = ATH10K_HW_QCA99X0;
2992 pci_ps = false; 3162 pci_ps = false;
3163 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3164 pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3165 break;
3166 case QCA9984_1_0_DEVICE_ID:
3167 hw_rev = ATH10K_HW_QCA9984;
3168 pci_ps = false;
3169 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3170 pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
2993 break; 3171 break;
2994 case QCA9377_1_0_DEVICE_ID: 3172 case QCA9377_1_0_DEVICE_ID:
2995 hw_rev = ATH10K_HW_QCA9377; 3173 hw_rev = ATH10K_HW_QCA9377;
2996 pci_ps = true; 3174 pci_ps = true;
3175 pci_soft_reset = NULL;
3176 pci_hard_reset = ath10k_pci_qca6174_chip_reset;
2997 break; 3177 break;
2998 default: 3178 default:
2999 WARN_ON(1); 3179 WARN_ON(1);
@@ -3018,6 +3198,8 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
3018 ar->dev_id = pci_dev->device; 3198 ar->dev_id = pci_dev->device;
3019 ar_pci->pci_ps = pci_ps; 3199 ar_pci->pci_ps = pci_ps;
3020 ar_pci->bus_ops = &ath10k_pci_bus_ops; 3200 ar_pci->bus_ops = &ath10k_pci_bus_ops;
3201 ar_pci->pci_soft_reset = pci_soft_reset;
3202 ar_pci->pci_hard_reset = pci_hard_reset;
3021 3203
3022 ar->id.vendor = pdev->vendor; 3204 ar->id.vendor = pdev->vendor;
3023 ar->id.device = pdev->device; 3205 ar->id.device = pdev->device;
@@ -3169,7 +3351,7 @@ static void __exit ath10k_pci_exit(void)
3169module_exit(ath10k_pci_exit); 3351module_exit(ath10k_pci_exit);
3170 3352
3171MODULE_AUTHOR("Qualcomm Atheros"); 3353MODULE_AUTHOR("Qualcomm Atheros");
3172MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); 3354MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
3173MODULE_LICENSE("Dual BSD/GPL"); 3355MODULE_LICENSE("Dual BSD/GPL");
3174 3356
3175/* QCA988x 2.0 firmware files */ 3357/* QCA988x 2.0 firmware files */
@@ -3180,6 +3362,11 @@ MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3180MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); 3362MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
3181MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); 3363MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3182 3364
3365/* QCA9887 1.0 firmware files */
3366MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3367MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
3368MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3369
3183/* QCA6174 2.1 firmware files */ 3370/* QCA6174 2.1 firmware files */
3184MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE); 3371MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
3185MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE); 3372MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 959dc321b75e..6eca1df2ce60 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -234,6 +234,12 @@ struct ath10k_pci {
234 234
235 const struct ath10k_bus_ops *bus_ops; 235 const struct ath10k_bus_ops *bus_ops;
236 236
237 /* Chip specific pci reset routine used to do a safe reset */
238 int (*pci_soft_reset)(struct ath10k *ar);
239
240 /* Chip specific pci full reset function */
241 int (*pci_hard_reset)(struct ath10k *ar);
242
237 /* Keep this entry in the last, memory for struct ath10k_ahb is 243 /* Keep this entry in the last, memory for struct ath10k_ahb is
238 * allocated (ahb support enabled case) in the continuation of 244 * allocated (ahb support enabled case) in the continuation of
239 * this struct. 245 * this struct.
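
Together with the pci.c hunks above, these two fields replace per-revision if/else chains with callbacks chosen once at probe time. The pattern, boiled down to an illustrative standalone sketch (names invented):

	#include <errno.h>
	#include <stddef.h>

	struct chip {
		int (*soft_reset)(struct chip *c);	/* may be NULL (QCA9377) */
		int (*hard_reset)(struct chip *c);	/* set for every device */
	};

	static int chip_safe_reset(struct chip *c)
	{
		if (!c->soft_reset)
			return -ENOTSUP;	/* the driver returns -ENOTSUPP */
		return c->soft_reset(c);
	}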
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index ca8d16884af1..034e7a54c5b2 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -656,26 +656,6 @@ struct rx_msdu_end {
656 * Reserved: HW should fill with zero. FW should ignore. 656 * Reserved: HW should fill with zero. FW should ignore.
657 */ 657 */
658 658
659#define RX_PPDU_START_SIG_RATE_SELECT_OFDM 0
660#define RX_PPDU_START_SIG_RATE_SELECT_CCK 1
661
662#define RX_PPDU_START_SIG_RATE_OFDM_48 0
663#define RX_PPDU_START_SIG_RATE_OFDM_24 1
664#define RX_PPDU_START_SIG_RATE_OFDM_12 2
665#define RX_PPDU_START_SIG_RATE_OFDM_6 3
666#define RX_PPDU_START_SIG_RATE_OFDM_54 4
667#define RX_PPDU_START_SIG_RATE_OFDM_36 5
668#define RX_PPDU_START_SIG_RATE_OFDM_18 6
669#define RX_PPDU_START_SIG_RATE_OFDM_9 7
670
671#define RX_PPDU_START_SIG_RATE_CCK_LP_11 0
672#define RX_PPDU_START_SIG_RATE_CCK_LP_5_5 1
673#define RX_PPDU_START_SIG_RATE_CCK_LP_2 2
674#define RX_PPDU_START_SIG_RATE_CCK_LP_1 3
675#define RX_PPDU_START_SIG_RATE_CCK_SP_11 4
676#define RX_PPDU_START_SIG_RATE_CCK_SP_5_5 5
677#define RX_PPDU_START_SIG_RATE_CCK_SP_2 6
678
679#define HTT_RX_PPDU_START_PREAMBLE_LEGACY 0x04 659#define HTT_RX_PPDU_START_PREAMBLE_LEGACY 0x04
680#define HTT_RX_PPDU_START_PREAMBLE_HT 0x08 660#define HTT_RX_PPDU_START_PREAMBLE_HT 0x08
681#define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF 0x09 661#define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF 0x09
@@ -711,25 +691,6 @@ struct rx_msdu_end {
711/* No idea what this flag means. It seems to be always set in rate. */ 691/* No idea what this flag means. It seems to be always set in rate. */
712#define RX_PPDU_START_RATE_FLAG BIT(3) 692#define RX_PPDU_START_RATE_FLAG BIT(3)
713 693
714enum rx_ppdu_start_rate {
715 RX_PPDU_START_RATE_OFDM_48M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_48M,
716 RX_PPDU_START_RATE_OFDM_24M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_24M,
717 RX_PPDU_START_RATE_OFDM_12M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_12M,
718 RX_PPDU_START_RATE_OFDM_6M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_6M,
719 RX_PPDU_START_RATE_OFDM_54M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_54M,
720 RX_PPDU_START_RATE_OFDM_36M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_36M,
721 RX_PPDU_START_RATE_OFDM_18M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_18M,
722 RX_PPDU_START_RATE_OFDM_9M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_9M,
723
724 RX_PPDU_START_RATE_CCK_LP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_11M,
725 RX_PPDU_START_RATE_CCK_LP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_5_5M,
726 RX_PPDU_START_RATE_CCK_LP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_2M,
727 RX_PPDU_START_RATE_CCK_LP_1M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_1M,
728 RX_PPDU_START_RATE_CCK_SP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_11M,
729 RX_PPDU_START_RATE_CCK_SP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_5_5M,
730 RX_PPDU_START_RATE_CCK_SP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_2M,
731};
732
733struct rx_ppdu_start { 694struct rx_ppdu_start {
734 struct { 695 struct {
735 u8 pri20_mhz; 696 u8 pri20_mhz;
@@ -994,7 +955,41 @@ struct rx_pkt_end {
994 __le32 info0; /* %RX_PKT_END_INFO0_ */ 955 __le32 info0; /* %RX_PKT_END_INFO0_ */
995 __le32 phy_timestamp_1; 956 __le32 phy_timestamp_1;
996 __le32 phy_timestamp_2; 957 __le32 phy_timestamp_2;
997 __le32 rx_location_info; /* %RX_LOCATION_INFO_ */ 958} __packed;
959
960#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK 0x00003fff
961#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_LSB 0
962#define RX_LOCATION_INFO0_RTT_FAC_VHT_MASK 0x1fff8000
963#define RX_LOCATION_INFO0_RTT_FAC_VHT_LSB 15
964#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_MASK 0xc0000000
965#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_LSB 30
966#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_STATUS BIT(14)
967#define RX_LOCATION_INFO0_RTT_FAC_VHT_STATUS BIT(29)
968
969#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_MASK 0x0000000c
970#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_LSB 2
971#define RX_LOCATION_INFO1_PKT_BW_MASK 0x00000030
972#define RX_LOCATION_INFO1_PKT_BW_LSB 4
973#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_MASK 0x0000ff00
974#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_LSB 8
975#define RX_LOCATION_INFO1_RTT_MSC_RATE_MASK 0x000f0000
976#define RX_LOCATION_INFO1_RTT_MSC_RATE_LSB 16
977#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_MASK 0x00300000
978#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_LSB 20
979#define RX_LOCATION_INFO1_TIMING_BACKOFF_MASK 0x07c00000
980#define RX_LOCATION_INFO1_TIMING_BACKOFF_LSB 22
981#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_MASK 0x18000000
982#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_LSB 27
983#define RX_LOCATION_INFO1_RTT_CFR_STATUS BIT(0)
984#define RX_LOCATION_INFO1_RTT_CIR_STATUS BIT(1)
985#define RX_LOCATION_INFO1_RTT_GI_TYPE BIT(7)
986#define RX_LOCATION_INFO1_RTT_MAC_PHY_PHASE BIT(29)
987#define RX_LOCATION_INFO1_RTT_TX_DATA_START_X_PHASE BIT(30)
988#define RX_LOCATION_INFO1_RX_LOCATION_VALID BIT(31)
989
990struct rx_location_info {
991 __le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */
992 __le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
998} __packed; 993} __packed;
999 994
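
A consumer of struct rx_location_info would presumably gate on the VALID bit before trusting any RTT field; a hypothetical sketch under that assumption (bit semantics inferred from the define names, constants copied from above):

	#include <stdbool.h>
	#include <stdint.h>

	#define RX_LOCATION_INFO1_RX_LOCATION_VALID	(1u << 31)
	#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK	0x00003fffu

	static bool rtt_fac_legacy(uint32_t info0, uint32_t info1, uint32_t *fac)
	{
		if (!(info1 & RX_LOCATION_INFO1_RX_LOCATION_VALID))
			return false;		/* no valid location report */
		*fac = info0 & RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK; /* LSB 0 */
		return true;
	}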
1000enum rx_phy_ppdu_end_info0 { 995enum rx_phy_ppdu_end_info0 {
@@ -1067,6 +1062,17 @@ struct rx_phy_ppdu_end {
1067 1062
1068struct rx_ppdu_end_qca99x0 { 1063struct rx_ppdu_end_qca99x0 {
1069 struct rx_pkt_end rx_pkt_end; 1064 struct rx_pkt_end rx_pkt_end;
1065 __le32 rx_location_info; /* %RX_LOCATION_INFO_ */
1066 struct rx_phy_ppdu_end rx_phy_ppdu_end;
1067 __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
1068 __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
1069 __le16 bb_length;
1070 __le16 info1; /* %RX_PPDU_END_INFO1_ */
1071} __packed;
1072
1073struct rx_ppdu_end_qca9984 {
1074 struct rx_pkt_end rx_pkt_end;
1075 struct rx_location_info rx_location_info;
1070 struct rx_phy_ppdu_end rx_phy_ppdu_end; 1076 struct rx_phy_ppdu_end rx_phy_ppdu_end;
1071 __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */ 1077 __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
1072 __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */ 1078 __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
@@ -1080,6 +1086,7 @@ struct rx_ppdu_end {
1080 struct rx_ppdu_end_qca988x qca988x; 1086 struct rx_ppdu_end_qca988x qca988x;
1081 struct rx_ppdu_end_qca6174 qca6174; 1087 struct rx_ppdu_end_qca6174 qca6174;
1082 struct rx_ppdu_end_qca99x0 qca99x0; 1088 struct rx_ppdu_end_qca99x0 qca99x0;
1089 struct rx_ppdu_end_qca9984 qca9984;
1083 } __packed; 1090 } __packed;
1084} __packed; 1091} __packed;
1085 1092
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index 8e24099fa936..aaf53a81e78b 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -447,6 +447,9 @@ Fw Mode/SubMode Mask
447#define QCA988X_BOARD_DATA_SZ 7168 447#define QCA988X_BOARD_DATA_SZ 7168
448#define QCA988X_BOARD_EXT_DATA_SZ 0 448#define QCA988X_BOARD_EXT_DATA_SZ 0
449 449
450#define QCA9887_BOARD_DATA_SZ 7168
451#define QCA9887_BOARD_EXT_DATA_SZ 0
452
450#define QCA6174_BOARD_DATA_SZ 8192 453#define QCA6174_BOARD_DATA_SZ 8192
451#define QCA6174_BOARD_EXT_DATA_SZ 0 454#define QCA6174_BOARD_EXT_DATA_SZ 0
452 455
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 576e7c42ed65..1966c787998b 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -117,6 +117,9 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
117 117
118 ieee80211_tx_status(htt->ar->hw, msdu); 118 ieee80211_tx_status(htt->ar->hw, msdu);
119 /* we do not own the msdu anymore */ 119 /* we do not own the msdu anymore */
120
121 ath10k_mac_tx_push_pending(ar);
122
120 return 0; 123 return 0;
121} 124}
122 125
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 2c300329ebc3..6279ab4a760e 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1104,6 +1104,7 @@ static struct wmi_pdev_param_map wmi_pdev_param_map = {
1104 .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, 1104 .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1105 .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, 1105 .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1106 .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, 1106 .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1107 .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1107}; 1108};
1108 1109
1109static struct wmi_pdev_param_map wmi_10x_pdev_param_map = { 1110static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
@@ -1199,6 +1200,7 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
1199 .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, 1200 .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1200 .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, 1201 .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1201 .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, 1202 .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1203 .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1202}; 1204};
1203 1205
1204static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = { 1206static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
@@ -1294,6 +1296,7 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
1294 .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, 1296 .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1295 .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, 1297 .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1296 .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, 1298 .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1299 .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1297}; 1300};
1298 1301
1299/* firmware 10.2 specific mappings */ 1302/* firmware 10.2 specific mappings */
@@ -1550,6 +1553,7 @@ static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
1550 .wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET, 1553 .wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
1551 .arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR, 1554 .arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
1552 .arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR, 1555 .arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
1556 .enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
1553}; 1557};
1554 1558
1555static const struct wmi_peer_flags_map wmi_peer_flags_map = { 1559static const struct wmi_peer_flags_map wmi_peer_flags_map = {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 9fdf47ea27d0..90f594e89f94 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -3447,6 +3447,7 @@ struct wmi_pdev_param_map {
3447 u32 wapi_mbssid_offset; 3447 u32 wapi_mbssid_offset;
3448 u32 arp_srcaddr; 3448 u32 arp_srcaddr;
3449 u32 arp_dstaddr; 3449 u32 arp_dstaddr;
3450 u32 enable_btcoex;
3450}; 3451};
3451 3452
3452#define WMI_PDEV_PARAM_UNSUPPORTED 0 3453#define WMI_PDEV_PARAM_UNSUPPORTED 0
@@ -3760,6 +3761,9 @@ enum wmi_10_4_pdev_param {
3760 WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCH, 3761 WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCH,
3761 WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR, 3762 WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR,
3762 WMI_10_4_PDEV_PARAM_CUST_TXPOWER_SCALE, 3763 WMI_10_4_PDEV_PARAM_CUST_TXPOWER_SCALE,
3764 WMI_10_4_PDEV_PARAM_ATF_DYNAMIC_ENABLE,
3765 WMI_10_4_PDEV_PARAM_ATF_SSID_GROUP_POLICY,
3766 WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
3763}; 3767};
3764 3768
3765struct wmi_pdev_set_param_cmd { 3769struct wmi_pdev_set_param_cmd {
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index fc47b70988b1..f23c851765df 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -219,8 +219,8 @@ ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
219 sifs = AR5K_INIT_SIFS_QUARTER_RATE; 219 sifs = AR5K_INIT_SIFS_QUARTER_RATE;
220 break; 220 break;
221 case AR5K_BWMODE_DEFAULT: 221 case AR5K_BWMODE_DEFAULT:
222 sifs = AR5K_INIT_SIFS_DEFAULT_BG;
223 default: 222 default:
223 sifs = AR5K_INIT_SIFS_DEFAULT_BG;
224 if (channel->band == NL80211_BAND_5GHZ) 224 if (channel->band == NL80211_BAND_5GHZ)
225 sifs = AR5K_INIT_SIFS_DEFAULT_A; 225 sifs = AR5K_INIT_SIFS_DEFAULT_A;
226 break; 226 break;
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 7a1970e484a6..ac25f1781b42 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -148,7 +148,7 @@ enum ath6kl_fw_capability {
148 /* ratetable is the 2 stream version (max MCS15) */ 148 /* ratetable is the 2 stream version (max MCS15) */
149 ATH6KL_FW_CAPABILITY_RATETABLE_MCS15, 149 ATH6KL_FW_CAPABILITY_RATETABLE_MCS15,
150 150
151 /* firmare doesn't support IP checksumming */ 151 /* firmware doesn't support IP checksumming */
152 ATH6KL_FW_CAPABILITY_NO_IP_CHECKSUM, 152 ATH6KL_FW_CAPABILITY_NO_IP_CHECKSUM,
153 153
154 /* this needs to be last */ 154 /* this needs to be last */
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 631c3a0c572b..b8cf04d11975 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -2544,8 +2544,7 @@ int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx,
2544 s32 nominal_phy = 0; 2544 s32 nominal_phy = 0;
2545 int ret; 2545 int ret;
2546 2546
2547 if (!((params->user_pri < 8) && 2547 if (!((params->user_pri <= 0x7) &&
2548 (params->user_pri <= 0x7) &&
2549 (up_to_ac[params->user_pri & 0x7] == params->traffic_class) && 2548 (up_to_ac[params->user_pri & 0x7] == params->traffic_class) &&
2550 (params->traffic_direc == UPLINK_TRAFFIC || 2549 (params->traffic_direc == UPLINK_TRAFFIC ||
2551 params->traffic_direc == DNLINK_TRAFFIC || 2550 params->traffic_direc == DNLINK_TRAFFIC ||
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index dec1a317a070..d0224fc58e78 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3202,8 +3202,7 @@ static int ar9300_compress_decision(struct ath_hw *ah,
3202 it, length); 3202 it, length);
3203 break; 3203 break;
3204 case _CompressBlock: 3204 case _CompressBlock:
3205 if (reference == 0) { 3205 if (reference != 0) {
3206 } else {
3207 eep = ar9003_eeprom_struct_find_by_id(reference); 3206 eep = ar9003_eeprom_struct_find_by_id(reference);
3208 if (eep == NULL) { 3207 if (eep == NULL) {
3209 ath_dbg(common, EEPROM, 3208 ath_dbg(common, EEPROM,
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index ac4781f37e78..16aca9e28b77 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -132,7 +132,6 @@ static int ath9k_tx99_init(struct ath_softc *sc)
132 ath9k_ps_wakeup(sc); 132 ath9k_ps_wakeup(sc);
133 133
134 ath9k_hw_disable_interrupts(ah); 134 ath9k_hw_disable_interrupts(ah);
135 atomic_set(&ah->intr_ref_cnt, -1);
136 ath_drain_all_txq(sc); 135 ath_drain_all_txq(sc);
137 ath_stoprecv(sc); 136 ath_stoprecv(sc);
138 137
@@ -266,7 +265,7 @@ static const struct file_operations fops_tx99_power = {
266 265
267void ath9k_tx99_init_debug(struct ath_softc *sc) 266void ath9k_tx99_init_debug(struct ath_softc *sc)
268{ 267{
269 if (!AR_SREV_9300_20_OR_LATER(sc->sc_ah)) 268 if (!AR_SREV_9280_20_OR_LATER(sc->sc_ah))
270 return; 269 return;
271 270
272 debugfs_create_file("tx99", S_IRUSR | S_IWUSR, 271 debugfs_create_file("tx99", S_IRUSR | S_IWUSR,
diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig
index 1a796e5f69ec..2e34baeaf764 100644
--- a/drivers/net/wireless/ath/carl9170/Kconfig
+++ b/drivers/net/wireless/ath/carl9170/Kconfig
@@ -5,12 +5,10 @@ config CARL9170
5 select FW_LOADER 5 select FW_LOADER
6 select CRC32 6 select CRC32
7 help 7 help
8 This is another driver for the Atheros "otus" 802.11n USB devices. 8 This is the mainline driver for the Atheros "otus" 802.11n USB devices.
9 9
10 This driver provides more features than the original, 10 It needs a special firmware (carl9170-1.fw), which can be downloaded
11 but it needs a special firmware (carl9170-1.fw) to do that. 11 from our wiki here:
12
13 The firmware can be downloaded from our wiki here:
14 <http://wireless.kernel.org/en/users/Drivers/carl9170> 12 <http://wireless.kernel.org/en/users/Drivers/carl9170>
15 13
16 If you choose to build a module, it'll be called carl9170. 14 If you choose to build a module, it'll be called carl9170.
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 5769811291bf..62bf9331bd7f 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -378,6 +378,10 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
378 /* social scan on P2P_DEVICE is handled as p2p search */ 378 /* social scan on P2P_DEVICE is handled as p2p search */
379 if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE && 379 if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE &&
380 wil_p2p_is_social_scan(request)) { 380 wil_p2p_is_social_scan(request)) {
381 if (!wil->p2p.p2p_dev_started) {
382 wil_err(wil, "P2P search requested on stopped P2P device\n");
383 return -EIO;
384 }
381 wil->scan_request = request; 385 wil->scan_request = request;
382 wil->radio_wdev = wdev; 386 wil->radio_wdev = wdev;
383 rc = wil_p2p_search(wil, request); 387 rc = wil_p2p_search(wil, request);
@@ -1351,6 +1355,7 @@ static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy,
1351 struct wil6210_priv *wil = wiphy_to_wil(wiphy); 1355 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
1352 1356
1353 wil_dbg_misc(wil, "%s: entered\n", __func__); 1357 wil_dbg_misc(wil, "%s: entered\n", __func__);
1358 wil->p2p.p2p_dev_started = 1;
1354 return 0; 1359 return 0;
1355} 1360}
1356 1361
@@ -1358,8 +1363,19 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
1358 struct wireless_dev *wdev) 1363 struct wireless_dev *wdev)
1359{ 1364{
1360 struct wil6210_priv *wil = wiphy_to_wil(wiphy); 1365 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
1366 u8 started;
1361 1367
1362 wil_dbg_misc(wil, "%s: entered\n", __func__); 1368 wil_dbg_misc(wil, "%s: entered\n", __func__);
1369 mutex_lock(&wil->mutex);
1370 started = wil_p2p_stop_discovery(wil);
1371 if (started && wil->scan_request) {
1372 cfg80211_scan_done(wil->scan_request, 1);
1373 wil->scan_request = NULL;
1374 wil->radio_wdev = wil->wdev;
1375 }
1376 mutex_unlock(&wil->mutex);
1377
1378 wil->p2p.p2p_dev_started = 0;
1363} 1379}
1364 1380
1365static struct cfg80211_ops wil_cfg80211_ops = { 1381static struct cfg80211_ops wil_cfg80211_ops = {
diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c
index c312a667c12a..217a4591bde4 100644
--- a/drivers/net/wireless/ath/wil6210/debug.c
+++ b/drivers/net/wireless/ath/wil6210/debug.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013 Qualcomm Atheros, Inc. 2 * Copyright (c) 2013,2016 Qualcomm Atheros, Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -19,34 +19,31 @@
19 19
20void __wil_err(struct wil6210_priv *wil, const char *fmt, ...) 20void __wil_err(struct wil6210_priv *wil, const char *fmt, ...)
21{ 21{
22 struct net_device *ndev = wil_to_ndev(wil); 22 struct va_format vaf;
23 struct va_format vaf = {
24 .fmt = fmt,
25 };
26 va_list args; 23 va_list args;
27 24
28 va_start(args, fmt); 25 va_start(args, fmt);
26 vaf.fmt = fmt;
29 vaf.va = &args; 27 vaf.va = &args;
30 netdev_err(ndev, "%pV", &vaf); 28 netdev_err(wil_to_ndev(wil), "%pV", &vaf);
31 trace_wil6210_log_err(&vaf); 29 trace_wil6210_log_err(&vaf);
32 va_end(args); 30 va_end(args);
33} 31}
34 32
35void __wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...) 33void __wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...)
36{ 34{
37 if (net_ratelimit()) { 35 struct va_format vaf;
38 struct net_device *ndev = wil_to_ndev(wil); 36 va_list args;
39 struct va_format vaf = {
40 .fmt = fmt,
41 };
42 va_list args;
43 37
44 va_start(args, fmt); 38 if (!net_ratelimit())
45 vaf.va = &args; 39 return;
46 netdev_err(ndev, "%pV", &vaf); 40
47 trace_wil6210_log_err(&vaf); 41 va_start(args, fmt);
48 va_end(args); 42 vaf.fmt = fmt;
49 } 43 vaf.va = &args;
44 netdev_err(wil_to_ndev(wil), "%pV", &vaf);
45 trace_wil6210_log_err(&vaf);
46 va_end(args);
50} 47}
51 48
52void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...) 49void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...)
@@ -67,27 +64,24 @@ void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...)
67 64
68void __wil_info(struct wil6210_priv *wil, const char *fmt, ...) 65void __wil_info(struct wil6210_priv *wil, const char *fmt, ...)
69{ 66{
70 struct net_device *ndev = wil_to_ndev(wil); 67 struct va_format vaf;
71 struct va_format vaf = {
72 .fmt = fmt,
73 };
74 va_list args; 68 va_list args;
75 69
76 va_start(args, fmt); 70 va_start(args, fmt);
71 vaf.fmt = fmt;
77 vaf.va = &args; 72 vaf.va = &args;
78 netdev_info(ndev, "%pV", &vaf); 73 netdev_info(wil_to_ndev(wil), "%pV", &vaf);
79 trace_wil6210_log_info(&vaf); 74 trace_wil6210_log_info(&vaf);
80 va_end(args); 75 va_end(args);
81} 76}
82 77
83void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...) 78void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...)
84{ 79{
85 struct va_format vaf = { 80 struct va_format vaf;
86 .fmt = fmt,
87 };
88 va_list args; 81 va_list args;
89 82
90 va_start(args, fmt); 83 va_start(args, fmt);
84 vaf.fmt = fmt;
91 vaf.va = &args; 85 vaf.va = &args;
92 trace_wil6210_log_dbg(&vaf); 86 trace_wil6210_log_dbg(&vaf);
93 va_end(args); 87 va_end(args);
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
index 1c9153894dca..213b8259638c 100644
--- a/drivers/net/wireless/ath/wil6210/p2p.c
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -114,8 +114,10 @@ int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration,
114 u8 channel = P2P_DMG_SOCIAL_CHANNEL; 114 u8 channel = P2P_DMG_SOCIAL_CHANNEL;
115 int rc; 115 int rc;
116 116
117 if (chan) 117 if (!chan)
118 channel = chan->hw_value; 118 return -EINVAL;
119
120 channel = chan->hw_value;
119 121
120 wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration); 122 wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration);
121 123
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index aeb72c438e44..7b5c4222bc33 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. 2 * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -18,13 +18,20 @@
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/moduleparam.h> 19#include <linux/moduleparam.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21 21#include <linux/suspend.h>
22#include "wil6210.h" 22#include "wil6210.h"
23 23
24static bool use_msi = true; 24static bool use_msi = true;
25module_param(use_msi, bool, S_IRUGO); 25module_param(use_msi, bool, S_IRUGO);
26MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true"); 26MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
27 27
28#ifdef CONFIG_PM
29#ifdef CONFIG_PM_SLEEP
30static int wil6210_pm_notify(struct notifier_block *notify_block,
31 unsigned long mode, void *unused);
32#endif /* CONFIG_PM_SLEEP */
33#endif /* CONFIG_PM */
34
28static 35static
29void wil_set_capabilities(struct wil6210_priv *wil) 36void wil_set_capabilities(struct wil6210_priv *wil)
30{ 37{
@@ -238,6 +245,18 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
238 goto bus_disable; 245 goto bus_disable;
239 } 246 }
240 247
248#ifdef CONFIG_PM
249#ifdef CONFIG_PM_SLEEP
250 wil->pm_notify.notifier_call = wil6210_pm_notify;
251 rc = register_pm_notifier(&wil->pm_notify);
252 if (rc)
253 /* Do not fail the driver initialization, as suspend can
254 * be prevented in a later phase if needed
255 */
256 wil_err(wil, "register_pm_notifier failed: %d\n", rc);
257#endif /* CONFIG_PM_SLEEP */
258#endif /* CONFIG_PM */
259
241 wil6210_debugfs_init(wil); 260 wil6210_debugfs_init(wil);
242 261
243 262
@@ -267,6 +286,12 @@ static void wil_pcie_remove(struct pci_dev *pdev)
267 286
268 wil_dbg_misc(wil, "%s()\n", __func__); 287 wil_dbg_misc(wil, "%s()\n", __func__);
269 288
289#ifdef CONFIG_PM
290#ifdef CONFIG_PM_SLEEP
291 unregister_pm_notifier(&wil->pm_notify);
292#endif /* CONFIG_PM_SLEEP */
293#endif /* CONFIG_PM */
294
270 wil6210_debugfs_remove(wil); 295 wil6210_debugfs_remove(wil);
271 wil_if_remove(wil); 296 wil_if_remove(wil);
272 wil_if_pcie_disable(wil); 297 wil_if_pcie_disable(wil);
@@ -335,6 +360,45 @@ static int wil6210_resume(struct device *dev, bool is_runtime)
335 return rc; 360 return rc;
336} 361}
337 362
363static int wil6210_pm_notify(struct notifier_block *notify_block,
364 unsigned long mode, void *unused)
365{
366 struct wil6210_priv *wil = container_of(
367 notify_block, struct wil6210_priv, pm_notify);
368 int rc = 0;
369 enum wil_platform_event evt;
370
371 wil_dbg_pm(wil, "%s: mode (%ld)\n", __func__, mode);
372
373 switch (mode) {
374 case PM_HIBERNATION_PREPARE:
375 case PM_SUSPEND_PREPARE:
376 case PM_RESTORE_PREPARE:
377 rc = wil_can_suspend(wil, false);
378 if (rc)
379 break;
380 evt = WIL_PLATFORM_EVT_PRE_SUSPEND;
381 if (wil->platform_ops.notify)
382 rc = wil->platform_ops.notify(wil->platform_handle,
383 evt);
384 break;
385 case PM_POST_SUSPEND:
386 case PM_POST_HIBERNATION:
387 case PM_POST_RESTORE:
388 evt = WIL_PLATFORM_EVT_POST_SUSPEND;
389 if (wil->platform_ops.notify)
390 rc = wil->platform_ops.notify(wil->platform_handle,
391 evt);
392 break;
393 default:
394 wil_dbg_pm(wil, "unhandled notify mode %ld\n", mode);
395 break;
396 }
397
398 wil_dbg_pm(wil, "notification mode %ld: rc (%d)\n", mode, rc);
399 return rc;
400}
401
338static int wil6210_pm_suspend(struct device *dev) 402static int wil6210_pm_suspend(struct device *dev)
339{ 403{
340 return wil6210_suspend(dev, false); 404 return wil6210_suspend(dev, false);
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 0b7ecbcac19c..11ee24d509e5 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014 Qualcomm Atheros, Inc. 2 * Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -24,10 +24,32 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
24 wil_dbg_pm(wil, "%s(%s)\n", __func__, 24 wil_dbg_pm(wil, "%s(%s)\n", __func__,
25 is_runtime ? "runtime" : "system"); 25 is_runtime ? "runtime" : "system");
26 26
27 if (!netif_running(wil_to_ndev(wil))) {
28 /* can always sleep when down */
29 wil_dbg_pm(wil, "Interface is down\n");
30 goto out;
31 }
32 if (test_bit(wil_status_resetting, wil->status)) {
33 wil_dbg_pm(wil, "Delay suspend when resetting\n");
34 rc = -EBUSY;
35 goto out;
36 }
37 if (wil->recovery_state != fw_recovery_idle) {
38 wil_dbg_pm(wil, "Delay suspend during recovery\n");
39 rc = -EBUSY;
40 goto out;
41 }
42
43 /* interface is running */
27 switch (wdev->iftype) { 44 switch (wdev->iftype) {
28 case NL80211_IFTYPE_MONITOR: 45 case NL80211_IFTYPE_MONITOR:
29 case NL80211_IFTYPE_STATION: 46 case NL80211_IFTYPE_STATION:
30 case NL80211_IFTYPE_P2P_CLIENT: 47 case NL80211_IFTYPE_P2P_CLIENT:
48 if (test_bit(wil_status_fwconnecting, wil->status)) {
49 wil_dbg_pm(wil, "Delay suspend when connecting\n");
50 rc = -EBUSY;
51 goto out;
52 }
31 break; 53 break;
32 /* AP-like interface - can't suspend */ 54 /* AP-like interface - can't suspend */
33 default: 55 default:
@@ -36,6 +58,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
36 break; 58 break;
37 } 59 }
38 60
61out:
39 wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__, 62 wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__,
40 is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc); 63 is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
41 64
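wil_can_suspend() now refuses to sleep while the interface is resetting, in FW recovery, or mid-connect, and short-circuits the whole check when the interface is down. The gating relies on the driver's atomic status bitmap; the idiom, sketched with hypothetical drv_* names:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

enum { drv_resetting, drv_connecting, drv_status_last };

struct drv_priv {
	DECLARE_BITMAP(status, drv_status_last);
};

static int drv_can_suspend(struct drv_priv *drv)
{
	/* test_bit() reads the bitmap atomically w.r.t. set/clear_bit() */
	if (test_bit(drv_resetting, drv->status))
		return -EBUSY;		/* transient: retry after reset */
	if (test_bit(drv_connecting, drv->status))
		return -EBUSY;
	return 0;
}

Returning -EBUSY marks the condition as transient, so a later suspend attempt can succeed once the reset or connect completes.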
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index a4e43796addb..f2f6a404d3d1 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -184,6 +184,13 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
184 &vring->va[vring->swtail].tx; 184 &vring->va[vring->swtail].tx;
185 185
186 ctx = &vring->ctx[vring->swtail]; 186 ctx = &vring->ctx[vring->swtail];
187 if (!ctx) {
188 wil_dbg_txrx(wil,
189 "ctx(%d) was already completed\n",
190 vring->swtail);
191 vring->swtail = wil_vring_next_tail(vring);
192 continue;
193 }
187 *d = *_d; 194 *d = *_d;
188 wil_txdesc_unmap(dev, d, ctx); 195 wil_txdesc_unmap(dev, d, ctx);
189 if (ctx->skb) 196 if (ctx->skb)
@@ -544,6 +551,12 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
544 break; 551 break;
545 } 552 }
546 } 553 }
554
555 /* make sure all writes to descriptors (shared memory) are done before
556 * committing them to HW
557 */
558 wmb();
559
547 wil_w(wil, v->hwtail, v->swtail); 560 wil_w(wil, v->hwtail, v->swtail);
548 561
549 return rc; 562 return rc;
@@ -969,6 +982,13 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
969 txdata->dot1x_open = false; 982 txdata->dot1x_open = false;
970 txdata->enabled = 0; /* no Tx can be in progress or start anew */ 983 txdata->enabled = 0; /* no Tx can be in progress or start anew */
971 spin_unlock_bh(&txdata->lock); 984 spin_unlock_bh(&txdata->lock);
985 /* napi_synchronize waits for completion of the current NAPI but will
986 * not prevent the next NAPI run.
987 * Add a memory barrier to guarantee that txdata->enabled is zeroed
988 * before napi_synchronize so that the next scheduled NAPI will not
989 * handle this vring
990 */
991 wmb();
972 /* make sure NAPI won't touch this vring */ 992 /* make sure NAPI won't touch this vring */
973 if (test_bit(wil_status_napi_en, wil->status)) 993 if (test_bit(wil_status_napi_en, wil->status))
974 napi_synchronize(&wil->napi_tx); 994 napi_synchronize(&wil->napi_tx);
@@ -1551,6 +1571,13 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
1551 vring_index, used, used + descs_used); 1571 vring_index, used, used + descs_used);
1552 } 1572 }
1553 1573
1574 /* Make sure to advance the head only after the descriptor update is done.
1575 * This prevents a race where the completion thread sees the DU bit
1576 * still set from a previous run and handles the skb before its
1577 * descriptors have been fully written.
1578 */
1579 wmb();
1580
1554 /* advance swhead */ 1581 /* advance swhead */
1555 wil_vring_advance_head(vring, descs_used); 1582 wil_vring_advance_head(vring, descs_used);
1556 wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead); 1583 wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
@@ -1567,7 +1594,7 @@ mem_error:
1567 while (descs_used > 0) { 1594 while (descs_used > 0) {
1568 struct wil_ctx *ctx; 1595 struct wil_ctx *ctx;
1569 1596
1570 i = (swhead + descs_used) % vring->size; 1597 i = (swhead + descs_used - 1) % vring->size;
1571 d = (struct vring_tx_desc *)&vring->va[i].tx; 1598 d = (struct vring_tx_desc *)&vring->va[i].tx;
1572 _desc = &vring->va[i].tx; 1599 _desc = &vring->va[i].tx;
1573 *d = *_desc; 1600 *d = *_desc;
@@ -1691,6 +1718,13 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
1691 vring_index, used, used + nr_frags + 1); 1718 vring_index, used, used + nr_frags + 1);
1692 } 1719 }
1693 1720
1721 /* Make sure to advance the head only after the descriptor update is done.
1722 * This prevents a race where the completion thread sees the DU bit
1723 * still set from a previous run and handles the skb before its
1724 * descriptors have been fully written.
1725 */
1726 wmb();
1727
1694 /* advance swhead */ 1728 /* advance swhead */
1695 wil_vring_advance_head(vring, nr_frags + 1); 1729 wil_vring_advance_head(vring, nr_frags + 1);
1696 wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead, 1730 wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
@@ -1914,6 +1948,12 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
1914 wil_consume_skb(skb, d->dma.error == 0); 1948 wil_consume_skb(skb, d->dma.error == 0);
1915 } 1949 }
1916 memset(ctx, 0, sizeof(*ctx)); 1950 memset(ctx, 0, sizeof(*ctx));
1951 /* Make sure the ctx is zeroed before updating the tail
1952 * to prevent a case where wil_tx_vring will see
1953 * this descriptor as used and handle it before the
1954 * ctx clear has completed.
1955 */
1956 wmb();
1917 /* There is no need to touch HW descriptor: 1957 /* There is no need to touch HW descriptor:
1918 * - status bit TX_DMA_STATUS_DU is set by design, 1958 * - status bit TX_DMA_STATUS_DU is set by design,
1919 * so hardware will not try to process this desc., 1959 * so hardware will not try to process this desc.,
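The txrx.c hunks all close the same ordering hole: a descriptor (or its software ctx) is written in memory shared with another agent, and an index update then publishes it, so a wmb() must sit between the two; the TSO error path is also fixed to unwind from the last written descriptor (descs_used - 1) rather than one slot past it. The producer-side publish rule, as a minimal sketch with hypothetical ring/descriptor types:

#include <linux/io.h>
#include <linux/types.h>

struct ring_desc { u64 addr; u32 len; u32 flags; };

struct ring {
	struct ring_desc *va;	/* descriptor array in coherent DMA memory */
	u32 swhead;
	u32 size;
	void __iomem *hw_head;	/* index register the consumer watches */
};

static void ring_post(struct ring *r, const struct ring_desc *d)
{
	r->va[r->swhead] = *d;			/* 1. fill the descriptor */
	wmb();	/* 2. descriptor contents must be visible before the index */
	r->swhead = (r->swhead + 1) % r->size;
	writel(r->swhead, r->hw_head);		/* 3. publish the new head */
}

The consumer side pairs this with a read barrier before trusting the descriptor contents it finds behind the published index.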
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index aa09cbcce47c..ecab4af90602 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -458,6 +458,7 @@ struct wil_tid_crypto_rx {
458struct wil_p2p_info { 458struct wil_p2p_info {
459 struct ieee80211_channel listen_chan; 459 struct ieee80211_channel listen_chan;
460 u8 discovery_started; 460 u8 discovery_started;
461 u8 p2p_dev_started;
461 u64 cookie; 462 u64 cookie;
462 struct timer_list discovery_timer; /* listen/search duration */ 463 struct timer_list discovery_timer; /* listen/search duration */
463 struct work_struct discovery_expired_work; /* listen/search expire */ 464 struct work_struct discovery_expired_work; /* listen/search expire */
@@ -662,6 +663,11 @@ struct wil6210_priv {
662 /* High Access Latency Policy voting */ 663 /* High Access Latency Policy voting */
663 struct wil_halp halp; 664 struct wil_halp halp;
664 665
666#ifdef CONFIG_PM
667#ifdef CONFIG_PM_SLEEP
668 struct notifier_block pm_notify;
669#endif /* CONFIG_PM_SLEEP */
670#endif /* CONFIG_PM */
665}; 671};
666 672
667#define wil_to_wiphy(i) (i->wdev->wiphy) 673#define wil_to_wiphy(i) (i->wdev->wiphy)
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index 33d4a34b3b1c..f8c41172a3f4 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. 2 * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -23,6 +23,8 @@ enum wil_platform_event {
23 WIL_PLATFORM_EVT_FW_CRASH = 0, 23 WIL_PLATFORM_EVT_FW_CRASH = 0,
24 WIL_PLATFORM_EVT_PRE_RESET = 1, 24 WIL_PLATFORM_EVT_PRE_RESET = 1,
25 WIL_PLATFORM_EVT_FW_RDY = 2, 25 WIL_PLATFORM_EVT_FW_RDY = 2,
26 WIL_PLATFORM_EVT_PRE_SUSPEND = 3,
27 WIL_PLATFORM_EVT_POST_SUSPEND = 4,
26}; 28};
27 29
28/** 30/**
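WIL_PLATFORM_EVT_PRE_SUSPEND and WIL_PLATFORM_EVT_POST_SUSPEND extend the platform-ops event set so an optional platform backend can bracket the suspend flow; every call site guards the function pointer first. The optional-callback shape, reduced to a sketch with hypothetical names:

enum plat_event {
	PLAT_EVT_PRE_SUSPEND,
	PLAT_EVT_POST_SUSPEND,
};

struct plat_ops {
	/* may be NULL: backends that don't care leave it unset */
	int (*notify)(void *handle, enum plat_event evt);
};

static int plat_notify(const struct plat_ops *ops, void *handle,
		       enum plat_event evt)
{
	return ops->notify ? ops->notify(handle, evt) : 0;
}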
diff --git a/drivers/net/wireless/broadcom/b43/Makefile b/drivers/net/wireless/broadcom/b43/Makefile
index ddc4df46656f..27fab958e3d5 100644
--- a/drivers/net/wireless/broadcom/b43/Makefile
+++ b/drivers/net/wireless/broadcom/b43/Makefile
@@ -1,6 +1,6 @@
1b43-y += main.o 1b43-y += main.o
2b43-y += bus.o 2b43-y += bus.o
3b43-$(CONFIG_B43_PHY_G) += phy_a.o phy_g.o tables.o lo.o wa.o 3b43-$(CONFIG_B43_PHY_G) += phy_g.o tables.o lo.o wa.o
4b43-$(CONFIG_B43_PHY_N) += tables_nphy.o 4b43-$(CONFIG_B43_PHY_N) += tables_nphy.o
5b43-$(CONFIG_B43_PHY_N) += radio_2055.o 5b43-$(CONFIG_B43_PHY_N) += radio_2055.o
6b43-$(CONFIG_B43_PHY_N) += radio_2056.o 6b43-$(CONFIG_B43_PHY_N) += radio_2056.o
diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c
index d79ab2a227e1..cb987c2ecc6b 100644
--- a/drivers/net/wireless/broadcom/b43/leds.c
+++ b/drivers/net/wireless/broadcom/b43/leds.c
@@ -222,7 +222,7 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev,
222 sprom[2] = dev->dev->bus_sprom->gpio2; 222 sprom[2] = dev->dev->bus_sprom->gpio2;
223 sprom[3] = dev->dev->bus_sprom->gpio3; 223 sprom[3] = dev->dev->bus_sprom->gpio3;
224 224
225 if (sprom[led_index] == 0xFF) { 225 if ((sprom[0] & sprom[1] & sprom[2] & sprom[3]) == 0xff) {
226 /* There is no LED information in the SPROM 226 /* There is no LED information in the SPROM
227 * for this LED. Hardcode it here. */ 227 * for this LED. Hardcode it here. */
228 *activelow = false; 228 *activelow = false;
@@ -250,7 +250,11 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev,
250 return; 250 return;
251 } 251 }
252 } else { 252 } else {
253 *behaviour = sprom[led_index] & B43_LED_BEHAVIOUR; 253 /* keep LED disabled if no mapping is defined */
254 if (sprom[led_index] == 0xff)
255 *behaviour = B43_LED_OFF;
256 else
257 *behaviour = sprom[led_index] & B43_LED_BEHAVIOUR;
254 *activelow = !!(sprom[led_index] & B43_LED_ACTIVELOW); 258 *activelow = !!(sprom[led_index] & B43_LED_ACTIVELOW);
255 } 259 }
256} 260}
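The leds.c fix stops treating a single 0xFF GPIO byte as "no SPROM info": only an all-0xFF block (a blank SPROM) triggers the hardcoded fallback, while an individual 0xFF entry now simply leaves that LED off. The all-bytes-0xFF test works by AND-reducing the bytes, as in this hypothetical helper:

#include <stdbool.h>
#include <stdint.h>

/* A bit survives the AND chain only if it is set in every byte, so the
 * result is 0xff exactly when all four bytes are 0xff (blank SPROM). */
static bool sprom_leds_blank(const uint8_t gpio[4])
{
	return (gpio[0] & gpio[1] & gpio[2] & gpio[3]) == 0xff;
}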
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 4ee5c5853f9f..6e5d9095b195 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -3180,7 +3180,6 @@ static void b43_rate_memory_write(struct b43_wldev *dev, u16 rate, int is_ofdm)
3180static void b43_rate_memory_init(struct b43_wldev *dev) 3180static void b43_rate_memory_init(struct b43_wldev *dev)
3181{ 3181{
3182 switch (dev->phy.type) { 3182 switch (dev->phy.type) {
3183 case B43_PHYTYPE_A:
3184 case B43_PHYTYPE_G: 3183 case B43_PHYTYPE_G:
3185 case B43_PHYTYPE_N: 3184 case B43_PHYTYPE_N:
3186 case B43_PHYTYPE_LP: 3185 case B43_PHYTYPE_LP:
@@ -3194,8 +3193,6 @@ static void b43_rate_memory_init(struct b43_wldev *dev)
3194 b43_rate_memory_write(dev, B43_OFDM_RATE_36MB, 1); 3193 b43_rate_memory_write(dev, B43_OFDM_RATE_36MB, 1);
3195 b43_rate_memory_write(dev, B43_OFDM_RATE_48MB, 1); 3194 b43_rate_memory_write(dev, B43_OFDM_RATE_48MB, 1);
3196 b43_rate_memory_write(dev, B43_OFDM_RATE_54MB, 1); 3195 b43_rate_memory_write(dev, B43_OFDM_RATE_54MB, 1);
3197 if (dev->phy.type == B43_PHYTYPE_A)
3198 break;
3199 /* fallthrough */ 3196 /* fallthrough */
3200 case B43_PHYTYPE_B: 3197 case B43_PHYTYPE_B:
3201 b43_rate_memory_write(dev, B43_CCK_RATE_1MB, 0); 3198 b43_rate_memory_write(dev, B43_CCK_RATE_1MB, 0);
@@ -4604,14 +4601,6 @@ static int b43_phy_versioning(struct b43_wldev *dev)
4604 if (radio_manuf != 0x17F /* Broadcom */) 4601 if (radio_manuf != 0x17F /* Broadcom */)
4605 unsupported = 1; 4602 unsupported = 1;
4606 switch (phy_type) { 4603 switch (phy_type) {
4607 case B43_PHYTYPE_A:
4608 if (radio_id != 0x2060)
4609 unsupported = 1;
4610 if (radio_rev != 1)
4611 unsupported = 1;
4612 if (radio_manuf != 0x17F)
4613 unsupported = 1;
4614 break;
4615 case B43_PHYTYPE_B: 4604 case B43_PHYTYPE_B:
4616 if ((radio_id & 0xFFF0) != 0x2050) 4605 if ((radio_id & 0xFFF0) != 0x2050)
4617 unsupported = 1; 4606 unsupported = 1;
@@ -4766,10 +4755,7 @@ static void b43_set_synth_pu_delay(struct b43_wldev *dev, bool idle)
4766 u16 pu_delay; 4755 u16 pu_delay;
4767 4756
4768 /* The time value is in microseconds. */ 4757 /* The time value is in microseconds. */
4769 if (dev->phy.type == B43_PHYTYPE_A) 4758 pu_delay = 1050;
4770 pu_delay = 3700;
4771 else
4772 pu_delay = 1050;
4773 if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC) || idle) 4759 if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC) || idle)
4774 pu_delay = 500; 4760 pu_delay = 500;
4775 if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8)) 4761 if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8))
@@ -4784,14 +4770,10 @@ static void b43_set_pretbtt(struct b43_wldev *dev)
4784 u16 pretbtt; 4770 u16 pretbtt;
4785 4771
4786 /* The time value is in microseconds. */ 4772 /* The time value is in microseconds. */
4787 if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) { 4773 if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
4788 pretbtt = 2; 4774 pretbtt = 2;
4789 } else { 4775 else
4790 if (dev->phy.type == B43_PHYTYPE_A) 4776 pretbtt = 250;
4791 pretbtt = 120;
4792 else
4793 pretbtt = 250;
4794 }
4795 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRETBTT, pretbtt); 4777 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRETBTT, pretbtt);
4796 b43_write16(dev, B43_MMIO_TSF_CFP_PRETBTT, pretbtt); 4778 b43_write16(dev, B43_MMIO_TSF_CFP_PRETBTT, pretbtt);
4797} 4779}
@@ -5380,10 +5362,6 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
5380 5362
5381 /* As a fallback, try to guess using PHY type */ 5363 /* As a fallback, try to guess using PHY type */
5382 switch (dev->phy.type) { 5364 switch (dev->phy.type) {
5383 case B43_PHYTYPE_A:
5384 *have_2ghz_phy = false;
5385 *have_5ghz_phy = true;
5386 return;
5387 case B43_PHYTYPE_G: 5365 case B43_PHYTYPE_G:
5388 case B43_PHYTYPE_N: 5366 case B43_PHYTYPE_N:
5389 case B43_PHYTYPE_LP: 5367 case B43_PHYTYPE_LP:
@@ -5455,7 +5433,6 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
5455 /* We don't support 5 GHz on some PHYs yet */ 5433 /* We don't support 5 GHz on some PHYs yet */
5456 if (have_5ghz_phy) { 5434 if (have_5ghz_phy) {
5457 switch (dev->phy.type) { 5435 switch (dev->phy.type) {
5458 case B43_PHYTYPE_A:
5459 case B43_PHYTYPE_G: 5436 case B43_PHYTYPE_G:
5460 case B43_PHYTYPE_LP: 5437 case B43_PHYTYPE_LP:
5461 case B43_PHYTYPE_HT: 5438 case B43_PHYTYPE_HT:
diff --git a/drivers/net/wireless/broadcom/b43/phy_a.c b/drivers/net/wireless/broadcom/b43/phy_a.c
deleted file mode 100644
index 99c036f5ecb7..000000000000
--- a/drivers/net/wireless/broadcom/b43/phy_a.c
+++ /dev/null
@@ -1,595 +0,0 @@
1/*
2
3 Broadcom B43 wireless driver
4 IEEE 802.11a PHY driver
5
6 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
7 Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it>
8 Copyright (c) 2005-2008 Michael Buesch <m@bues.ch>
9 Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org>
10 Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch>
11
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
16
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with this program; see the file COPYING. If not, write to
24 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
25 Boston, MA 02110-1301, USA.
26
27*/
28
29#include <linux/slab.h>
30
31#include "b43.h"
32#include "phy_a.h"
33#include "phy_common.h"
34#include "wa.h"
35#include "tables.h"
36#include "main.h"
37
38
39/* Get the freq, as it has to be written to the device. */
40static inline u16 channel2freq_a(u8 channel)
41{
42 B43_WARN_ON(channel > 200);
43
44 return (5000 + 5 * channel);
45}
46
47static inline u16 freq_r3A_value(u16 frequency)
48{
49 u16 value;
50
51 if (frequency < 5091)
52 value = 0x0040;
53 else if (frequency < 5321)
54 value = 0x0000;
55 else if (frequency < 5806)
56 value = 0x0080;
57 else
58 value = 0x0040;
59
60 return value;
61}
62
63#if 0
64/* This function converts a TSSI value to dBm in Q5.2 */
65static s8 b43_aphy_estimate_power_out(struct b43_wldev *dev, s8 tssi)
66{
67 struct b43_phy *phy = &dev->phy;
68 struct b43_phy_a *aphy = phy->a;
69 s8 dbm = 0;
70 s32 tmp;
71
72 tmp = (aphy->tgt_idle_tssi - aphy->cur_idle_tssi + tssi);
73 tmp += 0x80;
74 tmp = clamp_val(tmp, 0x00, 0xFF);
75 dbm = aphy->tssi2dbm[tmp];
76 //TODO: There's a FIXME on the specs
77
78 return dbm;
79}
80#endif
81
82static void b43_radio_set_tx_iq(struct b43_wldev *dev)
83{
84 static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 };
85 static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A };
86 u16 tmp = b43_radio_read16(dev, 0x001E);
87 int i, j;
88
89 for (i = 0; i < 5; i++) {
90 for (j = 0; j < 5; j++) {
91 if (tmp == (data_high[i] << 4 | data_low[j])) {
92 b43_phy_write(dev, 0x0069,
93 (i - j) << 8 | 0x00C0);
94 return;
95 }
96 }
97 }
98}
99
100static void aphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
101{
102 u16 freq, r8, tmp;
103
104 freq = channel2freq_a(channel);
105
106 r8 = b43_radio_read16(dev, 0x0008);
107 b43_write16(dev, 0x03F0, freq);
108 b43_radio_write16(dev, 0x0008, r8);
109
110 //TODO: write max channel TX power? to Radio 0x2D
111 tmp = b43_radio_read16(dev, 0x002E);
112 tmp &= 0x0080;
113 //TODO: OR tmp with the Power out estimation for this channel?
114 b43_radio_write16(dev, 0x002E, tmp);
115
116 if (freq >= 4920 && freq <= 5500) {
117 /*
118 * r8 = (((freq * 15 * 0xE1FC780F) >> 32) / 29) & 0x0F;
119 *    = freq * 0.025862069
120 */
121 r8 = 3 * freq / 116; /* is equal to r8 = freq * 0.025862 */
122 }
123 b43_radio_write16(dev, 0x0007, (r8 << 4) | r8);
124 b43_radio_write16(dev, 0x0020, (r8 << 4) | r8);
125 b43_radio_write16(dev, 0x0021, (r8 << 4) | r8);
126 b43_radio_maskset(dev, 0x0022, 0x000F, (r8 << 4));
127 b43_radio_write16(dev, 0x002A, (r8 << 4));
128 b43_radio_write16(dev, 0x002B, (r8 << 4));
129 b43_radio_maskset(dev, 0x0008, 0x00F0, (r8 << 4));
130 b43_radio_maskset(dev, 0x0029, 0xFF0F, 0x00B0);
131 b43_radio_write16(dev, 0x0035, 0x00AA);
132 b43_radio_write16(dev, 0x0036, 0x0085);
133 b43_radio_maskset(dev, 0x003A, 0xFF20, freq_r3A_value(freq));
134 b43_radio_mask(dev, 0x003D, 0x00FF);
135 b43_radio_maskset(dev, 0x0081, 0xFF7F, 0x0080);
136 b43_radio_mask(dev, 0x0035, 0xFFEF);
137 b43_radio_maskset(dev, 0x0035, 0xFFEF, 0x0010);
138 b43_radio_set_tx_iq(dev);
139 //TODO: TSSI2dbm workaround
140//FIXME b43_phy_xmitpower(dev);
141}
142
143static void b43_radio_init2060(struct b43_wldev *dev)
144{
145 b43_radio_write16(dev, 0x0004, 0x00C0);
146 b43_radio_write16(dev, 0x0005, 0x0008);
147 b43_radio_write16(dev, 0x0009, 0x0040);
148 b43_radio_write16(dev, 0x0005, 0x00AA);
149 b43_radio_write16(dev, 0x0032, 0x008F);
150 b43_radio_write16(dev, 0x0006, 0x008F);
151 b43_radio_write16(dev, 0x0034, 0x008F);
152 b43_radio_write16(dev, 0x002C, 0x0007);
153 b43_radio_write16(dev, 0x0082, 0x0080);
154 b43_radio_write16(dev, 0x0080, 0x0000);
155 b43_radio_write16(dev, 0x003F, 0x00DA);
156 b43_radio_mask(dev, 0x0005, ~0x0008);
157 b43_radio_mask(dev, 0x0081, ~0x0010);
158 b43_radio_mask(dev, 0x0081, ~0x0020);
159 b43_radio_mask(dev, 0x0081, ~0x0020);
160 msleep(1); /* delay 400usec */
161
162 b43_radio_maskset(dev, 0x0081, ~0x0020, 0x0010);
163 msleep(1); /* delay 400usec */
164
165 b43_radio_maskset(dev, 0x0005, ~0x0008, 0x0008);
166 b43_radio_mask(dev, 0x0085, ~0x0010);
167 b43_radio_mask(dev, 0x0005, ~0x0008);
168 b43_radio_mask(dev, 0x0081, ~0x0040);
169 b43_radio_maskset(dev, 0x0081, ~0x0040, 0x0040);
170 b43_radio_write16(dev, 0x0005,
171 (b43_radio_read16(dev, 0x0081) & ~0x0008) | 0x0008);
172 b43_phy_write(dev, 0x0063, 0xDDC6);
173 b43_phy_write(dev, 0x0069, 0x07BE);
174 b43_phy_write(dev, 0x006A, 0x0000);
175
176 aphy_channel_switch(dev, dev->phy.ops->get_default_chan(dev));
177
178 msleep(1);
179}
180
181static void b43_phy_rssiagc(struct b43_wldev *dev, u8 enable)
182{
183 int i;
184
185 if (dev->phy.rev < 3) {
186 if (enable)
187 for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) {
188 b43_ofdmtab_write16(dev,
189 B43_OFDMTAB_LNAHPFGAIN1, i, 0xFFF8);
190 b43_ofdmtab_write16(dev,
191 B43_OFDMTAB_WRSSI, i, 0xFFF8);
192 }
193 else
194 for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) {
195 b43_ofdmtab_write16(dev,
196 B43_OFDMTAB_LNAHPFGAIN1, i, b43_tab_rssiagc1[i]);
197 b43_ofdmtab_write16(dev,
198 B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc1[i]);
199 }
200 } else {
201 if (enable)
202 for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++)
203 b43_ofdmtab_write16(dev,
204 B43_OFDMTAB_WRSSI, i, 0x0820);
205 else
206 for (i = 0; i < B43_TAB_RSSIAGC2_SIZE; i++)
207 b43_ofdmtab_write16(dev,
208 B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc2[i]);
209 }
210}
211
212static void b43_phy_ww(struct b43_wldev *dev)
213{
214 u16 b, curr_s, best_s = 0xFFFF;
215 int i;
216
217 b43_phy_mask(dev, B43_PHY_CRS0, ~B43_PHY_CRS0_EN);
218 b43_phy_set(dev, B43_PHY_OFDM(0x1B), 0x1000);
219 b43_phy_maskset(dev, B43_PHY_OFDM(0x82), 0xF0FF, 0x0300);
220 b43_radio_set(dev, 0x0009, 0x0080);
221 b43_radio_maskset(dev, 0x0012, 0xFFFC, 0x0002);
222 b43_wa_initgains(dev);
223 b43_phy_write(dev, B43_PHY_OFDM(0xBA), 0x3ED5);
224 b = b43_phy_read(dev, B43_PHY_PWRDOWN);
225 b43_phy_write(dev, B43_PHY_PWRDOWN, (b & 0xFFF8) | 0x0005);
226 b43_radio_set(dev, 0x0004, 0x0004);
227 for (i = 0x10; i <= 0x20; i++) {
228 b43_radio_write16(dev, 0x0013, i);
229 curr_s = b43_phy_read(dev, B43_PHY_OTABLEQ) & 0x00FF;
230 if (!curr_s) {
231 best_s = 0x0000;
232 break;
233 } else if (curr_s >= 0x0080)
234 curr_s = 0x0100 - curr_s;
235 if (curr_s < best_s)
236 best_s = curr_s;
237 }
238 b43_phy_write(dev, B43_PHY_PWRDOWN, b);
239 b43_radio_mask(dev, 0x0004, 0xFFFB);
240 b43_radio_write16(dev, 0x0013, best_s);
241 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 0, 0xFFEC);
242 b43_phy_write(dev, B43_PHY_OFDM(0xB7), 0x1E80);
243 b43_phy_write(dev, B43_PHY_OFDM(0xB6), 0x1C00);
244 b43_phy_write(dev, B43_PHY_OFDM(0xB5), 0x0EC0);
245 b43_phy_write(dev, B43_PHY_OFDM(0xB2), 0x00C0);
246 b43_phy_write(dev, B43_PHY_OFDM(0xB9), 0x1FFF);
247 b43_phy_maskset(dev, B43_PHY_OFDM(0xBB), 0xF000, 0x0053);
248 b43_phy_maskset(dev, B43_PHY_OFDM61, 0xFE1F, 0x0120);
249 b43_phy_maskset(dev, B43_PHY_OFDM(0x13), 0x0FFF, 0x3000);
250 b43_phy_maskset(dev, B43_PHY_OFDM(0x14), 0x0FFF, 0x3000);
251 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 6, 0x0017);
252 for (i = 0; i < 6; i++)
253 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, i, 0x000F);
254 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0D, 0x000E);
255 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0E, 0x0011);
256 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0F, 0x0013);
257 b43_phy_write(dev, B43_PHY_OFDM(0x33), 0x5030);
258 b43_phy_set(dev, B43_PHY_CRS0, B43_PHY_CRS0_EN);
259}
260
261static void hardware_pctl_init_aphy(struct b43_wldev *dev)
262{
263 //TODO
264}
265
266void b43_phy_inita(struct b43_wldev *dev)
267{
268 struct b43_phy *phy = &dev->phy;
269
270 /* This lowlevel A-PHY init is also called from G-PHY init.
271 * So we must not access phy->a, if called from G-PHY code.
272 */
273 B43_WARN_ON((phy->type != B43_PHYTYPE_A) &&
274 (phy->type != B43_PHYTYPE_G));
275
276 might_sleep();
277
278 if (phy->rev >= 6) {
279 if (phy->type == B43_PHYTYPE_A)
280 b43_phy_mask(dev, B43_PHY_OFDM(0x1B), ~0x1000);
281 if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN)
282 b43_phy_set(dev, B43_PHY_ENCORE, 0x0010);
283 else
284 b43_phy_mask(dev, B43_PHY_ENCORE, ~0x1010);
285 }
286
287 b43_wa_all(dev);
288
289 if (phy->type == B43_PHYTYPE_A) {
290 if (phy->gmode && (phy->rev < 3))
291 b43_phy_set(dev, 0x0034, 0x0001);
292 b43_phy_rssiagc(dev, 0);
293
294 b43_phy_set(dev, B43_PHY_CRS0, B43_PHY_CRS0_EN);
295
296 b43_radio_init2060(dev);
297
298 if ((dev->dev->board_vendor == SSB_BOARDVENDOR_BCM) &&
299 ((dev->dev->board_type == SSB_BOARD_BU4306) ||
300 (dev->dev->board_type == SSB_BOARD_BU4309))) {
301 ; //TODO: A PHY LO
302 }
303
304 if (phy->rev >= 3)
305 b43_phy_ww(dev);
306
307 hardware_pctl_init_aphy(dev);
308
309 //TODO: radar detection
310 }
311
312 if ((phy->type == B43_PHYTYPE_G) &&
313 (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL)) {
314 b43_phy_maskset(dev, B43_PHY_OFDM(0x6E), 0xE000, 0x3CF);
315 }
316}
317
318/* Initialise the TSSI->dBm lookup table */
319static int b43_aphy_init_tssi2dbm_table(struct b43_wldev *dev)
320{
321 struct b43_phy *phy = &dev->phy;
322 struct b43_phy_a *aphy = phy->a;
323 s16 pab0, pab1, pab2;
324
325 pab0 = (s16) (dev->dev->bus_sprom->pa1b0);
326 pab1 = (s16) (dev->dev->bus_sprom->pa1b1);
327 pab2 = (s16) (dev->dev->bus_sprom->pa1b2);
328
329 if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
330 pab0 != -1 && pab1 != -1 && pab2 != -1) {
331 /* The pabX values are set in SPROM. Use them. */
332 if ((s8) dev->dev->bus_sprom->itssi_a != 0 &&
333 (s8) dev->dev->bus_sprom->itssi_a != -1)
334 aphy->tgt_idle_tssi =
335 (s8) (dev->dev->bus_sprom->itssi_a);
336 else
337 aphy->tgt_idle_tssi = 62;
338 aphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0,
339 pab1, pab2);
340 if (!aphy->tssi2dbm)
341 return -ENOMEM;
342 } else {
343 /* pabX values not set in SPROM,
344 * but APHY needs a generated table. */
345 aphy->tssi2dbm = NULL;
346 b43err(dev->wl, "Could not generate tssi2dBm "
347 "table (wrong SPROM info)!\n");
348 return -ENODEV;
349 }
350
351 return 0;
352}
353
354static int b43_aphy_op_allocate(struct b43_wldev *dev)
355{
356 struct b43_phy_a *aphy;
357 int err;
358
359 aphy = kzalloc(sizeof(*aphy), GFP_KERNEL);
360 if (!aphy)
361 return -ENOMEM;
362 dev->phy.a = aphy;
363
364 err = b43_aphy_init_tssi2dbm_table(dev);
365 if (err)
366 goto err_free_aphy;
367
368 return 0;
369
370err_free_aphy:
371 kfree(aphy);
372 dev->phy.a = NULL;
373
374 return err;
375}
376
377static void b43_aphy_op_prepare_structs(struct b43_wldev *dev)
378{
379 struct b43_phy *phy = &dev->phy;
380 struct b43_phy_a *aphy = phy->a;
381 const void *tssi2dbm;
382 int tgt_idle_tssi;
383
384 /* tssi2dbm table is constant, so it is initialized at alloc time.
385 * Save a copy of the pointer. */
386 tssi2dbm = aphy->tssi2dbm;
387 tgt_idle_tssi = aphy->tgt_idle_tssi;
388
389 /* Zero out the whole PHY structure. */
390 memset(aphy, 0, sizeof(*aphy));
391
392 aphy->tssi2dbm = tssi2dbm;
393 aphy->tgt_idle_tssi = tgt_idle_tssi;
394
395 //TODO init struct b43_phy_a
396
397}
398
399static void b43_aphy_op_free(struct b43_wldev *dev)
400{
401 struct b43_phy *phy = &dev->phy;
402 struct b43_phy_a *aphy = phy->a;
403
404 kfree(aphy->tssi2dbm);
405 aphy->tssi2dbm = NULL;
406
407 kfree(aphy);
408 dev->phy.a = NULL;
409}
410
411static int b43_aphy_op_init(struct b43_wldev *dev)
412{
413 b43_phy_inita(dev);
414
415 return 0;
416}
417
418static inline u16 adjust_phyreg(struct b43_wldev *dev, u16 offset)
419{
420 /* OFDM registers are base-registers for the A-PHY. */
421 if ((offset & B43_PHYROUTE) == B43_PHYROUTE_OFDM_GPHY) {
422 offset &= ~B43_PHYROUTE;
423 offset |= B43_PHYROUTE_BASE;
424 }
425
426#if B43_DEBUG
427 if ((offset & B43_PHYROUTE) == B43_PHYROUTE_EXT_GPHY) {
428 /* Ext-G registers are only available on G-PHYs */
429 b43err(dev->wl, "Invalid EXT-G PHY access at "
430 "0x%04X on A-PHY\n", offset);
431 dump_stack();
432 }
433 if ((offset & B43_PHYROUTE) == B43_PHYROUTE_N_BMODE) {
434 /* N-BMODE registers are only available on N-PHYs */
435 b43err(dev->wl, "Invalid N-BMODE PHY access at "
436 "0x%04X on A-PHY\n", offset);
437 dump_stack();
438 }
439#endif /* B43_DEBUG */
440
441 return offset;
442}
443
444static u16 b43_aphy_op_read(struct b43_wldev *dev, u16 reg)
445{
446 reg = adjust_phyreg(dev, reg);
447 b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg);
448 return b43_read16(dev, B43_MMIO_PHY_DATA);
449}
450
451static void b43_aphy_op_write(struct b43_wldev *dev, u16 reg, u16 value)
452{
453 reg = adjust_phyreg(dev, reg);
454 b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg);
455 b43_write16(dev, B43_MMIO_PHY_DATA, value);
456}
457
458static u16 b43_aphy_op_radio_read(struct b43_wldev *dev, u16 reg)
459{
460 /* Register 1 is a 32-bit register. */
461 B43_WARN_ON(reg == 1);
462 /* A-PHY needs 0x40 for read access */
463 reg |= 0x40;
464
465 b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
466 return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
467}
468
469static void b43_aphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
470{
471 /* Register 1 is a 32-bit register. */
472 B43_WARN_ON(reg == 1);
473
474 b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
475 b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
476}
477
478static bool b43_aphy_op_supports_hwpctl(struct b43_wldev *dev)
479{
480 return (dev->phy.rev >= 5);
481}
482
483static void b43_aphy_op_software_rfkill(struct b43_wldev *dev,
484 bool blocked)
485{
486 struct b43_phy *phy = &dev->phy;
487
488 if (!blocked) {
489 if (phy->radio_on)
490 return;
491 b43_radio_write16(dev, 0x0004, 0x00C0);
492 b43_radio_write16(dev, 0x0005, 0x0008);
493 b43_phy_mask(dev, 0x0010, 0xFFF7);
494 b43_phy_mask(dev, 0x0011, 0xFFF7);
495 b43_radio_init2060(dev);
496 } else {
497 b43_radio_write16(dev, 0x0004, 0x00FF);
498 b43_radio_write16(dev, 0x0005, 0x00FB);
499 b43_phy_set(dev, 0x0010, 0x0008);
500 b43_phy_set(dev, 0x0011, 0x0008);
501 }
502}
503
504static int b43_aphy_op_switch_channel(struct b43_wldev *dev,
505 unsigned int new_channel)
506{
507 if (new_channel > 200)
508 return -EINVAL;
509 aphy_channel_switch(dev, new_channel);
510
511 return 0;
512}
513
514static unsigned int b43_aphy_op_get_default_chan(struct b43_wldev *dev)
515{
516 return 36; /* Default to channel 36 */
517}
518
519static void b43_aphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
520{//TODO
521 struct b43_phy *phy = &dev->phy;
522 u16 tmp;
523 int autodiv = 0;
524
525 if (antenna == B43_ANTENNA_AUTO0 || antenna == B43_ANTENNA_AUTO1)
526 autodiv = 1;
527
528 b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_ANTDIVHELP);
529
530 b43_phy_maskset(dev, B43_PHY_BBANDCFG, ~B43_PHY_BBANDCFG_RXANT,
531 (autodiv ? B43_ANTENNA_AUTO1 : antenna) <<
532 B43_PHY_BBANDCFG_RXANT_SHIFT);
533
534 if (autodiv) {
535 tmp = b43_phy_read(dev, B43_PHY_ANTDWELL);
536 if (antenna == B43_ANTENNA_AUTO1)
537 tmp &= ~B43_PHY_ANTDWELL_AUTODIV1;
538 else
539 tmp |= B43_PHY_ANTDWELL_AUTODIV1;
540 b43_phy_write(dev, B43_PHY_ANTDWELL, tmp);
541 }
542 if (phy->rev < 3)
543 b43_phy_maskset(dev, B43_PHY_ANTDWELL, 0xFF00, 0x24);
544 else {
545 b43_phy_set(dev, B43_PHY_OFDM61, 0x10);
546 if (phy->rev == 3) {
547 b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT, 0x1D);
548 b43_phy_write(dev, B43_PHY_ADIVRELATED, 8);
549 } else {
550 b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT, 0x3A);
551 b43_phy_maskset(dev, B43_PHY_ADIVRELATED, 0xFF00, 8);
552 }
553 }
554
555 b43_hf_write(dev, b43_hf_read(dev) | B43_HF_ANTDIVHELP);
556}
557
558static void b43_aphy_op_adjust_txpower(struct b43_wldev *dev)
559{//TODO
560}
561
562static enum b43_txpwr_result b43_aphy_op_recalc_txpower(struct b43_wldev *dev,
563 bool ignore_tssi)
564{//TODO
565 return B43_TXPWR_RES_DONE;
566}
567
568static void b43_aphy_op_pwork_15sec(struct b43_wldev *dev)
569{//TODO
570}
571
572static void b43_aphy_op_pwork_60sec(struct b43_wldev *dev)
573{//TODO
574}
575
576static const struct b43_phy_operations b43_phyops_a = {
577 .allocate = b43_aphy_op_allocate,
578 .free = b43_aphy_op_free,
579 .prepare_structs = b43_aphy_op_prepare_structs,
580 .init = b43_aphy_op_init,
581 .phy_read = b43_aphy_op_read,
582 .phy_write = b43_aphy_op_write,
583 .radio_read = b43_aphy_op_radio_read,
584 .radio_write = b43_aphy_op_radio_write,
585 .supports_hwpctl = b43_aphy_op_supports_hwpctl,
586 .software_rfkill = b43_aphy_op_software_rfkill,
587 .switch_analog = b43_phyop_switch_analog_generic,
588 .switch_channel = b43_aphy_op_switch_channel,
589 .get_default_chan = b43_aphy_op_get_default_chan,
590 .set_rx_antenna = b43_aphy_op_set_rx_antenna,
591 .recalc_txpower = b43_aphy_op_recalc_txpower,
592 .adjust_txpower = b43_aphy_op_adjust_txpower,
593 .pwork_15sec = b43_aphy_op_pwork_15sec,
594 .pwork_60sec = b43_aphy_op_pwork_60sec,
595};
diff --git a/drivers/net/wireless/broadcom/b43/phy_a.h b/drivers/net/wireless/broadcom/b43/phy_a.h
index f7d0d929a374..0a92d01c21f9 100644
--- a/drivers/net/wireless/broadcom/b43/phy_a.h
+++ b/drivers/net/wireless/broadcom/b43/phy_a.h
@@ -101,26 +101,4 @@ u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset);
101void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table, 101void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table,
102 u16 offset, u32 value); 102 u16 offset, u32 value);
103 103
104
105struct b43_phy_a {
106 /* Pointer to the table used to convert a
107 * TSSI value to dBm-Q5.2 */
108 const s8 *tssi2dbm;
109 /* Target idle TSSI */
110 int tgt_idle_tssi;
111 /* Current idle TSSI */
112 int cur_idle_tssi;//FIXME value currently not set
113
114 /* A-PHY TX Power control value. */
115 u16 txpwr_offset;
116
117 //TODO lots of missing stuff
118};
119
120/**
121 * b43_phy_inita - Lowlevel A-PHY init routine.
122 * This is _only_ used by the G-PHY code.
123 */
124void b43_phy_inita(struct b43_wldev *dev);
125
126#endif /* LINUX_B43_PHY_A_H_ */ 104#endif /* LINUX_B43_PHY_A_H_ */
diff --git a/drivers/net/wireless/broadcom/b43/phy_common.h b/drivers/net/wireless/broadcom/b43/phy_common.h
index 78d86526799e..ced054a9850c 100644
--- a/drivers/net/wireless/broadcom/b43/phy_common.h
+++ b/drivers/net/wireless/broadcom/b43/phy_common.h
@@ -190,7 +190,6 @@ struct b43_phy_operations {
190 void (*pwork_60sec)(struct b43_wldev *dev); 190 void (*pwork_60sec)(struct b43_wldev *dev);
191}; 191};
192 192
193struct b43_phy_a;
194struct b43_phy_g; 193struct b43_phy_g;
195struct b43_phy_n; 194struct b43_phy_n;
196struct b43_phy_lp; 195struct b43_phy_lp;
@@ -210,8 +209,6 @@ struct b43_phy {
210#else 209#else
211 union { 210 union {
212#endif 211#endif
213 /* A-PHY specific information */
214 struct b43_phy_a *a;
215 /* G-PHY specific information */ 212 /* G-PHY specific information */
216 struct b43_phy_g *g; 213 struct b43_phy_g *g;
217 /* N-PHY specific information */ 214 /* N-PHY specific information */
diff --git a/drivers/net/wireless/broadcom/b43/phy_g.c b/drivers/net/wireless/broadcom/b43/phy_g.c
index 462310e6e88f..822dcaa8ace6 100644
--- a/drivers/net/wireless/broadcom/b43/phy_g.c
+++ b/drivers/net/wireless/broadcom/b43/phy_g.c
@@ -31,6 +31,7 @@
31#include "phy_common.h" 31#include "phy_common.h"
32#include "lo.h" 32#include "lo.h"
33#include "main.h" 33#include "main.h"
34#include "wa.h"
34 35
35#include <linux/bitrev.h> 36#include <linux/bitrev.h>
36#include <linux/slab.h> 37#include <linux/slab.h>
@@ -1987,6 +1988,25 @@ static void b43_phy_init_pctl(struct b43_wldev *dev)
1987 b43_shm_clear_tssi(dev); 1988 b43_shm_clear_tssi(dev);
1988} 1989}
1989 1990
1991static void b43_phy_inita(struct b43_wldev *dev)
1992{
1993 struct b43_phy *phy = &dev->phy;
1994
1995 might_sleep();
1996
1997 if (phy->rev >= 6) {
1998 if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN)
1999 b43_phy_set(dev, B43_PHY_ENCORE, 0x0010);
2000 else
2001 b43_phy_mask(dev, B43_PHY_ENCORE, ~0x1010);
2002 }
2003
2004 b43_wa_all(dev);
2005
2006 if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL)
2007 b43_phy_maskset(dev, B43_PHY_OFDM(0x6E), 0xE000, 0x3CF);
2008}
2009
1990static void b43_phy_initg(struct b43_wldev *dev) 2010static void b43_phy_initg(struct b43_wldev *dev)
1991{ 2011{
1992 struct b43_phy *phy = &dev->phy; 2012 struct b43_phy *phy = &dev->phy;
@@ -2150,11 +2170,6 @@ static void default_radio_attenuation(struct b43_wldev *dev,
2150 } 2170 }
2151 } 2171 }
2152 2172
2153 if (phy->type == B43_PHYTYPE_A) {
2154 rf->att = 0x60;
2155 return;
2156 }
2157
2158 switch (phy->radio_ver) { 2173 switch (phy->radio_ver) {
2159 case 0x2053: 2174 case 0x2053:
2160 switch (phy->radio_rev) { 2175 switch (phy->radio_rev) {
diff --git a/drivers/net/wireless/broadcom/b43/wa.c b/drivers/net/wireless/broadcom/b43/wa.c
index c218c08fb2f5..0e96c08d1e17 100644
--- a/drivers/net/wireless/broadcom/b43/wa.c
+++ b/drivers/net/wireless/broadcom/b43/wa.c
@@ -30,33 +30,6 @@
30#include "phy_common.h" 30#include "phy_common.h"
31#include "wa.h" 31#include "wa.h"
32 32
33static void b43_wa_papd(struct b43_wldev *dev)
34{
35 u16 backup;
36
37 backup = b43_ofdmtab_read16(dev, B43_OFDMTAB_PWRDYN2, 0);
38 b43_ofdmtab_write16(dev, B43_OFDMTAB_PWRDYN2, 0, 7);
39 b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_APHY, 0, 0);
40 b43_dummy_transmission(dev, true, true);
41 b43_ofdmtab_write16(dev, B43_OFDMTAB_PWRDYN2, 0, backup);
42}
43
44static void b43_wa_auxclipthr(struct b43_wldev *dev)
45{
46 b43_phy_write(dev, B43_PHY_OFDM(0x8E), 0x3800);
47}
48
49static void b43_wa_afcdac(struct b43_wldev *dev)
50{
51 b43_phy_write(dev, 0x0035, 0x03FF);
52 b43_phy_write(dev, 0x0036, 0x0400);
53}
54
55static void b43_wa_txdc_offset(struct b43_wldev *dev)
56{
57 b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 0, 0x0051);
58}
59
60void b43_wa_initgains(struct b43_wldev *dev) 33void b43_wa_initgains(struct b43_wldev *dev)
61{ 34{
62 struct b43_phy *phy = &dev->phy; 35 struct b43_phy *phy = &dev->phy;
@@ -81,41 +54,6 @@ void b43_wa_initgains(struct b43_wldev *dev)
81 b43_phy_write(dev, 0x00BA, 0x3ED5); 54 b43_phy_write(dev, 0x00BA, 0x3ED5);
82} 55}
83 56
84static void b43_wa_divider(struct b43_wldev *dev)
85{
86 b43_phy_mask(dev, 0x002B, ~0x0100);
87 b43_phy_write(dev, 0x008E, 0x58C1);
88}
89
90static void b43_wa_gt(struct b43_wldev *dev) /* Gain table. */
91{
92 if (dev->phy.rev <= 2) {
93 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 0, 15);
94 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 1, 31);
95 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 2, 42);
96 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 3, 48);
97 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 4, 58);
98 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 0, 19);
99 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 1, 19);
100 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 2, 19);
101 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 3, 19);
102 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 4, 21);
103 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 5, 21);
104 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 6, 25);
105 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 0, 3);
106 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 1, 3);
107 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 2, 7);
108 } else {
109 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 0, 19);
110 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 1, 19);
111 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 2, 19);
112 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 3, 19);
113 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 4, 21);
114 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 5, 21);
115 b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 6, 25);
116 }
117}
118
119static void b43_wa_rssi_lt(struct b43_wldev *dev) /* RSSI lookup table */ 57static void b43_wa_rssi_lt(struct b43_wldev *dev) /* RSSI lookup table */
120{ 58{
121 int i; 59 int i;
@@ -133,15 +71,11 @@ static void b43_wa_rssi_lt(struct b43_wldev *dev) /* RSSI lookup table */
133 71
134static void b43_wa_analog(struct b43_wldev *dev) 72static void b43_wa_analog(struct b43_wldev *dev)
135{ 73{
136 struct b43_phy *phy = &dev->phy;
137 u16 ofdmrev; 74 u16 ofdmrev;
138 75
139 ofdmrev = b43_phy_read(dev, B43_PHY_VERSION_OFDM) & B43_PHYVER_VERSION; 76 ofdmrev = b43_phy_read(dev, B43_PHY_VERSION_OFDM) & B43_PHYVER_VERSION;
140 if (ofdmrev > 2) { 77 if (ofdmrev > 2) {
141 if (phy->type == B43_PHYTYPE_A) 78 b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1000);
142 b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1808);
143 else
144 b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1000);
145 } else { 79 } else {
146 b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 3, 0x1044); 80 b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 3, 0x1044);
147 b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 4, 0x7201); 81 b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 4, 0x7201);
@@ -149,26 +83,13 @@ static void b43_wa_analog(struct b43_wldev *dev)
149 } 83 }
150} 84}
151 85
152static void b43_wa_dac(struct b43_wldev *dev)
153{
154 if (dev->phy.analog == 1)
155 b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1,
156 (b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 1) & ~0x0034) | 0x0008);
157 else
158 b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1,
159 (b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 1) & ~0x0078) | 0x0010);
160}
161
162static void b43_wa_fft(struct b43_wldev *dev) /* Fine frequency table */ 86static void b43_wa_fft(struct b43_wldev *dev) /* Fine frequency table */
163{ 87{
164 int i; 88 int i;
165 89
166 if (dev->phy.type == B43_PHYTYPE_A) 90 for (i = 0; i < B43_TAB_FINEFREQG_SIZE; i++)
167 for (i = 0; i < B43_TAB_FINEFREQA_SIZE; i++) 91 b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i,
168 b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i, b43_tab_finefreqa[i]); 92 b43_tab_finefreqg[i]);
169 else
170 for (i = 0; i < B43_TAB_FINEFREQG_SIZE; i++)
171 b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i, b43_tab_finefreqg[i]);
172} 93}
173 94
174static void b43_wa_nft(struct b43_wldev *dev) /* Noise figure table */ 95static void b43_wa_nft(struct b43_wldev *dev) /* Noise figure table */
@@ -176,21 +97,14 @@ static void b43_wa_nft(struct b43_wldev *dev) /* Noise figure table */
176 struct b43_phy *phy = &dev->phy; 97 struct b43_phy *phy = &dev->phy;
177 int i; 98 int i;
178 99
179 if (phy->type == B43_PHYTYPE_A) { 100 if (phy->rev == 1)
180 if (phy->rev == 2) 101 for (i = 0; i < B43_TAB_NOISEG1_SIZE; i++)
181 for (i = 0; i < B43_TAB_NOISEA2_SIZE; i++) 102 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i,
182 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noisea2[i]); 103 b43_tab_noiseg1[i]);
183 else 104 else
184 for (i = 0; i < B43_TAB_NOISEA3_SIZE; i++) 105 for (i = 0; i < B43_TAB_NOISEG2_SIZE; i++)
185 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noisea3[i]); 106 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i,
186 } else { 107 b43_tab_noiseg2[i]);
187 if (phy->rev == 1)
188 for (i = 0; i < B43_TAB_NOISEG1_SIZE; i++)
189 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noiseg1[i]);
190 else
191 for (i = 0; i < B43_TAB_NOISEG2_SIZE; i++)
192 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noiseg2[i]);
193 }
194} 108}
195 109
196static void b43_wa_rt(struct b43_wldev *dev) /* Rotor table */ 110static void b43_wa_rt(struct b43_wldev *dev) /* Rotor table */
@@ -201,14 +115,6 @@ static void b43_wa_rt(struct b43_wldev *dev) /* Rotor table */
201 b43_ofdmtab_write32(dev, B43_OFDMTAB_ROTOR, i, b43_tab_rotor[i]); 115 b43_ofdmtab_write32(dev, B43_OFDMTAB_ROTOR, i, b43_tab_rotor[i]);
202} 116}
203 117
204static void b43_write_null_nst(struct b43_wldev *dev)
205{
206 int i;
207
208 for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++)
209 b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE, i, 0);
210}
211
212static void b43_write_nst(struct b43_wldev *dev, const u16 *nst) 118static void b43_write_nst(struct b43_wldev *dev, const u16 *nst)
213{ 119{
214 int i; 120 int i;
@@ -221,24 +127,13 @@ static void b43_wa_nst(struct b43_wldev *dev) /* Noise scale table */
221{ 127{
222 struct b43_phy *phy = &dev->phy; 128 struct b43_phy *phy = &dev->phy;
223 129
224 if (phy->type == B43_PHYTYPE_A) { 130 if (phy->rev >= 6) {
225 if (phy->rev <= 1) 131 if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN)
226 b43_write_null_nst(dev);
227 else if (phy->rev == 2)
228 b43_write_nst(dev, b43_tab_noisescalea2);
229 else if (phy->rev == 3)
230 b43_write_nst(dev, b43_tab_noisescalea3);
231 else
232 b43_write_nst(dev, b43_tab_noisescaleg3); 132 b43_write_nst(dev, b43_tab_noisescaleg3);
133 else
134 b43_write_nst(dev, b43_tab_noisescaleg2);
233 } else { 135 } else {
234 if (phy->rev >= 6) { 136 b43_write_nst(dev, b43_tab_noisescaleg1);
235 if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN)
236 b43_write_nst(dev, b43_tab_noisescaleg3);
237 else
238 b43_write_nst(dev, b43_tab_noisescaleg2);
239 } else {
240 b43_write_nst(dev, b43_tab_noisescaleg1);
241 }
242 } 137 }
243} 138}
244 139
@@ -251,41 +146,13 @@ static void b43_wa_art(struct b43_wldev *dev) /* ADV retard table */
251 i, b43_tab_retard[i]); 146 i, b43_tab_retard[i]);
252} 147}
253 148
254static void b43_wa_txlna_gain(struct b43_wldev *dev)
255{
256 b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 13, 0x0000);
257}
258
259static void b43_wa_crs_reset(struct b43_wldev *dev)
260{
261 b43_phy_write(dev, 0x002C, 0x0064);
262}
263
264static void b43_wa_2060txlna_gain(struct b43_wldev *dev)
265{
266 b43_hf_write(dev, b43_hf_read(dev) |
267 B43_HF_2060W);
268}
269
270static void b43_wa_lms(struct b43_wldev *dev)
271{
272 b43_phy_maskset(dev, 0x0055, 0xFFC0, 0x0004);
273}
274
275static void b43_wa_mixedsignal(struct b43_wldev *dev)
276{
277 b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1, 3);
278}
279
280static void b43_wa_msst(struct b43_wldev *dev) /* Min sigma square table */ 149static void b43_wa_msst(struct b43_wldev *dev) /* Min sigma square table */
281{ 150{
282 struct b43_phy *phy = &dev->phy; 151 struct b43_phy *phy = &dev->phy;
283 int i; 152 int i;
284 const u16 *tab; 153 const u16 *tab;
285 154
286 if (phy->type == B43_PHYTYPE_A) { 155 if (phy->type == B43_PHYTYPE_G) {
287 tab = b43_tab_sigmasqr1;
288 } else if (phy->type == B43_PHYTYPE_G) {
289 tab = b43_tab_sigmasqr2; 156 tab = b43_tab_sigmasqr2;
290 } else { 157 } else {
291 B43_WARN_ON(1); 158 B43_WARN_ON(1);
@@ -298,13 +165,6 @@ static void b43_wa_msst(struct b43_wldev *dev) /* Min sigma square table */
298 } 165 }
299} 166}
300 167
301static void b43_wa_iqadc(struct b43_wldev *dev)
302{
303 if (dev->phy.analog == 4)
304 b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 0,
305 b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 0) & ~0xF000);
306}
307
308static void b43_wa_crs_ed(struct b43_wldev *dev) 168static void b43_wa_crs_ed(struct b43_wldev *dev)
309{ 169{
310 struct b43_phy *phy = &dev->phy; 170 struct b43_phy *phy = &dev->phy;
@@ -450,38 +310,6 @@ static void b43_wa_cpll_nonpilot(struct b43_wldev *dev)
450 b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_11, 1, 0); 310 b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_11, 1, 0);
451} 311}
452 312
453static void b43_wa_rssi_adc(struct b43_wldev *dev)
454{
455 if (dev->phy.analog == 4)
456 b43_phy_write(dev, 0x00DC, 0x7454);
457}
458
459static void b43_wa_boards_a(struct b43_wldev *dev)
460{
461 if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM &&
462 dev->dev->board_type == SSB_BOARD_BU4306 &&
463 dev->dev->board_rev < 0x30) {
464 b43_phy_write(dev, 0x0010, 0xE000);
465 b43_phy_write(dev, 0x0013, 0x0140);
466 b43_phy_write(dev, 0x0014, 0x0280);
467 } else {
468 if (dev->dev->board_type == SSB_BOARD_MP4318 &&
469 dev->dev->board_rev < 0x20) {
470 b43_phy_write(dev, 0x0013, 0x0210);
471 b43_phy_write(dev, 0x0014, 0x0840);
472 } else {
473 b43_phy_write(dev, 0x0013, 0x0140);
474 b43_phy_write(dev, 0x0014, 0x0280);
475 }
476 if (dev->phy.rev <= 4)
477 b43_phy_write(dev, 0x0010, 0xE000);
478 else
479 b43_phy_write(dev, 0x0010, 0x2000);
480 b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 1, 0x0039);
481 b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_APHY, 7, 0x0040);
482 }
483}
484
485static void b43_wa_boards_g(struct b43_wldev *dev) 313static void b43_wa_boards_g(struct b43_wldev *dev)
486{ 314{
487 struct ssb_sprom *sprom = dev->dev->bus_sprom; 315 struct ssb_sprom *sprom = dev->dev->bus_sprom;
@@ -518,80 +346,7 @@ void b43_wa_all(struct b43_wldev *dev)
518{ 346{
519 struct b43_phy *phy = &dev->phy; 347 struct b43_phy *phy = &dev->phy;
520 348
521 if (phy->type == B43_PHYTYPE_A) { 349 if (phy->type == B43_PHYTYPE_G) {
522 switch (phy->rev) {
523 case 2:
524 b43_wa_papd(dev);
525 b43_wa_auxclipthr(dev);
526 b43_wa_afcdac(dev);
527 b43_wa_txdc_offset(dev);
528 b43_wa_initgains(dev);
529 b43_wa_divider(dev);
530 b43_wa_gt(dev);
531 b43_wa_rssi_lt(dev);
532 b43_wa_analog(dev);
533 b43_wa_dac(dev);
534 b43_wa_fft(dev);
535 b43_wa_nft(dev);
536 b43_wa_rt(dev);
537 b43_wa_nst(dev);
538 b43_wa_art(dev);
539 b43_wa_txlna_gain(dev);
540 b43_wa_crs_reset(dev);
541 b43_wa_2060txlna_gain(dev);
542 b43_wa_lms(dev);
543 break;
544 case 3:
545 b43_wa_papd(dev);
546 b43_wa_mixedsignal(dev);
547 b43_wa_rssi_lt(dev);
548 b43_wa_txdc_offset(dev);
549 b43_wa_initgains(dev);
550 b43_wa_dac(dev);
551 b43_wa_nft(dev);
552 b43_wa_nst(dev);
553 b43_wa_msst(dev);
554 b43_wa_analog(dev);
555 b43_wa_gt(dev);
556 b43_wa_txpuoff_rxpuon(dev);
557 b43_wa_txlna_gain(dev);
558 break;
559 case 5:
560 b43_wa_iqadc(dev);
561 case 6:
562 b43_wa_papd(dev);
563 b43_wa_rssi_lt(dev);
564 b43_wa_txdc_offset(dev);
565 b43_wa_initgains(dev);
566 b43_wa_dac(dev);
567 b43_wa_nft(dev);
568 b43_wa_nst(dev);
569 b43_wa_msst(dev);
570 b43_wa_analog(dev);
571 b43_wa_gt(dev);
572 b43_wa_txpuoff_rxpuon(dev);
573 b43_wa_txlna_gain(dev);
574 break;
575 case 7:
576 b43_wa_iqadc(dev);
577 b43_wa_papd(dev);
578 b43_wa_rssi_lt(dev);
579 b43_wa_txdc_offset(dev);
580 b43_wa_initgains(dev);
581 b43_wa_dac(dev);
582 b43_wa_nft(dev);
583 b43_wa_nst(dev);
584 b43_wa_msst(dev);
585 b43_wa_analog(dev);
586 b43_wa_gt(dev);
587 b43_wa_txpuoff_rxpuon(dev);
588 b43_wa_txlna_gain(dev);
589 b43_wa_rssi_adc(dev);
590 default:
591 B43_WARN_ON(1);
592 }
593 b43_wa_boards_a(dev);
594 } else if (phy->type == B43_PHYTYPE_G) {
595 switch (phy->rev) { 350 switch (phy->rev) {
596 case 1://XXX review rev1 351 case 1://XXX review rev1
597 b43_wa_crs_ed(dev); 352 b43_wa_crs_ed(dev);
diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c
index f6201264de49..b068d5aeee24 100644
--- a/drivers/net/wireless/broadcom/b43/xmit.c
+++ b/drivers/net/wireless/broadcom/b43/xmit.c
@@ -205,7 +205,7 @@ static u16 b43_generate_tx_phy_ctl1(struct b43_wldev *dev, u8 bitrate)
205 return control; 205 return control;
206} 206}
207 207
208static u8 b43_calc_fallback_rate(u8 bitrate) 208static u8 b43_calc_fallback_rate(u8 bitrate, int gmode)
209{ 209{
210 switch (bitrate) { 210 switch (bitrate) {
211 case B43_CCK_RATE_1MB: 211 case B43_CCK_RATE_1MB:
@@ -216,8 +216,15 @@ static u8 b43_calc_fallback_rate(u8 bitrate)
216 return B43_CCK_RATE_2MB; 216 return B43_CCK_RATE_2MB;
217 case B43_CCK_RATE_11MB: 217 case B43_CCK_RATE_11MB:
218 return B43_CCK_RATE_5MB; 218 return B43_CCK_RATE_5MB;
219 /*
220 * Don't just fallback to CCK; it may be in 5GHz operation
221 * and falling back to CCK won't work out very well.
222 */
219 case B43_OFDM_RATE_6MB: 223 case B43_OFDM_RATE_6MB:
220 return B43_CCK_RATE_5MB; 224 if (gmode)
225 return B43_CCK_RATE_5MB;
226 else
227 return B43_OFDM_RATE_6MB;
221 case B43_OFDM_RATE_9MB: 228 case B43_OFDM_RATE_9MB:
222 return B43_OFDM_RATE_6MB; 229 return B43_OFDM_RATE_6MB;
223 case B43_OFDM_RATE_12MB: 230 case B43_OFDM_RATE_12MB:
@@ -438,7 +445,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
438 445
439 rts_rate = rts_cts_rate ? rts_cts_rate->hw_value : B43_CCK_RATE_1MB; 446 rts_rate = rts_cts_rate ? rts_cts_rate->hw_value : B43_CCK_RATE_1MB;
440 rts_rate_ofdm = b43_is_ofdm_rate(rts_rate); 447 rts_rate_ofdm = b43_is_ofdm_rate(rts_rate);
441 rts_rate_fb = b43_calc_fallback_rate(rts_rate); 448 rts_rate_fb = b43_calc_fallback_rate(rts_rate, phy->gmode);
442 rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb); 449 rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb);
443 450
444 if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { 451 if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
@@ -642,11 +649,7 @@ static s8 b43_rssinoise_postprocess(struct b43_wldev *dev, u8 in_rssi)
642 struct b43_phy *phy = &dev->phy; 649 struct b43_phy *phy = &dev->phy;
643 s8 ret; 650 s8 ret;
644 651
645 if (phy->type == B43_PHYTYPE_A) { 652 ret = b43_rssi_postprocess(dev, in_rssi, 0, 1, 1);
646 //TODO: Incomplete specs.
647 ret = 0;
648 } else
649 ret = b43_rssi_postprocess(dev, in_rssi, 0, 1, 1);
650 653
651 return ret; 654 return ret;
652} 655}
@@ -663,7 +666,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
663 u16 uninitialized_var(chanstat), uninitialized_var(mactime); 666 u16 uninitialized_var(chanstat), uninitialized_var(mactime);
664 u32 uninitialized_var(macstat); 667 u32 uninitialized_var(macstat);
665 u16 chanid; 668 u16 chanid;
666 u16 phytype;
667 int padding, rate_idx; 669 int padding, rate_idx;
668 670
669 memset(&status, 0, sizeof(status)); 671 memset(&status, 0, sizeof(status));
@@ -684,7 +686,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
684 chanstat = le16_to_cpu(rxhdr->format_351.channel); 686 chanstat = le16_to_cpu(rxhdr->format_351.channel);
685 break; 687 break;
686 } 688 }
687 phytype = chanstat & B43_RX_CHAN_PHYTYPE;
688 689
689 if (unlikely(macstat & B43_RX_MAC_FCSERR)) { 690 if (unlikely(macstat & B43_RX_MAC_FCSERR)) {
690 dev->wl->ieee_stats.dot11FCSErrorCount++; 691 dev->wl->ieee_stats.dot11FCSErrorCount++;
@@ -755,7 +756,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
755 else 756 else
756 status.signal = max(rxhdr->power0, rxhdr->power1); 757 status.signal = max(rxhdr->power0, rxhdr->power1);
757 break; 758 break;
758 case B43_PHYTYPE_A:
759 case B43_PHYTYPE_B: 759 case B43_PHYTYPE_B:
760 case B43_PHYTYPE_G: 760 case B43_PHYTYPE_G:
761 case B43_PHYTYPE_LP: 761 case B43_PHYTYPE_LP:
@@ -802,14 +802,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
802 802
803 chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT; 803 chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT;
804 switch (chanstat & B43_RX_CHAN_PHYTYPE) { 804 switch (chanstat & B43_RX_CHAN_PHYTYPE) {
805 case B43_PHYTYPE_A:
806 status.band = NL80211_BAND_5GHZ;
807 B43_WARN_ON(1);
808 /* FIXME: We don't really know which value the "chanid" contains.
809 * So the following assignment might be wrong. */
810 status.freq =
811 ieee80211_channel_to_frequency(chanid, status.band);
812 break;
813 case B43_PHYTYPE_G: 805 case B43_PHYTYPE_G:
814 status.band = NL80211_BAND_2GHZ; 806 status.band = NL80211_BAND_2GHZ;
815 /* Somewhere between 478.104 and 508.1084 firmware for G-PHY 807 /* Somewhere between 478.104 and 508.1084 firmware for G-PHY
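b43_calc_fallback_rate() gains a gmode argument so that a 5GHz (non-gmode) link never falls back from 6 Mb/s OFDM to a CCK rate the band does not carry. The one case that changes, as a standalone sketch with placeholder rate tokens:

/* hypothetical rate tokens; only the 6 Mb/s case changes behaviour */
enum { CCK_RATE_5MB, OFDM_RATE_6MB };

static int calc_fallback_rate(int bitrate, int gmode)
{
	switch (bitrate) {
	case OFDM_RATE_6MB:
		/* CCK exists only on 2.4GHz; without gmode, stay on OFDM */
		return gmode ? CCK_RATE_5MB : OFDM_RATE_6MB;
	default:
		return bitrate;		/* other rates step down as before */
	}
}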
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index c7550dab6a23..c4b89d27e2e8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -166,41 +166,45 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
166 sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler); 166 sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler);
167 sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler); 167 sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler);
168 sdio_release_host(sdiodev->func[1]); 168 sdio_release_host(sdiodev->func[1]);
169 sdiodev->sd_irq_requested = true;
169 } 170 }
170 171
171 return 0; 172 return 0;
172} 173}
173 174
174int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev) 175void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
175{ 176{
176 struct brcmfmac_sdio_pd *pdata;
177 177
178 brcmf_dbg(SDIO, "Entering\n"); 178 brcmf_dbg(SDIO, "Entering oob=%d sd=%d\n",
179 sdiodev->oob_irq_requested,
180 sdiodev->sd_irq_requested);
179 181
180 pdata = &sdiodev->settings->bus.sdio; 182 if (sdiodev->oob_irq_requested) {
181 if (pdata->oob_irq_supported) { 183 struct brcmfmac_sdio_pd *pdata;
184
185 pdata = &sdiodev->settings->bus.sdio;
182 sdio_claim_host(sdiodev->func[1]); 186 sdio_claim_host(sdiodev->func[1]);
183 brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL); 187 brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
184 brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL); 188 brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
185 sdio_release_host(sdiodev->func[1]); 189 sdio_release_host(sdiodev->func[1]);
186 190
187 if (sdiodev->oob_irq_requested) { 191 sdiodev->oob_irq_requested = false;
188 sdiodev->oob_irq_requested = false; 192 if (sdiodev->irq_wake) {
189 if (sdiodev->irq_wake) { 193 disable_irq_wake(pdata->oob_irq_nr);
190 disable_irq_wake(pdata->oob_irq_nr); 194 sdiodev->irq_wake = false;
191 sdiodev->irq_wake = false;
192 }
193 free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev);
194 sdiodev->irq_en = false;
195 } 195 }
196 } else { 196 free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev);
197 sdiodev->irq_en = false;
198 sdiodev->oob_irq_requested = false;
199 }
200
201 if (sdiodev->sd_irq_requested) {
197 sdio_claim_host(sdiodev->func[1]); 202 sdio_claim_host(sdiodev->func[1]);
198 sdio_release_irq(sdiodev->func[2]); 203 sdio_release_irq(sdiodev->func[2]);
199 sdio_release_irq(sdiodev->func[1]); 204 sdio_release_irq(sdiodev->func[1]);
200 sdio_release_host(sdiodev->func[1]); 205 sdio_release_host(sdiodev->func[1]);
206 sdiodev->sd_irq_requested = false;
201 } 207 }
202
203 return 0;
204} 208}
205 209
206void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev, 210void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
@@ -1197,12 +1201,17 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
1197 brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device); 1201 brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
1198 brcmf_dbg(SDIO, "Function: %d\n", func->num); 1202 brcmf_dbg(SDIO, "Function: %d\n", func->num);
1199 1203
1200 if (func->num != 1)
1201 return;
1202
1203 bus_if = dev_get_drvdata(&func->dev); 1204 bus_if = dev_get_drvdata(&func->dev);
1204 if (bus_if) { 1205 if (bus_if) {
1205 sdiodev = bus_if->bus_priv.sdio; 1206 sdiodev = bus_if->bus_priv.sdio;
1207
1208 /* start by unregistering irqs */
1209 brcmf_sdiod_intr_unregister(sdiodev);
1210
1211 if (func->num != 1)
1212 return;
1213
1214 /* only proceed with rest of cleanup if func 1 */
1206 brcmf_sdiod_remove(sdiodev); 1215 brcmf_sdiod_remove(sdiodev);
1207 1216
1208 dev_set_drvdata(&sdiodev->func[1]->dev, NULL); 1217 dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
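
The unregister path above is now driven by the two request flags, so it releases only what registration actually set up and can be called unconditionally from remove() for any SDIO function. A minimal userspace model of that flag-guarded, idempotent teardown (names are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	bool oob_irq_requested;
	bool sd_irq_requested;
};

static void intr_register(struct dev_state *s, bool use_oob)
{
	if (use_oob)
		s->oob_irq_requested = true;	/* request_irq() succeeded */
	else
		s->sd_irq_requested = true;	/* sdio_claim_irq() succeeded */
}

static void intr_unregister(struct dev_state *s)
{
	if (s->oob_irq_requested) {
		printf("freeing out-of-band IRQ\n");
		s->oob_irq_requested = false;
	}
	if (s->sd_irq_requested) {
		printf("releasing in-band SDIO IRQs\n");
		s->sd_irq_requested = false;
	}
}

int main(void)
{
	struct dev_state s = { 0 };

	intr_register(&s, false);
	intr_unregister(&s);	/* releases only the in-band IRQ */
	intr_unregister(&s);	/* second call is a harmless no-op */
	return 0;
}
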
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 62f475e31077..264bd638a3d9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -541,6 +541,21 @@ brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev)
541 ADDR_INDIRECT); 541 ADDR_INDIRECT);
542} 542}
543 543
544static int brcmf_get_first_free_bsscfgidx(struct brcmf_pub *drvr)
545{
546 int bsscfgidx;
547
548 for (bsscfgidx = 0; bsscfgidx < BRCMF_MAX_IFS; bsscfgidx++) {
549 /* bsscfgidx 1 is reserved for legacy P2P */
550 if (bsscfgidx == 1)
551 continue;
552 if (!drvr->iflist[bsscfgidx])
553 return bsscfgidx;
554 }
555
556 return -ENOMEM;
557}
558
544static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp) 559static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
545{ 560{
546 struct brcmf_mbss_ssid_le mbss_ssid_le; 561 struct brcmf_mbss_ssid_le mbss_ssid_le;
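
brcmf_get_first_free_bsscfgidx() replaces the old next-free scan in core.c with a straight walk from index 0 that skips the slot reserved for legacy P2P. A standalone model of the scan, assuming a plain array in place of drvr->iflist and -1 in place of -ENOMEM:

#include <stdio.h>

#define MAX_IFS 16
#define RESERVED_P2P_IDX 1

static void *iflist[MAX_IFS];

static int get_first_free_bsscfgidx(void)
{
	int bsscfgidx;

	for (bsscfgidx = 0; bsscfgidx < MAX_IFS; bsscfgidx++) {
		if (bsscfgidx == RESERVED_P2P_IDX)
			continue;		/* reserved for legacy P2P */
		if (!iflist[bsscfgidx])
			return bsscfgidx;	/* first unused slot */
	}
	return -1;				/* table full (-ENOMEM in the driver) */
}

int main(void)
{
	iflist[0] = iflist[2] = (void *)1;	/* slots 0 and 2 in use */
	printf("first free bsscfgidx: %d\n", get_first_free_bsscfgidx());	/* 3 */
	return 0;
}
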
@@ -548,7 +563,7 @@ static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
548 int err; 563 int err;
549 564
550 memset(&mbss_ssid_le, 0, sizeof(mbss_ssid_le)); 565 memset(&mbss_ssid_le, 0, sizeof(mbss_ssid_le));
551 bsscfgidx = brcmf_get_next_free_bsscfgidx(ifp->drvr); 566 bsscfgidx = brcmf_get_first_free_bsscfgidx(ifp->drvr);
552 if (bsscfgidx < 0) 567 if (bsscfgidx < 0)
553 return bsscfgidx; 568 return bsscfgidx;
554 569
@@ -586,7 +601,7 @@ struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name,
586 601
587 brcmf_dbg(INFO, "Adding vif \"%s\"\n", name); 602 brcmf_dbg(INFO, "Adding vif \"%s\"\n", name);
588 603
589 vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_AP, false); 604 vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_AP);
590 if (IS_ERR(vif)) 605 if (IS_ERR(vif))
591 return (struct wireless_dev *)vif; 606 return (struct wireless_dev *)vif;
592 607
@@ -669,20 +684,24 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
669 return ERR_PTR(-EOPNOTSUPP); 684 return ERR_PTR(-EOPNOTSUPP);
670 case NL80211_IFTYPE_AP: 685 case NL80211_IFTYPE_AP:
671 wdev = brcmf_ap_add_vif(wiphy, name, flags, params); 686 wdev = brcmf_ap_add_vif(wiphy, name, flags, params);
672 if (!IS_ERR(wdev)) 687 break;
673 brcmf_cfg80211_update_proto_addr_mode(wdev);
674 return wdev;
675 case NL80211_IFTYPE_P2P_CLIENT: 688 case NL80211_IFTYPE_P2P_CLIENT:
676 case NL80211_IFTYPE_P2P_GO: 689 case NL80211_IFTYPE_P2P_GO:
677 case NL80211_IFTYPE_P2P_DEVICE: 690 case NL80211_IFTYPE_P2P_DEVICE:
678 wdev = brcmf_p2p_add_vif(wiphy, name, name_assign_type, type, flags, params); 691 wdev = brcmf_p2p_add_vif(wiphy, name, name_assign_type, type, flags, params);
679 if (!IS_ERR(wdev)) 692 break;
680 brcmf_cfg80211_update_proto_addr_mode(wdev);
681 return wdev;
682 case NL80211_IFTYPE_UNSPECIFIED: 693 case NL80211_IFTYPE_UNSPECIFIED:
683 default: 694 default:
684 return ERR_PTR(-EINVAL); 695 return ERR_PTR(-EINVAL);
685 } 696 }
697
698 if (IS_ERR(wdev))
699 brcmf_err("add iface %s type %d failed: err=%d\n",
700 name, type, (int)PTR_ERR(wdev));
701 else
702 brcmf_cfg80211_update_proto_addr_mode(wdev);
703
704 return wdev;
686} 705}
687 706
688static void brcmf_scan_config_mpc(struct brcmf_if *ifp, int mpc) 707static void brcmf_scan_config_mpc(struct brcmf_if *ifp, int mpc)
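
The add_iface switch above now only produces the wdev; the IS_ERR check, the failure log, and the shared proto-addr-mode update run once after it. A compilable sketch of that single-exit pattern, with hypothetical stand-in helpers in place of the driver's:

#include <stdio.h>

struct wdev { int id; };

static struct wdev ap_dev = { 1 };

static struct wdev *add_ap_vif(void)  { return &ap_dev; }
static struct wdev *add_p2p_vif(void) { return NULL; }	/* simulate failure */

static void update_proto_addr_mode(struct wdev *w)
{
	printf("post-setup for wdev %d\n", w->id);
}

static struct wdev *add_iface(int type)
{
	struct wdev *wdev = NULL;

	switch (type) {
	case 1: wdev = add_ap_vif(); break;
	case 2: wdev = add_p2p_vif(); break;
	default: return NULL;			/* unsupported type */
	}

	/* one shared exit: log failures, run common post-setup on success */
	if (!wdev)
		fprintf(stderr, "add iface type %d failed\n", type);
	else
		update_proto_addr_mode(wdev);

	return wdev;
}

int main(void)
{
	add_iface(1);
	add_iface(2);
	return 0;
}
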
@@ -2750,7 +2769,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
2750 if (!bi->ctl_ch) { 2769 if (!bi->ctl_ch) {
2751 ch.chspec = le16_to_cpu(bi->chanspec); 2770 ch.chspec = le16_to_cpu(bi->chanspec);
2752 cfg->d11inf.decchspec(&ch); 2771 cfg->d11inf.decchspec(&ch);
2753 bi->ctl_ch = ch.chnum; 2772 bi->ctl_ch = ch.control_ch_num;
2754 } 2773 }
2755 channel = bi->ctl_ch; 2774 channel = bi->ctl_ch;
2756 2775
@@ -2868,7 +2887,7 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg,
2868 else 2887 else
2869 band = wiphy->bands[NL80211_BAND_5GHZ]; 2888 band = wiphy->bands[NL80211_BAND_5GHZ];
2870 2889
2871 freq = ieee80211_channel_to_frequency(ch.chnum, band->band); 2890 freq = ieee80211_channel_to_frequency(ch.control_ch_num, band->band);
2872 cfg->channel = freq; 2891 cfg->channel = freq;
2873 notify_channel = ieee80211_get_channel(wiphy, freq); 2892 notify_channel = ieee80211_get_channel(wiphy, freq);
2874 2893
@@ -2878,7 +2897,7 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg,
2878 notify_ielen = le32_to_cpu(bi->ie_length); 2897 notify_ielen = le32_to_cpu(bi->ie_length);
2879 notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100; 2898 notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;
2880 2899
2881 brcmf_dbg(CONN, "channel: %d(%d)\n", ch.chnum, freq); 2900 brcmf_dbg(CONN, "channel: %d(%d)\n", ch.control_ch_num, freq);
2882 brcmf_dbg(CONN, "capability: %X\n", notify_capability); 2901 brcmf_dbg(CONN, "capability: %X\n", notify_capability);
2883 brcmf_dbg(CONN, "beacon interval: %d\n", notify_interval); 2902 brcmf_dbg(CONN, "beacon interval: %d\n", notify_interval);
2884 brcmf_dbg(CONN, "signal: %d\n", notify_signal); 2903 brcmf_dbg(CONN, "signal: %d\n", notify_signal);
@@ -4439,7 +4458,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
4439 struct brcmf_join_params join_params; 4458 struct brcmf_join_params join_params;
4440 enum nl80211_iftype dev_role; 4459 enum nl80211_iftype dev_role;
4441 struct brcmf_fil_bss_enable_le bss_enable; 4460 struct brcmf_fil_bss_enable_le bss_enable;
4442 u16 chanspec; 4461 u16 chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef);
4443 bool mbss; 4462 bool mbss;
4444 int is_11d; 4463 int is_11d;
4445 4464
@@ -4515,16 +4534,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
4515 4534
4516 brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon); 4535 brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
4517 4536
4537 /* Parameters shared by all radio interfaces */
4518 if (!mbss) { 4538 if (!mbss) {
4519 chanspec = chandef_to_chanspec(&cfg->d11inf,
4520 &settings->chandef);
4521 err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
4522 if (err < 0) {
4523 brcmf_err("Set Channel failed: chspec=%d, %d\n",
4524 chanspec, err);
4525 goto exit;
4526 }
4527
4528 if (is_11d != ifp->vif->is_11d) { 4539 if (is_11d != ifp->vif->is_11d) {
4529 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY, 4540 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
4530 is_11d); 4541 is_11d);
@@ -4572,6 +4583,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
4572 err = -EINVAL; 4583 err = -EINVAL;
4573 goto exit; 4584 goto exit;
4574 } 4585 }
4586
4587 /* Interface specific setup */
4575 if (dev_role == NL80211_IFTYPE_AP) { 4588 if (dev_role == NL80211_IFTYPE_AP) {
4576 if ((brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) && (!mbss)) 4589 if ((brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) && (!mbss))
4577 brcmf_fil_iovar_int_set(ifp, "mbss", 1); 4590 brcmf_fil_iovar_int_set(ifp, "mbss", 1);
@@ -4581,6 +4594,17 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
4581 brcmf_err("setting AP mode failed %d\n", err); 4594 brcmf_err("setting AP mode failed %d\n", err);
4582 goto exit; 4595 goto exit;
4583 } 4596 }
4597 if (!mbss) {
4598 /* Firmware 10.x requires setting channel after enabling
4599 * AP and before bringing interface up.
4600 */
4601 err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
4602 if (err < 0) {
4603 brcmf_err("Set Channel failed: chspec=%d, %d\n",
4604 chanspec, err);
4605 goto exit;
4606 }
4607 }
4584 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1); 4608 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
4585 if (err < 0) { 4609 if (err < 0) {
4586 brcmf_err("BRCMF_C_UP error (%d)\n", err); 4610 brcmf_err("BRCMF_C_UP error (%d)\n", err);
@@ -4602,7 +4626,13 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
4602 goto exit; 4626 goto exit;
4603 } 4627 }
4604 brcmf_dbg(TRACE, "AP mode configuration complete\n"); 4628 brcmf_dbg(TRACE, "AP mode configuration complete\n");
4605 } else { 4629 } else if (dev_role == NL80211_IFTYPE_P2P_GO) {
4630 err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
4631 if (err < 0) {
4632 brcmf_err("Set Channel failed: chspec=%d, %d\n",
4633 chanspec, err);
4634 goto exit;
4635 }
4606 err = brcmf_fil_bsscfg_data_set(ifp, "ssid", &ssid_le, 4636 err = brcmf_fil_bsscfg_data_set(ifp, "ssid", &ssid_le,
4607 sizeof(ssid_le)); 4637 sizeof(ssid_le));
4608 if (err < 0) { 4638 if (err < 0) {
@@ -4619,7 +4649,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
4619 } 4649 }
4620 4650
4621 brcmf_dbg(TRACE, "GO mode configuration complete\n"); 4651 brcmf_dbg(TRACE, "GO mode configuration complete\n");
4652 } else {
4653 WARN_ON(1);
4622 } 4654 }
4655
4623 set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); 4656 set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
4624 brcmf_net_setcarrier(ifp, true); 4657 brcmf_net_setcarrier(ifp, true);
4625 4658
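
Taken together, the start_ap hunks compute the chanspec once up front and, per the firmware 10.x comment, program it for a non-MBSS AP after AP mode is enabled but before BRCMF_C_UP; the P2P GO branch keeps its own chanspec write. A runnable sketch of the resulting ordering, with printf stand-ins for the firmware-interface helpers:

#include <stdio.h>
#include <stdbool.h>

/* stand-ins for the driver's firmware-interface helpers */
static int fil_cmd_int_set(const char *cmd, int val)
{
	printf("cmd %s = %d\n", cmd, val);
	return 0;
}

static int fil_iovar_int_set(const char *iovar, int val)
{
	printf("iovar %s = 0x%x\n", iovar, val);
	return 0;
}

static int start_ap_ordering(bool mbss, int chanspec)
{
	int err;

	err = fil_cmd_int_set("SET_AP", 1);		/* 1. enable AP mode */
	if (err)
		return err;

	/* 2. firmware 10.x: channel goes after AP enable, before UP;
	 *    in MBSS the shared channel is configured elsewhere */
	if (!mbss) {
		err = fil_iovar_int_set("chanspec", chanspec);
		if (err)
			return err;
	}

	return fil_cmd_int_set("UP", 1);		/* 3. interface up */
}

int main(void)
{
	return start_ap_ordering(false, 0x1001);
}
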
@@ -4908,6 +4941,68 @@ exit:
4908 return err; 4941 return err;
4909} 4942}
4910 4943
4944static int brcmf_cfg80211_get_channel(struct wiphy *wiphy,
4945 struct wireless_dev *wdev,
4946 struct cfg80211_chan_def *chandef)
4947{
4948 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
4949 struct net_device *ndev = wdev->netdev;
4950 struct brcmf_if *ifp;
4951 struct brcmu_chan ch;
4952 enum nl80211_band band = 0;
4953 enum nl80211_chan_width width = 0;
4954 u32 chanspec;
4955 int freq, err;
4956
4957 if (!ndev)
4958 return -ENODEV;
4959 ifp = netdev_priv(ndev);
4960
4961 err = brcmf_fil_iovar_int_get(ifp, "chanspec", &chanspec);
4962 if (err) {
4963 brcmf_err("chanspec failed (%d)\n", err);
4964 return err;
4965 }
4966
4967 ch.chspec = chanspec;
4968 cfg->d11inf.decchspec(&ch);
4969
4970 switch (ch.band) {
4971 case BRCMU_CHAN_BAND_2G:
4972 band = NL80211_BAND_2GHZ;
4973 break;
4974 case BRCMU_CHAN_BAND_5G:
4975 band = NL80211_BAND_5GHZ;
4976 break;
4977 }
4978
4979 switch (ch.bw) {
4980 case BRCMU_CHAN_BW_80:
4981 width = NL80211_CHAN_WIDTH_80;
4982 break;
4983 case BRCMU_CHAN_BW_40:
4984 width = NL80211_CHAN_WIDTH_40;
4985 break;
4986 case BRCMU_CHAN_BW_20:
4987 width = NL80211_CHAN_WIDTH_20;
4988 break;
4989 case BRCMU_CHAN_BW_80P80:
4990 width = NL80211_CHAN_WIDTH_80P80;
4991 break;
4992 case BRCMU_CHAN_BW_160:
4993 width = NL80211_CHAN_WIDTH_160;
4994 break;
4995 }
4996
4997 freq = ieee80211_channel_to_frequency(ch.control_ch_num, band);
4998 chandef->chan = ieee80211_get_channel(wiphy, freq);
4999 chandef->width = width;
5000 chandef->center_freq1 = ieee80211_channel_to_frequency(ch.chnum, band);
5001 chandef->center_freq2 = 0;
5002
5003 return 0;
5004}
5005
4911static int brcmf_cfg80211_crit_proto_start(struct wiphy *wiphy, 5006static int brcmf_cfg80211_crit_proto_start(struct wiphy *wiphy,
4912 struct wireless_dev *wdev, 5007 struct wireless_dev *wdev,
4913 enum nl80211_crit_proto_id proto, 5008 enum nl80211_crit_proto_id proto,
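
In the new get_channel callback, chandef->chan is looked up from the decoded control channel while center_freq1 comes from the center channel. A worked example of the 5 GHz math (ieee80211_channel_to_frequency() computes 5000 + 5 * channel for this band), assuming a 40 MHz channel with control 36 and center 38:

#include <stdio.h>

static int chan5g_to_freq(int chan) { return 5000 + 5 * chan; }

int main(void)
{
	int control_ch_num = 36;	/* decoded control channel */
	int chnum = 38;			/* decoded 40 MHz center channel */

	printf("control %d MHz, center_freq1 %d MHz\n",
	       chan5g_to_freq(control_ch_num),	/* 5180 */
	       chan5g_to_freq(chnum));		/* 5190 */
	return 0;
}
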
@@ -5070,6 +5165,7 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
5070 .mgmt_tx = brcmf_cfg80211_mgmt_tx, 5165 .mgmt_tx = brcmf_cfg80211_mgmt_tx,
5071 .remain_on_channel = brcmf_p2p_remain_on_channel, 5166 .remain_on_channel = brcmf_p2p_remain_on_channel,
5072 .cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel, 5167 .cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel,
5168 .get_channel = brcmf_cfg80211_get_channel,
5073 .start_p2p_device = brcmf_p2p_start_device, 5169 .start_p2p_device = brcmf_p2p_start_device,
5074 .stop_p2p_device = brcmf_p2p_stop_device, 5170 .stop_p2p_device = brcmf_p2p_stop_device,
5075 .crit_proto_start = brcmf_cfg80211_crit_proto_start, 5171 .crit_proto_start = brcmf_cfg80211_crit_proto_start,
@@ -5078,8 +5174,7 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
5078}; 5174};
5079 5175
5080struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, 5176struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
5081 enum nl80211_iftype type, 5177 enum nl80211_iftype type)
5082 bool pm_block)
5083{ 5178{
5084 struct brcmf_cfg80211_vif *vif_walk; 5179 struct brcmf_cfg80211_vif *vif_walk;
5085 struct brcmf_cfg80211_vif *vif; 5180 struct brcmf_cfg80211_vif *vif;
@@ -5094,8 +5189,6 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
5094 vif->wdev.wiphy = cfg->wiphy; 5189 vif->wdev.wiphy = cfg->wiphy;
5095 vif->wdev.iftype = type; 5190 vif->wdev.iftype = type;
5096 5191
5097 vif->pm_block = pm_block;
5098
5099 brcmf_init_prof(&vif->profile); 5192 brcmf_init_prof(&vif->profile);
5100 5193
5101 if (type == NL80211_IFTYPE_AP) { 5194 if (type == NL80211_IFTYPE_AP) {
@@ -5296,7 +5389,7 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
5296 else 5389 else
5297 band = wiphy->bands[NL80211_BAND_5GHZ]; 5390 band = wiphy->bands[NL80211_BAND_5GHZ];
5298 5391
5299 freq = ieee80211_channel_to_frequency(ch.chnum, band->band); 5392 freq = ieee80211_channel_to_frequency(ch.control_ch_num, band->band);
5300 notify_channel = ieee80211_get_channel(wiphy, freq); 5393 notify_channel = ieee80211_get_channel(wiphy, freq);
5301 5394
5302done: 5395done:
@@ -5352,7 +5445,6 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
5352 struct net_device *ndev, 5445 struct net_device *ndev,
5353 const struct brcmf_event_msg *e, void *data) 5446 const struct brcmf_event_msg *e, void *data)
5354{ 5447{
5355 struct brcmf_if *ifp = netdev_priv(ndev);
5356 static int generation; 5448 static int generation;
5357 u32 event = e->event_code; 5449 u32 event = e->event_code;
5358 u32 reason = e->reason; 5450 u32 reason = e->reason;
@@ -5363,8 +5455,6 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
5363 ndev != cfg_to_ndev(cfg)) { 5455 ndev != cfg_to_ndev(cfg)) {
5364 brcmf_dbg(CONN, "AP mode link down\n"); 5456 brcmf_dbg(CONN, "AP mode link down\n");
5365 complete(&cfg->vif_disabled); 5457 complete(&cfg->vif_disabled);
5366 if (ifp->vif->mbss)
5367 brcmf_remove_interface(ifp);
5368 return 0; 5458 return 0;
5369 } 5459 }
5370 5460
@@ -5818,14 +5908,15 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
5818 channel = band->channels; 5908 channel = band->channels;
5819 index = band->n_channels; 5909 index = band->n_channels;
5820 for (j = 0; j < band->n_channels; j++) { 5910 for (j = 0; j < band->n_channels; j++) {
5821 if (channel[j].hw_value == ch.chnum) { 5911 if (channel[j].hw_value == ch.control_ch_num) {
5822 index = j; 5912 index = j;
5823 break; 5913 break;
5824 } 5914 }
5825 } 5915 }
5826 channel[index].center_freq = 5916 channel[index].center_freq =
5827 ieee80211_channel_to_frequency(ch.chnum, band->band); 5917 ieee80211_channel_to_frequency(ch.control_ch_num,
5828 channel[index].hw_value = ch.chnum; 5918 band->band);
5919 channel[index].hw_value = ch.control_ch_num;
5829 5920
5830 /* assuming the chanspecs order is HT20, 5921 /* assuming the chanspecs order is HT20,
5831 * HT40 upper, HT40 lower, and VHT80. 5922 * HT40 upper, HT40 lower, and VHT80.
@@ -5927,7 +6018,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
5927 if (WARN_ON(ch.bw != BRCMU_CHAN_BW_40)) 6018 if (WARN_ON(ch.bw != BRCMU_CHAN_BW_40))
5928 continue; 6019 continue;
5929 for (j = 0; j < band->n_channels; j++) { 6020 for (j = 0; j < band->n_channels; j++) {
5930 if (band->channels[j].hw_value == ch.chnum) 6021 if (band->channels[j].hw_value == ch.control_ch_num)
5931 break; 6022 break;
5932 } 6023 }
5933 if (WARN_ON(j == band->n_channels)) 6024 if (WARN_ON(j == band->n_channels))
@@ -6715,11 +6806,10 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
6715 return NULL; 6806 return NULL;
6716 } 6807 }
6717 6808
6718 ops = kzalloc(sizeof(*ops), GFP_KERNEL); 6809 ops = kmemdup(&brcmf_cfg80211_ops, sizeof(*ops), GFP_KERNEL);
6719 if (!ops) 6810 if (!ops)
6720 return NULL; 6811 return NULL;
6721 6812
6722 memcpy(ops, &brcmf_cfg80211_ops, sizeof(*ops));
6723 ifp = netdev_priv(ndev); 6813 ifp = netdev_priv(ndev);
6724#ifdef CONFIG_PM 6814#ifdef CONFIG_PM
6725 if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK)) 6815 if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK))
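
The attach path now duplicates the ops template with kmemdup() rather than kzalloc() followed by memcpy(). A userspace stand-in showing the same one-call allocate-and-copy:

#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;	/* what kzalloc() + memcpy() did in two steps */
}

int main(void)
{
	static const int tmpl[4] = { 1, 2, 3, 4 };
	int *copy = memdup(tmpl, sizeof(tmpl));

	free(copy);
	return 0;
}
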
@@ -6740,7 +6830,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
6740 init_vif_event(&cfg->vif_event); 6830 init_vif_event(&cfg->vif_event);
6741 INIT_LIST_HEAD(&cfg->vif_list); 6831 INIT_LIST_HEAD(&cfg->vif_list);
6742 6832
6743 vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION, false); 6833 vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION);
6744 if (IS_ERR(vif)) 6834 if (IS_ERR(vif))
6745 goto wiphy_out; 6835 goto wiphy_out;
6746 6836
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 95e35bcc16ce..04bfc7e3ecde 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -20,6 +20,9 @@
20/* for brcmu_d11inf */ 20/* for brcmu_d11inf */
21#include <brcmu_d11.h> 21#include <brcmu_d11.h>
22 22
23#include "fwil_types.h"
24#include "p2p.h"
25
23#define WL_NUM_SCAN_MAX 10 26#define WL_NUM_SCAN_MAX 10
24#define WL_TLV_INFO_MAX 1024 27#define WL_TLV_INFO_MAX 1024
25#define WL_BSS_INFO_MAX 2048 28#define WL_BSS_INFO_MAX 2048
@@ -167,7 +170,6 @@ struct vif_saved_ie {
167 * @wdev: wireless device. 170 * @wdev: wireless device.
168 * @profile: profile information. 171 * @profile: profile information.
169 * @sme_state: SME state using enum brcmf_vif_status bits. 172 * @sme_state: SME state using enum brcmf_vif_status bits.
170 * @pm_block: power-management blocked.
171 * @list: linked list. 173 * @list: linked list.
172 * @mgmt_rx_reg: registered rx mgmt frame types. 174 * @mgmt_rx_reg: registered rx mgmt frame types.
173 * @mbss: Multiple BSS type, set if not first AP (not relevant for P2P). 175 * @mbss: Multiple BSS type, set if not first AP (not relevant for P2P).
@@ -177,7 +179,6 @@ struct brcmf_cfg80211_vif {
177 struct wireless_dev wdev; 179 struct wireless_dev wdev;
178 struct brcmf_cfg80211_profile profile; 180 struct brcmf_cfg80211_profile profile;
179 unsigned long sme_state; 181 unsigned long sme_state;
180 bool pm_block;
181 struct vif_saved_ie saved_ie; 182 struct vif_saved_ie saved_ie;
182 struct list_head list; 183 struct list_head list;
183 u16 mgmt_rx_reg; 184 u16 mgmt_rx_reg;
@@ -388,8 +389,7 @@ s32 brcmf_cfg80211_down(struct net_device *ndev);
388enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp); 389enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp);
389 390
390struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, 391struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
391 enum nl80211_iftype type, 392 enum nl80211_iftype type);
392 bool pm_block);
393void brcmf_free_vif(struct brcmf_cfg80211_vif *vif); 393void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
394 394
395s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, 395s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index d3fd6b1db1d9..05f22ff81d60 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -685,6 +685,8 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
685 case BRCM_CC_43602_CHIP_ID: 685 case BRCM_CC_43602_CHIP_ID:
686 case BRCM_CC_4371_CHIP_ID: 686 case BRCM_CC_4371_CHIP_ID:
687 return 0x180000; 687 return 0x180000;
688 case BRCM_CC_43465_CHIP_ID:
689 case BRCM_CC_43525_CHIP_ID:
688 case BRCM_CC_4365_CHIP_ID: 690 case BRCM_CC_4365_CHIP_ID:
689 case BRCM_CC_4366_CHIP_ID: 691 case BRCM_CC_4366_CHIP_ID:
690 return 0x200000; 692 return 0x200000;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index b590499f6883..faf4e46bd65b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -516,7 +516,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
516 /* set appropriate operations */ 516 /* set appropriate operations */
517 ndev->netdev_ops = &brcmf_netdev_ops_pri; 517 ndev->netdev_ops = &brcmf_netdev_ops_pri;
518 518
519 ndev->hard_header_len += drvr->hdrlen; 519 ndev->needed_headroom += drvr->hdrlen;
520 ndev->ethtool_ops = &brcmf_ethtool_ops; 520 ndev->ethtool_ops = &brcmf_ethtool_ops;
521 521
522 drvr->rxsz = ndev->mtu + ndev->hard_header_len + 522 drvr->rxsz = ndev->mtu + ndev->hard_header_len +
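
The core.c change moves the bus header allowance from hard_header_len, which the stack treats as part of the link-layer header, to needed_headroom, which only asks allocators to reserve extra skb space. A kernel-style sketch of the idiom (hypothetical setup helper, not the driver's code):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void example_setup(struct net_device *ndev, unsigned int bus_hdrlen)
{
	ether_setup(ndev);			/* hard_header_len = ETH_HLEN */
	ndev->needed_headroom += bus_hdrlen;	/* extra skb headroom only */
}
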
@@ -753,30 +753,6 @@ void brcmf_remove_interface(struct brcmf_if *ifp)
753 brcmf_del_if(ifp->drvr, ifp->bsscfgidx); 753 brcmf_del_if(ifp->drvr, ifp->bsscfgidx);
754} 754}
755 755
756int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr)
757{
758 int ifidx;
759 int bsscfgidx;
760 bool available;
761 int highest;
762
763 available = false;
764 bsscfgidx = 2;
765 highest = 2;
766 for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
767 if (drvr->iflist[ifidx]) {
768 if (drvr->iflist[ifidx]->bsscfgidx == bsscfgidx)
769 bsscfgidx = highest + 1;
770 else if (drvr->iflist[ifidx]->bsscfgidx > highest)
771 highest = drvr->iflist[ifidx]->bsscfgidx;
772 } else {
773 available = true;
774 }
775 }
776
777 return available ? bsscfgidx : -ENOMEM;
778}
779
780#ifdef CONFIG_INET 756#ifdef CONFIG_INET
781#define ARPOL_MAX_ENTRIES 8 757#define ARPOL_MAX_ENTRIES 8
782static int brcmf_inetaddr_changed(struct notifier_block *nb, 758static int brcmf_inetaddr_changed(struct notifier_block *nb,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 647d3cc2a4dc..2a075c5f6f8b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -217,7 +217,6 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
217struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx, 217struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx,
218 bool is_p2pdev, char *name, u8 *mac_addr); 218 bool is_p2pdev, char *name, u8 *mac_addr);
219void brcmf_remove_interface(struct brcmf_if *ifp); 219void brcmf_remove_interface(struct brcmf_if *ifp);
220int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr);
221void brcmf_txflowblock_if(struct brcmf_if *ifp, 220void brcmf_txflowblock_if(struct brcmf_if *ifp,
222 enum brcmf_netif_stop_reason reason, bool state); 221 enum brcmf_netif_stop_reason reason, bool state);
223void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success); 222void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 5b30922b67ec..cd221ab55062 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -2101,7 +2101,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
2101 2101
2102 brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto)); 2102 brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
2103 /* determine the priority */ 2103 /* determine the priority */
2104 if (!skb->priority) 2104 if ((skb->priority == 0) || (skb->priority > 7))
2105 skb->priority = cfg80211_classify8021d(skb, NULL); 2105 skb->priority = cfg80211_classify8021d(skb, NULL);
2106 2106
2107 drvr->tx_multicast += !!multicast; 2107 drvr->tx_multicast += !!multicast;
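
802.1d user priorities run 0..7, so the widened check above also reclassifies out-of-range skb->priority values instead of letting them index past the precedence tables. A small model:

#include <stdio.h>

static unsigned int classify8021d(void) { return 0; /* best effort */ }

static unsigned int tx_priority(unsigned int skb_priority)
{
	if (skb_priority == 0 || skb_priority > 7)
		skb_priority = classify8021d();
	return skb_priority;
}

int main(void)
{
	printf("%u %u %u\n", tx_priority(0), tx_priority(5), tx_priority(42));
	/* prints: 0 5 0 */
	return 0;
}
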
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index a70cda6c0592..f38a82133540 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1246,7 +1246,7 @@ bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
1246 if (!bi->ctl_ch) { 1246 if (!bi->ctl_ch) {
1247 ch.chspec = le16_to_cpu(bi->chanspec); 1247 ch.chspec = le16_to_cpu(bi->chanspec);
1248 cfg->d11inf.decchspec(&ch); 1248 cfg->d11inf.decchspec(&ch);
1249 bi->ctl_ch = ch.chnum; 1249 bi->ctl_ch = ch.control_ch_num;
1250 } 1250 }
1251 afx_hdl->peer_chan = bi->ctl_ch; 1251 afx_hdl->peer_chan = bi->ctl_ch;
1252 brcmf_dbg(TRACE, "ACTION FRAME SCAN : Peer %pM found, channel : %d\n", 1252 brcmf_dbg(TRACE, "ACTION FRAME SCAN : Peer %pM found, channel : %d\n",
@@ -1385,7 +1385,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
1385 if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, 1385 if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
1386 &p2p->status) && 1386 &p2p->status) &&
1387 (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) { 1387 (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) {
1388 afx_hdl->peer_chan = ch.chnum; 1388 afx_hdl->peer_chan = ch.control_ch_num;
1389 brcmf_dbg(INFO, "GON request: Peer found, channel=%d\n", 1389 brcmf_dbg(INFO, "GON request: Peer found, channel=%d\n",
1390 afx_hdl->peer_chan); 1390 afx_hdl->peer_chan);
1391 complete(&afx_hdl->act_frm_scan); 1391 complete(&afx_hdl->act_frm_scan);
@@ -1428,7 +1428,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
1428 memcpy(&mgmt_frame->u, frame, mgmt_frame_len); 1428 memcpy(&mgmt_frame->u, frame, mgmt_frame_len);
1429 mgmt_frame_len += offsetof(struct ieee80211_mgmt, u); 1429 mgmt_frame_len += offsetof(struct ieee80211_mgmt, u);
1430 1430
1431 freq = ieee80211_channel_to_frequency(ch.chnum, 1431 freq = ieee80211_channel_to_frequency(ch.control_ch_num,
1432 ch.band == BRCMU_CHAN_BAND_2G ? 1432 ch.band == BRCMU_CHAN_BAND_2G ?
1433 NL80211_BAND_2GHZ : 1433 NL80211_BAND_2GHZ :
1434 NL80211_BAND_5GHZ); 1434 NL80211_BAND_5GHZ);
@@ -1873,7 +1873,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
1873 1873
1874 if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) && 1874 if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) &&
1875 (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) { 1875 (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) {
1876 afx_hdl->peer_chan = ch.chnum; 1876 afx_hdl->peer_chan = ch.control_ch_num;
1877 brcmf_dbg(INFO, "PROBE REQUEST: Peer found, channel=%d\n", 1877 brcmf_dbg(INFO, "PROBE REQUEST: Peer found, channel=%d\n",
1878 afx_hdl->peer_chan); 1878 afx_hdl->peer_chan);
1879 complete(&afx_hdl->act_frm_scan); 1879 complete(&afx_hdl->act_frm_scan);
@@ -1898,7 +1898,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
1898 1898
1899 mgmt_frame = (u8 *)(rxframe + 1); 1899 mgmt_frame = (u8 *)(rxframe + 1);
1900 mgmt_frame_len = e->datalen - sizeof(*rxframe); 1900 mgmt_frame_len = e->datalen - sizeof(*rxframe);
1901 freq = ieee80211_channel_to_frequency(ch.chnum, 1901 freq = ieee80211_channel_to_frequency(ch.control_ch_num,
1902 ch.band == BRCMU_CHAN_BAND_2G ? 1902 ch.band == BRCMU_CHAN_BAND_2G ?
1903 NL80211_BAND_2GHZ : 1903 NL80211_BAND_2GHZ :
1904 NL80211_BAND_5GHZ); 1904 NL80211_BAND_5GHZ);
@@ -2030,8 +2030,6 @@ static int brcmf_p2p_request_p2p_if(struct brcmf_p2p_info *p2p,
2030 2030
2031 err = brcmf_fil_iovar_data_set(ifp, "p2p_ifadd", &if_request, 2031 err = brcmf_fil_iovar_data_set(ifp, "p2p_ifadd", &if_request,
2032 sizeof(if_request)); 2032 sizeof(if_request));
2033 if (err)
2034 return err;
2035 2033
2036 return err; 2034 return err;
2037} 2035}
@@ -2076,8 +2074,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
2076 if (p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif) 2074 if (p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
2077 return ERR_PTR(-ENOSPC); 2075 return ERR_PTR(-ENOSPC);
2078 2076
2079 p2p_vif = brcmf_alloc_vif(p2p->cfg, NL80211_IFTYPE_P2P_DEVICE, 2077 p2p_vif = brcmf_alloc_vif(p2p->cfg, NL80211_IFTYPE_P2P_DEVICE);
2080 false);
2081 if (IS_ERR(p2p_vif)) { 2078 if (IS_ERR(p2p_vif)) {
2082 brcmf_err("could not create discovery vif\n"); 2079 brcmf_err("could not create discovery vif\n");
2083 return (struct wireless_dev *)p2p_vif; 2080 return (struct wireless_dev *)p2p_vif;
@@ -2177,7 +2174,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
2177 return ERR_PTR(-EOPNOTSUPP); 2174 return ERR_PTR(-EOPNOTSUPP);
2178 } 2175 }
2179 2176
2180 vif = brcmf_alloc_vif(cfg, type, false); 2177 vif = brcmf_alloc_vif(cfg, type);
2181 if (IS_ERR(vif)) 2178 if (IS_ERR(vif))
2182 return (struct wireless_dev *)vif; 2179 return (struct wireless_dev *)vif;
2183 brcmf_cfg80211_arm_vif_event(cfg, vif); 2180 brcmf_cfg80211_arm_vif_event(cfg, vif);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 0af8db82da0c..3deba90c7eb5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -54,21 +54,25 @@ BRCMF_FW_NVRAM_DEF(43570, "brcmfmac43570-pcie.bin", "brcmfmac43570-pcie.txt");
54BRCMF_FW_NVRAM_DEF(4358, "brcmfmac4358-pcie.bin", "brcmfmac4358-pcie.txt"); 54BRCMF_FW_NVRAM_DEF(4358, "brcmfmac4358-pcie.bin", "brcmfmac4358-pcie.txt");
55BRCMF_FW_NVRAM_DEF(4359, "brcmfmac4359-pcie.bin", "brcmfmac4359-pcie.txt"); 55BRCMF_FW_NVRAM_DEF(4359, "brcmfmac4359-pcie.bin", "brcmfmac4359-pcie.txt");
56BRCMF_FW_NVRAM_DEF(4365B, "brcmfmac4365b-pcie.bin", "brcmfmac4365b-pcie.txt"); 56BRCMF_FW_NVRAM_DEF(4365B, "brcmfmac4365b-pcie.bin", "brcmfmac4365b-pcie.txt");
57BRCMF_FW_NVRAM_DEF(4365C, "brcmfmac4365c-pcie.bin", "brcmfmac4365c-pcie.txt");
57BRCMF_FW_NVRAM_DEF(4366B, "brcmfmac4366b-pcie.bin", "brcmfmac4366b-pcie.txt"); 58BRCMF_FW_NVRAM_DEF(4366B, "brcmfmac4366b-pcie.bin", "brcmfmac4366b-pcie.txt");
58BRCMF_FW_NVRAM_DEF(4366C, "brcmfmac4366c-pcie.bin", "brcmfmac4366c-pcie.txt"); 59BRCMF_FW_NVRAM_DEF(4366C, "brcmfmac4366c-pcie.bin", "brcmfmac4366c-pcie.txt");
59BRCMF_FW_NVRAM_DEF(4371, "brcmfmac4371-pcie.bin", "brcmfmac4371-pcie.txt"); 60BRCMF_FW_NVRAM_DEF(4371, "brcmfmac4371-pcie.bin", "brcmfmac4371-pcie.txt");
60 61
61static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { 62static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
62 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602), 63 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
64 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
63 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C), 65 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C),
64 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350), 66 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350),
67 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C),
65 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356), 68 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
66 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570), 69 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570),
67 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570), 70 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570),
68 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570), 71 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
69 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358), 72 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
70 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359), 73 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
71 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFFF, 4365B), 74 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
75 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
72 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B), 76 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
73 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C), 77 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C),
74 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371), 78 BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
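
In this firmware table the second field appears to be a bitmask over chip revisions, bit n covering rev n, so 0x0000000F selects revs 0-3 (the B images) and 0xFFFFFFF0 revs 4 and up (the C images). A standalone model of the lookup under that assumption:

#include <stdio.h>
#include <stdint.h>

struct fw_map { uint32_t chipid; uint32_t revmask; const char *fw; };

static const struct fw_map table[] = {
	{ 0x4365, 0x0000000F, "brcmfmac4365b-pcie.bin" },
	{ 0x4365, 0xFFFFFFF0, "brcmfmac4365c-pcie.bin" },
};

static const char *pick_fw(uint32_t chipid, unsigned int chiprev)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].chipid == chipid &&
		    (table[i].revmask & (1u << chiprev)))
			return table[i].fw;
	return NULL;
}

int main(void)
{
	printf("rev 2 -> %s\n", pick_fw(0x4365, 2));	/* 4365b image */
	printf("rev 5 -> %s\n", pick_fw(0x4365, 5));	/* 4365c image */
	return 0;
}
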
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 67e69bff2545..5fb8b91b9326 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -1384,8 +1384,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
1384 return -ENXIO; 1384 return -ENXIO;
1385 } 1385 }
1386 if (rd->seq_num != rx_seq) { 1386 if (rd->seq_num != rx_seq) {
1387 brcmf_err("seq %d: sequence number error, expect %d\n", 1387 brcmf_dbg(SDIO, "seq %d, expected %d\n", rx_seq, rd->seq_num);
1388 rx_seq, rd->seq_num);
1389 bus->sdcnt.rx_badseq++; 1388 bus->sdcnt.rx_badseq++;
1390 rd->seq_num = rx_seq; 1389 rd->seq_num = rx_seq;
1391 } 1390 }
@@ -3666,7 +3665,7 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
3666 str_shift = 11; 3665 str_shift = 11;
3667 break; 3666 break;
3668 default: 3667 default:
3669 brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n", 3668 brcmf_dbg(INFO, "No SDIO driver strength init needed for chip %s rev %d pmurev %d\n",
3670 ci->name, ci->chiprev, ci->pmurev); 3669 ci->name, ci->chiprev, ci->pmurev);
3671 break; 3670 break;
3672 } 3671 }
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index dcf0ce8cd2c1..f3da32fc6360 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -186,6 +186,7 @@ struct brcmf_sdio_dev {
186 struct brcmf_bus *bus_if; 186 struct brcmf_bus *bus_if;
187 struct brcmf_mp_device *settings; 187 struct brcmf_mp_device *settings;
188 bool oob_irq_requested; 188 bool oob_irq_requested;
189 bool sd_irq_requested;
189 bool irq_en; /* irq enable flags */ 190 bool irq_en; /* irq enable flags */
190 spinlock_t irq_en_lock; 191 spinlock_t irq_en_lock;
191 bool irq_wake; /* irq wake enable flags */ 192 bool irq_wake; /* irq wake enable flags */
@@ -293,7 +294,7 @@ struct sdpcmd_regs {
293 294
294/* Register/deregister interrupt handler. */ 295/* Register/deregister interrupt handler. */
295int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev); 296int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev);
296int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev); 297void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);
297 298
298/* sdio device register access interface */ 299/* sdio device register access interface */
299u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret); 300u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index 99dac9b8a082..b3aab2fe96eb 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -27017,7 +27017,7 @@ wlc_phy_rxcal_gainctrl_nphy_rev5(struct brcms_phy *pi, u8 rx_core,
27017 tx_core = 1 - rx_core; 27017 tx_core = 1 - rx_core;
27018 27018
27019 num_samps = 1024; 27019 num_samps = 1024;
27020 desired_log2_pwr = (cal_type == 0) ? 13 : 13; 27020 desired_log2_pwr = 13;
27021 27021
27022 wlc_phy_rx_iq_coeffs_nphy(pi, 0, &save_comp); 27022 wlc_phy_rx_iq_coeffs_nphy(pi, 0, &save_comp);
27023 zero_comp.a0 = zero_comp.b0 = zero_comp.a1 = zero_comp.b1 = 0x0; 27023 zero_comp.a0 = zero_comp.b0 = zero_comp.a1 = zero_comp.b1 = 0x0;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
index 2b2522bdd8eb..d8b79cb72b58 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
@@ -107,6 +107,7 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
107 u16 val; 107 u16 val;
108 108
109 ch->chnum = (u8)(ch->chspec & BRCMU_CHSPEC_CH_MASK); 109 ch->chnum = (u8)(ch->chspec & BRCMU_CHSPEC_CH_MASK);
110 ch->control_ch_num = ch->chnum;
110 111
111 switch (ch->chspec & BRCMU_CHSPEC_D11N_BW_MASK) { 112 switch (ch->chspec & BRCMU_CHSPEC_D11N_BW_MASK) {
112 case BRCMU_CHSPEC_D11N_BW_20: 113 case BRCMU_CHSPEC_D11N_BW_20:
@@ -118,10 +119,10 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
118 val = ch->chspec & BRCMU_CHSPEC_D11N_SB_MASK; 119 val = ch->chspec & BRCMU_CHSPEC_D11N_SB_MASK;
119 if (val == BRCMU_CHSPEC_D11N_SB_L) { 120 if (val == BRCMU_CHSPEC_D11N_SB_L) {
120 ch->sb = BRCMU_CHAN_SB_L; 121 ch->sb = BRCMU_CHAN_SB_L;
121 ch->chnum -= CH_10MHZ_APART; 122 ch->control_ch_num -= CH_10MHZ_APART;
122 } else { 123 } else {
123 ch->sb = BRCMU_CHAN_SB_U; 124 ch->sb = BRCMU_CHAN_SB_U;
124 ch->chnum += CH_10MHZ_APART; 125 ch->control_ch_num += CH_10MHZ_APART;
125 } 126 }
126 break; 127 break;
127 default: 128 default:
@@ -147,6 +148,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
147 u16 val; 148 u16 val;
148 149
149 ch->chnum = (u8)(ch->chspec & BRCMU_CHSPEC_CH_MASK); 150 ch->chnum = (u8)(ch->chspec & BRCMU_CHSPEC_CH_MASK);
151 ch->control_ch_num = ch->chnum;
150 152
151 switch (ch->chspec & BRCMU_CHSPEC_D11AC_BW_MASK) { 153 switch (ch->chspec & BRCMU_CHSPEC_D11AC_BW_MASK) {
152 case BRCMU_CHSPEC_D11AC_BW_20: 154 case BRCMU_CHSPEC_D11AC_BW_20:
@@ -158,10 +160,10 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
158 val = ch->chspec & BRCMU_CHSPEC_D11AC_SB_MASK; 160 val = ch->chspec & BRCMU_CHSPEC_D11AC_SB_MASK;
159 if (val == BRCMU_CHSPEC_D11AC_SB_L) { 161 if (val == BRCMU_CHSPEC_D11AC_SB_L) {
160 ch->sb = BRCMU_CHAN_SB_L; 162 ch->sb = BRCMU_CHAN_SB_L;
161 ch->chnum -= CH_10MHZ_APART; 163 ch->control_ch_num -= CH_10MHZ_APART;
162 } else if (val == BRCMU_CHSPEC_D11AC_SB_U) { 164 } else if (val == BRCMU_CHSPEC_D11AC_SB_U) {
163 ch->sb = BRCMU_CHAN_SB_U; 165 ch->sb = BRCMU_CHAN_SB_U;
164 ch->chnum += CH_10MHZ_APART; 166 ch->control_ch_num += CH_10MHZ_APART;
165 } else { 167 } else {
166 WARN_ON_ONCE(1); 168 WARN_ON_ONCE(1);
167 } 169 }
@@ -172,16 +174,16 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
172 BRCMU_CHSPEC_D11AC_SB_SHIFT); 174 BRCMU_CHSPEC_D11AC_SB_SHIFT);
173 switch (ch->sb) { 175 switch (ch->sb) {
174 case BRCMU_CHAN_SB_LL: 176 case BRCMU_CHAN_SB_LL:
175 ch->chnum -= CH_30MHZ_APART; 177 ch->control_ch_num -= CH_30MHZ_APART;
176 break; 178 break;
177 case BRCMU_CHAN_SB_LU: 179 case BRCMU_CHAN_SB_LU:
178 ch->chnum -= CH_10MHZ_APART; 180 ch->control_ch_num -= CH_10MHZ_APART;
179 break; 181 break;
180 case BRCMU_CHAN_SB_UL: 182 case BRCMU_CHAN_SB_UL:
181 ch->chnum += CH_10MHZ_APART; 183 ch->control_ch_num += CH_10MHZ_APART;
182 break; 184 break;
183 case BRCMU_CHAN_SB_UU: 185 case BRCMU_CHAN_SB_UU:
184 ch->chnum += CH_30MHZ_APART; 186 ch->control_ch_num += CH_30MHZ_APART;
185 break; 187 break;
186 default: 188 default:
187 WARN_ON_ONCE(1); 189 WARN_ON_ONCE(1);
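
After this split, chnum keeps the center channel and control_ch_num carries the sideband-adjusted control channel. Channel numbers sit 5 MHz apart, so CH_10MHZ_APART is 2 steps and CH_30MHZ_APART is 6. A worked example for an 80 MHz chanspec centered on channel 42:

#include <stdio.h>

enum sb { SB_LL, SB_LU, SB_UL, SB_UU };

static int control_from_center80(int center, enum sb sb)
{
	switch (sb) {
	case SB_LL: return center - 6;	/* CH_30MHZ_APART */
	case SB_LU: return center - 2;	/* CH_10MHZ_APART */
	case SB_UL: return center + 2;
	case SB_UU: return center + 6;
	}
	return center;
}

int main(void)
{
	/* 80 MHz block with center channel 42 spans 36/40/44/48 */
	for (enum sb s = SB_LL; s <= SB_UU; s++)
		printf("%d ", control_from_center80(42, s));
	printf("\n");	/* 36 40 44 48 */
	return 0;
}
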
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index 699f2c2782ee..3cc42bef6245 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -40,7 +40,9 @@
40#define BRCM_CC_4339_CHIP_ID 0x4339 40#define BRCM_CC_4339_CHIP_ID 0x4339
41#define BRCM_CC_43430_CHIP_ID 43430 41#define BRCM_CC_43430_CHIP_ID 43430
42#define BRCM_CC_4345_CHIP_ID 0x4345 42#define BRCM_CC_4345_CHIP_ID 0x4345
43#define BRCM_CC_43465_CHIP_ID 43465
43#define BRCM_CC_4350_CHIP_ID 0x4350 44#define BRCM_CC_4350_CHIP_ID 0x4350
45#define BRCM_CC_43525_CHIP_ID 43525
44#define BRCM_CC_4354_CHIP_ID 0x4354 46#define BRCM_CC_4354_CHIP_ID 0x4354
45#define BRCM_CC_4356_CHIP_ID 0x4356 47#define BRCM_CC_4356_CHIP_ID 0x4356
46#define BRCM_CC_43566_CHIP_ID 43566 48#define BRCM_CC_43566_CHIP_ID 43566
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h
index f9745ea8b3e0..8b8b2ecb3199 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h
@@ -125,14 +125,36 @@ enum brcmu_chan_sb {
125 BRCMU_CHAN_SB_UU = BRCMU_CHAN_SB_LUU, 125 BRCMU_CHAN_SB_UU = BRCMU_CHAN_SB_LUU,
126}; 126};
127 127
128/**
129 * struct brcmu_chan - stores channel formats
130 *
131 * This structure can be used with functions translating chanspec into generic
132 * channel info and the other way.
133 *
134 * @chspec: firmware specific format
135 * @chnum: center channel number
136 * @control_ch_num: control channel number
137 * @band: frequency band
138 * @bw: channel width
139 * @sb: control sideband (location of control channel against the center one)
140 */
128struct brcmu_chan { 141struct brcmu_chan {
129 u16 chspec; 142 u16 chspec;
130 u8 chnum; 143 u8 chnum;
144 u8 control_ch_num;
131 u8 band; 145 u8 band;
132 enum brcmu_chan_bw bw; 146 enum brcmu_chan_bw bw;
133 enum brcmu_chan_sb sb; 147 enum brcmu_chan_sb sb;
134}; 148};
135 149
150/**
151 * struct brcmu_d11inf - provides functions translating channel format
152 *
153 * @io_type: determines version of channel format used by firmware
154 * @encchspec: encodes channel info into a chanspec, requires center channel
155 * number, ignores control one
156 * @decchspec: decodes chanspec into generic info
157 */
136struct brcmu_d11inf { 158struct brcmu_d11inf {
137 u8 io_type; 159 u8 io_type;
138 160
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index 7bcedbb53d94..209dc9988455 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -1019,12 +1019,13 @@ il3945_hw_txq_ctx_free(struct il_priv *il)
1019 int txq_id; 1019 int txq_id;
1020 1020
1021 /* Tx queues */ 1021 /* Tx queues */
1022 if (il->txq) 1022 if (il->txq) {
1023 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) 1023 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
1024 if (txq_id == IL39_CMD_QUEUE_NUM) 1024 if (txq_id == IL39_CMD_QUEUE_NUM)
1025 il_cmd_queue_free(il); 1025 il_cmd_queue_free(il);
1026 else 1026 else
1027 il_tx_queue_free(il, txq_id); 1027 il_tx_queue_free(il, txq_id);
1028 }
1028 1029
1029 /* free tx queue structure */ 1030 /* free tx queue structure */
1030 il_free_txq_mem(il); 1031 il_free_txq_mem(il);
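
The added braces do not change behavior here (the else already bound to the innermost if), but they make the intended nesting explicit. A small demo of the same shape:

#include <stdio.h>

int main(void)
{
	int have_queues = 1, id;

	if (have_queues) {
		for (id = 0; id < 3; id++)
			if (id == 1)
				printf("free cmd queue %d\n", id);
			else
				printf("free tx queue %d\n", id);
	}
	return 0;
}
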
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 4dd5adcdd29b..a1e28a4fd658 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -30,6 +30,8 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/ktime.h> 31#include <linux/ktime.h>
32#include <net/genetlink.h> 32#include <net/genetlink.h>
33#include <net/net_namespace.h>
34#include <net/netns/generic.h>
33#include "mac80211_hwsim.h" 35#include "mac80211_hwsim.h"
34 36
35#define WARN_QUEUE 100 37#define WARN_QUEUE 100
@@ -250,6 +252,28 @@ static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c)
250 cp->magic = 0; 252 cp->magic = 0;
251} 253}
252 254
255static unsigned int hwsim_net_id;
256
257static int hwsim_netgroup;
258
259struct hwsim_net {
260 int netgroup;
261};
262
263static inline int hwsim_net_get_netgroup(struct net *net)
264{
265 struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id);
266
267 return hwsim_net->netgroup;
268}
269
270static inline void hwsim_net_set_netgroup(struct net *net)
271{
272 struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id);
273
274 hwsim_net->netgroup = hwsim_netgroup++;
275}
276
253static struct class *hwsim_class; 277static struct class *hwsim_class;
254 278
255static struct net_device *hwsim_mon; /* global monitor netdev */ 279static struct net_device *hwsim_mon; /* global monitor netdev */
@@ -526,6 +550,9 @@ struct mac80211_hwsim_data {
526 */ 550 */
527 u64 group; 551 u64 group;
528 552
553 /* group shared by radios created in the same netns */
554 int netgroup;
555
529 int power_level; 556 int power_level;
530 557
531 /* difference between this hw's clock and the real clock, in usecs */ 558 /* difference between this hw's clock and the real clock, in usecs */
@@ -568,6 +595,7 @@ static struct genl_family hwsim_genl_family = {
568 .name = "MAC80211_HWSIM", 595 .name = "MAC80211_HWSIM",
569 .version = 1, 596 .version = 1,
570 .maxattr = HWSIM_ATTR_MAX, 597 .maxattr = HWSIM_ATTR_MAX,
598 .netnsok = true,
571}; 599};
572 600
573enum hwsim_multicast_groups { 601enum hwsim_multicast_groups {
@@ -1202,6 +1230,9 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
1202 if (!(data->group & data2->group)) 1230 if (!(data->group & data2->group))
1203 continue; 1231 continue;
1204 1232
1233 if (data->netgroup != data2->netgroup)
1234 continue;
1235
1205 if (!hwsim_chans_compat(chan, data2->tmp_chan) && 1236 if (!hwsim_chans_compat(chan, data2->tmp_chan) &&
1206 !hwsim_chans_compat(chan, data2->channel)) { 1237 !hwsim_chans_compat(chan, data2->channel)) {
1207 ieee80211_iterate_active_interfaces_atomic( 1238 ieee80211_iterate_active_interfaces_atomic(
@@ -2349,6 +2380,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2349 struct ieee80211_hw *hw; 2380 struct ieee80211_hw *hw;
2350 enum nl80211_band band; 2381 enum nl80211_band band;
2351 const struct ieee80211_ops *ops = &mac80211_hwsim_ops; 2382 const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
2383 struct net *net;
2352 int idx; 2384 int idx;
2353 2385
2354 if (WARN_ON(param->channels > 1 && !param->use_chanctx)) 2386 if (WARN_ON(param->channels > 1 && !param->use_chanctx))
@@ -2366,6 +2398,13 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2366 err = -ENOMEM; 2398 err = -ENOMEM;
2367 goto failed; 2399 goto failed;
2368 } 2400 }
2401
2402 if (info)
2403 net = genl_info_net(info);
2404 else
2405 net = &init_net;
2406 wiphy_net_set(hw->wiphy, net);
2407
2369 data = hw->priv; 2408 data = hw->priv;
2370 data->hw = hw; 2409 data->hw = hw;
2371 2410
@@ -2541,6 +2580,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2541 data->group = 1; 2580 data->group = 1;
2542 mutex_init(&data->mutex); 2581 mutex_init(&data->mutex);
2543 2582
2583 data->netgroup = hwsim_net_get_netgroup(net);
2584
2544 /* Enable frame retransmissions for lossy channels */ 2585 /* Enable frame retransmissions for lossy channels */
2545 hw->max_rates = 4; 2586 hw->max_rates = 4;
2546 hw->max_rate_tries = 11; 2587 hw->max_rate_tries = 11;
@@ -3014,6 +3055,9 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
3014 continue; 3055 continue;
3015 } 3056 }
3016 3057
3058 if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info)))
3059 continue;
3060
3017 list_del(&data->list); 3061 list_del(&data->list);
3018 spin_unlock_bh(&hwsim_radio_lock); 3062 spin_unlock_bh(&hwsim_radio_lock);
3019 mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), 3063 mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
@@ -3040,6 +3084,9 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
3040 if (data->idx != idx) 3084 if (data->idx != idx)
3041 continue; 3085 continue;
3042 3086
3087 if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info)))
3088 continue;
3089
3043 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 3090 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3044 if (!skb) { 3091 if (!skb) {
3045 res = -ENOMEM; 3092 res = -ENOMEM;
@@ -3079,6 +3126,9 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
3079 if (data->idx < idx) 3126 if (data->idx < idx)
3080 continue; 3127 continue;
3081 3128
3129 if (!net_eq(wiphy_net(data->hw->wiphy), sock_net(skb->sk)))
3130 continue;
3131
3082 res = mac80211_hwsim_get_radio(skb, data, 3132 res = mac80211_hwsim_get_radio(skb, data,
3083 NETLINK_CB(cb->skb).portid, 3133 NETLINK_CB(cb->skb).portid,
3084 cb->nlh->nlmsg_seq, cb, 3134 cb->nlh->nlmsg_seq, cb,
@@ -3118,13 +3168,13 @@ static const struct genl_ops hwsim_ops[] = {
3118 .cmd = HWSIM_CMD_NEW_RADIO, 3168 .cmd = HWSIM_CMD_NEW_RADIO,
3119 .policy = hwsim_genl_policy, 3169 .policy = hwsim_genl_policy,
3120 .doit = hwsim_new_radio_nl, 3170 .doit = hwsim_new_radio_nl,
3121 .flags = GENL_ADMIN_PERM, 3171 .flags = GENL_UNS_ADMIN_PERM,
3122 }, 3172 },
3123 { 3173 {
3124 .cmd = HWSIM_CMD_DEL_RADIO, 3174 .cmd = HWSIM_CMD_DEL_RADIO,
3125 .policy = hwsim_genl_policy, 3175 .policy = hwsim_genl_policy,
3126 .doit = hwsim_del_radio_nl, 3176 .doit = hwsim_del_radio_nl,
3127 .flags = GENL_ADMIN_PERM, 3177 .flags = GENL_UNS_ADMIN_PERM,
3128 }, 3178 },
3129 { 3179 {
3130 .cmd = HWSIM_CMD_GET_RADIO, 3180 .cmd = HWSIM_CMD_GET_RADIO,
@@ -3206,6 +3256,40 @@ failure:
3206 return -EINVAL; 3256 return -EINVAL;
3207} 3257}
3208 3258
3259static __net_init int hwsim_init_net(struct net *net)
3260{
3261 hwsim_net_set_netgroup(net);
3262
3263 return 0;
3264}
3265
3266static void __net_exit hwsim_exit_net(struct net *net)
3267{
3268 struct mac80211_hwsim_data *data, *tmp;
3269
3270 spin_lock_bh(&hwsim_radio_lock);
3271 list_for_each_entry_safe(data, tmp, &hwsim_radios, list) {
3272 if (!net_eq(wiphy_net(data->hw->wiphy), net))
3273 continue;
3274
3275 /* Radios created in init_net are returned to init_net. */
3276 if (data->netgroup == hwsim_net_get_netgroup(&init_net))
3277 continue;
3278
3279 list_del(&data->list);
3280 INIT_WORK(&data->destroy_work, destroy_radio);
3281 schedule_work(&data->destroy_work);
3282 }
3283 spin_unlock_bh(&hwsim_radio_lock);
3284}
3285
3286static struct pernet_operations hwsim_net_ops = {
3287 .init = hwsim_init_net,
3288 .exit = hwsim_exit_net,
3289 .id = &hwsim_net_id,
3290 .size = sizeof(struct hwsim_net),
3291};
3292
3209static void hwsim_exit_netlink(void) 3293static void hwsim_exit_netlink(void)
3210{ 3294{
3211 /* unregister the notifier */ 3295 /* unregister the notifier */
@@ -3242,10 +3326,14 @@ static int __init init_mac80211_hwsim(void)
3242 spin_lock_init(&hwsim_radio_lock); 3326 spin_lock_init(&hwsim_radio_lock);
3243 INIT_LIST_HEAD(&hwsim_radios); 3327 INIT_LIST_HEAD(&hwsim_radios);
3244 3328
3245 err = platform_driver_register(&mac80211_hwsim_driver); 3329 err = register_pernet_device(&hwsim_net_ops);
3246 if (err) 3330 if (err)
3247 return err; 3331 return err;
3248 3332
3333 err = platform_driver_register(&mac80211_hwsim_driver);
3334 if (err)
3335 goto out_unregister_pernet;
3336
3249 hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim"); 3337 hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
3250 if (IS_ERR(hwsim_class)) { 3338 if (IS_ERR(hwsim_class)) {
3251 err = PTR_ERR(hwsim_class); 3339 err = PTR_ERR(hwsim_class);
@@ -3363,6 +3451,8 @@ out_free_radios:
3363 mac80211_hwsim_free(); 3451 mac80211_hwsim_free();
3364out_unregister_driver: 3452out_unregister_driver:
3365 platform_driver_unregister(&mac80211_hwsim_driver); 3453 platform_driver_unregister(&mac80211_hwsim_driver);
3454out_unregister_pernet:
3455 unregister_pernet_device(&hwsim_net_ops);
3366 return err; 3456 return err;
3367} 3457}
3368module_init(init_mac80211_hwsim); 3458module_init(init_mac80211_hwsim);
@@ -3376,5 +3466,6 @@ static void __exit exit_mac80211_hwsim(void)
3376 mac80211_hwsim_free(); 3466 mac80211_hwsim_free();
3377 unregister_netdev(hwsim_mon); 3467 unregister_netdev(hwsim_mon);
3378 platform_driver_unregister(&mac80211_hwsim_driver); 3468 platform_driver_unregister(&mac80211_hwsim_driver);
3469 unregister_pernet_device(&hwsim_net_ops);
3379} 3470}
3380module_exit(exit_mac80211_hwsim); 3471module_exit(exit_mac80211_hwsim);
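
The hwsim changes hang a per-namespace group off each struct net via the pernet machinery, tag every radio with its creator's group, and skip delivery between radios whose groups differ. A condensed kernel-style sketch of the pernet pattern, assuming the 4.x netns API; a sketch, not a drop-in:

#include <net/net_namespace.h>
#include <net/netns/generic.h>

static unsigned int example_net_id;

struct example_net {
	int netgroup;	/* radios only talk within one group */
};

static __net_init int example_init_net(struct net *net)
{
	static int next_group;
	struct example_net *en = net_generic(net, example_net_id);

	en->netgroup = next_group++;
	return 0;
}

static void __net_exit example_exit_net(struct net *net)
{
	/* tear down objects that belong to this namespace */
}

static struct pernet_operations example_net_ops = {
	.init = example_init_net,
	.exit = example_exit_net,
	.id   = &example_net_id,
	.size = sizeof(struct example_net),
};

/* register_pernet_device(&example_net_ops) from module init,
 * unregister_pernet_device(&example_net_ops) from module exit */
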
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 13eae9ff8c35..47f4a14c84fe 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1228,7 +1228,7 @@ static int if_sdio_probe(struct sdio_func *func,
1228 } 1228 }
1229 1229
1230 spin_lock_init(&card->lock); 1230 spin_lock_init(&card->lock);
1231 card->workqueue = create_workqueue("libertas_sdio"); 1231 card->workqueue = alloc_workqueue("libertas_sdio", WQ_MEM_RECLAIM, 0);
1232 INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker); 1232 INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker);
1233 init_waitqueue_head(&card->pwron_waitq); 1233 init_waitqueue_head(&card->pwron_waitq);
1234 1234
@@ -1326,7 +1326,6 @@ static void if_sdio_remove(struct sdio_func *func)
1326 lbs_stop_card(card->priv); 1326 lbs_stop_card(card->priv);
1327 lbs_remove_card(card->priv); 1327 lbs_remove_card(card->priv);
1328 1328
1329 flush_workqueue(card->workqueue);
1330 destroy_workqueue(card->workqueue); 1329 destroy_workqueue(card->workqueue);
1331 1330
1332 while (card->packets) { 1331 while (card->packets) {
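
Both libertas hunks converge on the current workqueue idiom: allocate with WQ_MEM_RECLAIM for a queue that may run on the memory-reclaim path, and drop the explicit flush since destroy_workqueue() drains pending work itself. A kernel-style sketch:

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *wq;

static int example_probe(void)
{
	wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	if (!wq)
		return -ENOMEM;
	return 0;
}

static void example_remove(void)
{
	destroy_workqueue(wq);	/* flushes remaining work itself */
}
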
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index 82c0796377aa..c3a53cd6988e 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -1180,7 +1180,7 @@ static int if_spi_probe(struct spi_device *spi)
1180 priv->fw_ready = 1; 1180 priv->fw_ready = 1;
1181 1181
1182 /* Initialize interrupt handling stuff. */ 1182 /* Initialize interrupt handling stuff. */
1183 card->workqueue = create_workqueue("libertas_spi"); 1183 card->workqueue = alloc_workqueue("libertas_spi", WQ_MEM_RECLAIM, 0);
1184 INIT_WORK(&card->packet_work, if_spi_host_to_card_worker); 1184 INIT_WORK(&card->packet_work, if_spi_host_to_card_worker);
1185 INIT_WORK(&card->resume_work, if_spi_resume_worker); 1185 INIT_WORK(&card->resume_work, if_spi_resume_worker);
1186 1186
@@ -1208,7 +1208,6 @@ static int if_spi_probe(struct spi_device *spi)
1208release_irq: 1208release_irq:
1209 free_irq(spi->irq, card); 1209 free_irq(spi->irq, card);
1210terminate_workqueue: 1210terminate_workqueue:
1211 flush_workqueue(card->workqueue);
1212 destroy_workqueue(card->workqueue); 1211 destroy_workqueue(card->workqueue);
1213 lbs_remove_card(priv); /* will call free_netdev */ 1212 lbs_remove_card(priv); /* will call free_netdev */
1214free_card: 1213free_card:
@@ -1235,7 +1234,6 @@ static int libertas_spi_remove(struct spi_device *spi)
1235 lbs_remove_card(priv); /* will call free_netdev */ 1234 lbs_remove_card(priv); /* will call free_netdev */
1236 1235
1237 free_irq(spi->irq, card); 1236 free_irq(spi->irq, card);
1238 flush_workqueue(card->workqueue);
1239 destroy_workqueue(card->workqueue); 1237 destroy_workqueue(card->workqueue);
1240 if (card->pdata->teardown) 1238 if (card->pdata->teardown)
1241 card->pdata->teardown(spi); 1239 card->pdata->teardown(spi);
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 0bf8916a02cf..75bf0c8a2f6f 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -16,7 +16,6 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include "libertas_tf.h" 17#include "libertas_tf.h"
18 18
19#define DRIVER_RELEASE_VERSION "004.p0"
20/* thinfirm version: 5.132.X.pX */ 19/* thinfirm version: 5.132.X.pX */
21#define LBTF_FW_VER_MIN 0x05840300 20#define LBTF_FW_VER_MIN 0x05840300
22#define LBTF_FW_VER_MAX 0x0584ffff 21#define LBTF_FW_VER_MAX 0x0584ffff
@@ -27,12 +26,6 @@ unsigned int lbtf_debug;
27EXPORT_SYMBOL_GPL(lbtf_debug); 26EXPORT_SYMBOL_GPL(lbtf_debug);
28module_param_named(libertas_tf_debug, lbtf_debug, int, 0644); 27module_param_named(libertas_tf_debug, lbtf_debug, int, 0644);
29 28
30static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION
31#ifdef DEBUG
32 "-dbg"
33#endif
34 "";
35
36struct workqueue_struct *lbtf_wq; 29struct workqueue_struct *lbtf_wq;
37 30
38static const struct ieee80211_channel lbtf_channels[] = { 31static const struct ieee80211_channel lbtf_channels[] = {
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
index 1efef3b8273d..dc49c3de1f25 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
@@ -184,7 +184,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
184 184
185 tx_info_src = MWIFIEX_SKB_TXCB(skb_src); 185 tx_info_src = MWIFIEX_SKB_TXCB(skb_src);
186 skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size, 186 skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size,
187 GFP_ATOMIC | GFP_DMA); 187 GFP_ATOMIC);
188 if (!skb_aggr) { 188 if (!skb_aggr) {
189 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, 189 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
190 ra_list_flags); 190 ra_list_flags);
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 78c532f0d286..a6d86d4ccd22 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -788,3 +788,4 @@ poll_fw:
788 788
789 return ret; 789 return ret;
790} 790}
791EXPORT_SYMBOL_GPL(mwifiex_dnld_fw);
diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
index 62211fca91b7..a4b773d102b3 100644
--- a/drivers/net/wireless/marvell/mwifiex/join.c
+++ b/drivers/net/wireless/marvell/mwifiex/join.c
@@ -1281,7 +1281,7 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
1281 if (result) { 1281 if (result) {
1282 mwifiex_dbg(priv->adapter, ERROR, "ADHOC_RESP: failed\n"); 1282 mwifiex_dbg(priv->adapter, ERROR, "ADHOC_RESP: failed\n");
1283 if (priv->media_connected) 1283 if (priv->media_connected)
1284 mwifiex_reset_connect_state(priv, result); 1284 mwifiex_reset_connect_state(priv, result, true);
1285 1285
1286 memset(&priv->curr_bss_params.bss_descriptor, 1286 memset(&priv->curr_bss_params.bss_descriptor,
1287 0x00, sizeof(struct mwifiex_bssdescriptor)); 1287 0x00, sizeof(struct mwifiex_bssdescriptor));
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 8b67a552a690..0e280f879b58 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -526,10 +526,12 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
526 fw.fw_buf = (u8 *) adapter->firmware->data; 526 fw.fw_buf = (u8 *) adapter->firmware->data;
527 fw.fw_len = adapter->firmware->size; 527 fw.fw_len = adapter->firmware->size;
528 528
529 if (adapter->if_ops.dnld_fw) 529 if (adapter->if_ops.dnld_fw) {
530 ret = adapter->if_ops.dnld_fw(adapter, &fw); 530 ret = adapter->if_ops.dnld_fw(adapter, &fw);
531 else 531 } else {
532 ret = mwifiex_dnld_fw(adapter, &fw); 532 ret = mwifiex_dnld_fw(adapter, &fw);
533 }
534
533 if (ret == -1) 535 if (ret == -1)
534 goto err_dnld_fw; 536 goto err_dnld_fw;
535 537
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 0207af00be42..f0cd055c0b2c 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1128,7 +1128,8 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
1128 struct mwifiex_bssdescriptor *bss_desc); 1128 struct mwifiex_bssdescriptor *bss_desc);
1129int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, 1129int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
1130 struct host_cmd_ds_command *resp); 1130 struct host_cmd_ds_command *resp);
1131void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason); 1131void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason,
1132 bool from_ap);
1132u8 mwifiex_band_to_radio_type(u8 band); 1133u8 mwifiex_band_to_radio_type(u8 band);
1133int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac); 1134int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac);
1134void mwifiex_deauthenticate_all(struct mwifiex_adapter *adapter); 1135void mwifiex_deauthenticate_all(struct mwifiex_adapter *adapter);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 0c7937eb6b77..1b1e266ce00f 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -507,7 +507,7 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
507 for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) { 507 for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
508 /* Allocate skb here so that firmware can DMA data from it */ 508 /* Allocate skb here so that firmware can DMA data from it */
509 skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE, 509 skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
510 GFP_KERNEL | GFP_DMA); 510 GFP_KERNEL);
511 if (!skb) { 511 if (!skb) {
512 mwifiex_dbg(adapter, ERROR, 512 mwifiex_dbg(adapter, ERROR,
513 "Unable to allocate skb for RX ring.\n"); 513 "Unable to allocate skb for RX ring.\n");
@@ -1319,7 +1319,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
1319 } 1319 }
1320 1320
1321 skb_tmp = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE, 1321 skb_tmp = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
1322 GFP_KERNEL | GFP_DMA); 1322 GFP_KERNEL);
1323 if (!skb_tmp) { 1323 if (!skb_tmp) {
1324 mwifiex_dbg(adapter, ERROR, 1324 mwifiex_dbg(adapter, ERROR,
1325 "Unable to allocate skb.\n"); 1325 "Unable to allocate skb.\n");
@@ -2804,7 +2804,7 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
2804} 2804}
2805 2805
2806/* 2806/*
2807 * This function get firmare name for downloading by revision id 2807 * This function gets the firmware name for downloading by revision id
2808 * 2808 *
2809 * Read revision id register to get revision id 2809 * Read revision id register to get revision id
2810 */ 2810 */
@@ -2901,10 +2901,11 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
2901{ 2901{
2902 struct pcie_service_card *card = adapter->card; 2902 struct pcie_service_card *card = adapter->card;
2903 const struct mwifiex_pcie_card_reg *reg; 2903 const struct mwifiex_pcie_card_reg *reg;
2904 struct pci_dev *pdev = card->dev; 2904 struct pci_dev *pdev;
2905 int i; 2905 int i;
2906 2906
2907 if (card) { 2907 if (card) {
2908 pdev = card->dev;
2908 if (card->msix_enable) { 2909 if (card->msix_enable) {
2909 for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) 2910 for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++)
2910 synchronize_irq(card->msix_entries[i].vector); 2911 synchronize_irq(card->msix_entries[i].vector);
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index bdc51ffd43ec..d3e1561ca075 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -102,10 +102,9 @@ static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card)
102 struct mwifiex_plt_wake_cfg *cfg; 102 struct mwifiex_plt_wake_cfg *cfg;
103 int ret; 103 int ret;
104 104
105 if (!dev->of_node || 105 if (!of_match_node(mwifiex_sdio_of_match_table, dev->of_node)) {
106 !of_match_node(mwifiex_sdio_of_match_table, dev->of_node)) { 106 dev_err(dev, "required compatible string missing\n");
107 dev_err(dev, "sdio platform data not available\n"); 107 return -EINVAL;
108 return -1;
109 } 108 }
110 109
111 card->plt_of_node = dev->of_node; 110 card->plt_of_node = dev->of_node;
@@ -115,7 +114,7 @@ static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card)
115 if (cfg && card->plt_of_node) { 114 if (cfg && card->plt_of_node) {
116 cfg->irq_wifi = irq_of_parse_and_map(card->plt_of_node, 0); 115 cfg->irq_wifi = irq_of_parse_and_map(card->plt_of_node, 0);
117 if (!cfg->irq_wifi) { 116 if (!cfg->irq_wifi) {
118 dev_err(dev, 117 dev_dbg(dev,
119 "fail to parse irq_wifi from device tree\n"); 118 "fail to parse irq_wifi from device tree\n");
120 } else { 119 } else {
121 ret = devm_request_irq(dev, cfg->irq_wifi, 120 ret = devm_request_irq(dev, cfg->irq_wifi,
@@ -183,24 +182,35 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
183 sdio_release_host(func); 182 sdio_release_host(func);
184 183
185 if (ret) { 184 if (ret) {
186 pr_err("%s: failed to enable function\n", __func__); 185 dev_err(&func->dev, "failed to enable function\n");
187 kfree(card); 186 goto err_free;
188 return -EIO;
189 } 187 }
190 188
191 /* device tree node parsing and platform specific configuration*/ 189 /* device tree node parsing and platform specific configuration*/
192 mwifiex_sdio_probe_of(&func->dev, card); 190 if (func->dev.of_node) {
193 191 ret = mwifiex_sdio_probe_of(&func->dev, card);
194 if (mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops, 192 if (ret) {
195 MWIFIEX_SDIO)) { 193 dev_err(&func->dev, "SDIO dt node parse failed\n");
196 pr_err("%s: add card failed\n", __func__); 194 goto err_disable;
197 kfree(card); 195 }
198 sdio_claim_host(func); 196 }
199 ret = sdio_disable_func(func); 197
200 sdio_release_host(func); 198 ret = mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops,
201 ret = -1; 199 MWIFIEX_SDIO);
200 if (ret) {
201 dev_err(&func->dev, "add card failed\n");
202 goto err_disable;
202 } 203 }
203 204
205 return 0;
206
207err_disable:
208 sdio_claim_host(func);
209 sdio_disable_func(func);
210 sdio_release_host(func);
211err_free:
212 kfree(card);
213
204 return ret; 214 return ret;
205} 215}
206 216
@@ -544,6 +554,19 @@ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
544 return mwifiex_write_reg(adapter, CONFIGURATION_REG, 0); 554 return mwifiex_write_reg(adapter, CONFIGURATION_REG, 0);
545} 555}
546 556
557static int mwifiex_sdio_dnld_fw(struct mwifiex_adapter *adapter,
558 struct mwifiex_fw_image *fw)
559{
560 struct sdio_mmc_card *card = adapter->card;
561 int ret;
562
563 sdio_claim_host(card->func);
564 ret = mwifiex_dnld_fw(adapter, fw);
565 sdio_release_host(card->func);
566
567 return ret;
568}
569
547/* 570/*
548 * This function is used to initialize IO ports for the 571 * This function is used to initialize IO ports for the
549 * chipsets supporting SDIO new mode eg SD8897. 572 * chipsets supporting SDIO new mode eg SD8897.
@@ -1492,7 +1515,7 @@ rx_curr_single:
1492 mwifiex_dbg(adapter, INFO, "info: RX: port: %d, rx_len: %d\n", 1515 mwifiex_dbg(adapter, INFO, "info: RX: port: %d, rx_len: %d\n",
1493 port, rx_len); 1516 port, rx_len);
1494 1517
1495 skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA); 1518 skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL);
1496 if (!skb) { 1519 if (!skb) {
1497 mwifiex_dbg(adapter, ERROR, 1520 mwifiex_dbg(adapter, ERROR,
1498 "single skb allocated fail,\t" 1521 "single skb allocated fail,\t"
@@ -1597,7 +1620,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
1597 rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE); 1620 rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
1598 mwifiex_dbg(adapter, INFO, "info: rx_len = %d\n", rx_len); 1621 mwifiex_dbg(adapter, INFO, "info: rx_len = %d\n", rx_len);
1599 1622
1600 skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA); 1623 skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL);
1601 if (!skb) 1624 if (!skb)
1602 return -1; 1625 return -1;
1603 1626
@@ -2732,6 +2755,7 @@ static struct mwifiex_if_ops sdio_ops = {
2732 .cleanup_mpa_buf = mwifiex_cleanup_mpa_buf, 2755 .cleanup_mpa_buf = mwifiex_cleanup_mpa_buf,
2733 .cmdrsp_complete = mwifiex_sdio_cmdrsp_complete, 2756 .cmdrsp_complete = mwifiex_sdio_cmdrsp_complete,
2734 .event_complete = mwifiex_sdio_event_complete, 2757 .event_complete = mwifiex_sdio_event_complete,
2758 .dnld_fw = mwifiex_sdio_dnld_fw,
2735 .card_reset = mwifiex_sdio_card_reset, 2759 .card_reset = mwifiex_sdio_card_reset,
2736 .reg_dump = mwifiex_sdio_reg_dump, 2760 .reg_dump = mwifiex_sdio_reg_dump,
2737 .device_dump = mwifiex_sdio_device_dump, 2761 .device_dump = mwifiex_sdio_device_dump,
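The new .dnld_fw hook above exists so firmware download runs with the SDIO host claimed while the common mwifiex_dnld_fw() stays bus-agnostic. The wrapper pattern, reduced to a sketch (the op callback is a hypothetical stand-in):

	/* Sketch: run a bus-neutral operation while the SDIO host is claimed,
	 * serializing against other users of the same MMC host.
	 */
	static int my_sdio_run_claimed(struct sdio_func *func,
				       int (*op)(void *ctx), void *ctx)
	{
		int ret;

		sdio_claim_host(func);
		ret = op(ctx);
		sdio_release_host(func);

		return ret;
	}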
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index d18c7979d723..bcfd4b743145 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -553,7 +553,8 @@ static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv,
553 if (!memcmp(resp->params.deauth.mac_addr, 553 if (!memcmp(resp->params.deauth.mac_addr,
554 &priv->curr_bss_params.bss_descriptor.mac_address, 554 &priv->curr_bss_params.bss_descriptor.mac_address,
555 sizeof(resp->params.deauth.mac_addr))) 555 sizeof(resp->params.deauth.mac_addr)))
556 mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING); 556 mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING,
557 false);
557 558
558 return 0; 559 return 0;
559} 560}
@@ -566,7 +567,7 @@ static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv,
566static int mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv, 567static int mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv,
567 struct host_cmd_ds_command *resp) 568 struct host_cmd_ds_command *resp)
568{ 569{
569 mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING); 570 mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING, false);
570 return 0; 571 return 0;
571} 572}
572 573
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 0104108b4ea2..0cefd40b2762 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -40,8 +40,8 @@
40 * - Erases current SSID and BSSID information 40 * - Erases current SSID and BSSID information
41 * - Sends a disconnect event to upper layers/applications. 41 * - Sends a disconnect event to upper layers/applications.
42 */ 42 */
43void 43void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code,
44mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) 44 bool from_ap)
45{ 45{
46 struct mwifiex_adapter *adapter = priv->adapter; 46 struct mwifiex_adapter *adapter = priv->adapter;
47 47
@@ -140,7 +140,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
140 if (priv->bss_mode == NL80211_IFTYPE_STATION || 140 if (priv->bss_mode == NL80211_IFTYPE_STATION ||
141 priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) { 141 priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
142 cfg80211_disconnected(priv->netdev, reason_code, NULL, 0, 142 cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
143 false, GFP_KERNEL); 143 !from_ap, GFP_KERNEL);
144 } 144 }
145 eth_zero_addr(priv->cfg_bssid); 145 eth_zero_addr(priv->cfg_bssid);
146 146
@@ -574,7 +574,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
574 if (priv->media_connected) { 574 if (priv->media_connected) {
575 reason_code = 575 reason_code =
576 le16_to_cpu(*(__le16 *)adapter->event_body); 576 le16_to_cpu(*(__le16 *)adapter->event_body);
577 mwifiex_reset_connect_state(priv, reason_code); 577 mwifiex_reset_connect_state(priv, reason_code, true);
578 } 578 }
579 break; 579 break;
580 580
@@ -589,7 +589,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
589 if (priv->media_connected) { 589 if (priv->media_connected) {
590 reason_code = 590 reason_code =
591 le16_to_cpu(*(__le16 *)adapter->event_body); 591 le16_to_cpu(*(__le16 *)adapter->event_body);
592 mwifiex_reset_connect_state(priv, reason_code); 592 mwifiex_reset_connect_state(priv, reason_code, true);
593 } 593 }
594 break; 594 break;
595 595
@@ -599,7 +599,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
599 if (priv->media_connected) { 599 if (priv->media_connected) {
600 reason_code = 600 reason_code =
601 le16_to_cpu(*(__le16 *)adapter->event_body); 601 le16_to_cpu(*(__le16 *)adapter->event_body);
602 mwifiex_reset_connect_state(priv, reason_code); 602 mwifiex_reset_connect_state(priv, reason_code, true);
603 } 603 }
604 break; 604 break;
605 605
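Threading from_ap through mwifiex_reset_connect_state() lets the driver fill cfg80211_disconnected()'s locally_generated argument honestly: deauth, disassoc and link-lost events from the AP pass true and are therefore reported as remotely generated, while host-initiated teardown keeps reporting locally generated. A sketch of the reporting call in isolation:

	/* Sketch: locally_generated must be false when the AP tore the
	 * link down, so userspace can tell kicks apart from local leaves.
	 */
	static void my_report_disconnect(struct net_device *ndev, u16 reason,
					 bool from_ap)
	{
		cfg80211_disconnected(ndev, reason, NULL, 0,
				      !from_ap /* locally_generated */,
				      GFP_KERNEL);
	}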
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 666e91af59d7..bf5660eb27d3 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -272,7 +272,7 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
272int mwifiex_uap_recv_packet(struct mwifiex_private *priv, 272int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
273 struct sk_buff *skb) 273 struct sk_buff *skb)
274{ 274{
275 struct mwifiex_adapter *adapter = adapter; 275 struct mwifiex_adapter *adapter = priv->adapter;
276 struct mwifiex_sta_node *src_node; 276 struct mwifiex_sta_node *src_node;
277 struct ethhdr *p_ethhdr; 277 struct ethhdr *p_ethhdr;
278 struct sk_buff *skb_uap; 278 struct sk_buff *skb_uap;
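The uap_txrx.c change fixes a self-initialization bug: "struct mwifiex_adapter *adapter = adapter;" left the pointer uninitialized while also suppressing the compiler's uninitialized-use warning. The bug class in miniature (types hypothetical):

	/* Sketch of the bug class; my_priv/my_adapter are placeholders.
	 * "T *p = p;" compiles cleanly and marks p as initialized for
	 * warning purposes, but its value is garbage.
	 */
	struct my_adapter { int id; };
	struct my_priv { struct my_adapter *adapter; };

	static struct my_adapter *get_adapter(struct my_priv *priv)
	{
		struct my_adapter *adapter = priv->adapter;	/* fixed form */
		/* was: struct my_adapter *adapter = adapter;	   garbage   */
		return adapter;
	}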
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index 8ee83b093c0d..e26a233684bb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -1839,20 +1839,22 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
1839 u8 hwinfo[HWSET_MAX_SIZE]; 1839 u8 hwinfo[HWSET_MAX_SIZE];
1840 u16 eeprom_id; 1840 u16 eeprom_id;
1841 1841
1842 if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { 1842 switch (rtlefuse->epromtype) {
1843 case EEPROM_BOOT_EFUSE:
1843 rtl_efuse_shadow_map_update(hw); 1844 rtl_efuse_shadow_map_update(hw);
1845 break;
1844 1846
1845 memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 1847 case EEPROM_93C46:
1846 HWSET_MAX_SIZE);
1847 } else if (rtlefuse->epromtype == EEPROM_93C46) {
1848 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 1848 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1849 "RTL819X Not boot from eeprom, check it !!"); 1849 "RTL819X Not boot from eeprom, check it !!");
1850 return; 1850 return;
1851 } else { 1851
1852 default:
1852 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 1853 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1853 "boot from neither eeprom nor efuse, check it !!"); 1854 "boot from neither eeprom nor efuse, check it !!");
1854 return; 1855 return;
1855 } 1856 }
1857 memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE);
1856 1858
1857 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n", 1859 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n",
1858 hwinfo, HWSET_MAX_SIZE); 1860 hwinfo, HWSET_MAX_SIZE);
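This rtl8188ee hunk sets the shape that the rtlwifi conversions below repeat: the if/else-if chain over rtlefuse->epromtype becomes a switch, every non-efuse case bails out before hwinfo is consumed, and the memcpy() of the efuse shadow map moves after the switch so it appears exactly once. Condensed, the target shape is:

	switch (rtlefuse->epromtype) {
	case EEPROM_BOOT_EFUSE:
		rtl_efuse_shadow_map_update(hw);
		break;			/* falling through here would be a bug */
	case EEPROM_93C46:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "RTL819X Not boot from eeprom, check it !!");
		return;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "boot from neither eeprom nor efuse, check it !!");
		return;
	}
	memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE);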
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index 04eb5c3f8464..58b7ac6899ef 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -1680,21 +1680,28 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
1680 struct rtl_priv *rtlpriv = rtl_priv(hw); 1680 struct rtl_priv *rtlpriv = rtl_priv(hw);
1681 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 1681 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1682 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1682 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1683 struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev;
1683 u16 i, usvalue; 1684 u16 i, usvalue;
1684 u8 hwinfo[HWSET_MAX_SIZE]; 1685 u8 hwinfo[HWSET_MAX_SIZE];
1685 u16 eeprom_id; 1686 u16 eeprom_id;
1686 1687
1687 if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { 1688 switch (rtlefuse->epromtype) {
1689 case EEPROM_BOOT_EFUSE:
1688 rtl_efuse_shadow_map_update(hw); 1690 rtl_efuse_shadow_map_update(hw);
1691 break;
1689 1692
1690 memcpy((void *)hwinfo, 1693 case EEPROM_93C46:
1691 (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
1692 HWSET_MAX_SIZE);
1693 } else if (rtlefuse->epromtype == EEPROM_93C46) {
1694 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 1694 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1695 "RTL819X Not boot from eeprom, check it !!"); 1695 "RTL819X Not boot from eeprom, check it !!");
1696 return;
1697
1698 default:
1699 dev_warn(dev, "no efuse data\n");
1700 return;
1696 } 1701 }
1697 1702
1703 memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE);
1704
1698 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP", 1705 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP",
1699 hwinfo, HWSET_MAX_SIZE); 1706 hwinfo, HWSET_MAX_SIZE);
1700 1707
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 34ce06441d1b..ae1129f916d5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -351,15 +351,21 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
351 u8 hwinfo[HWSET_MAX_SIZE] = {0}; 351 u8 hwinfo[HWSET_MAX_SIZE] = {0};
352 u16 eeprom_id; 352 u16 eeprom_id;
353 353
354 if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { 354 switch (rtlefuse->epromtype) {
355 case EEPROM_BOOT_EFUSE:
355 rtl_efuse_shadow_map_update(hw); 356 rtl_efuse_shadow_map_update(hw);
356 memcpy((void *)hwinfo, 357 break;
357 (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 358
358 HWSET_MAX_SIZE); 359 case EEPROM_93C46:
359 } else if (rtlefuse->epromtype == EEPROM_93C46) {
360 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 360 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
361 "RTL819X Not boot from eeprom, check it !!\n"); 361 "RTL819X Not boot from eeprom, check it !!\n");
362 return;
363
364 default:
365 pr_warn("rtl92cu: no efuse data\n\n");
366 return;
362 } 367 }
368 memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE);
363 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, "MAP", 369 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, "MAP",
364 hwinfo, HWSET_MAX_SIZE); 370 hwinfo, HWSET_MAX_SIZE);
365 eeprom_id = le16_to_cpu(*((__le16 *)&hwinfo[0])); 371 eeprom_id = le16_to_cpu(*((__le16 *)&hwinfo[0]));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index f49b60d31450..8618c322a3f8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -1744,23 +1744,29 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
1744 struct rtl_priv *rtlpriv = rtl_priv(hw); 1744 struct rtl_priv *rtlpriv = rtl_priv(hw);
1745 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 1745 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1746 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1746 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1747 struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev;
1747 u16 i, usvalue; 1748 u16 i, usvalue;
1748 u8 hwinfo[HWSET_MAX_SIZE]; 1749 u8 hwinfo[HWSET_MAX_SIZE];
1749 u16 eeprom_id; 1750 u16 eeprom_id;
1750 unsigned long flags; 1751 unsigned long flags;
1751 1752
1752 if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { 1753 switch (rtlefuse->epromtype) {
1754 case EEPROM_BOOT_EFUSE:
1753 spin_lock_irqsave(&globalmutex_for_power_and_efuse, flags); 1755 spin_lock_irqsave(&globalmutex_for_power_and_efuse, flags);
1754 rtl_efuse_shadow_map_update(hw); 1756 rtl_efuse_shadow_map_update(hw);
1755 _rtl92de_efuse_update_chip_version(hw); 1757 _rtl92de_efuse_update_chip_version(hw);
1756 spin_unlock_irqrestore(&globalmutex_for_power_and_efuse, flags); 1758 spin_unlock_irqrestore(&globalmutex_for_power_and_efuse, flags);
1757 memcpy((void *)hwinfo, (void *)&rtlefuse->efuse_map 1759 break;
1758 [EFUSE_INIT_MAP][0], 1760 case EEPROM_93C46:
1759 HWSET_MAX_SIZE);
1760 } else if (rtlefuse->epromtype == EEPROM_93C46) {
1761 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 1761 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1762 "RTL819X Not boot from eeprom, check it !!\n"); 1762 "RTL819X Not boot from eeprom, check it !!\n");
1763 return;
1764 default:
1765 dev_warn(dev, "no efuse data\n");
1766 return;
1763 } 1767 }
1768
1769 memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE);
1764 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP", 1770 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP",
1765 hwinfo, HWSET_MAX_SIZE); 1771 hwinfo, HWSET_MAX_SIZE);
1766 1772
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index 9fd3f1b6e4a8..28c260dd11ea 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -2102,20 +2102,22 @@ static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw)
2102 u8 hwinfo[HWSET_MAX_SIZE]; 2102 u8 hwinfo[HWSET_MAX_SIZE];
2103 u16 eeprom_id; 2103 u16 eeprom_id;
2104 2104
2105 if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { 2105 switch (rtlefuse->epromtype) {
2106 case EEPROM_BOOT_EFUSE:
2106 rtl_efuse_shadow_map_update(hw); 2107 rtl_efuse_shadow_map_update(hw);
2108 break;
2107 2109
2108 memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 2110 case EEPROM_93C46:
2109 HWSET_MAX_SIZE);
2110 } else if (rtlefuse->epromtype == EEPROM_93C46) {
2111 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 2111 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
2112 "RTL819X Not boot from eeprom, check it !!"); 2112 "RTL819X Not boot from eeprom, check it !!");
2113 return; 2113 return;
2114 } else { 2114
2115 default:
2115 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 2116 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
2116 "boot from neither eeprom nor efuse, check it !!"); 2117 "boot from neither eeprom nor efuse, check it !!");
2117 return; 2118 return;
2118 } 2119 }
2120 memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE);
2119 2121
2120 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n", 2122 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n",
2121 hwinfo, HWSET_MAX_SIZE); 2123 hwinfo, HWSET_MAX_SIZE);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
index 018340aedf09..c2bf8d1a7af3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
@@ -2414,19 +2414,10 @@ static void _rtl92ee_phy_reload_mac_registers(struct ieee80211_hw *hw,
2414static void _rtl92ee_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg, 2414static void _rtl92ee_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg,
2415 bool is_patha_on, bool is2t) 2415 bool is_patha_on, bool is2t)
2416{ 2416{
2417 u32 pathon;
2418 u32 i; 2417 u32 i;
2419 2418
2420 pathon = is_patha_on ? 0x0fc01616 : 0x0fc01616; 2419 for (i = 0; i < IQK_ADDA_REG_NUM; i++)
2421 if (!is2t) { 2420 rtl_set_bbreg(hw, addareg[i], MASKDWORD, 0x0fc01616);
2422 pathon = 0x0fc01616;
2423 rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0fc01616);
2424 } else {
2425 rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathon);
2426 }
2427
2428 for (i = 1; i < IQK_ADDA_REG_NUM; i++)
2429 rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathon);
2430} 2421}
2431 2422
2432static void _rtl92ee_phy_mac_setting_calibration(struct ieee80211_hw *hw, 2423static void _rtl92ee_phy_mac_setting_calibration(struct ieee80211_hw *hw,
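Both arms of the removed ternary above are the same constant, and the !is2t special case wrote that constant as well, so neither is_patha_on nor is2t ever influenced the value written; collapsing everything to one loop is behavior-preserving:

	/* is_patha_on ? 0x0fc01616 : 0x0fc01616 is always 0x0fc01616 */
	for (i = 0; i < IQK_ADDA_REG_NUM; i++)
		rtl_set_bbreg(hw, addareg[i], MASKDWORD, 0x0fc01616);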
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index 12b0978ba4fa..442f2b68ee58 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -1673,23 +1673,31 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1673 struct rtl_priv *rtlpriv = rtl_priv(hw); 1673 struct rtl_priv *rtlpriv = rtl_priv(hw);
1674 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 1674 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1675 struct rtl_phy *rtlphy = &(rtlpriv->phy); 1675 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1676 struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev;
1676 u16 i, usvalue; 1677 u16 i, usvalue;
1677 u16 eeprom_id; 1678 u16 eeprom_id;
1678 u8 tempval; 1679 u8 tempval;
1679 u8 hwinfo[HWSET_MAX_SIZE_92S]; 1680 u8 hwinfo[HWSET_MAX_SIZE_92S];
1680 u8 rf_path, index; 1681 u8 rf_path, index;
1681 1682
1682 if (rtlefuse->epromtype == EEPROM_93C46) { 1683 switch (rtlefuse->epromtype) {
1684 case EEPROM_BOOT_EFUSE:
1685 rtl_efuse_shadow_map_update(hw);
1686 break;
1687
1688 case EEPROM_93C46:
1683 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 1689 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1684 "RTL819X Not boot from eeprom, check it !!\n"); 1690 "RTL819X Not boot from eeprom, check it !!\n");
1685 } else if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { 1691 return;
1686 rtl_efuse_shadow_map_update(hw);
1687 1692
1688 memcpy((void *)hwinfo, (void *) 1693 default:
1689 &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 1694 dev_warn(dev, "no efuse data\n");
1690 HWSET_MAX_SIZE_92S); 1695 return;
1691 } 1696 }
1692 1697
1698 memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
1699 HWSET_MAX_SIZE_92S);
1700
1693 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP", 1701 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP",
1694 hwinfo, HWSET_MAX_SIZE_92S); 1702 hwinfo, HWSET_MAX_SIZE_92S);
1695 1703
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index a4b7eac6856f..57a1ba8822b1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -1630,6 +1630,7 @@ static void _rtl8723e_read_adapter_info(struct ieee80211_hw *hw,
1630 struct rtl_priv *rtlpriv = rtl_priv(hw); 1630 struct rtl_priv *rtlpriv = rtl_priv(hw);
1631 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 1631 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1632 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1632 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1633 struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev;
1633 u16 i, usvalue; 1634 u16 i, usvalue;
1634 u8 hwinfo[HWSET_MAX_SIZE]; 1635 u8 hwinfo[HWSET_MAX_SIZE];
1635 u16 eeprom_id; 1636 u16 eeprom_id;
@@ -1638,15 +1639,20 @@ static void _rtl8723e_read_adapter_info(struct ieee80211_hw *hw,
1638 /* need add */ 1639 /* need add */
1639 return; 1640 return;
1640 } 1641 }
1641 if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { 1642 switch (rtlefuse->epromtype) {
1643 case EEPROM_BOOT_EFUSE:
1642 rtl_efuse_shadow_map_update(hw); 1644 rtl_efuse_shadow_map_update(hw);
 1645 break;
1643 1646
1644 memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 1647 case EEPROM_93C46:
1645 HWSET_MAX_SIZE);
1646 } else if (rtlefuse->epromtype == EEPROM_93C46) {
1647 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 1648 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1648 "RTL819X Not boot from eeprom, check it !!"); 1649 "RTL819X Not boot from eeprom, check it !!");
 1650 return;
 1651
 1652 default:
 1653 dev_warn(dev, "no efuse data\n");
1649 } 1654 }
 1655 memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE);
1650 1656
1651 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n", 1657 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n",
1652 hwinfo, HWSET_MAX_SIZE); 1658 hwinfo, HWSET_MAX_SIZE);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 5a3df9198ddf..08288ac9020a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -2026,6 +2026,7 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
2026 struct rtl_priv *rtlpriv = rtl_priv(hw); 2026 struct rtl_priv *rtlpriv = rtl_priv(hw);
2027 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 2027 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
2028 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 2028 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2029 struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev;
2029 u16 i, usvalue; 2030 u16 i, usvalue;
2030 u8 hwinfo[HWSET_MAX_SIZE]; 2031 u8 hwinfo[HWSET_MAX_SIZE];
2031 u16 eeprom_id; 2032 u16 eeprom_id;
@@ -2055,15 +2056,22 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
2055 /* needs to be added */ 2056 /* needs to be added */
2056 return; 2057 return;
2057 } 2058 }
2058 if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { 2059
2060 switch (rtlefuse->epromtype) {
2061 case EEPROM_BOOT_EFUSE:
2059 rtl_efuse_shadow_map_update(hw); 2062 rtl_efuse_shadow_map_update(hw);
2063 break;
2060 2064
2061 memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 2065 case EEPROM_93C46:
2062 HWSET_MAX_SIZE);
2063 } else if (rtlefuse->epromtype == EEPROM_93C46) {
2064 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 2066 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
2065 "RTL819X Not boot from eeprom, check it !!"); 2067 "RTL819X Not boot from eeprom, check it !!");
2068 return;
2069
2070 default:
2071 dev_warn(dev, "no efuse data\n");
2072 return;
2066 } 2073 }
2074 memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE);
2067 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"), 2075 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"),
2068 hwinfo, HWSET_MAX_SIZE); 2076 hwinfo, HWSET_MAX_SIZE);
2069 2077
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index 445f681d08c0..c5ca9dfb445c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -1019,7 +1019,7 @@ static u8 _rtl8723be_get_txpower_index(struct ieee80211_hw *hw, u8 path,
1019 struct rtl_priv *rtlpriv = rtl_priv(hw); 1019 struct rtl_priv *rtlpriv = rtl_priv(hw);
1020 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 1020 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1021 u8 index = (channel - 1); 1021 u8 index = (channel - 1);
1022 u8 txpower; 1022 u8 txpower = 0;
1023 u8 power_diff_byrate = 0; 1023 u8 power_diff_byrate = 0;
1024 1024
1025 if (channel > 14 || channel < 1) { 1025 if (channel > 14 || channel < 1) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 71e4dd9965bb..b9436df9e1ec 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -3101,6 +3101,7 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
3101 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 3101 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
3102 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 3102 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
3103 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); 3103 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
3104 struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev;
3104 u16 i, usvalue; 3105 u16 i, usvalue;
3105 u8 hwinfo[HWSET_MAX_SIZE]; 3106 u8 hwinfo[HWSET_MAX_SIZE];
3106 u16 eeprom_id; 3107 u16 eeprom_id;
@@ -3109,14 +3110,20 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
3109 ;/* need add */ 3110 ;/* need add */
3110 } 3111 }
3111 3112
3112 if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { 3113 switch (rtlefuse->epromtype) {
3114 case EEPROM_BOOT_EFUSE:
3113 rtl_efuse_shadow_map_update(hw); 3115 rtl_efuse_shadow_map_update(hw);
3114 memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 3116 break;
3115 HWSET_MAX_SIZE); 3117
3116 } else if (rtlefuse->epromtype == EEPROM_93C46) { 3118 case EEPROM_93C46:
3117 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 3119 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
3118 "RTL819X Not boot from eeprom, check it !!"); 3120 "RTL819X Not boot from eeprom, check it !!");
3121 return;
3122
3123 default:
3124 dev_warn(dev, "no efuse data\n");
3119 } 3125 }
3126 memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE);
3120 3127
3121 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n", 3128 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n",
3122 hwinfo, HWSET_MAX_SIZE); 3129 hwinfo, HWSET_MAX_SIZE);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 40658b62d077..35c14cc3f0d2 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -398,7 +398,7 @@ static int rsi_mgmt_pkt_to_core(struct rsi_common *common,
398 return -ENOLINK; 398 return -ENOLINK;
399 399
400 msg_len -= pad_bytes; 400 msg_len -= pad_bytes;
401 if ((msg_len <= 0) || (!msg)) { 401 if (msg_len <= 0) {
402 rsi_dbg(MGMT_RX_ZONE, 402 rsi_dbg(MGMT_RX_ZONE,
403 "%s: Invalid rx msg of len = %d\n", 403 "%s: Invalid rx msg of len = %d\n",
404 __func__, msg_len); 404 __func__, msg_len);
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 13fd734b61ec..82d94f83b6b4 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -378,8 +378,7 @@ static int wl3501_esbq_exec(struct wl3501_card *this, void *sig, int sig_size)
378 return rc; 378 return rc;
379} 379}
380 380
381static int wl3501_get_mib_value(struct wl3501_card *this, u8 index, 381static int wl3501_request_mib(struct wl3501_card *this, u8 index, void *bf)
382 void *bf, int size)
383{ 382{
384 struct wl3501_get_req sig = { 383 struct wl3501_get_req sig = {
385 .sig_id = WL3501_SIG_GET_REQ, 384 .sig_id = WL3501_SIG_GET_REQ,
@@ -395,20 +394,32 @@ static int wl3501_get_mib_value(struct wl3501_card *this, u8 index,
395 wl3501_set_to_wla(this, ptr, &sig, sizeof(sig)); 394 wl3501_set_to_wla(this, ptr, &sig, sizeof(sig));
396 wl3501_esbq_req(this, &ptr); 395 wl3501_esbq_req(this, &ptr);
397 this->sig_get_confirm.mib_status = 255; 396 this->sig_get_confirm.mib_status = 255;
398 spin_unlock_irqrestore(&this->lock, flags); 397 rc = 0;
399 rc = wait_event_interruptible(this->wait,
400 this->sig_get_confirm.mib_status != 255);
401 if (!rc)
402 memcpy(bf, this->sig_get_confirm.mib_value,
403 size);
404 goto out;
405 } 398 }
406 } 399 }
407 spin_unlock_irqrestore(&this->lock, flags); 400 spin_unlock_irqrestore(&this->lock, flags);
408out: 401
409 return rc; 402 return rc;
410} 403}
411 404
405static int wl3501_get_mib_value(struct wl3501_card *this, u8 index,
406 void *bf, int size)
407{
408 int rc;
409
410 rc = wl3501_request_mib(this, index, bf);
411 if (rc)
412 return rc;
413
414 rc = wait_event_interruptible(this->wait,
415 this->sig_get_confirm.mib_status != 255);
416 if (rc)
417 return rc;
418
419 memcpy(bf, this->sig_get_confirm.mib_value, size);
420 return 0;
421}
422
412static int wl3501_pwr_mgmt(struct wl3501_card *this, int suspend) 423static int wl3501_pwr_mgmt(struct wl3501_card *this, int suspend)
413{ 424{
414 struct wl3501_pwr_mgmt_req sig = { 425 struct wl3501_pwr_mgmt_req sig = {
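The wl3501 split separates queuing from waiting: wl3501_request_mib() now only posts the GET request and releases this->lock, and wl3501_get_mib_value() sleeps in wait_event_interruptible() with no lock held before copying the confirmed value out. The general pattern, as a sketch with hypothetical names:

	/* Sketch: queue under the spinlock, sleep outside it. Sleeping in
	 * wait_event_interruptible() while holding a spinlock would be a
	 * scheduling-while-atomic bug.
	 */
	static int my_get_value(struct my_card *card, void *buf, int size)
	{
		unsigned long flags;
		int rc;

		spin_lock_irqsave(&card->lock, flags);
		rc = my_queue_request(card);	/* fast, atomic-safe */
		card->status = MY_STATUS_PENDING;
		spin_unlock_irqrestore(&card->lock, flags);
		if (rc)
			return rc;

		rc = wait_event_interruptible(card->wait,
					      card->status != MY_STATUS_PENDING);
		if (rc)
			return rc;	/* -ERESTARTSYS on signal */

		memcpy(buf, card->value, size);
		return 0;
	}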
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index b3bec3aaa45d..bc07ad30c9bf 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -74,6 +74,7 @@ config OF_NET
74config OF_MDIO 74config OF_MDIO
75 def_tristate PHYLIB 75 def_tristate PHYLIB
76 depends on PHYLIB 76 depends on PHYLIB
77 select FIXED_PHY
77 help 78 help
78 OpenFirmware MDIO bus (Ethernet PHY) accessors 79 OpenFirmware MDIO bus (Ethernet PHY) accessors
79 80
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index e051e1b57609..e2b50bc12f23 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -361,7 +361,6 @@ struct phy_device *of_phy_attach(struct net_device *dev,
361} 361}
362EXPORT_SYMBOL(of_phy_attach); 362EXPORT_SYMBOL(of_phy_attach);
363 363
364#if defined(CONFIG_FIXED_PHY)
365/* 364/*
366 * of_phy_is_fixed_link() and of_phy_register_fixed_link() must 365 * of_phy_is_fixed_link() and of_phy_register_fixed_link() must
367 * support two DT bindings: 366 * support two DT bindings:
@@ -451,4 +450,3 @@ int of_phy_register_fixed_link(struct device_node *np)
451 return -ENODEV; 450 return -ENODEV;
452} 451}
453EXPORT_SYMBOL(of_phy_register_fixed_link); 452EXPORT_SYMBOL(of_phy_register_fixed_link);
454#endif
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index b869b98835f4..01fb93b4b1e4 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -434,4 +434,12 @@ config PHY_CYGNUS_PCIE
434 434
435source "drivers/phy/tegra/Kconfig" 435source "drivers/phy/tegra/Kconfig"
436 436
437config PHY_NS2_PCIE
438 tristate "Broadcom Northstar2 PCIe PHY driver"
439 depends on OF && MDIO_BUS_MUX_BCM_IPROC
440 select GENERIC_PHY
441 default ARCH_BCM_IPROC
442 help
443 Enable this to support the Broadcom Northstar2 PCIe PHY.
444 If unsure, say N.
437endmenu 445endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 9c3e73ccabc4..7aea094da3e2 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -53,5 +53,5 @@ obj-$(CONFIG_PHY_TUSB1210) += phy-tusb1210.o
53obj-$(CONFIG_PHY_BRCM_SATA) += phy-brcm-sata.o 53obj-$(CONFIG_PHY_BRCM_SATA) += phy-brcm-sata.o
54obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o 54obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
55obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o 55obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o
56
57obj-$(CONFIG_ARCH_TEGRA) += tegra/ 56obj-$(CONFIG_ARCH_TEGRA) += tegra/
57obj-$(CONFIG_PHY_NS2_PCIE) += phy-bcm-ns2-pcie.o
diff --git a/drivers/phy/phy-bcm-ns2-pcie.c b/drivers/phy/phy-bcm-ns2-pcie.c
new file mode 100644
index 000000000000..9513f7ab1eaa
--- /dev/null
+++ b/drivers/phy/phy-bcm-ns2-pcie.c
@@ -0,0 +1,115 @@
1/*
2 * Copyright (C) 2016 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation version 2.
7 *
8 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
9 * kind, whether express or implied; without even the implied warranty
10 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/device.h>
15#include <linux/module.h>
16#include <linux/of_mdio.h>
17#include <linux/mdio.h>
18#include <linux/phy.h>
19#include <linux/phy/phy.h>
20
21struct ns2_pci_phy {
22 struct mdio_device *mdiodev;
23 struct phy *phy;
24};
25
26#define BLK_ADDR_REG_OFFSET 0x1f
27#define PLL_AFE1_100MHZ_BLK 0x2100
28#define PLL_CLK_AMP_OFFSET 0x03
29#define PLL_CLK_AMP_2P05V 0x2b18
30
31static int ns2_pci_phy_init(struct phy *p)
32{
33 struct ns2_pci_phy *phy = phy_get_drvdata(p);
34 int rc;
35
36 /* select the AFE 100MHz block page */
37 rc = mdiobus_write(phy->mdiodev->bus, phy->mdiodev->addr,
38 BLK_ADDR_REG_OFFSET, PLL_AFE1_100MHZ_BLK);
39 if (rc)
40 goto err;
41
42 /* set the 100 MHz reference clock amplitude to 2.05 v */
43 rc = mdiobus_write(phy->mdiodev->bus, phy->mdiodev->addr,
44 PLL_CLK_AMP_OFFSET, PLL_CLK_AMP_2P05V);
45 if (rc)
46 goto err;
47
48 return 0;
49
50err:
51 dev_err(&phy->mdiodev->dev, "Error %d writing to phy\n", rc);
52 return rc;
53}
54
55static struct phy_ops ns2_pci_phy_ops = {
56 .init = ns2_pci_phy_init,
57};
58
59static int ns2_pci_phy_probe(struct mdio_device *mdiodev)
60{
61 struct device *dev = &mdiodev->dev;
62 struct phy_provider *provider;
63 struct ns2_pci_phy *p;
64 struct phy *phy;
65
66 phy = devm_phy_create(dev, dev->of_node, &ns2_pci_phy_ops);
67 if (IS_ERR(phy)) {
68 dev_err(dev, "failed to create Phy\n");
69 return PTR_ERR(phy);
70 }
71
72 p = devm_kmalloc(dev, sizeof(struct ns2_pci_phy),
73 GFP_KERNEL);
74 if (!p)
75 return -ENOMEM;
76
77 p->mdiodev = mdiodev;
78 dev_set_drvdata(dev, p);
79
80 p->phy = phy;
81 phy_set_drvdata(phy, p);
82
83 provider = devm_of_phy_provider_register(&phy->dev,
84 of_phy_simple_xlate);
85 if (IS_ERR(provider)) {
86 dev_err(dev, "failed to register Phy provider\n");
87 return PTR_ERR(provider);
88 }
89
90 dev_info(dev, "%s PHY registered\n", dev_name(dev));
91
92 return 0;
93}
94
95static const struct of_device_id ns2_pci_phy_of_match[] = {
96 { .compatible = "brcm,ns2-pcie-phy", },
97 { /* sentinel */ },
98};
99MODULE_DEVICE_TABLE(of, ns2_pci_phy_of_match);
100
101static struct mdio_driver ns2_pci_phy_driver = {
102 .mdiodrv = {
103 .driver = {
104 .name = "phy-bcm-ns2-pci",
105 .of_match_table = ns2_pci_phy_of_match,
106 },
107 },
108 .probe = ns2_pci_phy_probe,
109};
110mdio_module_driver(ns2_pci_phy_driver);
111
112MODULE_AUTHOR("Broadcom");
113MODULE_DESCRIPTION("Broadcom Northstar2 PCI Phy driver");
114MODULE_LICENSE("GPL v2");
115MODULE_ALIAS("platform:phy-bcm-ns2-pci");
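Nothing in this patch shows the consumer side of the new provider; a PCIe host driver would resolve and initialize the PHY through the generic PHY framework, roughly as below (the con_id "pcie-phy" and the surrounding driver are assumptions, not part of this series):

	#include <linux/phy/phy.h>

	/* Sketch: consumer lookup; phy_init() lands in ns2_pci_phy_init(). */
	static int my_pcie_attach_phy(struct device *dev)
	{
		struct phy *phy;

		phy = devm_phy_get(dev, "pcie-phy");	/* matches phy-names */
		if (IS_ERR(phy))
			return PTR_ERR(phy);

		return phy_init(phy);
	}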
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index ec2e014e885c..bf40063de202 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -19,6 +19,7 @@
19#include <linux/seq_file.h> 19#include <linux/seq_file.h>
20#include <linux/ethtool.h> 20#include <linux/ethtool.h>
21#include <linux/hashtable.h> 21#include <linux/hashtable.h>
22#include <linux/ip.h>
22 23
23#include <net/ipv6.h> 24#include <net/ipv6.h>
24#include <net/if_inet6.h> 25#include <net/if_inet6.h>
@@ -144,6 +145,7 @@ struct qeth_perf_stats {
144 unsigned int sg_alloc_page_rx; 145 unsigned int sg_alloc_page_rx;
145 unsigned int tx_csum; 146 unsigned int tx_csum;
146 unsigned int tx_lin; 147 unsigned int tx_lin;
148 unsigned int tx_linfail;
147}; 149};
148 150
149/* Routing stuff */ 151/* Routing stuff */
@@ -559,7 +561,6 @@ enum qeth_ip_types {
559 QETH_IP_TYPE_NORMAL, 561 QETH_IP_TYPE_NORMAL,
560 QETH_IP_TYPE_VIPA, 562 QETH_IP_TYPE_VIPA,
561 QETH_IP_TYPE_RXIP, 563 QETH_IP_TYPE_RXIP,
562 QETH_IP_TYPE_DEL_ALL_MC,
563}; 564};
564 565
565enum qeth_cmd_buffer_state { 566enum qeth_cmd_buffer_state {
@@ -740,17 +741,10 @@ struct qeth_vlan_vid {
740 unsigned short vid; 741 unsigned short vid;
741}; 742};
742 743
743enum qeth_mac_disposition { 744enum qeth_addr_disposition {
744 QETH_DISP_MAC_DELETE = 0, 745 QETH_DISP_ADDR_DELETE = 0,
745 QETH_DISP_MAC_DO_NOTHING = 1, 746 QETH_DISP_ADDR_DO_NOTHING = 1,
746 QETH_DISP_MAC_ADD = 2, 747 QETH_DISP_ADDR_ADD = 2,
747};
748
749struct qeth_mac {
750 u8 mac_addr[OSA_ADDR_LEN];
751 u8 is_uc:1;
752 u8 disp_flag:2;
753 struct hlist_node hnode;
754}; 748};
755 749
756struct qeth_rx { 750struct qeth_rx {
@@ -798,6 +792,8 @@ struct qeth_card {
798 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 792 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
799 struct list_head vid_list; 793 struct list_head vid_list;
800 DECLARE_HASHTABLE(mac_htable, 4); 794 DECLARE_HASHTABLE(mac_htable, 4);
795 DECLARE_HASHTABLE(ip_htable, 4);
796 DECLARE_HASHTABLE(ip_mc_htable, 4);
801 struct work_struct kernel_thread_starter; 797 struct work_struct kernel_thread_starter;
802 spinlock_t thread_mask_lock; 798 spinlock_t thread_mask_lock;
803 unsigned long thread_start_mask; 799 unsigned long thread_start_mask;
@@ -805,8 +801,6 @@ struct qeth_card {
805 unsigned long thread_running_mask; 801 unsigned long thread_running_mask;
806 struct task_struct *recovery_task; 802 struct task_struct *recovery_task;
807 spinlock_t ip_lock; 803 spinlock_t ip_lock;
808 struct list_head ip_list;
809 struct list_head *ip_tbd_list;
810 struct qeth_ipato ipato; 804 struct qeth_ipato ipato;
811 struct list_head cmd_waiter_list; 805 struct list_head cmd_waiter_list;
812 /* QDIO buffer handling */ 806 /* QDIO buffer handling */
@@ -844,6 +838,19 @@ struct qeth_trap_id {
844/*some helper functions*/ 838/*some helper functions*/
845#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") 839#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
846 840
841/**
842 * qeth_get_elements_for_range() - find number of SBALEs to cover range.
843 * @start: Start of the address range.
844 * @end: Address after the end of the range.
845 *
846 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
847 * the specified address range.
848 */
849static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
850{
851 return PFN_UP(end - 1) - PFN_DOWN(start);
852}
853
847static inline int qeth_get_micros(void) 854static inline int qeth_get_micros(void)
848{ 855{
849 return (int) (get_tod_clock() >> 12); 856 return (int) (get_tod_clock() >> 12);
@@ -865,6 +872,11 @@ static inline int qeth_get_ip_version(struct sk_buff *skb)
865 } 872 }
866} 873}
867 874
875static inline int qeth_get_ip_protocol(struct sk_buff *skb)
876{
877 return ip_hdr(skb)->protocol;
878}
879
868static inline void qeth_put_buffer_pool_entry(struct qeth_card *card, 880static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
869 struct qeth_buffer_pool_entry *entry) 881 struct qeth_buffer_pool_entry *entry)
870{ 882{
@@ -981,12 +993,13 @@ int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
981 int (*reply_cb)(struct qeth_card *, 993 int (*reply_cb)(struct qeth_card *,
982 struct qeth_reply *, unsigned long), 994 struct qeth_reply *, unsigned long),
983 void *); 995 void *);
996int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
984struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *, 997struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
985 enum qeth_ipa_funcs, 998 enum qeth_ipa_funcs,
986 __u16, __u16, 999 __u16, __u16,
987 enum qeth_prot_versions); 1000 enum qeth_prot_versions);
988int qeth_start_ipa_tx_checksum(struct qeth_card *); 1001int qeth_set_features(struct net_device *, netdev_features_t);
989int qeth_set_rx_csum(struct qeth_card *, int); 1002netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
990 1003
991/* exports for OSN */ 1004/* exports for OSN */
992int qeth_osn_assist(struct net_device *, void *, int); 1005int qeth_osn_assist(struct net_device *, void *, int);
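qeth_get_elements_for_range() counts the 4 KiB pages a half-open range [start, end) touches, which equals the number of SBALEs needed. A stand-alone userspace re-derivation of the arithmetic (PAGE_SHIFT of 12 assumed, as on s390):

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
	#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

	int main(void)
	{
		unsigned long start = 0x1FD8;		/* 40 bytes before a page boundary */
		unsigned long end   = start + 100;	/* exclusive end */

		/* the buffer straddles pages 1 and 2, so two elements are needed */
		printf("%lu\n", PFN_UP(end - 1) - PFN_DOWN(start));	/* prints 2 */
		return 0;
	}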
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index b7b74776e2ff..7dba6c8537a1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1464,8 +1464,6 @@ static int qeth_setup_card(struct qeth_card *card)
1464 card->thread_allowed_mask = 0; 1464 card->thread_allowed_mask = 0;
1465 card->thread_running_mask = 0; 1465 card->thread_running_mask = 0;
1466 INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread); 1466 INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
1467 INIT_LIST_HEAD(&card->ip_list);
1468 INIT_LIST_HEAD(card->ip_tbd_list);
1469 INIT_LIST_HEAD(&card->cmd_waiter_list); 1467 INIT_LIST_HEAD(&card->cmd_waiter_list);
1470 init_waitqueue_head(&card->wait_q); 1468 init_waitqueue_head(&card->wait_q);
1471 /* initial options */ 1469 /* initial options */
@@ -1500,11 +1498,6 @@ static struct qeth_card *qeth_alloc_card(void)
1500 if (!card) 1498 if (!card)
1501 goto out; 1499 goto out;
1502 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 1500 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1503 card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
1504 if (!card->ip_tbd_list) {
1505 QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
1506 goto out_card;
1507 }
1508 if (qeth_setup_channel(&card->read)) 1501 if (qeth_setup_channel(&card->read))
1509 goto out_ip; 1502 goto out_ip;
1510 if (qeth_setup_channel(&card->write)) 1503 if (qeth_setup_channel(&card->write))
@@ -1517,8 +1510,6 @@ static struct qeth_card *qeth_alloc_card(void)
1517out_channel: 1510out_channel:
1518 qeth_clean_channel(&card->read); 1511 qeth_clean_channel(&card->read);
1519out_ip: 1512out_ip:
1520 kfree(card->ip_tbd_list);
1521out_card:
1522 kfree(card); 1513 kfree(card);
1523out: 1514out:
1524 return NULL; 1515 return NULL;
@@ -3757,6 +3748,14 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3757} 3748}
3758EXPORT_SYMBOL_GPL(qeth_qdio_output_handler); 3749EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
3759 3750
3751/* We cannot use outbound queue 3 for unicast packets on HiperSockets */
3752static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
3753{
3754 if ((card->info.type == QETH_CARD_TYPE_IQD) && (queue_num == 3))
3755 return 2;
3756 return queue_num;
3757}
3758
3760/** 3759/**
3761 * Note: Function assumes that we have 4 outbound queues. 3760 * Note: Function assumes that we have 4 outbound queues.
3762 */ 3761 */
@@ -3784,9 +3783,9 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3784 return card->qdio.default_out_queue; 3783 return card->qdio.default_out_queue;
3785 } 3784 }
3786 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC) 3785 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3787 return ~tos >> 6 & 3; 3786 return qeth_cut_iqd_prio(card, ~tos >> 6 & 3);
3788 if (tos & IPTOS_MINCOST) 3787 if (tos & IPTOS_MINCOST)
3789 return 3; 3788 return qeth_cut_iqd_prio(card, 3);
3790 if (tos & IPTOS_RELIABILITY) 3789 if (tos & IPTOS_RELIABILITY)
3791 return 2; 3790 return 2;
3792 if (tos & IPTOS_THROUGHPUT) 3791 if (tos & IPTOS_THROUGHPUT)
@@ -3797,11 +3796,12 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3797 case QETH_PRIO_Q_ING_SKB: 3796 case QETH_PRIO_Q_ING_SKB:
3798 if (skb->priority > 5) 3797 if (skb->priority > 5)
3799 return 0; 3798 return 0;
3800 return ~skb->priority >> 1 & 3; 3799 return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3);
3801 case QETH_PRIO_Q_ING_VLAN: 3800 case QETH_PRIO_Q_ING_VLAN:
3802 tci = &((struct ethhdr *)skb->data)->h_proto; 3801 tci = &((struct ethhdr *)skb->data)->h_proto;
3803 if (*tci == ETH_P_8021Q) 3802 if (*tci == ETH_P_8021Q)
3804 return ~*(tci + 1) >> (VLAN_PRIO_SHIFT + 1) & 3; 3803 return qeth_cut_iqd_prio(card, ~*(tci + 1) >>
3804 (VLAN_PRIO_SHIFT + 1) & 3);
3805 break; 3805 break;
3806 default: 3806 default:
3807 break; 3807 break;
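qeth_cut_iqd_prio() concentrates the HiperSockets restriction in one place: unicast may not use outbound queue 3 on IQD cards, so every path that could compute queue 3 is clamped to 2, while other card types keep the full 0-3 range. For example, under QETH_PRIO_Q_ING_PREC a tos of 0 yields ~tos >> 6 & 3 == 3, which an IQD card now maps to queue 2. The clamp in isolation:

	/* Sketch: only IQD cards, and only a computed queue 3, are affected */
	static inline int clamp_iqd_queue(bool is_iqd, int queue)
	{
		return (is_iqd && queue == 3) ? 2 : queue;
	}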
@@ -3810,41 +3810,54 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3810} 3810}
3811EXPORT_SYMBOL_GPL(qeth_get_priority_queue); 3811EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3812 3812
3813/**
3814 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
3815 * @skb: SKB address
3816 *
3817 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3818 * fragmented part of the SKB. Returns zero for linear SKB.
3819 */
3813int qeth_get_elements_for_frags(struct sk_buff *skb) 3820int qeth_get_elements_for_frags(struct sk_buff *skb)
3814{ 3821{
3815 int cnt, length, e, elements = 0; 3822 int cnt, elements = 0;
3816 struct skb_frag_struct *frag;
3817 char *data;
3818 3823
3819 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 3824 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3820 frag = &skb_shinfo(skb)->frags[cnt]; 3825 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt];
3821 data = (char *)page_to_phys(skb_frag_page(frag)) + 3826
3822 frag->page_offset; 3827 elements += qeth_get_elements_for_range(
3823 length = frag->size; 3828 (addr_t)skb_frag_address(frag),
3824 e = PFN_UP((unsigned long)data + length - 1) - 3829 (addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3825 PFN_DOWN((unsigned long)data);
3826 elements += e;
3827 } 3830 }
3828 return elements; 3831 return elements;
3829} 3832}
3830EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); 3833EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
3831 3834
3835/**
3836 * qeth_get_elements_no() - find number of SBALEs for skb data, inc. frags.
3837 * @card: qeth card structure, to check max. elems.
3838 * @skb: SKB address
3839 * @extra_elems: extra elems needed, to check against max.
3840 *
3841 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3842 * skb data, including linear part and fragments. Checks if the result plus
3843 * extra_elems fits under the limit for the card. Returns 0 if it does not.
3844 * Note: extra_elems is not included in the returned result.
3845 */
3832int qeth_get_elements_no(struct qeth_card *card, 3846int qeth_get_elements_no(struct qeth_card *card,
3833 struct sk_buff *skb, int elems) 3847 struct sk_buff *skb, int extra_elems)
3834{ 3848{
3835 int dlen = skb->len - skb->data_len; 3849 int elements = qeth_get_elements_for_range(
3836 int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) - 3850 (addr_t)skb->data,
3837 PFN_DOWN((unsigned long)skb->data); 3851 (addr_t)skb->data + skb_headlen(skb)) +
3838 3852 qeth_get_elements_for_frags(skb);
3839 elements_needed += qeth_get_elements_for_frags(skb);
3840 3853
3841 if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { 3854 if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
3842 QETH_DBF_MESSAGE(2, "Invalid size of IP packet " 3855 QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
3843 "(Number=%d / Length=%d). Discarded.\n", 3856 "(Number=%d / Length=%d). Discarded.\n",
3844 (elements_needed+elems), skb->len); 3857 elements + extra_elems, skb->len);
3845 return 0; 3858 return 0;
3846 } 3859 }
3847 return elements_needed; 3860 return elements;
3848} 3861}
3849EXPORT_SYMBOL_GPL(qeth_get_elements_no); 3862EXPORT_SYMBOL_GPL(qeth_get_elements_no);
3850 3863
@@ -3859,7 +3872,7 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
 	rest = len - inpage;
 	if (rest > hroom)
 		return 1;
-	memmove(skb->data - rest, skb->data, skb->len - skb->data_len);
+	memmove(skb->data - rest, skb->data, skb_headlen(skb));
 	skb->data -= rest;
 	skb->tail -= rest;
 	*hdr = (struct qeth_hdr *)skb->data;
@@ -3873,7 +3886,7 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
 	struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
 	int offset)
 {
-	int length = skb->len - skb->data_len;
+	int length = skb_headlen(skb);
 	int length_here;
 	int element;
 	char *data;
@@ -4967,7 +4980,6 @@ static void qeth_core_free_card(struct qeth_card *card)
 	qeth_clean_channel(&card->write);
 	if (card->dev)
 		free_netdev(card->dev);
-	kfree(card->ip_tbd_list);
 	qeth_free_qdio_buffers(card);
 	unregister_service_level(&card->qeth_service_level);
 	kfree(card);
@@ -5265,8 +5277,8 @@ no_mem:
 }
 EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
 
-static int qeth_setassparms_cb(struct qeth_card *card,
+int qeth_setassparms_cb(struct qeth_card *card,
 			struct qeth_reply *reply, unsigned long data)
 {
 	struct qeth_ipa_cmd *cmd;
 
@@ -5294,6 +5306,7 @@ static int qeth_setassparms_cb(struct qeth_card *card,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
 
 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
 						 enum qeth_ipa_funcs ipa_func,
@@ -5788,6 +5801,7 @@ static struct {
5788 {"tx do_QDIO count"}, 5801 {"tx do_QDIO count"},
5789 {"tx csum"}, 5802 {"tx csum"},
5790 {"tx lin"}, 5803 {"tx lin"},
5804 {"tx linfail"},
5791 {"cq handler count"}, 5805 {"cq handler count"},
5792 {"cq handler time"} 5806 {"cq handler time"}
5793}; 5807};
@@ -5848,8 +5862,9 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
 	data[32] = card->perf_stats.outbound_do_qdio_cnt;
 	data[33] = card->perf_stats.tx_csum;
 	data[34] = card->perf_stats.tx_lin;
-	data[35] = card->perf_stats.cq_cnt;
-	data[36] = card->perf_stats.cq_time;
+	data[35] = card->perf_stats.tx_linfail;
+	data[36] = card->perf_stats.cq_cnt;
+	data[37] = card->perf_stats.cq_time;
 }
 EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
 
@@ -6048,74 +6063,136 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
 }
 EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings);
 
-static int qeth_send_checksum_command(struct qeth_card *card)
+static int qeth_send_checksum_on(struct qeth_card *card, int cstype)
 {
+	long rxtx_arg;
 	int rc;
 
-	rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
-					  IPA_CMD_ASS_START, 0);
+	rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_START, 0);
 	if (rc) {
-		dev_warn(&card->gdev->dev, "Starting HW checksumming for %s "
-			"failed, using SW checksumming\n",
+		dev_warn(&card->gdev->dev,
+			 "Starting HW checksumming for %s failed, using SW checksumming\n",
 			QETH_CARD_IFNAME(card));
 		return rc;
 	}
-	rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
-					  IPA_CMD_ASS_ENABLE,
-					  card->info.csum_mask);
+	rxtx_arg = (cstype == IPA_OUTBOUND_CHECKSUM) ? card->info.tx_csum_mask
+						     : card->info.csum_mask;
+	rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_ENABLE,
+					  rxtx_arg);
 	if (rc) {
-		dev_warn(&card->gdev->dev, "Enabling HW checksumming for %s "
-			"failed, using SW checksumming\n",
+		dev_warn(&card->gdev->dev,
+			 "Enabling HW checksumming for %s failed, using SW checksumming\n",
 			QETH_CARD_IFNAME(card));
 		return rc;
 	}
+
+	dev_info(&card->gdev->dev, "HW Checksumming (%sbound) enabled\n",
+		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out");
 	return 0;
 }
 
-int qeth_set_rx_csum(struct qeth_card *card, int on)
+static int qeth_set_ipa_csum(struct qeth_card *card, int on, int cstype)
 {
 	int rc;
 
 	if (on) {
-		rc = qeth_send_checksum_command(card);
+		rc = qeth_send_checksum_on(card, cstype);
 		if (rc)
 			return -EIO;
-		dev_info(&card->gdev->dev,
-			"HW Checksumming (inbound) enabled\n");
 	} else {
-		rc = qeth_send_simple_setassparms(card,
-			IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0);
+		rc = qeth_send_simple_setassparms(card, cstype,
+						  IPA_CMD_ASS_STOP, 0);
 		if (rc)
 			return -EIO;
 	}
 	return 0;
 }
-EXPORT_SYMBOL_GPL(qeth_set_rx_csum);
 
-int qeth_start_ipa_tx_checksum(struct qeth_card *card)
+static int qeth_set_ipa_tso(struct qeth_card *card, int on)
 {
-	int rc = 0;
+	int rc;
 
-	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
-		return rc;
-	rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM,
-					  IPA_CMD_ASS_START, 0);
-	if (rc)
-		goto err_out;
-	rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM,
-					  IPA_CMD_ASS_ENABLE,
-					  card->info.tx_csum_mask);
-	if (rc)
-		goto err_out;
+	QETH_CARD_TEXT(card, 3, "sttso");
 
-	dev_info(&card->gdev->dev, "HW TX Checksumming enabled\n");
-	return rc;
-err_out:
-	dev_warn(&card->gdev->dev, "Enabling HW TX checksumming for %s "
-		"failed, using SW TX checksumming\n", QETH_CARD_IFNAME(card));
+	if (on) {
+		rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
+						  IPA_CMD_ASS_START, 0);
+		if (rc) {
+			dev_warn(&card->gdev->dev,
+				 "Starting outbound TCP segmentation offload for %s failed\n",
+				 QETH_CARD_IFNAME(card));
+			return -EIO;
+		}
+		dev_info(&card->gdev->dev, "Outbound TSO enabled\n");
+	} else {
+		rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
+						  IPA_CMD_ASS_STOP, 0);
+	}
 	return rc;
 }
-EXPORT_SYMBOL_GPL(qeth_start_ipa_tx_checksum);
+
+int qeth_set_features(struct net_device *dev, netdev_features_t features)
+{
+	struct qeth_card *card = dev->ml_priv;
+	netdev_features_t changed = dev->features ^ features;
+	int rc = 0;
+
+	QETH_DBF_TEXT(SETUP, 2, "setfeat");
+	QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
+
+	if ((changed & NETIF_F_IP_CSUM)) {
+		rc = qeth_set_ipa_csum(card,
+				       features & NETIF_F_IP_CSUM ? 1 : 0,
+				       IPA_OUTBOUND_CHECKSUM);
+		if (rc)
+			changed ^= NETIF_F_IP_CSUM;
+	}
+	if ((changed & NETIF_F_RXCSUM)) {
+		rc = qeth_set_ipa_csum(card,
+				       features & NETIF_F_RXCSUM ? 1 : 0,
+				       IPA_INBOUND_CHECKSUM);
+		if (rc)
+			changed ^= NETIF_F_RXCSUM;
+	}
+	if ((changed & NETIF_F_TSO)) {
+		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO ? 1 : 0);
+		if (rc)
+			changed ^= NETIF_F_TSO;
+	}
+
+	/* everything changed successfully? */
+	if ((dev->features ^ features) == changed)
+		return 0;
+	/* something went wrong. save changed features and return error */
+	dev->features ^= changed;
+	return -EIO;
+}
+EXPORT_SYMBOL_GPL(qeth_set_features);
+
+netdev_features_t qeth_fix_features(struct net_device *dev,
+				    netdev_features_t features)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	QETH_DBF_TEXT(SETUP, 2, "fixfeat");
+	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
+		features &= ~NETIF_F_IP_CSUM;
+	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
+		features &= ~NETIF_F_RXCSUM;
+	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
+		features &= ~NETIF_F_TSO;
+		dev_info(&card->gdev->dev, "Outbound TSO not supported on %s\n",
+			 QETH_CARD_IFNAME(card));
+	}
+	/* if the card isn't up, remove features that require hw changes */
+	if (card->state == CARD_STATE_DOWN ||
+	    card->state == CARD_STATE_RECOVER)
+		features = features & ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+					NETIF_F_TSO);
+	QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
+	return features;
+}
+EXPORT_SYMBOL_GPL(qeth_fix_features);
 
 static int __init qeth_core_init(void)
 {
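The error handling in qeth_set_features() relies on XOR bookkeeping: changed starts as the set of bits the caller wants flipped, each bit that fails to toggle in hardware is XORed back out, and dev->features ^= changed then applies exactly the successful flips. A small userspace sketch of the same idea (all names and the failing-TSO behaviour are hypothetical, purely for illustration):

#include <stdio.h>
#include <stdint.h>

#define F_TX_CSUM (1u << 0)
#define F_RX_CSUM (1u << 1)
#define F_TSO     (1u << 2)

/* pretend the hardware rejects TSO */
static int hw_toggle(uint32_t bit) { return bit == F_TSO ? -1 : 0; }

int main(void)
{
	uint32_t features = F_RX_CSUM;		/* current state */
	uint32_t wanted   = F_TX_CSUM | F_TSO;	/* requested state */
	uint32_t changed  = features ^ wanted;	/* bits to flip */
	uint32_t bit;

	for (bit = 1; bit <= F_TSO; bit <<= 1)
		if ((changed & bit) && hw_toggle(bit))
			changed ^= bit;		/* drop failed flips */

	features ^= changed;			/* apply the rest */
	printf("features=%#x\n", features);	/* TX on, RX off, TSO unchanged */
	return 0;
}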
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index e6e5b9671bf2..75b29fd2fcf4 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -243,6 +243,10 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
 		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
 		card->qdio.default_out_queue = 2;
 	} else if (sysfs_streq(buf, "no_prio_queueing:3")) {
+		if (card->info.type == QETH_CARD_TYPE_IQD) {
+			rc = -EPERM;
+			goto out;
+		}
 		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
 		card->qdio.default_out_queue = 3;
 	} else if (sysfs_streq(buf, "no_prio_queueing")) {
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index 0767556404bd..29d9fb3890ad 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -12,4 +12,11 @@ int qeth_l2_create_device_attributes(struct device *);
 void qeth_l2_remove_device_attributes(struct device *);
 void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
 
+struct qeth_mac {
+	u8 mac_addr[OSA_ADDR_LEN];
+	u8 is_uc:1;
+	u8 disp_flag:2;
+	struct hlist_node hnode;
+};
+
 #endif /* __QETH_L2_H__ */
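struct qeth_mac is keyed into a kernel hashtable through its embedded hlist_node. A compilable kernel-style sketch of the lookup-or-insert pattern the l2 code builds on (the table size, mac_hash() fold, and helper names here are illustrative, not the driver's):

#include <linux/hashtable.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>

#define MAC_HASH_BITS 4	/* 2^4 buckets; illustrative only */

static DEFINE_HASHTABLE(mac_htable, MAC_HASH_BITS);

struct mac_entry {
	u8 mac_addr[ETH_ALEN];
	u8 disp_flag:2;
	struct hlist_node hnode;
};

/* fold the 6-byte address into a small hash key */
static inline u32 mac_hash(const u8 *addr)
{
	return addr[0] ^ addr[1] ^ addr[2] ^ addr[3] ^ addr[4] ^ addr[5];
}

static struct mac_entry *mac_find_or_add(const u8 *addr)
{
	struct mac_entry *mac;

	/* walk only the bucket the key maps to */
	hash_for_each_possible(mac_htable, mac, hnode, mac_hash(addr))
		if (ether_addr_equal(mac->mac_addr, addr))
			return mac;

	mac = kzalloc(sizeof(*mac), GFP_ATOMIC);
	if (!mac)
		return NULL;
	ether_addr_copy(mac->mac_addr, addr);
	hash_add(mac_htable, &mac->hnode, mac_hash(addr));
	return mac;
}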
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 80b1979e8d95..9fd48de38a4c 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -404,38 +404,6 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
 	return rc;
 }
 
-static netdev_features_t qeth_l2_fix_features(struct net_device *dev,
-					      netdev_features_t features)
-{
-	struct qeth_card *card = dev->ml_priv;
-
-	QETH_DBF_TEXT(SETUP, 2, "fixfeat");
-	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
-		features &= ~NETIF_F_IP_CSUM;
-	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
-		features &= ~NETIF_F_RXCSUM;
-	QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
-	return features;
-}
-
-static int qeth_l2_set_features(struct net_device *dev,
-				netdev_features_t features)
-{
-	struct qeth_card *card = dev->ml_priv;
-	netdev_features_t changed = dev->features ^ features;
-
-	QETH_DBF_TEXT(SETUP, 2, "setfeat");
-	QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
-
-	if (card->state == CARD_STATE_DOWN ||
-	    card->state == CARD_STATE_RECOVER)
-		return 0;
-
-	if (!(changed & NETIF_F_RXCSUM))
-		return 0;
-	return qeth_set_rx_csum(card, features & NETIF_F_RXCSUM ? 1 : 0);
-}
-
 static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
 {
 	QETH_DBF_TEXT(SETUP , 2, "stopcard");
@@ -780,7 +748,7 @@ qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc)
 			    qeth_l2_mac_hash(ha->addr)) {
 		if (is_uc == mac->is_uc &&
 		    !memcmp(ha->addr, mac->mac_addr, OSA_ADDR_LEN)) {
-			mac->disp_flag = QETH_DISP_MAC_DO_NOTHING;
+			mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 			return;
 		}
 	}
@@ -792,7 +760,7 @@ qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc)
 
 	memcpy(mac->mac_addr, ha->addr, OSA_ADDR_LEN);
 	mac->is_uc = is_uc;
-	mac->disp_flag = QETH_DISP_MAC_ADD;
+	mac->disp_flag = QETH_DISP_ADDR_ADD;
 
 	hash_add(card->mac_htable, &mac->hnode,
 		 qeth_l2_mac_hash(mac->mac_addr));
@@ -825,7 +793,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
 			qeth_l2_add_mac(card, ha, 1);
 
 	hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
-		if (mac->disp_flag == QETH_DISP_MAC_DELETE) {
+		if (mac->disp_flag == QETH_DISP_ADDR_DELETE) {
 			if (!mac->is_uc)
 				rc = qeth_l2_send_delgroupmac(card,
 						mac->mac_addr);
@@ -837,15 +805,15 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
 			hash_del(&mac->hnode);
 			kfree(mac);
 
-		} else if (mac->disp_flag == QETH_DISP_MAC_ADD) {
+		} else if (mac->disp_flag == QETH_DISP_ADDR_ADD) {
 			rc = qeth_l2_write_mac(card, mac);
 			if (rc) {
 				hash_del(&mac->hnode);
 				kfree(mac);
 			} else
-				mac->disp_flag = QETH_DISP_MAC_DELETE;
+				mac->disp_flag = QETH_DISP_ADDR_DELETE;
 		} else
-			mac->disp_flag = QETH_DISP_MAC_DELETE;
+			mac->disp_flag = QETH_DISP_ADDR_DELETE;
 	}
 
 	spin_unlock_bh(&card->mclock);
@@ -869,6 +837,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int data_offset = -1;
 	int elements_needed = 0;
 	int hd_len = 0;
+	int nr_frags;
 
 	if (card->qdio.do_prio_queueing || (cast_type &&
 					card->info.is_multicast_different))
@@ -892,6 +861,23 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 	netif_stop_queue(dev);
 
+	/* fix hardware limitation: as long as we do not have sbal
+	 * chaining we can not send long frag lists
+	 */
+	if ((card->info.type != QETH_CARD_TYPE_IQD) &&
+	    !qeth_get_elements_no(card, new_skb, 0)) {
+		int lin_rc = skb_linearize(new_skb);
+
+		if (card->options.performance_stats) {
+			if (lin_rc)
+				card->perf_stats.tx_linfail++;
+			else
+				card->perf_stats.tx_lin++;
+		}
+		if (lin_rc)
+			goto tx_drop;
+	}
+
 	if (card->info.type == QETH_CARD_TYPE_OSN)
 		hdr = (struct qeth_hdr *)skb->data;
 	else {
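Without SBAL chaining, a frame whose fragment list needs more buffer elements than one QDIO buffer offers cannot be sent as-is; the fallback above is skb_linearize(), which copies all fragments into the linear data area (and thus into fewer pages). A hedged sketch of the pattern in isolation, with max_elements and count_elements() standing in for the card limit checks:

#include <linux/skbuff.h>
#include <linux/errno.h>

/* returns 0 if the skb fits, linearizing it once as a fallback */
static int fit_or_linearize(struct sk_buff *skb, int max_elements,
			    int (*count_elements)(struct sk_buff *))
{
	if (count_elements(skb) <= max_elements)
		return 0;

	/* copies frags into the linear area; fails only on allocation error */
	if (skb_linearize(skb))
		return -ENOMEM;	/* caller drops the packet */

	return count_elements(skb) <= max_elements ? 0 : -E2BIG;
}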
@@ -943,6 +929,14 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (!rc) {
 		card->stats.tx_packets++;
 		card->stats.tx_bytes += tx_bytes;
+		if (card->options.performance_stats) {
+			nr_frags = skb_shinfo(new_skb)->nr_frags;
+			if (nr_frags) {
+				card->perf_stats.sg_skbs_sent++;
+				/* nr_frags + skb->data */
+				card->perf_stats.sg_frags_sent += nr_frags + 1;
+			}
+		}
 		if (new_skb != skb)
 			dev_kfree_skb_any(skb);
 		rc = NETDEV_TX_OK;
@@ -1086,8 +1080,8 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
 	.ndo_vlan_rx_add_vid	= qeth_l2_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= qeth_l2_vlan_rx_kill_vid,
 	.ndo_tx_timeout		= qeth_tx_timeout,
-	.ndo_fix_features	= qeth_l2_fix_features,
-	.ndo_set_features	= qeth_l2_set_features
+	.ndo_fix_features	= qeth_fix_features,
+	.ndo_set_features	= qeth_set_features
 };
 
 static int qeth_l2_setup_netdev(struct qeth_card *card)
@@ -1118,12 +1112,25 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
 				&qeth_l2_ethtool_ops : &qeth_l2_osn_ops;
 	card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 	if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
-		card->dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
-		/* Turn on RX offloading per default */
-		card->dev->features |= NETIF_F_RXCSUM;
+		card->dev->hw_features = NETIF_F_SG;
+		card->dev->vlan_features = NETIF_F_SG;
+		/* OSA 3S and earlier has no RX/TX support */
+		if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
+			card->dev->hw_features |= NETIF_F_IP_CSUM;
+			card->dev->vlan_features |= NETIF_F_IP_CSUM;
+		}
+		if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
+			card->dev->hw_features |= NETIF_F_RXCSUM;
+			card->dev->vlan_features |= NETIF_F_RXCSUM;
+		}
+		/* Turn on SG per default */
+		card->dev->features |= NETIF_F_SG;
 	}
 	card->info.broadcast_capable = 1;
 	qeth_l2_request_initial_mac(card);
+	card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
+				  PAGE_SIZE;
+	card->dev->gso_max_segs = (QETH_MAX_BUFFER_ELEMENTS(card) - 1);
 	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
 	netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
 	netif_carrier_off(card->dev);
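The gso_max_size cap falls straight out of the element budget: one buffer element is reserved for the header, and each remaining element holds at most one page. Assuming, purely for illustration, 16 buffer elements per QDIO buffer and 4 KiB pages:

	gso_max_size = (16 - 1) * 4096 = 61440 bytes
	gso_max_segs = 16 - 1 = 15

so the stack never hands the driver a GSO packet that cannot fit into a single buffer.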
@@ -1135,9 +1142,6 @@ static int qeth_l2_start_ipassists(struct qeth_card *card)
 	/* configure isolation level */
 	if (qeth_set_access_ctrl_online(card, 0))
 		return -ENODEV;
-	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
-		qeth_set_rx_csum(card, 1);
-	qeth_start_ipa_tx_checksum(card);
 	return 0;
 }
 
@@ -1206,7 +1210,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 contin:
 	if ((card->info.type == QETH_CARD_TYPE_OSD) ||
 	    (card->info.type == QETH_CARD_TYPE_OSX)) {
-		if (qeth_l2_start_ipassists(card))
+		rc = qeth_l2_start_ipassists(card);
+		if (rc)
 			goto out_remove;
 	}
 
@@ -1800,6 +1805,12 @@ static int qeth_bridgeport_makerc(struct qeth_card *card,
 		dev_err(&card->gdev->dev,
 			"The device is not configured as a Bridge Port\n");
 		break;
+	case 0x2B10:
+	case 0x0010: /* OS mismatch */
+		rc = -EPERM;
+		dev_err(&card->gdev->dev,
+			"A Bridge Port is already configured by a different operating system\n");
+		break;
 	case 0x2B14:
 	case 0x0014: /* Another device is Primary */
 		switch (setcmd) {
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 551a4b4c03fd..26f79533e62e 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -10,16 +10,23 @@
 #define __QETH_L3_H__
 
 #include "qeth_core.h"
+#include <linux/hashtable.h>
 
 #define QETH_SNIFF_AVAIL	0x0008
 
 struct qeth_ipaddr {
-	struct list_head entry;
+	struct hlist_node hnode;
 	enum qeth_ip_types type;
 	enum qeth_ipa_setdelip_flags set_flags;
 	enum qeth_ipa_setdelip_flags del_flags;
-	int is_multicast;
-	int users;
+	u8 is_multicast:1;
+	u8 in_progress:1;
+	u8 disp_flag:2;
+
+	/* is changed only for normal ip addresses
+	 * for non-normal addresses it always is 1
+	 */
+	int ref_counter;
 	enum qeth_prot_versions proto;
 	unsigned char mac[OSA_ADDR_LEN];
 	union {
@@ -32,7 +39,24 @@ struct qeth_ipaddr {
 			unsigned int pfxlen;
 		} a6;
 	} u;
+
 };
+static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
+{
+	u64 ret = 0;
+	u8 *point;
+
+	if (addr->proto == QETH_PROT_IPV6) {
+		point = (u8 *) &addr->u.a6.addr;
+		ret = get_unaligned((u64 *)point) ^
+			get_unaligned((u64 *) (point + 8));
+	}
+	if (addr->proto == QETH_PROT_IPV4) {
+		point = (u8 *) &addr->u.a4.addr;
+		ret = get_unaligned((u32 *) point);
+	}
+	return ret;
+}
 
 struct qeth_ipato_entry {
 	struct list_head entry;
@@ -60,6 +84,5 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
 struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
 int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
 int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
-void qeth_l3_set_ip_addr_list(struct qeth_card *);
 
 #endif /* __QETH_L3_H__ */
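qeth_l3_ipaddr_hash() folds both protocols down to one 64-bit key: the 128-bit IPv6 address is XORed into 64 bits with two unaligned loads, and an IPv4 address is just zero-extended. A userspace sketch of the same fold (struct layout simplified; memcpy stands in for the kernel's get_unaligned()):

#include <stdint.h>
#include <string.h>

/* simplified: just the address bytes, none of the driver bookkeeping */
struct ip_key {
	int is_v6;
	uint8_t addr[16];	/* v4 uses the first 4 bytes */
};

static uint64_t ip_hash(const struct ip_key *k)
{
	uint64_t lo, hi;
	uint32_t v4;

	if (k->is_v6) {
		memcpy(&lo, k->addr, 8);
		memcpy(&hi, k->addr + 8, 8);
		return lo ^ hi;	/* fold 128 bits to 64 */
	}
	memcpy(&v4, k->addr, 4);
	return v4;		/* zero-extended v4 address */
}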
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ac544330daeb..bcd324e054a9 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -30,6 +30,7 @@
 #include <net/ip6_fib.h>
 #include <net/ip6_checksum.h>
 #include <net/iucv/af_iucv.h>
+#include <linux/hashtable.h>
 
 #include "qeth_l3.h"
 
@@ -57,7 +58,7 @@ static int qeth_l3_isxdigit(char *buf)
 
 static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
 {
-	sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]);
+	sprintf(buf, "%pI4", addr);
 }
 
 static int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
@@ -204,104 +205,129 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
 	return rc;
 }
 
-/*
- * Add IP to be added to todo list. If there is already an "add todo"
- * in this list we just incremenent the reference count.
- * Returns 0 if we just incremented reference count.
- */
-static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
-		struct qeth_ipaddr *addr, int add)
+inline int
+qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
 {
-	struct qeth_ipaddr *tmp, *t;
-	int found = 0;
+	return addr1->proto == addr2->proto &&
+		!memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
+		!memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac));
+}
 
-	if (card->options.sniffer)
-		return 0;
-	list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
-		if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
-		    (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
-			return 0;
-		if ((tmp->proto == QETH_PROT_IPV4) &&
-		    (addr->proto == QETH_PROT_IPV4) &&
-		    (tmp->type == addr->type) &&
-		    (tmp->is_multicast == addr->is_multicast) &&
-		    (tmp->u.a4.addr == addr->u.a4.addr) &&
-		    (tmp->u.a4.mask == addr->u.a4.mask)) {
-			found = 1;
-			break;
-		}
-		if ((tmp->proto == QETH_PROT_IPV6) &&
-		    (addr->proto == QETH_PROT_IPV6) &&
-		    (tmp->type == addr->type) &&
-		    (tmp->is_multicast == addr->is_multicast) &&
-		    (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
-		    (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
-			    sizeof(struct in6_addr)) == 0)) {
-			found = 1;
-			break;
-		}
-	}
-	if (found) {
-		if (addr->users != 0)
-			tmp->users += addr->users;
-		else
-			tmp->users += add ? 1 : -1;
-		if (tmp->users == 0) {
-			list_del(&tmp->entry);
-			kfree(tmp);
-		}
-		return 0;
+static struct qeth_ipaddr *
+qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
+{
+	struct qeth_ipaddr *addr;
+
+	if (tmp_addr->is_multicast) {
+		hash_for_each_possible(card->ip_mc_htable, addr,
+				hnode, qeth_l3_ipaddr_hash(tmp_addr))
+			if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
+				return addr;
 	} else {
-		if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
-			list_add(&addr->entry, card->ip_tbd_list);
-		else {
-			if (addr->users == 0)
-				addr->users += add ? 1 : -1;
-			if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
-			    qeth_l3_is_addr_covered_by_ipato(card, addr)) {
-				QETH_CARD_TEXT(card, 2, "tkovaddr");
-				addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
-			}
-			list_add_tail(&addr->entry, card->ip_tbd_list);
-		}
-		return 1;
+		hash_for_each_possible(card->ip_htable, addr,
+				hnode, qeth_l3_ipaddr_hash(tmp_addr))
+			if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
+				return addr;
 	}
+
+	return NULL;
 }
 
-int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
+int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 {
-	unsigned long flags;
 	int rc = 0;
+	struct qeth_ipaddr *addr;
 
 	QETH_CARD_TEXT(card, 4, "delip");
 
-	if (addr->proto == QETH_PROT_IPV4)
-		QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4);
+	if (tmp_addr->proto == QETH_PROT_IPV4)
+		QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
 	else {
-		QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8);
-		QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8);
+		QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
+		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
 	}
-	spin_lock_irqsave(&card->ip_lock, flags);
-	rc = __qeth_l3_insert_ip_todo(card, addr, 0);
-	spin_unlock_irqrestore(&card->ip_lock, flags);
+
+	addr = qeth_l3_ip_from_hash(card, tmp_addr);
+	if (!addr)
+		return -ENOENT;
+
+	addr->ref_counter--;
+	if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
+		return rc;
+	if (addr->in_progress)
+		return -EINPROGRESS;
+
+	rc = qeth_l3_deregister_addr_entry(card, addr);
+
+	hash_del(&addr->hnode);
+	kfree(addr);
+
 	return rc;
 }
 
-int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
+int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 {
-	unsigned long flags;
 	int rc = 0;
+	struct qeth_ipaddr *addr;
 
 	QETH_CARD_TEXT(card, 4, "addip");
-	if (addr->proto == QETH_PROT_IPV4)
-		QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4);
+
+	if (tmp_addr->proto == QETH_PROT_IPV4)
+		QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
 	else {
-		QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8);
-		QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8);
+		QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
+		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
 	}
-	spin_lock_irqsave(&card->ip_lock, flags);
-	rc = __qeth_l3_insert_ip_todo(card, addr, 1);
-	spin_unlock_irqrestore(&card->ip_lock, flags);
+
+	addr = qeth_l3_ip_from_hash(card, tmp_addr);
+	if (!addr) {
+		addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
+		if (!addr)
+			return -ENOMEM;
+
+		memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
+		addr->ref_counter = 1;
+
+		if (addr->type == QETH_IP_TYPE_NORMAL &&
+				qeth_l3_is_addr_covered_by_ipato(card, addr)) {
+			QETH_CARD_TEXT(card, 2, "tkovaddr");
+			addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
+		}
+		hash_add(card->ip_htable, &addr->hnode,
+				qeth_l3_ipaddr_hash(addr));
+
+		/* qeth_l3_register_addr_entry can go to sleep
+		 * if we add a IPV4 addr. It is caused by the reason
+		 * that SETIP ipa cmd starts ARP staff for IPV4 addr.
+		 * Thus we should unlock spinlock, and make a protection
+		 * using in_progress variable to indicate that there is
+		 * an hardware operation with this IPV4 address
+		 */
+		if (addr->proto == QETH_PROT_IPV4) {
+			addr->in_progress = 1;
+			spin_unlock_bh(&card->ip_lock);
+			rc = qeth_l3_register_addr_entry(card, addr);
+			spin_lock_bh(&card->ip_lock);
+			addr->in_progress = 0;
+		} else
+			rc = qeth_l3_register_addr_entry(card, addr);
+
+		if (!rc || (rc == IPA_RC_DUPLICATE_IP_ADDRESS) ||
+				(rc == IPA_RC_LAN_OFFLINE)) {
+			addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+			if (addr->ref_counter < 1) {
+				qeth_l3_delete_ip(card, addr);
+				kfree(addr);
+			}
+		} else {
+			hash_del(&addr->hnode);
+			kfree(addr);
+		}
+	} else {
+		if (addr->type == QETH_IP_TYPE_NORMAL)
+			addr->ref_counter++;
+	}
+
 	return rc;
 }
 
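The in_progress bit exists because SETIP for an IPv4 address kicks off ARP processing and can sleep, while the table is protected by a BH spinlock; the lock is therefore dropped around the registration, and the flag keeps concurrent delete paths off the entry (qeth_l3_delete_ip() returns -EINPROGRESS while it is set). The pattern in isolation, sketched with assumed helper names (register_with_hardware() and remove_and_free() are hypothetical):

#include <linux/spinlock.h>

struct entry {
	int in_progress;
	int ref_counter;
};

static DEFINE_SPINLOCK(table_lock);

static int add_entry(struct entry *e,
		     int (*register_with_hardware)(struct entry *),
		     void (*remove_and_free)(struct entry *))
{
	int rc;

	spin_lock_bh(&table_lock);
	e->in_progress = 1;
	spin_unlock_bh(&table_lock);

	rc = register_with_hardware(e);		/* may sleep */

	spin_lock_bh(&table_lock);
	e->in_progress = 0;
	/* a delete may have run while we slept; honour it now */
	if (e->ref_counter < 1)
		remove_and_free(e);
	spin_unlock_bh(&table_lock);
	return rc;
}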
@@ -312,229 +338,88 @@ struct qeth_ipaddr *qeth_l3_get_addr_buffer(
 	struct qeth_ipaddr *addr;
 
 	addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
-	if (addr == NULL) {
+	if (!addr)
 		return NULL;
-	}
+
 	addr->type = QETH_IP_TYPE_NORMAL;
+	addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 	addr->proto = prot;
+
 	return addr;
 }
 
-static void qeth_l3_delete_mc_addresses(struct qeth_card *card)
-{
-	struct qeth_ipaddr *iptodo;
-	unsigned long flags;
-
-	QETH_CARD_TEXT(card, 4, "delmc");
-	iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
-	if (!iptodo) {
-		QETH_CARD_TEXT(card, 2, "dmcnomem");
-		return;
-	}
-	iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
-	spin_lock_irqsave(&card->ip_lock, flags);
-	if (!__qeth_l3_insert_ip_todo(card, iptodo, 0))
-		kfree(iptodo);
-	spin_unlock_irqrestore(&card->ip_lock, flags);
-}
-
-/*
- * Add/remove address to/from card's ip list, i.e. try to add or remove
- * reference to/from an IP address that is already registered on the card.
- * Returns:
- *	0  address was on card and its reference count has been adjusted,
- *	   but is still > 0, so nothing has to be done
- *	   also returns 0 if card was not on card and the todo was to delete
- *	   the address -> there is also nothing to be done
- *	1  address was not on card and the todo is to add it to the card's ip
- *	   list
- *	-1 address was on card and its reference count has been decremented
- *	   to <= 0 by the todo -> address must be removed from card
- */
-static int __qeth_l3_ref_ip_on_card(struct qeth_card *card,
-		struct qeth_ipaddr *todo, struct qeth_ipaddr **__addr)
+static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
 {
 	struct qeth_ipaddr *addr;
-	int found = 0;
-
-	list_for_each_entry(addr, &card->ip_list, entry) {
-		if ((addr->proto == QETH_PROT_IPV4) &&
-		    (todo->proto == QETH_PROT_IPV4) &&
-		    (addr->type == todo->type) &&
-		    (addr->u.a4.addr == todo->u.a4.addr) &&
-		    (addr->u.a4.mask == todo->u.a4.mask)) {
-			found = 1;
-			break;
-		}
-		if ((addr->proto == QETH_PROT_IPV6) &&
-		    (todo->proto == QETH_PROT_IPV6) &&
-		    (addr->type == todo->type) &&
-		    (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
-		    (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
-			    sizeof(struct in6_addr)) == 0)) {
-			found = 1;
-			break;
-		}
-	}
-	if (found) {
-		addr->users += todo->users;
-		if (addr->users <= 0) {
-			*__addr = addr;
-			return -1;
-		} else {
-			/* for VIPA and RXIP limit refcount to 1 */
-			if (addr->type != QETH_IP_TYPE_NORMAL)
-				addr->users = 1;
-			return 0;
-		}
-	}
-	if (todo->users > 0) {
-		/* for VIPA and RXIP limit refcount to 1 */
-		if (todo->type != QETH_IP_TYPE_NORMAL)
-			todo->users = 1;
-		return 1;
-	} else
-		return 0;
-}
-
-static void __qeth_l3_delete_all_mc(struct qeth_card *card,
-		unsigned long *flags)
-{
-	struct list_head fail_list;
-	struct qeth_ipaddr *addr, *tmp;
-	int rc;
-
-	INIT_LIST_HEAD(&fail_list);
-again:
-	list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
-		if (addr->is_multicast) {
-			list_del(&addr->entry);
-			spin_unlock_irqrestore(&card->ip_lock, *flags);
-			rc = qeth_l3_deregister_addr_entry(card, addr);
-			spin_lock_irqsave(&card->ip_lock, *flags);
-			if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND))
-				kfree(addr);
-			else
-				list_add_tail(&addr->entry, &fail_list);
-			goto again;
-		}
-	}
-	list_splice(&fail_list, &card->ip_list);
-}
-
-void qeth_l3_set_ip_addr_list(struct qeth_card *card)
-{
-	struct list_head *tbd_list;
-	struct qeth_ipaddr *todo, *addr;
-	unsigned long flags;
-	int rc;
+	struct hlist_node *tmp;
+	int i;
 
-	QETH_CARD_TEXT(card, 2, "sdiplist");
-	QETH_CARD_HEX(card, 2, &card, sizeof(void *));
+	QETH_CARD_TEXT(card, 4, "clearip");
 
-	if (!qeth_card_hw_is_reachable(card) || card->options.sniffer)
+	if (recover && card->options.sniffer)
 		return;
 
-	spin_lock_irqsave(&card->ip_lock, flags);
-	tbd_list = card->ip_tbd_list;
-	card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
-	if (!card->ip_tbd_list) {
-		QETH_CARD_TEXT(card, 0, "silnomem");
-		card->ip_tbd_list = tbd_list;
-		spin_unlock_irqrestore(&card->ip_lock, flags);
-		return;
-	} else
-		INIT_LIST_HEAD(card->ip_tbd_list);
-
-	while (!list_empty(tbd_list)) {
-		todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
-		list_del(&todo->entry);
-		if (todo->type == QETH_IP_TYPE_DEL_ALL_MC) {
-			__qeth_l3_delete_all_mc(card, &flags);
-			kfree(todo);
+	spin_lock_bh(&card->ip_lock);
+
+	hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
+		if (!recover) {
+			hash_del(&addr->hnode);
+			kfree(addr);
 			continue;
 		}
-		rc = __qeth_l3_ref_ip_on_card(card, todo, &addr);
-		if (rc == 0) {
-			/* nothing to be done; only adjusted refcount */
-			kfree(todo);
-		} else if (rc == 1) {
-			/* new entry to be added to on-card list */
-			spin_unlock_irqrestore(&card->ip_lock, flags);
-			rc = qeth_l3_register_addr_entry(card, todo);
-			spin_lock_irqsave(&card->ip_lock, flags);
-			if (!rc || (rc == IPA_RC_LAN_OFFLINE))
-				list_add_tail(&todo->entry, &card->ip_list);
-			else
-				kfree(todo);
-		} else if (rc == -1) {
-			/* on-card entry to be removed */
-			list_del_init(&addr->entry);
-			spin_unlock_irqrestore(&card->ip_lock, flags);
-			rc = qeth_l3_deregister_addr_entry(card, addr);
-			spin_lock_irqsave(&card->ip_lock, flags);
-			if (!rc || (rc == IPA_RC_IP_ADDRESS_NOT_DEFINED))
-				kfree(addr);
-			else
-				list_add_tail(&addr->entry, &card->ip_list);
-			kfree(todo);
-		}
+		addr->disp_flag = QETH_DISP_ADDR_ADD;
 	}
-	spin_unlock_irqrestore(&card->ip_lock, flags);
-	kfree(tbd_list);
-}
 
-static void qeth_l3_clear_ip_list(struct qeth_card *card, int recover)
-{
-	struct qeth_ipaddr *addr, *tmp;
-	unsigned long flags;
+	spin_unlock_bh(&card->ip_lock);
 
-	QETH_CARD_TEXT(card, 4, "clearip");
-	if (recover && card->options.sniffer)
-		return;
-	spin_lock_irqsave(&card->ip_lock, flags);
-	/* clear todo list */
-	list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) {
-		list_del(&addr->entry);
+	spin_lock_bh(&card->mclock);
+
+	hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+		hash_del(&addr->hnode);
 		kfree(addr);
 	}
 
-	while (!list_empty(&card->ip_list)) {
-		addr = list_entry(card->ip_list.next,
-				struct qeth_ipaddr, entry);
-		list_del_init(&addr->entry);
-		if (!recover || addr->is_multicast) {
-			kfree(addr);
-			continue;
-		}
-		list_add_tail(&addr->entry, card->ip_tbd_list);
-	}
-	spin_unlock_irqrestore(&card->ip_lock, flags);
-}
+	spin_unlock_bh(&card->mclock);
 
-static int qeth_l3_address_exists_in_list(struct list_head *list,
-		struct qeth_ipaddr *addr, int same_type)
+}
+static void qeth_l3_recover_ip(struct qeth_card *card)
 {
-	struct qeth_ipaddr *tmp;
+	struct qeth_ipaddr *addr;
+	struct hlist_node *tmp;
+	int i;
+	int rc;
 
-	list_for_each_entry(tmp, list, entry) {
-		if ((tmp->proto == QETH_PROT_IPV4) &&
-		    (addr->proto == QETH_PROT_IPV4) &&
-		    ((same_type && (tmp->type == addr->type)) ||
-		     (!same_type && (tmp->type != addr->type))) &&
-		    (tmp->u.a4.addr == addr->u.a4.addr))
-			return 1;
+	QETH_CARD_TEXT(card, 4, "recoverip");
+
+	spin_lock_bh(&card->ip_lock);
+
+	hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
+		if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+			if (addr->proto == QETH_PROT_IPV4) {
+				addr->in_progress = 1;
+				spin_unlock_bh(&card->ip_lock);
+				rc = qeth_l3_register_addr_entry(card, addr);
+				spin_lock_bh(&card->ip_lock);
+				addr->in_progress = 0;
+			} else
+				rc = qeth_l3_register_addr_entry(card, addr);
+
+			if (!rc) {
+				addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+				if (addr->ref_counter < 1) {
+					qeth_l3_delete_ip(card, addr);
+					kfree(addr);
+				}
+			} else {
+				hash_del(&addr->hnode);
+				kfree(addr);
+			}
+		}
+	}
 
-		if ((tmp->proto == QETH_PROT_IPV6) &&
-		    (addr->proto == QETH_PROT_IPV6) &&
-		    ((same_type && (tmp->type == addr->type)) ||
-		     (!same_type && (tmp->type != addr->type))) &&
-		    (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
-			    sizeof(struct in6_addr)) == 0))
-			return 1;
+	spin_unlock_bh(&card->ip_lock);
 
-	}
-	return 0;
 }
 
 static int qeth_l3_send_setdelmc(struct qeth_card *card,
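Both the MAC and IP rescans in this series follow the same three-state protocol on disp_flag: mark every cached entry DELETE (or ADD for recovery), re-add what the stack still wants, flipping survivors to DO_NOTHING, then sweep the leftovers. A compact userspace sketch of that reconciliation, independent of the driver's types (all names hypothetical):

#include <stdio.h>

enum disp { DO_NOTHING, ADD, DELETE };

struct item { int key; enum disp flag; int live; };

/* phase 1: assume every cached entry vanished */
static void mark_all(struct item *t, int n)
{
	for (int i = 0; i < n; i++)
		if (t[i].live)
			t[i].flag = DELETE;
}

/* phase 2: whatever is still wanted is kept or newly added */
static void readd(struct item *t, int n, const int *want, int m)
{
	for (int j = 0; j < m; j++) {
		int found = 0;
		for (int i = 0; i < n; i++)
			if (t[i].live && t[i].key == want[j]) {
				t[i].flag = DO_NOTHING;	/* still in use */
				found = 1;
			}
		if (!found)
			for (int i = 0; i < n; i++)
				if (!t[i].live) {
					t[i] = (struct item){want[j], ADD, 1};
					break;
				}
	}
}

/* phase 3: sweep what nobody re-requested */
static void sweep(struct item *t, int n)
{
	for (int i = 0; i < n; i++)
		if (t[i].live && t[i].flag == DELETE)
			t[i].live = 0;	/* deregister + free in the driver */
}

int main(void)
{
	struct item tbl[4] = { {1, DO_NOTHING, 1}, {2, DO_NOTHING, 1} };
	int want[] = {2, 3};

	mark_all(tbl, 4);
	readd(tbl, 4, want, 2);
	sweep(tbl, 4);
	for (int i = 0; i < 4; i++)
		if (tbl[i].live)
			printf("key %d flag %d\n", tbl[i].key, tbl[i].flag);
	return 0;
}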
@@ -712,27 +597,28 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
  */
 static void qeth_l3_clear_ipato_list(struct qeth_card *card)
 {
-
 	struct qeth_ipato_entry *ipatoe, *tmp;
-	unsigned long flags;
 
-	spin_lock_irqsave(&card->ip_lock, flags);
+	spin_lock_bh(&card->ip_lock);
+
 	list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
 		list_del(&ipatoe->entry);
 		kfree(ipatoe);
 	}
-	spin_unlock_irqrestore(&card->ip_lock, flags);
+
+	spin_unlock_bh(&card->ip_lock);
 }
 
 int qeth_l3_add_ipato_entry(struct qeth_card *card,
 				struct qeth_ipato_entry *new)
 {
 	struct qeth_ipato_entry *ipatoe;
-	unsigned long flags;
 	int rc = 0;
 
 	QETH_CARD_TEXT(card, 2, "addipato");
-	spin_lock_irqsave(&card->ip_lock, flags);
+
+	spin_lock_bh(&card->ip_lock);
+
 	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
 		if (ipatoe->proto != new->proto)
 			continue;
@@ -743,10 +629,12 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
 			break;
 		}
 	}
+
 	if (!rc)
 		list_add_tail(&new->entry, &card->ipato.entries);
 
-	spin_unlock_irqrestore(&card->ip_lock, flags);
+	spin_unlock_bh(&card->ip_lock);
+
 	return rc;
 }
 
@@ -754,10 +642,11 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
 		enum qeth_prot_versions proto, u8 *addr, int mask_bits)
 {
 	struct qeth_ipato_entry *ipatoe, *tmp;
-	unsigned long flags;
 
 	QETH_CARD_TEXT(card, 2, "delipato");
-	spin_lock_irqsave(&card->ip_lock, flags);
+
+	spin_lock_bh(&card->ip_lock);
+
 	list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
 		if (ipatoe->proto != proto)
 			continue;
@@ -768,7 +657,8 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
 			kfree(ipatoe);
 		}
 	}
-	spin_unlock_irqrestore(&card->ip_lock, flags);
+
+	spin_unlock_bh(&card->ip_lock);
 }
 
 /*
@@ -778,7 +668,6 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 		const u8 *addr)
 {
 	struct qeth_ipaddr *ipaddr;
-	unsigned long flags;
 	int rc = 0;
 
 	ipaddr = qeth_l3_get_addr_buffer(proto);
@@ -797,18 +686,18 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 		ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
 	} else
 		return -ENOMEM;
-	spin_lock_irqsave(&card->ip_lock, flags);
-	if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
-	    qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
+
+	spin_lock_bh(&card->ip_lock);
+
+	if (!qeth_l3_ip_from_hash(card, ipaddr))
 		rc = -EEXIST;
-	spin_unlock_irqrestore(&card->ip_lock, flags);
-	if (rc) {
-		kfree(ipaddr);
-		return rc;
-	}
-	if (!qeth_l3_add_ip(card, ipaddr))
-		kfree(ipaddr);
-	qeth_l3_set_ip_addr_list(card);
+	else
+		qeth_l3_add_ip(card, ipaddr);
+
+	spin_unlock_bh(&card->ip_lock);
+
+	kfree(ipaddr);
+
 	return rc;
 }
 
@@ -831,9 +720,12 @@ void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 		ipaddr->type = QETH_IP_TYPE_VIPA;
 	} else
 		return;
-	if (!qeth_l3_delete_ip(card, ipaddr))
-		kfree(ipaddr);
-	qeth_l3_set_ip_addr_list(card);
+
+	spin_lock_bh(&card->ip_lock);
+	qeth_l3_delete_ip(card, ipaddr);
+	spin_unlock_bh(&card->ip_lock);
+
+	kfree(ipaddr);
 }
 
 /*
@@ -843,7 +735,6 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 		const u8 *addr)
 {
 	struct qeth_ipaddr *ipaddr;
-	unsigned long flags;
 	int rc = 0;
 
 	ipaddr = qeth_l3_get_addr_buffer(proto);
@@ -857,24 +748,25 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 		memcpy(&ipaddr->u.a6.addr, addr, 16);
 		ipaddr->u.a6.pfxlen = 0;
 		}
+
 		ipaddr->type = QETH_IP_TYPE_RXIP;
 		ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
 		ipaddr->del_flags = 0;
 	} else
 		return -ENOMEM;
-	spin_lock_irqsave(&card->ip_lock, flags);
-	if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
-	    qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
+
+	spin_lock_bh(&card->ip_lock);
+
+	if (!qeth_l3_ip_from_hash(card, ipaddr))
 		rc = -EEXIST;
-	spin_unlock_irqrestore(&card->ip_lock, flags);
-	if (rc) {
-		kfree(ipaddr);
-		return rc;
-	}
-	if (!qeth_l3_add_ip(card, ipaddr))
-		kfree(ipaddr);
-	qeth_l3_set_ip_addr_list(card);
-	return 0;
+	else
+		qeth_l3_add_ip(card, ipaddr);
+
+	spin_unlock_bh(&card->ip_lock);
+
+	kfree(ipaddr);
+
+	return rc;
 }
 
 void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
@@ -896,9 +788,12 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 		ipaddr->type = QETH_IP_TYPE_RXIP;
 	} else
 		return;
-	if (!qeth_l3_delete_ip(card, ipaddr))
-		kfree(ipaddr);
-	qeth_l3_set_ip_addr_list(card);
+
+	spin_lock_bh(&card->ip_lock);
+	qeth_l3_delete_ip(card, ipaddr);
+	spin_unlock_bh(&card->ip_lock);
+
+	kfree(ipaddr);
 }
 
 static int qeth_l3_register_addr_entry(struct qeth_card *card,
@@ -908,6 +803,7 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card,
 	int rc = 0;
 	int cnt = 3;
 
+
 	if (addr->proto == QETH_PROT_IPV4) {
 		QETH_CARD_TEXT(card, 2, "setaddr4");
 		QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
@@ -1013,36 +909,6 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card)
 	return rc;
 }
 
-static int qeth_l3_default_setassparms_cb(struct qeth_card *card,
-			struct qeth_reply *reply, unsigned long data)
-{
-	struct qeth_ipa_cmd *cmd;
-
-	QETH_CARD_TEXT(card, 4, "defadpcb");
-
-	cmd = (struct qeth_ipa_cmd *) data;
-	if (cmd->hdr.return_code == 0) {
-		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
-		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
-			card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
-		if (cmd->hdr.prot_version == QETH_PROT_IPV6)
-			card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
-	}
-	if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
-	    cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
-		card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
-		QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask);
-	}
-	if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM &&
-	    cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
-		card->info.tx_csum_mask =
-			cmd->data.setassparms.data.flags_32bit;
-		QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask);
-	}
-
-	return 0;
-}
-
 #ifdef CONFIG_QETH_IPV6
 static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
 		enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
@@ -1056,7 +922,7 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
 	if (!iob)
 		return -ENOMEM;
 	rc = qeth_send_setassparms(card, iob, 0, 0,
-				   qeth_l3_default_setassparms_cb, NULL);
+				   qeth_setassparms_cb, NULL);
 	return rc;
 }
 #endif
@@ -1291,47 +1157,6 @@ out:
 	return rc;
 }
 
-static void qeth_l3_start_ipa_checksum(struct qeth_card *card)
-{
-	QETH_CARD_TEXT(card, 3, "strtcsum");
-	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)
-	    && (card->dev->features & NETIF_F_RXCSUM))
-		qeth_set_rx_csum(card, 1);
-}
-
-static void qeth_l3_start_ipa_tx_checksum(struct qeth_card *card)
-{
-	QETH_CARD_TEXT(card, 3, "strttxcs");
-	qeth_start_ipa_tx_checksum(card);
-}
-
-static int qeth_l3_start_ipa_tso(struct qeth_card *card)
-{
-	int rc;
-
-	QETH_CARD_TEXT(card, 3, "sttso");
-
-	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
-		dev_info(&card->gdev->dev,
-			"Outbound TSO not supported on %s\n",
-			QETH_CARD_IFNAME(card));
-		rc = -EOPNOTSUPP;
-	} else {
-		rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
-						  IPA_CMD_ASS_START, 0);
-		if (rc)
-			dev_warn(&card->gdev->dev, "Starting outbound TCP "
-				"segmentation offload for %s failed\n",
-				QETH_CARD_IFNAME(card));
-		else
-			dev_info(&card->gdev->dev,
-				"Outbound TSO enabled\n");
-	}
-	if (rc)
-		card->dev->features &= ~NETIF_F_TSO;
-	return rc;
-}
-
 static int qeth_l3_start_ipassists(struct qeth_card *card)
 {
 	QETH_CARD_TEXT(card, 3, "strtipas");
@@ -1345,9 +1170,6 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
 	qeth_l3_start_ipa_multicast(card);	/* go on*/
 	qeth_l3_start_ipa_ipv6(card);		/* go on*/
 	qeth_l3_start_ipa_broadcast(card);	/* go on*/
-	qeth_l3_start_ipa_checksum(card);	/* go on*/
-	qeth_l3_start_ipa_tx_checksum(card);
-	qeth_l3_start_ipa_tso(card);		/* go on*/
 	return 0;
 }
 
@@ -1507,31 +1329,99 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
 	return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
 }
 
-static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac,
-				struct net_device *dev)
+static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac)
 {
 	ip_eth_mc_map(ipm, mac);
 }
 
-static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
+static void qeth_l3_mark_all_mc_to_be_deleted(struct qeth_card *card)
+{
+	struct qeth_ipaddr *addr;
+	int i;
+
+	hash_for_each(card->ip_mc_htable, i, addr, hnode)
+		addr->disp_flag = QETH_DISP_ADDR_DELETE;
+
+}
+
+static void qeth_l3_add_all_new_mc(struct qeth_card *card)
+{
+	struct qeth_ipaddr *addr;
+	struct hlist_node *tmp;
+	int i;
+	int rc;
+
+	hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+		if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+			rc = qeth_l3_register_addr_entry(card, addr);
+			if (!rc || (rc == IPA_RC_LAN_OFFLINE))
+				addr->ref_counter = 1;
+			else {
+				hash_del(&addr->hnode);
+				kfree(addr);
+			}
+		}
+	}
+
+}
+
+static void qeth_l3_delete_nonused_mc(struct qeth_card *card)
+{
+	struct qeth_ipaddr *addr;
+	struct hlist_node *tmp;
+	int i;
+	int rc;
+
+	hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+		if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
+			rc = qeth_l3_deregister_addr_entry(card, addr);
+			if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND)) {
+				hash_del(&addr->hnode);
+				kfree(addr);
+			}
+		}
+	}
+
+}
+
+
+static void
+qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 {
-	struct qeth_ipaddr *ipm;
 	struct ip_mc_list *im4;
+	struct qeth_ipaddr *tmp, *ipm;
 	char buf[MAX_ADDR_LEN];
 
 	QETH_CARD_TEXT(card, 4, "addmc");
+
+	tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
+	if (!tmp)
+		return;
+
 	for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
 	     im4 = rcu_dereference(im4->next_rcu)) {
-		qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
-		ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
-		if (!ipm)
-			continue;
-		ipm->u.a4.addr = im4->multiaddr;
-		memcpy(ipm->mac, buf, OSA_ADDR_LEN);
-		ipm->is_multicast = 1;
-		if (!qeth_l3_add_ip(card, ipm))
-			kfree(ipm);
+		qeth_l3_get_mac_for_ipm(im4->multiaddr, buf);
+
+		tmp->u.a4.addr = im4->multiaddr;
+		memcpy(tmp->mac, buf, sizeof(tmp->mac));
+
+		ipm = qeth_l3_ip_from_hash(card, tmp);
+		if (ipm) {
+			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+		} else {
+			ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
+			if (!ipm)
+				continue;
+			memcpy(ipm->mac, buf, sizeof(tmp->mac));
+			ipm->u.a4.addr = im4->multiaddr;
+			ipm->is_multicast = 1;
+			ipm->disp_flag = QETH_DISP_ADDR_ADD;
+			hash_add(card->ip_mc_htable,
+					&ipm->hnode, qeth_l3_ipaddr_hash(ipm));
+		}
 	}
+
+	kfree(tmp);
 }
 
 /* called with rcu_read_lock */
@@ -1541,6 +1431,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
 	u16 vid;
 
 	QETH_CARD_TEXT(card, 4, "addmcvl");
+
 	if (!qeth_is_supported(card, IPA_FULL_VLAN))
 		return;
 
@@ -1555,7 +1446,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
 		in_dev = __in_dev_get_rcu(netdev);
 		if (!in_dev)
 			continue;
-		qeth_l3_add_mc(card, in_dev);
+		qeth_l3_add_mc_to_hash(card, in_dev);
 	}
 }
 
@@ -1564,36 +1455,60 @@ static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
 	struct in_device *in4_dev;
 
 	QETH_CARD_TEXT(card, 4, "chkmcv4");
+
 	rcu_read_lock();
 	in4_dev = __in_dev_get_rcu(card->dev);
 	if (in4_dev == NULL)
 		goto unlock;
-	qeth_l3_add_mc(card, in4_dev);
+	qeth_l3_add_mc_to_hash(card, in4_dev);
 	qeth_l3_add_vlan_mc(card);
 unlock:
 	rcu_read_unlock();
 }
 
 #ifdef CONFIG_QETH_IPV6
-static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
+static void
+qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
 {
 	struct qeth_ipaddr *ipm;
 	struct ifmcaddr6 *im6;
+	struct qeth_ipaddr *tmp;
 	char buf[MAX_ADDR_LEN];
 
 	QETH_CARD_TEXT(card, 4, "addmc6");
+
+	tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
+	if (!tmp)
+		return;
+
 	for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
 		ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
+
+		memcpy(tmp->mac, buf, sizeof(tmp->mac));
+		memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr,
+		       sizeof(struct in6_addr));
+		tmp->is_multicast = 1;
+
+		ipm = qeth_l3_ip_from_hash(card, tmp);
+		if (ipm) {
+			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+			continue;
+		}
+
 		ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
 		if (!ipm)
 			continue;
-		ipm->is_multicast = 1;
+
 		memcpy(ipm->mac, buf, OSA_ADDR_LEN);
 		memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
 		       sizeof(struct in6_addr));
-		if (!qeth_l3_add_ip(card, ipm))
-			kfree(ipm);
+		ipm->is_multicast = 1;
+		ipm->disp_flag = QETH_DISP_ADDR_ADD;
+		hash_add(card->ip_mc_htable,
+				&ipm->hnode, qeth_l3_ipaddr_hash(ipm));
+
 	}
+	kfree(tmp);
 }
 
 /* called with rcu_read_lock */
@@ -1603,6 +1518,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
1603 u16 vid; 1518 u16 vid;
1604 1519
1605 QETH_CARD_TEXT(card, 4, "admc6vl"); 1520 QETH_CARD_TEXT(card, 4, "admc6vl");
1521
1606 if (!qeth_is_supported(card, IPA_FULL_VLAN)) 1522 if (!qeth_is_supported(card, IPA_FULL_VLAN))
1607 return; 1523 return;
1608 1524
@@ -1618,7 +1534,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
1618 if (!in_dev) 1534 if (!in_dev)
1619 continue; 1535 continue;
1620 read_lock_bh(&in_dev->lock); 1536 read_lock_bh(&in_dev->lock);
1621 qeth_l3_add_mc6(card, in_dev); 1537 qeth_l3_add_mc6_to_hash(card, in_dev);
1622 read_unlock_bh(&in_dev->lock); 1538 read_unlock_bh(&in_dev->lock);
1623 in6_dev_put(in_dev); 1539 in6_dev_put(in_dev);
1624 } 1540 }
@@ -1629,14 +1545,16 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
1629 struct inet6_dev *in6_dev; 1545 struct inet6_dev *in6_dev;
1630 1546
1631 QETH_CARD_TEXT(card, 4, "chkmcv6"); 1547 QETH_CARD_TEXT(card, 4, "chkmcv6");
1548
1632 if (!qeth_is_supported(card, IPA_IPV6)) 1549 if (!qeth_is_supported(card, IPA_IPV6))
1633 return ; 1550 return ;
1634 in6_dev = in6_dev_get(card->dev); 1551 in6_dev = in6_dev_get(card->dev);
1635 if (in6_dev == NULL) 1552 if (!in6_dev)
1636 return; 1553 return;
1554
1637 rcu_read_lock(); 1555 rcu_read_lock();
1638 read_lock_bh(&in6_dev->lock); 1556 read_lock_bh(&in6_dev->lock);
1639 qeth_l3_add_mc6(card, in6_dev); 1557 qeth_l3_add_mc6_to_hash(card, in6_dev);
1640 qeth_l3_add_vlan_mc6(card); 1558 qeth_l3_add_vlan_mc6(card);
1641 read_unlock_bh(&in6_dev->lock); 1559 read_unlock_bh(&in6_dev->lock);
1642 rcu_read_unlock(); 1560 rcu_read_unlock();
@@ -1660,16 +1578,23 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
1660 in_dev = in_dev_get(netdev); 1578 in_dev = in_dev_get(netdev);
1661 if (!in_dev) 1579 if (!in_dev)
1662 return; 1580 return;
1581
1582 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
1583 if (!addr)
1584 return;
1585
1586 spin_lock_bh(&card->ip_lock);
1587
1663 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { 1588 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1664 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); 1589 addr->u.a4.addr = ifa->ifa_address;
1665 if (addr) { 1590 addr->u.a4.mask = ifa->ifa_mask;
1666 addr->u.a4.addr = ifa->ifa_address; 1591 addr->type = QETH_IP_TYPE_NORMAL;
1667 addr->u.a4.mask = ifa->ifa_mask; 1592 qeth_l3_delete_ip(card, addr);
1668 addr->type = QETH_IP_TYPE_NORMAL;
1669 if (!qeth_l3_delete_ip(card, addr))
1670 kfree(addr);
1671 }
1672 } 1593 }
1594
1595 spin_unlock_bh(&card->ip_lock);
1596
1597 kfree(addr);
1673 in_dev_put(in_dev); 1598 in_dev_put(in_dev);
1674} 1599}
1675 1600
@@ -1687,20 +1612,28 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
1687 netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid); 1612 netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid);
1688 if (!netdev) 1613 if (!netdev)
1689 return; 1614 return;
1615
1690 in6_dev = in6_dev_get(netdev); 1616 in6_dev = in6_dev_get(netdev);
1691 if (!in6_dev) 1617 if (!in6_dev)
1692 return; 1618 return;
1619
1620 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
1621 if (!addr)
1622 return;
1623
1624 spin_lock_bh(&card->ip_lock);
1625
1693 list_for_each_entry(ifa, &in6_dev->addr_list, if_list) { 1626 list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
1694 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); 1627 memcpy(&addr->u.a6.addr, &ifa->addr,
1695 if (addr) { 1628 sizeof(struct in6_addr));
1696 memcpy(&addr->u.a6.addr, &ifa->addr, 1629 addr->u.a6.pfxlen = ifa->prefix_len;
1697 sizeof(struct in6_addr)); 1630 addr->type = QETH_IP_TYPE_NORMAL;
1698 addr->u.a6.pfxlen = ifa->prefix_len; 1631 qeth_l3_delete_ip(card, addr);
1699 addr->type = QETH_IP_TYPE_NORMAL;
1700 if (!qeth_l3_delete_ip(card, addr))
1701 kfree(addr);
1702 }
1703 } 1632 }
1633
1634 spin_unlock_bh(&card->ip_lock);
1635
1636 kfree(addr);
1704 in6_dev_put(in6_dev); 1637 in6_dev_put(in6_dev);
1705#endif /* CONFIG_QETH_IPV6 */ 1638#endif /* CONFIG_QETH_IPV6 */
1706} 1639}
@@ -1727,18 +1660,16 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
1727 __be16 proto, u16 vid) 1660 __be16 proto, u16 vid)
1728{ 1661{
1729 struct qeth_card *card = dev->ml_priv; 1662 struct qeth_card *card = dev->ml_priv;
1730 unsigned long flags;
1731 1663
1732 QETH_CARD_TEXT_(card, 4, "kid:%d", vid); 1664 QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
1665
1733 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { 1666 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
1734 QETH_CARD_TEXT(card, 3, "kidREC"); 1667 QETH_CARD_TEXT(card, 3, "kidREC");
1735 return 0; 1668 return 0;
1736 } 1669 }
1737 spin_lock_irqsave(&card->vlanlock, flags);
1738 /* unregister IP addresses of vlan device */ 1670 /* unregister IP addresses of vlan device */
1739 qeth_l3_free_vlan_addresses(card, vid); 1671 qeth_l3_free_vlan_addresses(card, vid);
1740 clear_bit(vid, card->active_vlans); 1672 clear_bit(vid, card->active_vlans);
1741 spin_unlock_irqrestore(&card->vlanlock, flags);
1742 qeth_l3_set_multicast_list(card->dev); 1673 qeth_l3_set_multicast_list(card->dev);
1743 return 0; 1674 return 0;
1744} 1675}
@@ -1994,8 +1925,8 @@ static int qeth_l3_verify_vlan_dev(struct net_device *dev,
1994static int qeth_l3_verify_dev(struct net_device *dev) 1925static int qeth_l3_verify_dev(struct net_device *dev)
1995{ 1926{
1996 struct qeth_card *card; 1927 struct qeth_card *card;
1997 unsigned long flags;
1998 int rc = 0; 1928 int rc = 0;
1929 unsigned long flags;
1999 1930
2000 read_lock_irqsave(&qeth_core_card_list.rwlock, flags); 1931 read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
2001 list_for_each_entry(card, &qeth_core_card_list.list, list) { 1932 list_for_each_entry(card, &qeth_core_card_list.list, list) {
@@ -2051,7 +1982,7 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
2051 card->state = CARD_STATE_SOFTSETUP; 1982 card->state = CARD_STATE_SOFTSETUP;
2052 } 1983 }
2053 if (card->state == CARD_STATE_SOFTSETUP) { 1984 if (card->state == CARD_STATE_SOFTSETUP) {
2054 qeth_l3_clear_ip_list(card, 1); 1985 qeth_l3_clear_ip_htable(card, 1);
2055 qeth_clear_ipacmd_list(card); 1986 qeth_clear_ipacmd_list(card);
2056 card->state = CARD_STATE_HARDSETUP; 1987 card->state = CARD_STATE_HARDSETUP;
2057 } 1988 }
@@ -2106,12 +2037,20 @@ static void qeth_l3_set_multicast_list(struct net_device *dev)
2106 (card->state != CARD_STATE_UP)) 2037 (card->state != CARD_STATE_UP))
2107 return; 2038 return;
2108 if (!card->options.sniffer) { 2039 if (!card->options.sniffer) {
2109 qeth_l3_delete_mc_addresses(card); 2040
2041 spin_lock_bh(&card->mclock);
2042
2043 qeth_l3_mark_all_mc_to_be_deleted(card);
2044
2110 qeth_l3_add_multicast_ipv4(card); 2045 qeth_l3_add_multicast_ipv4(card);
2111#ifdef CONFIG_QETH_IPV6 2046#ifdef CONFIG_QETH_IPV6
2112 qeth_l3_add_multicast_ipv6(card); 2047 qeth_l3_add_multicast_ipv6(card);
2113#endif 2048#endif
2114 qeth_l3_set_ip_addr_list(card); 2049 qeth_l3_delete_nonused_mc(card);
2050 qeth_l3_add_all_new_mc(card);
2051
2052 spin_unlock_bh(&card->mclock);
2053
2115 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) 2054 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
2116 return; 2055 return;
2117 } 2056 }
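This rx_mode hunk is the heart of the conversion: under mclock, every cached multicast entry is first marked deletable, the kernel's current state is replayed into the hash table (survivors flip back to DO_NOTHING, newcomers enter as ADD), and the leftovers are then unregistered while the additions are registered. Continuing the userspace sketch from the add_mc helper above, the whole cycle is roughly:

static void set_multicast_list(struct entry **table,
			       const unsigned int *addrs, size_t n)
{
	struct entry *e, **pp;

	for (e = *table; e; e = e->next)
		e->disp_flag = DISP_DELETE;	/* assume everything is stale */

	add_mc_to_table(table, addrs, n);	/* keep survivors, add new */

	for (pp = table; (e = *pp) != NULL; ) {
		if (e->disp_flag == DISP_DELETE) {
			*pp = e->next;		/* qeth: send DELIP first */
			free(e);
		} else {
			pp = &e->next;		/* qeth: SETIP for DISP_ADD */
		}
	}
}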
@@ -2375,22 +2314,21 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
2375 if (rc) { 2314 if (rc) {
2376 if (copy_to_user(udata, qinfo.udata, 4)) 2315 if (copy_to_user(udata, qinfo.udata, 4))
2377 rc = -EFAULT; 2316 rc = -EFAULT;
2378 goto free_and_out; 2317 goto free_and_out;
2379 } else { 2318 }
2380#ifdef CONFIG_QETH_IPV6 2319#ifdef CONFIG_QETH_IPV6
2381 if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) { 2320 if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
2382 /* fails in case of GuestLAN QDIO mode */ 2321 /* fails in case of GuestLAN QDIO mode */
2383 qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, 2322 qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo);
2384 &qinfo); 2323 }
2385 }
2386#endif 2324#endif
2387 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) { 2325 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
2388 QETH_CARD_TEXT(card, 4, "qactf"); 2326 QETH_CARD_TEXT(card, 4, "qactf");
2389 rc = -EFAULT; 2327 rc = -EFAULT;
2390 goto free_and_out; 2328 goto free_and_out;
2391 }
2392 QETH_CARD_TEXT(card, 4, "qacts");
2393 } 2329 }
2330 QETH_CARD_TEXT(card, 4, "qacts");
2331
2394free_and_out: 2332free_and_out:
2395 kfree(qinfo.udata); 2333 kfree(qinfo.udata);
2396out: 2334out:
@@ -2427,7 +2365,7 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
2427 rc = qeth_send_setassparms(card, iob, 2365 rc = qeth_send_setassparms(card, iob,
2428 sizeof(struct qeth_arp_cache_entry), 2366 sizeof(struct qeth_arp_cache_entry),
2429 (unsigned long) entry, 2367 (unsigned long) entry,
2430 qeth_l3_default_setassparms_cb, NULL); 2368 qeth_setassparms_cb, NULL);
2431 if (rc) { 2369 if (rc) {
2432 tmp = rc; 2370 tmp = rc;
2433 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); 2371 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
@@ -2467,7 +2405,7 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,
2467 return -ENOMEM; 2405 return -ENOMEM;
2468 rc = qeth_send_setassparms(card, iob, 2406 rc = qeth_send_setassparms(card, iob,
2469 12, (unsigned long)buf, 2407 12, (unsigned long)buf,
2470 qeth_l3_default_setassparms_cb, NULL); 2408 qeth_setassparms_cb, NULL);
2471 if (rc) { 2409 if (rc) {
2472 tmp = rc; 2410 tmp = rc;
2473 memset(buf, 0, 16); 2411 memset(buf, 0, 16);
@@ -2793,15 +2731,34 @@ static void qeth_tso_fill_header(struct qeth_card *card,
2793 } 2731 }
2794} 2732}
2795 2733
2796static inline int qeth_l3_tso_elements(struct sk_buff *skb) 2734/**
2735 * qeth_l3_get_elements_no_tso() - find number of SBALEs for skb data for tso
2736 * @card: qeth card structure, to check max. elems.
2737 * @skb: SKB address
2738 * @extra_elems: extra elems needed, to check against max.
2739 *
2740 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
2741 * skb data, including linear part and fragments, but excluding TCP header.
2742 * (Exclusion of TCP header distinguishes it from qeth_get_elements_no().)
2743 * Checks if the result plus extra_elems fits under the limit for the card.
2744 * Returns 0 if it does not.
2745 * Note: extra_elems is not included in the returned result.
2746 */
2747static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
2748 struct sk_buff *skb, int extra_elems)
2797{ 2749{
2798 unsigned long tcpd = (unsigned long)tcp_hdr(skb) + 2750 addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
2799 tcp_hdr(skb)->doff * 4; 2751 int elements = qeth_get_elements_for_range(
2800 int tcpd_len = skb_headlen(skb) - (tcpd - (unsigned long)skb->data); 2752 tcpdptr,
2801 int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd); 2753 (addr_t)skb->data + skb_headlen(skb)) +
2802 2754 qeth_get_elements_for_frags(skb);
2803 elements += qeth_get_elements_for_frags(skb);
2804 2755
2756 if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
2757 QETH_DBF_MESSAGE(2,
2758 "Invalid size of TSO IP packet (Number=%d / Length=%d). Discarded.\n",
2759 elements + extra_elems, skb->len);
2760 return 0;
2761 }
2805 return elements; 2762 return elements;
2806} 2763}
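The element count is pure page arithmetic: every page touched by the byte range from the TCP payload start to the end of the linear data costs one QDIO buffer element. A standalone model of the qeth_get_elements_for_range() step, with PAGE_SIZE and the PFN helpers defined locally for illustration (s390 uses 4 KiB pages):

#include <stdio.h>

#define PAGE_SIZE   4096UL
#define PFN_DOWN(x) ((x) / PAGE_SIZE)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

static unsigned long elements_for_range(unsigned long start,
					unsigned long end)
{
	return PFN_UP(end) - PFN_DOWN(start);	/* pages touched by [start, end) */
}

int main(void)
{
	/* A 100-byte payload straddling a page boundary needs 2 elements. */
	printf("%lu\n", elements_for_range(4090, 4190));	/* -> 2 */
	return 0;
}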
2807 2764
@@ -2810,8 +2767,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2810 int rc; 2767 int rc;
2811 u16 *tag; 2768 u16 *tag;
2812 struct qeth_hdr *hdr = NULL; 2769 struct qeth_hdr *hdr = NULL;
2813 int elements_needed = 0; 2770 int hdr_elements = 0;
2814 int elems; 2771 int elements;
2815 struct qeth_card *card = dev->ml_priv; 2772 struct qeth_card *card = dev->ml_priv;
2816 struct sk_buff *new_skb = NULL; 2773 struct sk_buff *new_skb = NULL;
2817 int ipv = qeth_get_ip_version(skb); 2774 int ipv = qeth_get_ip_version(skb);
@@ -2822,7 +2779,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2822 qeth_get_priority_queue(card, skb, ipv, cast_type) : 2779 qeth_get_priority_queue(card, skb, ipv, cast_type) :
2823 card->qdio.default_out_queue]; 2780 card->qdio.default_out_queue];
2824 int tx_bytes = skb->len; 2781 int tx_bytes = skb->len;
2825 bool large_send; 2782 bool use_tso;
2826 int data_offset = -1; 2783 int data_offset = -1;
2827 int nr_frags; 2784 int nr_frags;
2828 2785
@@ -2847,10 +2804,12 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2847 card->perf_stats.outbound_start_time = qeth_get_micros(); 2804 card->perf_stats.outbound_start_time = qeth_get_micros();
2848 } 2805 }
2849 2806
2850 large_send = skb_is_gso(skb); 2807 /* Ignore segment size from skb_is_gso(), 1 page is always used. */
2808 use_tso = skb_is_gso(skb) &&
2809 (qeth_get_ip_protocol(skb) == IPPROTO_TCP) && (ipv == 4);
2851 2810
2852 if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) && 2811 if ((card->info.type == QETH_CARD_TYPE_IQD) &&
2853 (skb_shinfo(skb)->nr_frags == 0)) { 2812 !skb_is_nonlinear(skb)) {
2854 new_skb = skb; 2813 new_skb = skb;
2855 if (new_skb->protocol == ETH_P_AF_IUCV) 2814 if (new_skb->protocol == ETH_P_AF_IUCV)
2856 data_offset = 0; 2815 data_offset = 0;
@@ -2859,7 +2818,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2859 hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); 2818 hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
2860 if (!hdr) 2819 if (!hdr)
2861 goto tx_drop; 2820 goto tx_drop;
2862 elements_needed++; 2821 hdr_elements++;
2863 } else { 2822 } else {
2864 /* create a clone with writeable headroom */ 2823 /* create a clone with writeable headroom */
2865 new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) 2824 new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso)
@@ -2894,22 +2853,28 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2894 /* fix hardware limitation: as long as we do not have sbal 2853 /* fix hardware limitation: as long as we do not have sbal
2895 * chaining we can not send long frag lists 2854 * chaining we can not send long frag lists
2896 */ 2855 */
2897 if (large_send) { 2856 if ((card->info.type != QETH_CARD_TYPE_IQD) &&
2898 if (qeth_l3_tso_elements(new_skb) + 1 > 16) { 2857 ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
2899 if (skb_linearize(new_skb)) 2858 (!use_tso && !qeth_get_elements_no(card, new_skb, 0)))) {
2900 goto tx_drop; 2859 int lin_rc = skb_linearize(new_skb);
2901 if (card->options.performance_stats) 2860
2861 if (card->options.performance_stats) {
2862 if (lin_rc)
2863 card->perf_stats.tx_linfail++;
2864 else
2902 card->perf_stats.tx_lin++; 2865 card->perf_stats.tx_lin++;
2903 } 2866 }
2867 if (lin_rc)
2868 goto tx_drop;
2904 } 2869 }
2905 2870
2906 if (large_send && (cast_type == RTN_UNSPEC)) { 2871 if (use_tso) {
2907 hdr = (struct qeth_hdr *)skb_push(new_skb, 2872 hdr = (struct qeth_hdr *)skb_push(new_skb,
2908 sizeof(struct qeth_hdr_tso)); 2873 sizeof(struct qeth_hdr_tso));
2909 memset(hdr, 0, sizeof(struct qeth_hdr_tso)); 2874 memset(hdr, 0, sizeof(struct qeth_hdr_tso));
2910 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type); 2875 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
2911 qeth_tso_fill_header(card, hdr, new_skb); 2876 qeth_tso_fill_header(card, hdr, new_skb);
2912 elements_needed++; 2877 hdr_elements++;
2913 } else { 2878 } else {
2914 if (data_offset < 0) { 2879 if (data_offset < 0) {
2915 hdr = (struct qeth_hdr *)skb_push(new_skb, 2880 hdr = (struct qeth_hdr *)skb_push(new_skb,
@@ -2930,31 +2895,31 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2930 qeth_l3_hdr_csum(card, hdr, new_skb); 2895 qeth_l3_hdr_csum(card, hdr, new_skb);
2931 } 2896 }
2932 2897
2933 elems = qeth_get_elements_no(card, new_skb, elements_needed); 2898 elements = use_tso ?
2934 if (!elems) { 2899 qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
2900 qeth_get_elements_no(card, new_skb, hdr_elements);
2901 if (!elements) {
2935 if (data_offset >= 0) 2902 if (data_offset >= 0)
2936 kmem_cache_free(qeth_core_header_cache, hdr); 2903 kmem_cache_free(qeth_core_header_cache, hdr);
2937 goto tx_drop; 2904 goto tx_drop;
2938 } 2905 }
2939 elements_needed += elems; 2906 elements += hdr_elements;
2940 nr_frags = skb_shinfo(new_skb)->nr_frags;
2941 2907
2942 if (card->info.type != QETH_CARD_TYPE_IQD) { 2908 if (card->info.type != QETH_CARD_TYPE_IQD) {
2943 int len; 2909 int len;
2944 if (large_send) 2910 if (use_tso)
2945 len = ((unsigned long)tcp_hdr(new_skb) + 2911 len = ((unsigned long)tcp_hdr(new_skb) +
2946 tcp_hdr(new_skb)->doff * 4) - 2912 tcp_hdrlen(new_skb)) -
2947 (unsigned long)new_skb->data; 2913 (unsigned long)new_skb->data;
2948 else 2914 else
2949 len = sizeof(struct qeth_hdr_layer3); 2915 len = sizeof(struct qeth_hdr_layer3);
2950 2916
2951 if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) 2917 if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len))
2952 goto tx_drop; 2918 goto tx_drop;
2953 rc = qeth_do_send_packet(card, queue, new_skb, hdr, 2919 rc = qeth_do_send_packet(card, queue, new_skb, hdr, elements);
2954 elements_needed);
2955 } else 2920 } else
2956 rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, 2921 rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
2957 elements_needed, data_offset, 0); 2922 elements, data_offset, 0);
2958 2923
2959 if (!rc) { 2924 if (!rc) {
2960 card->stats.tx_packets++; 2925 card->stats.tx_packets++;
@@ -2962,7 +2927,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2962 if (new_skb != skb) 2927 if (new_skb != skb)
2963 dev_kfree_skb_any(skb); 2928 dev_kfree_skb_any(skb);
2964 if (card->options.performance_stats) { 2929 if (card->options.performance_stats) {
2965 if (large_send) { 2930 nr_frags = skb_shinfo(new_skb)->nr_frags;
2931 if (use_tso) {
2966 card->perf_stats.large_send_bytes += tx_bytes; 2932 card->perf_stats.large_send_bytes += tx_bytes;
2967 card->perf_stats.large_send_cnt++; 2933 card->perf_stats.large_send_cnt++;
2968 } 2934 }
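Earlier in this function the overflow path was reworked so that a failed skb_linearize() is accounted as well: tx_lin counts successful flattenings, and the tx_linfail counter records the failures that previously vanished into a silent drop. A toy model of that accounting, with the qeth state reduced to a counter pair:

#include <stdio.h>

struct perf { unsigned int tx_lin, tx_linfail; };

/* Returns 0 if the (possibly linearized) packet may proceed, -1 to drop. */
static int maybe_linearize(struct perf *p, int over_limit, int lin_fails)
{
	if (!over_limit)
		return 0;
	if (lin_fails)
		p->tx_linfail++;
	else
		p->tx_lin++;
	return lin_fails ? -1 : 0;
}

int main(void)
{
	struct perf p = { 0, 0 };
	int ok = maybe_linearize(&p, 1, 0);	/* linearized fine  -> 0  */
	int drop = maybe_linearize(&p, 1, 1);	/* linearize failed -> -1 */

	printf("ok=%d drop=%d tx_lin=%u tx_linfail=%u\n",
	       ok, drop, p.tx_lin, p.tx_linfail);	/* 0 -1 1 1 */
	return 0;
}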
@@ -3048,36 +3014,6 @@ static int qeth_l3_stop(struct net_device *dev)
3048 return 0; 3014 return 0;
3049} 3015}
3050 3016
3051static netdev_features_t qeth_l3_fix_features(struct net_device *dev,
3052 netdev_features_t features)
3053{
3054 struct qeth_card *card = dev->ml_priv;
3055
3056 if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
3057 features &= ~NETIF_F_IP_CSUM;
3058 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
3059 features &= ~NETIF_F_TSO;
3060 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
3061 features &= ~NETIF_F_RXCSUM;
3062 return features;
3063}
3064
3065static int qeth_l3_set_features(struct net_device *dev,
3066 netdev_features_t features)
3067{
3068 struct qeth_card *card = dev->ml_priv;
3069 netdev_features_t changed = dev->features ^ features;
3070
3071 if (!(changed & NETIF_F_RXCSUM))
3072 return 0;
3073
3074 if (card->state == CARD_STATE_DOWN ||
3075 card->state == CARD_STATE_RECOVER)
3076 return 0;
3077
3078 return qeth_set_rx_csum(card, features & NETIF_F_RXCSUM ? 1 : 0);
3079}
3080
3081static const struct ethtool_ops qeth_l3_ethtool_ops = { 3017static const struct ethtool_ops qeth_l3_ethtool_ops = {
3082 .get_link = ethtool_op_get_link, 3018 .get_link = ethtool_op_get_link,
3083 .get_strings = qeth_core_get_strings, 3019 .get_strings = qeth_core_get_strings,
@@ -3120,8 +3056,8 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
3120 .ndo_set_rx_mode = qeth_l3_set_multicast_list, 3056 .ndo_set_rx_mode = qeth_l3_set_multicast_list,
3121 .ndo_do_ioctl = qeth_l3_do_ioctl, 3057 .ndo_do_ioctl = qeth_l3_do_ioctl,
3122 .ndo_change_mtu = qeth_change_mtu, 3058 .ndo_change_mtu = qeth_change_mtu,
3123 .ndo_fix_features = qeth_l3_fix_features, 3059 .ndo_fix_features = qeth_fix_features,
3124 .ndo_set_features = qeth_l3_set_features, 3060 .ndo_set_features = qeth_set_features,
3125 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, 3061 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
3126 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, 3062 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
3127 .ndo_tx_timeout = qeth_tx_timeout, 3063 .ndo_tx_timeout = qeth_tx_timeout,
@@ -3136,8 +3072,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
3136 .ndo_set_rx_mode = qeth_l3_set_multicast_list, 3072 .ndo_set_rx_mode = qeth_l3_set_multicast_list,
3137 .ndo_do_ioctl = qeth_l3_do_ioctl, 3073 .ndo_do_ioctl = qeth_l3_do_ioctl,
3138 .ndo_change_mtu = qeth_change_mtu, 3074 .ndo_change_mtu = qeth_change_mtu,
3139 .ndo_fix_features = qeth_l3_fix_features, 3075 .ndo_fix_features = qeth_fix_features,
3140 .ndo_set_features = qeth_l3_set_features, 3076 .ndo_set_features = qeth_set_features,
3141 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, 3077 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
3142 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, 3078 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
3143 .ndo_tx_timeout = qeth_tx_timeout, 3079 .ndo_tx_timeout = qeth_tx_timeout,
@@ -3169,7 +3105,10 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3169 card->dev->hw_features = NETIF_F_SG | 3105 card->dev->hw_features = NETIF_F_SG |
3170 NETIF_F_RXCSUM | NETIF_F_IP_CSUM | 3106 NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
3171 NETIF_F_TSO; 3107 NETIF_F_TSO;
3172 card->dev->features = NETIF_F_RXCSUM; 3108 card->dev->vlan_features = NETIF_F_SG |
3109 NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
3110 NETIF_F_TSO;
3111 card->dev->features = NETIF_F_SG;
3173 } 3112 }
3174 } 3113 }
3175 } else if (card->info.type == QETH_CARD_TYPE_IQD) { 3114 } else if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -3195,7 +3134,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3195 NETIF_F_HW_VLAN_CTAG_RX | 3134 NETIF_F_HW_VLAN_CTAG_RX |
3196 NETIF_F_HW_VLAN_CTAG_FILTER; 3135 NETIF_F_HW_VLAN_CTAG_FILTER;
3197 netif_keep_dst(card->dev); 3136 netif_keep_dst(card->dev);
3198 card->dev->gso_max_size = 15 * PAGE_SIZE; 3137 card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
3138 PAGE_SIZE;
3139 card->dev->gso_max_segs = (QETH_MAX_BUFFER_ELEMENTS(card) - 1);
3199 3140
3200 SET_NETDEV_DEV(card->dev, &card->gdev->dev); 3141 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
3201 netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT); 3142 netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
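The gso_max_size assignment replaced here was a hard-coded 15 * PAGE_SIZE; deriving it from the buffer-element count (one element reserved for the TSO header) produces the same value on a 16-element queue but now follows the real geometry. With an assumed 16 elements and 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE            4096
#define MAX_BUFFER_ELEMENTS  16		/* assumed queue geometry */

int main(void)
{
	int gso_max_size = (MAX_BUFFER_ELEMENTS - 1) * PAGE_SIZE;
	int gso_max_segs = MAX_BUFFER_ELEMENTS - 1;

	printf("gso_max_size=%d gso_max_segs=%d\n",
	       gso_max_size, gso_max_segs);	/* 61440 and 15 */
	return 0;
}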
@@ -3230,7 +3171,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
3230 card->dev = NULL; 3171 card->dev = NULL;
3231 } 3172 }
3232 3173
3233 qeth_l3_clear_ip_list(card, 0); 3174 qeth_l3_clear_ip_htable(card, 0);
3234 qeth_l3_clear_ipato_list(card); 3175 qeth_l3_clear_ipato_list(card);
3235 return; 3176 return;
3236} 3177}
@@ -3315,7 +3256,7 @@ contin:
3315 card->state = CARD_STATE_SOFTSETUP; 3256 card->state = CARD_STATE_SOFTSETUP;
3316 3257
3317 qeth_set_allowed_threads(card, 0xffffffff, 0); 3258 qeth_set_allowed_threads(card, 0xffffffff, 0);
3318 qeth_l3_set_ip_addr_list(card); 3259 qeth_l3_recover_ip(card);
3319 if (card->lan_online) 3260 if (card->lan_online)
3320 netif_carrier_on(card->dev); 3261 netif_carrier_on(card->dev);
3321 else 3262 else
@@ -3516,6 +3457,7 @@ EXPORT_SYMBOL_GPL(qeth_l3_discipline);
3516static int qeth_l3_ip_event(struct notifier_block *this, 3457static int qeth_l3_ip_event(struct notifier_block *this,
3517 unsigned long event, void *ptr) 3458 unsigned long event, void *ptr)
3518{ 3459{
3460
3519 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; 3461 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
3520 struct net_device *dev = (struct net_device *)ifa->ifa_dev->dev; 3462 struct net_device *dev = (struct net_device *)ifa->ifa_dev->dev;
3521 struct qeth_ipaddr *addr; 3463 struct qeth_ipaddr *addr;
@@ -3530,27 +3472,27 @@ static int qeth_l3_ip_event(struct notifier_block *this,
3530 QETH_CARD_TEXT(card, 3, "ipevent"); 3472 QETH_CARD_TEXT(card, 3, "ipevent");
3531 3473
3532 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); 3474 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
3533 if (addr != NULL) { 3475 if (addr) {
3534 addr->u.a4.addr = ifa->ifa_address; 3476 addr->u.a4.addr = ifa->ifa_address;
3535 addr->u.a4.mask = ifa->ifa_mask; 3477 addr->u.a4.mask = ifa->ifa_mask;
3536 addr->type = QETH_IP_TYPE_NORMAL; 3478 addr->type = QETH_IP_TYPE_NORMAL;
3537 } else 3479 } else
3538 goto out; 3480 return NOTIFY_DONE;
3539 3481
3540 switch (event) { 3482 switch (event) {
3541 case NETDEV_UP: 3483 case NETDEV_UP:
3542 if (!qeth_l3_add_ip(card, addr)) 3484 spin_lock_bh(&card->ip_lock);
3543 kfree(addr); 3485 qeth_l3_add_ip(card, addr);
3486 spin_unlock_bh(&card->ip_lock);
3544 break; 3487 break;
3545 case NETDEV_DOWN: 3488 case NETDEV_DOWN:
3546 if (!qeth_l3_delete_ip(card, addr)) 3489 spin_lock_bh(&card->ip_lock);
3547 kfree(addr); 3490 qeth_l3_delete_ip(card, addr);
3548 break; 3491 spin_unlock_bh(&card->ip_lock);
3549 default:
3550 break; 3492 break;
3551 } 3493 }
3552 qeth_l3_set_ip_addr_list(card); 3494
3553out: 3495 kfree(addr);
3554 return NOTIFY_DONE; 3496 return NOTIFY_DONE;
3555} 3497}
3556 3498
@@ -3579,27 +3521,27 @@ static int qeth_l3_ip6_event(struct notifier_block *this,
3579 return NOTIFY_DONE; 3521 return NOTIFY_DONE;
3580 3522
3581 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); 3523 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
3582 if (addr != NULL) { 3524 if (addr) {
3583 memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr)); 3525 memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
3584 addr->u.a6.pfxlen = ifa->prefix_len; 3526 addr->u.a6.pfxlen = ifa->prefix_len;
3585 addr->type = QETH_IP_TYPE_NORMAL; 3527 addr->type = QETH_IP_TYPE_NORMAL;
3586 } else 3528 } else
3587 goto out; 3529 return NOTIFY_DONE;
3588 3530
3589 switch (event) { 3531 switch (event) {
3590 case NETDEV_UP: 3532 case NETDEV_UP:
3591 if (!qeth_l3_add_ip(card, addr)) 3533 spin_lock_bh(&card->ip_lock);
3592 kfree(addr); 3534 qeth_l3_add_ip(card, addr);
3535 spin_unlock_bh(&card->ip_lock);
3593 break; 3536 break;
3594 case NETDEV_DOWN: 3537 case NETDEV_DOWN:
3595 if (!qeth_l3_delete_ip(card, addr)) 3538 spin_lock_bh(&card->ip_lock);
3596 kfree(addr); 3539 qeth_l3_delete_ip(card, addr);
3597 break; 3540 spin_unlock_bh(&card->ip_lock);
3598 default:
3599 break; 3541 break;
3600 } 3542 }
3601 qeth_l3_set_ip_addr_list(card); 3543
3602out: 3544 kfree(addr);
3603 return NOTIFY_DONE; 3545 return NOTIFY_DONE;
3604} 3546}
3605 3547
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 386eb7b89b1e..65645b11fc19 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/slab.h> 9#include <linux/slab.h>
10#include <asm/ebcdic.h> 10#include <asm/ebcdic.h>
11#include <linux/hashtable.h>
11#include "qeth_l3.h" 12#include "qeth_l3.h"
12 13
13#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \ 14#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
@@ -285,19 +286,19 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
285 if (card->options.hsuid[0]) { 286 if (card->options.hsuid[0]) {
286 /* delete old ip address */ 287 /* delete old ip address */
287 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); 288 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
288 if (addr != NULL) { 289 if (!addr)
289 addr->u.a6.addr.s6_addr32[0] = 0xfe800000;
290 addr->u.a6.addr.s6_addr32[1] = 0x00000000;
291 for (i = 8; i < 16; i++)
292 addr->u.a6.addr.s6_addr[i] =
293 card->options.hsuid[i - 8];
294 addr->u.a6.pfxlen = 0;
295 addr->type = QETH_IP_TYPE_NORMAL;
296 } else
297 return -ENOMEM; 290 return -ENOMEM;
298 if (!qeth_l3_delete_ip(card, addr)) 291
299 kfree(addr); 292 addr->u.a6.addr.s6_addr32[0] = 0xfe800000;
300 qeth_l3_set_ip_addr_list(card); 293 addr->u.a6.addr.s6_addr32[1] = 0x00000000;
294 for (i = 8; i < 16; i++)
295 addr->u.a6.addr.s6_addr[i] =
296 card->options.hsuid[i - 8];
297 addr->u.a6.pfxlen = 0;
298 addr->type = QETH_IP_TYPE_NORMAL;
299
300 qeth_l3_delete_ip(card, addr);
301 kfree(addr);
301 } 302 }
302 303
303 if (strlen(tmp) == 0) { 304 if (strlen(tmp) == 0) {
@@ -328,9 +329,8 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
328 addr->type = QETH_IP_TYPE_NORMAL; 329 addr->type = QETH_IP_TYPE_NORMAL;
329 } else 330 } else
330 return -ENOMEM; 331 return -ENOMEM;
331 if (!qeth_l3_add_ip(card, addr)) 332 qeth_l3_add_ip(card, addr);
332 kfree(addr); 333 kfree(addr);
333 qeth_l3_set_ip_addr_list(card);
334 334
335 return count; 335 return count;
336} 336}
@@ -367,8 +367,8 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
367 struct device_attribute *attr, const char *buf, size_t count) 367 struct device_attribute *attr, const char *buf, size_t count)
368{ 368{
369 struct qeth_card *card = dev_get_drvdata(dev); 369 struct qeth_card *card = dev_get_drvdata(dev);
370 struct qeth_ipaddr *tmpipa, *t; 370 struct qeth_ipaddr *addr;
371 int rc = 0; 371 int i, rc = 0;
372 372
373 if (!card) 373 if (!card)
374 return -EINVAL; 374 return -EINVAL;
@@ -384,21 +384,20 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
384 card->ipato.enabled = (card->ipato.enabled)? 0 : 1; 384 card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
385 } else if (sysfs_streq(buf, "1")) { 385 } else if (sysfs_streq(buf, "1")) {
386 card->ipato.enabled = 1; 386 card->ipato.enabled = 1;
387 list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) { 387 hash_for_each(card->ip_htable, i, addr, hnode) {
388 if ((tmpipa->type == QETH_IP_TYPE_NORMAL) && 388 if ((addr->type == QETH_IP_TYPE_NORMAL) &&
389 qeth_l3_is_addr_covered_by_ipato(card, tmpipa)) 389 qeth_l3_is_addr_covered_by_ipato(card, addr))
390 tmpipa->set_flags |= 390 addr->set_flags |=
391 QETH_IPA_SETIP_TAKEOVER_FLAG; 391 QETH_IPA_SETIP_TAKEOVER_FLAG;
392 } 392 }
393
394 } else if (sysfs_streq(buf, "0")) { 393 } else if (sysfs_streq(buf, "0")) {
395 card->ipato.enabled = 0; 394 card->ipato.enabled = 0;
396 list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) { 395 hash_for_each(card->ip_htable, i, addr, hnode) {
397 if (tmpipa->set_flags & 396 if (addr->set_flags &
398 QETH_IPA_SETIP_TAKEOVER_FLAG) 397 QETH_IPA_SETIP_TAKEOVER_FLAG)
399 tmpipa->set_flags &= 398 addr->set_flags &=
400 ~QETH_IPA_SETIP_TAKEOVER_FLAG; 399 ~QETH_IPA_SETIP_TAKEOVER_FLAG;
401 } 400 }
402 } else 401 } else
403 rc = -EINVAL; 402 rc = -EINVAL;
404out: 403out:
@@ -452,7 +451,6 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
452 enum qeth_prot_versions proto) 451 enum qeth_prot_versions proto)
453{ 452{
454 struct qeth_ipato_entry *ipatoe; 453 struct qeth_ipato_entry *ipatoe;
455 unsigned long flags;
456 char addr_str[40]; 454 char addr_str[40];
457 int entry_len; /* length of 1 entry string, differs between v4 and v6 */ 455 int entry_len; /* length of 1 entry string, differs between v4 and v6 */
458 int i = 0; 456 int i = 0;
@@ -460,7 +458,7 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
460 entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; 458 entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
461 /* add strlen for "/<mask>\n" */ 459 /* add strlen for "/<mask>\n" */
462 entry_len += (proto == QETH_PROT_IPV4)? 5 : 6; 460 entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
463 spin_lock_irqsave(&card->ip_lock, flags); 461 spin_lock_bh(&card->ip_lock);
464 list_for_each_entry(ipatoe, &card->ipato.entries, entry) { 462 list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
465 if (ipatoe->proto != proto) 463 if (ipatoe->proto != proto)
466 continue; 464 continue;
@@ -473,7 +471,7 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
473 i += snprintf(buf + i, PAGE_SIZE - i, 471 i += snprintf(buf + i, PAGE_SIZE - i,
474 "%s/%i\n", addr_str, ipatoe->mask_bits); 472 "%s/%i\n", addr_str, ipatoe->mask_bits);
475 } 473 }
476 spin_unlock_irqrestore(&card->ip_lock, flags); 474 spin_unlock_bh(&card->ip_lock);
477 i += snprintf(buf + i, PAGE_SIZE - i, "\n"); 475 i += snprintf(buf + i, PAGE_SIZE - i, "\n");
478 476
479 return i; 477 return i;
@@ -689,15 +687,15 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
689 enum qeth_prot_versions proto) 687 enum qeth_prot_versions proto)
690{ 688{
691 struct qeth_ipaddr *ipaddr; 689 struct qeth_ipaddr *ipaddr;
690 struct hlist_node *tmp;
692 char addr_str[40]; 691 char addr_str[40];
693 int entry_len; /* length of 1 entry string, differs between v4 and v6 */ 692 int entry_len; /* length of 1 entry string, differs between v4 and v6 */
694 unsigned long flags;
 695 int i = 0;                                                   693 int i = 0, bkt;
696 694
697 entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; 695 entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
698 entry_len += 2; /* \n + terminator */ 696 entry_len += 2; /* \n + terminator */
699 spin_lock_irqsave(&card->ip_lock, flags); 697 spin_lock_bh(&card->ip_lock);
 700 list_for_each_entry(ipaddr, &card->ip_list, entry) {         698 hash_for_each_safe(card->ip_htable, bkt, tmp, ipaddr, hnode) {
701 if (ipaddr->proto != proto) 699 if (ipaddr->proto != proto)
702 continue; 700 continue;
703 if (ipaddr->type != QETH_IP_TYPE_VIPA) 701 if (ipaddr->type != QETH_IP_TYPE_VIPA)
@@ -711,7 +709,7 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
711 addr_str); 709 addr_str);
712 i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str); 710 i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
713 } 711 }
714 spin_unlock_irqrestore(&card->ip_lock, flags); 712 spin_unlock_bh(&card->ip_lock);
715 i += snprintf(buf + i, PAGE_SIZE - i, "\n"); 713 i += snprintf(buf + i, PAGE_SIZE - i, "\n");
716 714
717 return i; 715 return i;
@@ -851,15 +849,15 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
851 enum qeth_prot_versions proto) 849 enum qeth_prot_versions proto)
852{ 850{
853 struct qeth_ipaddr *ipaddr; 851 struct qeth_ipaddr *ipaddr;
852 struct hlist_node *tmp;
854 char addr_str[40]; 853 char addr_str[40];
855 int entry_len; /* length of 1 entry string, differs between v4 and v6 */ 854 int entry_len; /* length of 1 entry string, differs between v4 and v6 */
856 unsigned long flags;
 857 int i = 0;                                                   855 int i = 0, bkt;
858 856
859 entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; 857 entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
860 entry_len += 2; /* \n + terminator */ 858 entry_len += 2; /* \n + terminator */
861 spin_lock_irqsave(&card->ip_lock, flags); 859 spin_lock_bh(&card->ip_lock);
 862 list_for_each_entry(ipaddr, &card->ip_list, entry) {         860 hash_for_each_safe(card->ip_htable, bkt, tmp, ipaddr, hnode) {
863 if (ipaddr->proto != proto) 861 if (ipaddr->proto != proto)
864 continue; 862 continue;
865 if (ipaddr->type != QETH_IP_TYPE_RXIP) 863 if (ipaddr->type != QETH_IP_TYPE_RXIP)
@@ -873,7 +871,7 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
873 addr_str); 871 addr_str);
874 i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str); 872 i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
875 } 873 }
876 spin_unlock_irqrestore(&card->ip_lock, flags); 874 spin_unlock_bh(&card->ip_lock);
877 i += snprintf(buf + i, PAGE_SIZE - i, "\n"); 875 i += snprintf(buf + i, PAGE_SIZE - i, "\n");
878 876
879 return i; 877 return i;
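One subtlety of these hash_for_each_safe() conversions: the macro's bucket argument is written by its outer loop, so it must be a dedicated variable (bkt above) rather than the snprintf offset i that the show routines accumulate, or the offset would reset on every bucket. A toy expansion of the iteration makes the shape visible:

#include <stdio.h>

#define HASH_BUCKETS 4

int main(void)
{
	/* Two entries spread across a tiny four-bucket table. */
	const char *bucket[HASH_BUCKETS][2] = {
		{ "10.0.0.1", NULL },
		{ NULL,       NULL },
		{ "10.0.0.2", NULL },
		{ NULL,       NULL },
	};
	char buf[128];
	int i = 0;	/* output offset: must not double as bucket index */
	int bkt, j;

	/* hash_for_each() expands to an outer loop that writes bkt. */
	for (bkt = 0; bkt < HASH_BUCKETS; bkt++)
		for (j = 0; bucket[bkt][j]; j++)
			i += snprintf(buf + i, sizeof(buf) - i,
				      "%s\n", bucket[bkt][j]);

	printf("%s", buf);	/* both addresses print; i kept its value */
	return 0;
}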
diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
index 20978f2058a6..73a2e08b47ef 100644
--- a/drivers/soc/fsl/qe/Kconfig
+++ b/drivers/soc/fsl/qe/Kconfig
@@ -22,7 +22,7 @@ config UCC_SLOW
22 22
23config UCC_FAST 23config UCC_FAST
24 bool 24 bool
25 default y if UCC_GETH 25 default y if UCC_GETH || QE_TDM
26 help 26 help
27 This option provides qe_lib support to UCC fast 27 This option provides qe_lib support to UCC fast
28 protocols: HDLC, Ethernet, ATM, transparent 28 protocols: HDLC, Ethernet, ATM, transparent
@@ -31,6 +31,10 @@ config UCC
31 bool 31 bool
32 default y if UCC_FAST || UCC_SLOW 32 default y if UCC_FAST || UCC_SLOW
33 33
34config QE_TDM
35 bool
36 default y if FSL_UCC_HDLC
37
34config QE_USB 38config QE_USB
35 bool 39 bool
36 default y if USB_FSL_QE 40 default y if USB_FSL_QE
diff --git a/drivers/soc/fsl/qe/Makefile b/drivers/soc/fsl/qe/Makefile
index ffac5410c5c7..2031d385bc7e 100644
--- a/drivers/soc/fsl/qe/Makefile
+++ b/drivers/soc/fsl/qe/Makefile
@@ -6,5 +6,6 @@ obj-$(CONFIG_CPM) += qe_common.o
6obj-$(CONFIG_UCC) += ucc.o 6obj-$(CONFIG_UCC) += ucc.o
7obj-$(CONFIG_UCC_SLOW) += ucc_slow.o 7obj-$(CONFIG_UCC_SLOW) += ucc_slow.o
8obj-$(CONFIG_UCC_FAST) += ucc_fast.o 8obj-$(CONFIG_UCC_FAST) += ucc_fast.o
9obj-$(CONFIG_QE_TDM) += qe_tdm.o
9obj-$(CONFIG_QE_USB) += usb.o 10obj-$(CONFIG_QE_USB) += usb.o
10obj-$(CONFIG_QE_GPIO) += gpio.o 11obj-$(CONFIG_QE_GPIO) += gpio.o
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 709fc63809e5..7026507e6f1d 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -239,6 +239,12 @@ enum qe_clock qe_clock_source(const char *source)
239 if (strcasecmp(source, "none") == 0) 239 if (strcasecmp(source, "none") == 0)
240 return QE_CLK_NONE; 240 return QE_CLK_NONE;
241 241
242 if (strcmp(source, "tsync_pin") == 0)
243 return QE_TSYNC_PIN;
244
245 if (strcmp(source, "rsync_pin") == 0)
246 return QE_RSYNC_PIN;
247
242 if (strncasecmp(source, "brg", 3) == 0) { 248 if (strncasecmp(source, "brg", 3) == 0) {
243 i = simple_strtoul(source + 3, NULL, 10); 249 i = simple_strtoul(source + 3, NULL, 10);
244 if ((i >= 1) && (i <= 16)) 250 if ((i >= 1) && (i <= 16))
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
new file mode 100644
index 000000000000..5e48b1470178
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -0,0 +1,276 @@
1/*
2 * Copyright (C) 2015 Freescale Semiconductor, Inc. All rights reserved.
3 *
4 * Authors: Zhao Qiang <qiang.zhao@nxp.com>
5 *
6 * Description:
7 * QE TDM API Set - TDM specific routines implementations.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14#include <linux/io.h>
15#include <linux/kernel.h>
16#include <linux/of_address.h>
17#include <linux/of_irq.h>
18#include <linux/of_platform.h>
19#include <soc/fsl/qe/qe_tdm.h>
20
21static int set_tdm_framer(const char *tdm_framer_type)
22{
23 if (strcmp(tdm_framer_type, "e1") == 0)
24 return TDM_FRAMER_E1;
25 else if (strcmp(tdm_framer_type, "t1") == 0)
26 return TDM_FRAMER_T1;
27 else
28 return -EINVAL;
29}
30
31static void set_si_param(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
32{
33 struct si_mode_info *si_info = &ut_info->si_info;
34
35 if (utdm->tdm_mode == TDM_INTERNAL_LOOPBACK) {
36 si_info->simr_crt = 1;
37 si_info->simr_rfsd = 0;
38 }
39}
40
41int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
42 struct ucc_tdm_info *ut_info)
43{
44 const char *sprop;
45 int ret = 0;
46 u32 val;
47 struct resource *res;
48 struct device_node *np2;
49 static int siram_init_flag;
50 struct platform_device *pdev;
51
52 sprop = of_get_property(np, "fsl,rx-sync-clock", NULL);
53 if (sprop) {
54 ut_info->uf_info.rx_sync = qe_clock_source(sprop);
55 if ((ut_info->uf_info.rx_sync < QE_CLK_NONE) ||
56 (ut_info->uf_info.rx_sync > QE_RSYNC_PIN)) {
57 pr_err("QE-TDM: Invalid rx-sync-clock property\n");
58 return -EINVAL;
59 }
60 } else {
61 pr_err("QE-TDM: Invalid rx-sync-clock property\n");
62 return -EINVAL;
63 }
64
65 sprop = of_get_property(np, "fsl,tx-sync-clock", NULL);
66 if (sprop) {
67 ut_info->uf_info.tx_sync = qe_clock_source(sprop);
68 if ((ut_info->uf_info.tx_sync < QE_CLK_NONE) ||
69 (ut_info->uf_info.tx_sync > QE_TSYNC_PIN)) {
70 pr_err("QE-TDM: Invalid tx-sync-clock property\n");
71 return -EINVAL;
72 }
73 } else {
74 pr_err("QE-TDM: Invalid tx-sync-clock property\n");
75 return -EINVAL;
76 }
77
78 ret = of_property_read_u32_index(np, "fsl,tx-timeslot-mask", 0, &val);
79 if (ret) {
80 pr_err("QE-TDM: Invalid tx-timeslot-mask property\n");
81 return -EINVAL;
82 }
83 utdm->tx_ts_mask = val;
84
85 ret = of_property_read_u32_index(np, "fsl,rx-timeslot-mask", 0, &val);
86 if (ret) {
87 ret = -EINVAL;
88 pr_err("QE-TDM: Invalid rx-timeslot-mask property\n");
89 return ret;
90 }
91 utdm->rx_ts_mask = val;
92
93 ret = of_property_read_u32_index(np, "fsl,tdm-id", 0, &val);
94 if (ret) {
95 ret = -EINVAL;
96 pr_err("QE-TDM: No fsl,tdm-id property for this UCC\n");
97 return ret;
98 }
99 utdm->tdm_port = val;
100 ut_info->uf_info.tdm_num = utdm->tdm_port;
101
102 if (of_get_property(np, "fsl,tdm-internal-loopback", NULL))
103 utdm->tdm_mode = TDM_INTERNAL_LOOPBACK;
104 else
105 utdm->tdm_mode = TDM_NORMAL;
106
107 sprop = of_get_property(np, "fsl,tdm-framer-type", NULL);
108 if (!sprop) {
109 ret = -EINVAL;
110 pr_err("QE-TDM: No tdm-framer-type property for UCC\n");
111 return ret;
112 }
113 ret = set_tdm_framer(sprop);
114 if (ret < 0)
115 return -EINVAL;
116 utdm->tdm_framer_type = ret;
117
118 ret = of_property_read_u32_index(np, "fsl,siram-entry-id", 0, &val);
119 if (ret) {
120 ret = -EINVAL;
121 pr_err("QE-TDM: No siram entry id for UCC\n");
122 return ret;
123 }
124 utdm->siram_entry_id = val;
125
126 set_si_param(utdm, ut_info);
127
128 np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-si");
129 if (!np2)
130 return -EINVAL;
131
132 pdev = of_find_device_by_node(np2);
133 if (!pdev) {
134 pr_err("%s: failed to lookup pdev\n", np2->name);
135 of_node_put(np2);
136 return -EINVAL;
137 }
138
139 of_node_put(np2);
140 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
141 utdm->si_regs = devm_ioremap_resource(&pdev->dev, res);
142 if (IS_ERR(utdm->si_regs)) {
143 ret = PTR_ERR(utdm->si_regs);
144 goto err_miss_siram_property;
145 }
146
147 np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-siram");
148 if (!np2) {
149 ret = -EINVAL;
150 goto err_miss_siram_property;
151 }
152
153 pdev = of_find_device_by_node(np2);
154 if (!pdev) {
155 ret = -EINVAL;
156 pr_err("%s: failed to lookup pdev\n", np2->name);
157 of_node_put(np2);
158 goto err_miss_siram_property;
159 }
160
161 of_node_put(np2);
162 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
163 utdm->siram = devm_ioremap_resource(&pdev->dev, res);
164 if (IS_ERR(utdm->siram)) {
165 ret = PTR_ERR(utdm->siram);
166 goto err_miss_siram_property;
167 }
168
169 if (siram_init_flag == 0) {
170 memset_io(utdm->siram, 0, res->end - res->start + 1);
171 siram_init_flag = 1;
172 }
173
174 return ret;
175
176err_miss_siram_property:
177 devm_iounmap(&pdev->dev, utdm->si_regs);
178 return ret;
179}
180
181void ucc_tdm_init(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
182{
183 struct si1 __iomem *si_regs;
184 u16 __iomem *siram;
185 u16 siram_entry_valid;
186 u16 siram_entry_closed;
187 u16 ucc_num;
188 u8 csel;
189 u16 sixmr;
190 u16 tdm_port;
191 u32 siram_entry_id;
192 u32 mask;
193 int i;
194
195 si_regs = utdm->si_regs;
196 siram = utdm->siram;
197 ucc_num = ut_info->uf_info.ucc_num;
198 tdm_port = utdm->tdm_port;
199 siram_entry_id = utdm->siram_entry_id;
200
201 if (utdm->tdm_framer_type == TDM_FRAMER_T1)
202 utdm->num_of_ts = 24;
203 if (utdm->tdm_framer_type == TDM_FRAMER_E1)
204 utdm->num_of_ts = 32;
205
206 /* set siram table */
207 csel = (ucc_num < 4) ? ucc_num + 9 : ucc_num - 3;
208
209 siram_entry_valid = SIR_CSEL(csel) | SIR_BYTE | SIR_CNT(0);
210 siram_entry_closed = SIR_IDLE | SIR_BYTE | SIR_CNT(0);
211
212 for (i = 0; i < utdm->num_of_ts; i++) {
213 mask = 0x01 << i;
214
215 if (utdm->tx_ts_mask & mask)
216 iowrite16be(siram_entry_valid,
217 &siram[siram_entry_id * 32 + i]);
218 else
219 iowrite16be(siram_entry_closed,
220 &siram[siram_entry_id * 32 + i]);
221
222 if (utdm->rx_ts_mask & mask)
223 iowrite16be(siram_entry_valid,
224 &siram[siram_entry_id * 32 + 0x200 + i]);
225 else
226 iowrite16be(siram_entry_closed,
227 &siram[siram_entry_id * 32 + 0x200 + i]);
228 }
229
230 setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
231 SIR_LAST);
232 setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
233 SIR_LAST);
234
235 /* Set SIxMR register */
236 sixmr = SIMR_SAD(siram_entry_id);
237
238 sixmr &= ~SIMR_SDM_MASK;
239
240 if (utdm->tdm_mode == TDM_INTERNAL_LOOPBACK)
241 sixmr |= SIMR_SDM_INTERNAL_LOOPBACK;
242 else
243 sixmr |= SIMR_SDM_NORMAL;
244
245 sixmr |= SIMR_RFSD(ut_info->si_info.simr_rfsd) |
246 SIMR_TFSD(ut_info->si_info.simr_tfsd);
247
248 if (ut_info->si_info.simr_crt)
249 sixmr |= SIMR_CRT;
250 if (ut_info->si_info.simr_sl)
251 sixmr |= SIMR_SL;
252 if (ut_info->si_info.simr_ce)
253 sixmr |= SIMR_CE;
254 if (ut_info->si_info.simr_fe)
255 sixmr |= SIMR_FE;
256 if (ut_info->si_info.simr_gm)
257 sixmr |= SIMR_GM;
258
259 switch (tdm_port) {
260 case 0:
261 iowrite16be(sixmr, &si_regs->sixmr1[0]);
262 break;
263 case 1:
264 iowrite16be(sixmr, &si_regs->sixmr1[1]);
265 break;
266 case 2:
267 iowrite16be(sixmr, &si_regs->sixmr1[2]);
268 break;
269 case 3:
270 iowrite16be(sixmr, &si_regs->sixmr1[3]);
271 break;
272 default:
273 pr_err("QE-TDM: can not find tdm sixmr reg\n");
274 break;
275 }
276}
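These two entry points are meant to be driven from a UCC driver's probe path (the fsl_ucc_hdlc driver this series enables). A plausible wiring, sketched under the assumption of a driver-owned ucc_tdm_info; the helper name hdlc_probe_tdm is hypothetical:

#include <linux/slab.h>
#include <soc/fsl/qe/qe_tdm.h>

static int hdlc_probe_tdm(struct device_node *np,
			  struct ucc_tdm_info *ut_info)
{
	struct ucc_tdm *utdm;
	int ret;

	utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
	if (!utdm)
		return -ENOMEM;

	/* Parse the fsl,* properties and map the SI/SIRAM resources. */
	ret = ucc_of_parse_tdm(np, utdm, ut_info);
	if (ret) {
		kfree(utdm);
		return ret;
	}

	/* Program the SIRAM routing table and the SIxMR mode register. */
	ucc_tdm_init(utdm, ut_info);
	return 0;
}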
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index b59d3358f9bd..c646d8713861 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -25,6 +25,12 @@
25#include <soc/fsl/qe/qe.h> 25#include <soc/fsl/qe/qe.h>
26#include <soc/fsl/qe/ucc.h> 26#include <soc/fsl/qe/ucc.h>
27 27
28#define UCC_TDM_NUM 8
29#define RX_SYNC_SHIFT_BASE 30
30#define TX_SYNC_SHIFT_BASE 14
31#define RX_CLK_SHIFT_BASE 28
32#define TX_CLK_SHIFT_BASE 12
33
28int ucc_set_qe_mux_mii_mng(unsigned int ucc_num) 34int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
29{ 35{
30 unsigned long flags; 36 unsigned long flags;
@@ -210,3 +216,447 @@ int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
210 216
211 return 0; 217 return 0;
212} 218}
219
220static int ucc_get_tdm_common_clk(u32 tdm_num, enum qe_clock clock)
221{
222 int clock_bits = -EINVAL;
223
224 /*
225 * for TDM[0, 1, 2, 3], TX and RX use common
226 * clock source BRG3,4 and CLK1,2
227 * for TDM[4, 5, 6, 7], TX and RX use common
228 * clock source BRG12,13 and CLK23,24
229 */
230 switch (tdm_num) {
231 case 0:
232 case 1:
233 case 2:
234 case 3:
235 switch (clock) {
236 case QE_BRG3:
237 clock_bits = 1;
238 break;
239 case QE_BRG4:
240 clock_bits = 2;
241 break;
242 case QE_CLK1:
243 clock_bits = 4;
244 break;
245 case QE_CLK2:
246 clock_bits = 5;
247 break;
248 default:
249 break;
250 }
251 break;
252 case 4:
253 case 5:
254 case 6:
255 case 7:
256 switch (clock) {
257 case QE_BRG12:
258 clock_bits = 1;
259 break;
260 case QE_BRG13:
261 clock_bits = 2;
262 break;
263 case QE_CLK23:
264 clock_bits = 4;
265 break;
266 case QE_CLK24:
267 clock_bits = 5;
268 break;
269 default:
270 break;
271 }
272 break;
273 default:
274 break;
275 }
276
277 return clock_bits;
278}
279
280static int ucc_get_tdm_rx_clk(u32 tdm_num, enum qe_clock clock)
281{
282 int clock_bits = -EINVAL;
283
284 switch (tdm_num) {
285 case 0:
286 switch (clock) {
287 case QE_CLK3:
288 clock_bits = 6;
289 break;
290 case QE_CLK8:
291 clock_bits = 7;
292 break;
293 default:
294 break;
295 }
296 break;
297 case 1:
298 switch (clock) {
299 case QE_CLK5:
300 clock_bits = 6;
301 break;
302 case QE_CLK10:
303 clock_bits = 7;
304 break;
305 default:
306 break;
307 }
308 break;
309 case 2:
310 switch (clock) {
311 case QE_CLK7:
312 clock_bits = 6;
313 break;
314 case QE_CLK12:
315 clock_bits = 7;
316 break;
317 default:
318 break;
319 }
320 break;
321 case 3:
322 switch (clock) {
323 case QE_CLK9:
324 clock_bits = 6;
325 break;
326 case QE_CLK14:
327 clock_bits = 7;
328 break;
329 default:
330 break;
331 }
332 break;
333 case 4:
334 switch (clock) {
335 case QE_CLK11:
336 clock_bits = 6;
337 break;
338 case QE_CLK16:
339 clock_bits = 7;
340 break;
341 default:
342 break;
343 }
344 break;
345 case 5:
346 switch (clock) {
347 case QE_CLK13:
348 clock_bits = 6;
349 break;
350 case QE_CLK18:
351 clock_bits = 7;
352 break;
353 default:
354 break;
355 }
356 break;
357 case 6:
358 switch (clock) {
359 case QE_CLK15:
360 clock_bits = 6;
361 break;
362 case QE_CLK20:
363 clock_bits = 7;
364 break;
365 default:
366 break;
367 }
368 break;
369 case 7:
370 switch (clock) {
371 case QE_CLK17:
372 clock_bits = 6;
373 break;
374 case QE_CLK22:
375 clock_bits = 7;
376 break;
377 default:
378 break;
379 }
380 break;
381 }
382
383 return clock_bits;
384}
385
386static int ucc_get_tdm_tx_clk(u32 tdm_num, enum qe_clock clock)
387{
388 int clock_bits = -EINVAL;
389
390 switch (tdm_num) {
391 case 0:
392 switch (clock) {
393 case QE_CLK4:
394 clock_bits = 6;
395 break;
396 case QE_CLK9:
397 clock_bits = 7;
398 break;
399 default:
400 break;
401 }
402 break;
403 case 1:
404 switch (clock) {
405 case QE_CLK6:
406 clock_bits = 6;
407 break;
408 case QE_CLK11:
409 clock_bits = 7;
410 break;
411 default:
412 break;
413 }
414 break;
415 case 2:
416 switch (clock) {
417 case QE_CLK8:
418 clock_bits = 6;
419 break;
420 case QE_CLK13:
421 clock_bits = 7;
422 break;
423 default:
424 break;
425 }
426 break;
427 case 3:
428 switch (clock) {
429 case QE_CLK10:
430 clock_bits = 6;
431 break;
432 case QE_CLK15:
433 clock_bits = 7;
434 break;
435 default:
436 break;
437 }
438 break;
439 case 4:
440 switch (clock) {
441 case QE_CLK12:
442 clock_bits = 6;
443 break;
444 case QE_CLK17:
445 clock_bits = 7;
446 break;
447 default:
448 break;
449 }
450 break;
451 case 5:
452 switch (clock) {
453 case QE_CLK14:
454 clock_bits = 6;
455 break;
456 case QE_CLK19:
457 clock_bits = 7;
458 break;
459 default:
460 break;
461 }
462 break;
463 case 6:
464 switch (clock) {
465 case QE_CLK16:
466 clock_bits = 6;
467 break;
468 case QE_CLK21:
469 clock_bits = 7;
470 break;
471 default:
472 break;
473 }
474 break;
475 case 7:
476 switch (clock) {
477 case QE_CLK18:
478 clock_bits = 6;
479 break;
480 case QE_CLK3:
481 clock_bits = 7;
482 break;
483 default:
484 break;
485 }
486 break;
487 }
488
489 return clock_bits;
490}
491
492/* tdm_num: TDM A-H port num is 0-7 */
493static int ucc_get_tdm_rxtx_clk(enum comm_dir mode, u32 tdm_num,
494 enum qe_clock clock)
495{
496 int clock_bits;
497
498 clock_bits = ucc_get_tdm_common_clk(tdm_num, clock);
499 if (clock_bits > 0)
500 return clock_bits;
501 if (mode == COMM_DIR_RX)
502 clock_bits = ucc_get_tdm_rx_clk(tdm_num, clock);
503 if (mode == COMM_DIR_TX)
504 clock_bits = ucc_get_tdm_tx_clk(tdm_num, clock);
505 return clock_bits;
506}
507
508static u32 ucc_get_tdm_clk_shift(enum comm_dir mode, u32 tdm_num)
509{
510 u32 shift;
511
512 shift = (mode == COMM_DIR_RX) ? RX_CLK_SHIFT_BASE : TX_CLK_SHIFT_BASE;
513 if (tdm_num < 4)
514 shift -= tdm_num * 4;
515 else
516 shift -= (tdm_num - 4) * 4;
517
518 return shift;
519}
520
521int ucc_set_tdm_rxtx_clk(u32 tdm_num, enum qe_clock clock,
522 enum comm_dir mode)
523{
524 int clock_bits;
525 u32 shift;
526 struct qe_mux __iomem *qe_mux_reg;
527 __be32 __iomem *cmxs1cr;
528
529 qe_mux_reg = &qe_immr->qmx;
530
 531 if (tdm_num >= UCC_TDM_NUM)
532 return -EINVAL;
533
534 /* The communications direction must be RX or TX */
535 if (mode != COMM_DIR_RX && mode != COMM_DIR_TX)
536 return -EINVAL;
537
538 clock_bits = ucc_get_tdm_rxtx_clk(mode, tdm_num, clock);
539 if (clock_bits < 0)
540 return -EINVAL;
541
542 shift = ucc_get_tdm_clk_shift(mode, tdm_num);
543
544 cmxs1cr = (tdm_num < 4) ? &qe_mux_reg->cmxsi1cr_l :
545 &qe_mux_reg->cmxsi1cr_h;
546
547 qe_clrsetbits32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
548 clock_bits << shift);
549
550 return 0;
551}
552
553static int ucc_get_tdm_sync_source(u32 tdm_num, enum qe_clock clock,
554 enum comm_dir mode)
555{
556 int source = -EINVAL;
557
558 if (mode == COMM_DIR_RX && clock == QE_RSYNC_PIN) {
559 source = 0;
560 return source;
561 }
562 if (mode == COMM_DIR_TX && clock == QE_TSYNC_PIN) {
563 source = 0;
564 return source;
565 }
566
567 switch (tdm_num) {
568 case 0:
569 case 1:
570 switch (clock) {
571 case QE_BRG9:
572 source = 1;
573 break;
574 case QE_BRG10:
575 source = 2;
576 break;
577 default:
578 break;
579 }
580 break;
581 case 2:
582 case 3:
583 switch (clock) {
584 case QE_BRG9:
585 source = 1;
586 break;
587 case QE_BRG11:
588 source = 2;
589 break;
590 default:
591 break;
592 }
593 break;
594 case 4:
595 case 5:
596 switch (clock) {
597 case QE_BRG13:
598 source = 1;
599 break;
600 case QE_BRG14:
601 source = 2;
602 break;
603 default:
604 break;
605 }
606 break;
607 case 6:
608 case 7:
609 switch (clock) {
610 case QE_BRG13:
611 source = 1;
612 break;
613 case QE_BRG15:
614 source = 2;
615 break;
616 default:
617 break;
618 }
619 break;
620 }
621
622 return source;
623}
624
625static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
626{
627 u32 shift;
628
 629 shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
630 shift -= tdm_num * 2;
631
632 return shift;
633}
634
635int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock,
636 enum comm_dir mode)
637{
638 int source;
639 u32 shift;
 640 struct qe_mux __iomem *qe_mux_reg;
641
642 qe_mux_reg = &qe_immr->qmx;
643
644 if (tdm_num >= UCC_TDM_NUM)
645 return -EINVAL;
646
647 /* The communications direction must be RX or TX */
648 if (mode != COMM_DIR_RX && mode != COMM_DIR_TX)
649 return -EINVAL;
650
651 source = ucc_get_tdm_sync_source(tdm_num, clock, mode);
652 if (source < 0)
653 return -EINVAL;
654
655 shift = ucc_get_tdm_sync_shift(mode, tdm_num);
656
657 qe_clrsetbits32(&qe_mux_reg->cmxsi1syr,
658 QE_CMXUCR_TX_CLK_SRC_MASK << shift,
659 source << shift);
660
661 return 0;
662}
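All of the CMX programming above reduces to shift-and-mask arithmetic: each TDM port owns a clock-source field (RX fields start at bit 28, TX at bit 12, four ports per register, so the per-port stride of 4 implies a 4-bit field) and a 2-bit sync-source field (RX base 30, TX base 14). A standalone model of the read-modify-write, with the field width an assumption drawn from that stride:

#include <stdio.h>
#include <stdint.h>

#define RX_CLK_SHIFT_BASE  28
#define TX_CLK_SHIFT_BASE  12

static uint32_t clk_shift(int rx, uint32_t tdm_num)
{
	uint32_t shift = rx ? RX_CLK_SHIFT_BASE : TX_CLK_SHIFT_BASE;

	return shift - (tdm_num % 4) * 4;	/* ports 4-7 use the _h register */
}

/* Models qe_clrsetbits32(): read-modify-write of a mux register. */
static uint32_t clrsetbits(uint32_t reg, uint32_t clr, uint32_t set)
{
	return (reg & ~clr) | set;
}

int main(void)
{
	uint32_t reg = 0;
	uint32_t bits = 5;			/* e.g. CLK2 as TDM0's RX clock */
	uint32_t shift = clk_shift(1, 0);	/* -> 28 */

	reg = clrsetbits(reg, 0xFu << shift, bits << shift);
	printf("shift=%u reg=0x%08x\n", (unsigned)shift, (unsigned)reg);
	/* shift=28 reg=0x50000000 */
	return 0;
}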
diff --git a/drivers/soc/fsl/qe/ucc_fast.c b/drivers/soc/fsl/qe/ucc_fast.c
index a7689310fe40..83d8d16e3a69 100644
--- a/drivers/soc/fsl/qe/ucc_fast.c
+++ b/drivers/soc/fsl/qe/ucc_fast.c
@@ -327,6 +327,42 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
327 ucc_fast_free(uccf); 327 ucc_fast_free(uccf);
328 return -EINVAL; 328 return -EINVAL;
329 } 329 }
330 } else {
331 /* tdm Rx clock routing */
332 if ((uf_info->rx_clock != QE_CLK_NONE) &&
333 ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->rx_clock,
334 COMM_DIR_RX)) {
 335 pr_err("%s: illegal value for RX clock\n", __func__);
336 ucc_fast_free(uccf);
337 return -EINVAL;
338 }
339
340 /* tdm Tx clock routing */
341 if ((uf_info->tx_clock != QE_CLK_NONE) &&
342 ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->tx_clock,
343 COMM_DIR_TX)) {
 344 pr_err("%s: illegal value for TX clock\n", __func__);
345 ucc_fast_free(uccf);
346 return -EINVAL;
347 }
348
349 /* tdm Rx sync clock routing */
350 if ((uf_info->rx_sync != QE_CLK_NONE) &&
351 ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->rx_sync,
352 COMM_DIR_RX)) {
 353 pr_err("%s: illegal value for RX sync clock\n", __func__);
354 ucc_fast_free(uccf);
355 return -EINVAL;
356 }
357
358 /* tdm Tx sync clock routing */
359 if ((uf_info->tx_sync != QE_CLK_NONE) &&
360 ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->tx_sync,
361 COMM_DIR_TX)) {
 362 pr_err("%s: illegal value for TX sync clock\n", __func__);
363 ucc_fast_free(uccf);
364 return -EINVAL;
365 }
330 } 366 }
331 367
332 /* Set interrupt mask register at UCC level. */ 368 /* Set interrupt mask register at UCC level. */
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f744eeb3e2b4..1d3e45f84549 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -301,6 +301,32 @@ static bool vhost_can_busy_poll(struct vhost_dev *dev,
301 !vhost_has_work(dev); 301 !vhost_has_work(dev);
302} 302}
303 303
304static void vhost_net_disable_vq(struct vhost_net *n,
305 struct vhost_virtqueue *vq)
306{
307 struct vhost_net_virtqueue *nvq =
308 container_of(vq, struct vhost_net_virtqueue, vq);
309 struct vhost_poll *poll = n->poll + (nvq - n->vqs);
310 if (!vq->private_data)
311 return;
312 vhost_poll_stop(poll);
313}
314
315static int vhost_net_enable_vq(struct vhost_net *n,
316 struct vhost_virtqueue *vq)
317{
318 struct vhost_net_virtqueue *nvq =
319 container_of(vq, struct vhost_net_virtqueue, vq);
320 struct vhost_poll *poll = n->poll + (nvq - n->vqs);
321 struct socket *sock;
322
323 sock = vq->private_data;
324 if (!sock)
325 return 0;
326
327 return vhost_poll_start(poll, sock->file);
328}
329
304static int vhost_net_tx_get_vq_desc(struct vhost_net *net, 330static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
305 struct vhost_virtqueue *vq, 331 struct vhost_virtqueue *vq,
306 struct iovec iov[], unsigned int iov_size, 332 struct iovec iov[], unsigned int iov_size,
@@ -613,6 +639,7 @@ static void handle_rx(struct vhost_net *net)
613 if (!sock) 639 if (!sock)
614 goto out; 640 goto out;
615 vhost_disable_notify(&net->dev, vq); 641 vhost_disable_notify(&net->dev, vq);
642 vhost_net_disable_vq(net, vq);
616 643
617 vhost_hlen = nvq->vhost_hlen; 644 vhost_hlen = nvq->vhost_hlen;
618 sock_hlen = nvq->sock_hlen; 645 sock_hlen = nvq->sock_hlen;
@@ -629,7 +656,7 @@ static void handle_rx(struct vhost_net *net)
629 likely(mergeable) ? UIO_MAXIOV : 1); 656 likely(mergeable) ? UIO_MAXIOV : 1);
630 /* On error, stop handling until the next kick. */ 657 /* On error, stop handling until the next kick. */
631 if (unlikely(headcount < 0)) 658 if (unlikely(headcount < 0))
632 break; 659 goto out;
633 /* On overrun, truncate and discard */ 660 /* On overrun, truncate and discard */
634 if (unlikely(headcount > UIO_MAXIOV)) { 661 if (unlikely(headcount > UIO_MAXIOV)) {
635 iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1); 662 iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
@@ -648,7 +675,7 @@ static void handle_rx(struct vhost_net *net)
648 } 675 }
649 /* Nothing new? Wait for eventfd to tell us 676 /* Nothing new? Wait for eventfd to tell us
650 * they refilled. */ 677 * they refilled. */
651 break; 678 goto out;
652 } 679 }
653 /* We don't need to be notified again. */ 680 /* We don't need to be notified again. */
654 iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len); 681 iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
@@ -676,7 +703,7 @@ static void handle_rx(struct vhost_net *net)
676 &fixup) != sizeof(hdr)) { 703 &fixup) != sizeof(hdr)) {
677 vq_err(vq, "Unable to write vnet_hdr " 704 vq_err(vq, "Unable to write vnet_hdr "
678 "at addr %p\n", vq->iov->iov_base); 705 "at addr %p\n", vq->iov->iov_base);
679 break; 706 goto out;
680 } 707 }
681 } else { 708 } else {
682 /* Header came from socket; we'll need to patch 709 /* Header came from socket; we'll need to patch
@@ -692,7 +719,7 @@ static void handle_rx(struct vhost_net *net)
692 &fixup) != sizeof num_buffers) { 719 &fixup) != sizeof num_buffers) {
693 vq_err(vq, "Failed num_buffers write"); 720 vq_err(vq, "Failed num_buffers write");
694 vhost_discard_vq_desc(vq, headcount); 721 vhost_discard_vq_desc(vq, headcount);
695 break; 722 goto out;
696 } 723 }
697 vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, 724 vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
698 headcount); 725 headcount);
@@ -701,9 +728,10 @@ static void handle_rx(struct vhost_net *net)
701 total_len += vhost_len; 728 total_len += vhost_len;
702 if (unlikely(total_len >= VHOST_NET_WEIGHT)) { 729 if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
703 vhost_poll_queue(&vq->poll); 730 vhost_poll_queue(&vq->poll);
704 break; 731 goto out;
705 } 732 }
706 } 733 }
734 vhost_net_enable_vq(net, vq);
707out: 735out:
708 mutex_unlock(&vq->mutex); 736 mutex_unlock(&vq->mutex);
709} 737}
@@ -782,32 +810,6 @@ static int vhost_net_open(struct inode *inode, struct file *f)
782 return 0; 810 return 0;
783} 811}
784 812
785static void vhost_net_disable_vq(struct vhost_net *n,
786 struct vhost_virtqueue *vq)
787{
788 struct vhost_net_virtqueue *nvq =
789 container_of(vq, struct vhost_net_virtqueue, vq);
790 struct vhost_poll *poll = n->poll + (nvq - n->vqs);
791 if (!vq->private_data)
792 return;
793 vhost_poll_stop(poll);
794}
795
796static int vhost_net_enable_vq(struct vhost_net *n,
797 struct vhost_virtqueue *vq)
798{
799 struct vhost_net_virtqueue *nvq =
800 container_of(vq, struct vhost_net_virtqueue, vq);
801 struct vhost_poll *poll = n->poll + (nvq - n->vqs);
802 struct socket *sock;
803
804 sock = vq->private_data;
805 if (!sock)
806 return 0;
807
808 return vhost_poll_start(poll, sock->file);
809}
810
811static struct socket *vhost_net_stop_vq(struct vhost_net *n, 813static struct socket *vhost_net_stop_vq(struct vhost_net *n,
812 struct vhost_virtqueue *vq) 814 struct vhost_virtqueue *vq)
813{ 815{
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 63cd9f939f19..4832de84d52c 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -85,18 +85,14 @@ int afs_open_socket(void)
85 85
86 skb_queue_head_init(&afs_incoming_calls); 86 skb_queue_head_init(&afs_incoming_calls);
87 87
88 ret = -ENOMEM;
88 afs_async_calls = create_singlethread_workqueue("kafsd"); 89 afs_async_calls = create_singlethread_workqueue("kafsd");
89 if (!afs_async_calls) { 90 if (!afs_async_calls)
90 _leave(" = -ENOMEM [wq]"); 91 goto error_0;
91 return -ENOMEM;
92 }
93 92
94 ret = sock_create_kern(&init_net, AF_RXRPC, SOCK_DGRAM, PF_INET, &socket); 93 ret = sock_create_kern(&init_net, AF_RXRPC, SOCK_DGRAM, PF_INET, &socket);
95 if (ret < 0) { 94 if (ret < 0)
96 destroy_workqueue(afs_async_calls); 95 goto error_1;
97 _leave(" = %d [socket]", ret);
98 return ret;
99 }
100 96
101 socket->sk->sk_allocation = GFP_NOFS; 97 socket->sk->sk_allocation = GFP_NOFS;
102 98
@@ -111,18 +107,26 @@ int afs_open_socket(void)
111 sizeof(srx.transport.sin.sin_addr)); 107 sizeof(srx.transport.sin.sin_addr));
112 108
113 ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx)); 109 ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
114 if (ret < 0) { 110 if (ret < 0)
115 sock_release(socket); 111 goto error_2;
116 destroy_workqueue(afs_async_calls); 112
117 _leave(" = %d [bind]", ret); 113 ret = kernel_listen(socket, INT_MAX);
118 return ret; 114 if (ret < 0)
119 } 115 goto error_2;
120 116
121 rxrpc_kernel_intercept_rx_messages(socket, afs_rx_interceptor); 117 rxrpc_kernel_intercept_rx_messages(socket, afs_rx_interceptor);
122 118
123 afs_socket = socket; 119 afs_socket = socket;
124 _leave(" = 0"); 120 _leave(" = 0");
125 return 0; 121 return 0;
122
123error_2:
124 sock_release(socket);
125error_1:
126 destroy_workqueue(afs_async_calls);
127error_0:
128 _leave(" = %d", ret);
129 return ret;
126} 130}
127 131
128/* 132/*
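The rewrite above converts afs_open_socket() to the kernel's cascading-unwind idiom: one label per acquired resource, released in reverse acquisition order, so teardown is written exactly once. A self-contained userspace sketch of the same pattern (stub resources, not from the patch):

	#include <stdio.h>
	#include <stdlib.h>

	/* Stub resources so the sketch compiles on its own. */
	static void *grab_workqueue(void) { return malloc(1); }
	static void drop_workqueue(void *wq) { free(wq); }
	static int grab_socket(void **sock) { *sock = malloc(1); return *sock ? 0 : -1; }
	static void drop_socket(void *sock) { free(sock); }
	static int bind_socket(void *sock) { (void)sock; return 0; }

	static int example_open(void)
	{
		void *wq, *sock;
		int ret = -1;

		wq = grab_workqueue();
		if (!wq)
			goto error_0;
		ret = grab_socket(&sock);
		if (ret < 0)
			goto error_1;
		ret = bind_socket(sock);
		if (ret < 0)
			goto error_2;
		return 0;

	error_2:			/* undo in reverse order ... */
		drop_socket(sock);
	error_1:			/* ... each label falls through */
		drop_workqueue(wq);
	error_0:
		return ret;
	}

	int main(void)
	{
		printf("example_open() = %d\n", example_open());
		return 0;
	}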
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 288fac5294f5..4d4bb4955682 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -543,6 +543,11 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *);
543 543
544struct fwnode_handle; 544struct fwnode_handle;
545 545
546static inline bool acpi_dev_found(const char *hid)
547{
548 return false;
549}
550
546static inline bool is_acpi_node(struct fwnode_handle *fwnode) 551static inline bool is_acpi_node(struct fwnode_handle *fwnode)
547{ 552{
548 return false; 553 return false;
@@ -654,6 +659,14 @@ static inline bool acpi_driver_match_device(struct device *dev,
654 return false; 659 return false;
655} 660}
656 661
662static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle,
663 const u8 *uuid,
664 int rev, int func,
665 union acpi_object *argv4)
666{
667 return NULL;
668}
669
657static inline int acpi_device_uevent_modalias(struct device *dev, 670static inline int acpi_device_uevent_modalias(struct device *dev,
658 struct kobj_uevent_env *env) 671 struct kobj_uevent_env *env)
659{ 672{
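The two stubs added above extend the usual !CONFIG_ACPI pattern: when the subsystem is compiled out, the header supplies static inline no-ops with the same signatures, so callers need no #ifdef guards. A standalone sketch of the idiom with an invented option name:

	#include <stdbool.h>
	#include <stdio.h>

	/* Uncomment to emulate the Kconfig option (hypothetical name). */
	/* #define CONFIG_EXAMPLE_FEATURE 1 */

	#ifdef CONFIG_EXAMPLE_FEATURE
	bool example_dev_found(const char *hid);	/* real implementation elsewhere */
	#else
	static inline bool example_dev_found(const char *hid)
	{
		(void)hid;
		return false;	/* feature compiled out: report "not found" */
	}
	#endif

	int main(void)
	{
		printf("found: %d\n", example_dev_found("PNP0A08"));
		return 0;
	}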
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 0de4de6dd43e..8411032ac90d 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -11,14 +11,17 @@
11#include <linux/workqueue.h> 11#include <linux/workqueue.h>
12#include <linux/file.h> 12#include <linux/file.h>
13#include <linux/percpu.h> 13#include <linux/percpu.h>
14#include <linux/err.h>
14 15
16struct perf_event;
15struct bpf_map; 17struct bpf_map;
16 18
 17/* map is generic key/value storage optionally accessible by eBPF programs */ 19/* map is generic key/value storage optionally accessible by eBPF programs */
18struct bpf_map_ops { 20struct bpf_map_ops {
19 /* funcs callable from userspace (via syscall) */ 21 /* funcs callable from userspace (via syscall) */
20 struct bpf_map *(*map_alloc)(union bpf_attr *attr); 22 struct bpf_map *(*map_alloc)(union bpf_attr *attr);
21 void (*map_free)(struct bpf_map *); 23 void (*map_release)(struct bpf_map *map, struct file *map_file);
24 void (*map_free)(struct bpf_map *map);
22 int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); 25 int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
23 26
24 /* funcs callable from userspace and from eBPF programs */ 27 /* funcs callable from userspace and from eBPF programs */
@@ -27,8 +30,9 @@ struct bpf_map_ops {
27 int (*map_delete_elem)(struct bpf_map *map, void *key); 30 int (*map_delete_elem)(struct bpf_map *map, void *key);
28 31
29 /* funcs called by prog_array and perf_event_array map */ 32 /* funcs called by prog_array and perf_event_array map */
30 void *(*map_fd_get_ptr) (struct bpf_map *map, int fd); 33 void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
31 void (*map_fd_put_ptr) (void *ptr); 34 int fd);
35 void (*map_fd_put_ptr)(void *ptr);
32}; 36};
33 37
34struct bpf_map { 38struct bpf_map {
@@ -189,11 +193,19 @@ struct bpf_array {
189 void __percpu *pptrs[0] __aligned(8); 193 void __percpu *pptrs[0] __aligned(8);
190 }; 194 };
191}; 195};
196
192#define MAX_TAIL_CALL_CNT 32 197#define MAX_TAIL_CALL_CNT 32
193 198
199struct bpf_event_entry {
200 struct perf_event *event;
201 struct file *perf_file;
202 struct file *map_file;
203 struct rcu_head rcu;
204};
205
194u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); 206u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
195u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 207u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
196void bpf_fd_array_map_clear(struct bpf_map *map); 208
197bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); 209bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
198 210
199const struct bpf_func_proto *bpf_get_trace_printk_proto(void); 211const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@@ -231,8 +243,13 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
231 u64 flags); 243 u64 flags);
232int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, 244int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
233 u64 flags); 245 u64 flags);
246
234int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); 247int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
235 248
249int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
250 void *key, void *value, u64 map_flags);
251void bpf_fd_array_map_clear(struct bpf_map *map);
252
236/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and 253/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
237 * forced to use 'long' read/writes to try to atomically copy long counters. 254 * forced to use 'long' read/writes to try to atomically copy long counters.
238 * Best-effort only. No barriers here, since it _will_ race with concurrent 255 * Best-effort only. No barriers here, since it _will_ race with concurrent
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index 630f45335c73..57086e9fc64c 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -14,9 +14,12 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
14#if IS_ENABLED(CONFIG_IPV6) 14#if IS_ENABLED(CONFIG_IPV6)
15extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info); 15extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
16 16
17typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info); 17typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
18 const struct in6_addr *force_saddr);
18extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn); 19extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn);
19extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn); 20extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
21int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
22 unsigned int data_len);
20 23
21#else 24#else
22 25
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 5c91b0b055d4..c6dbcd84a2c7 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -283,6 +283,8 @@ struct tcp6_timewait_sock {
283}; 283};
284 284
285#if IS_ENABLED(CONFIG_IPV6) 285#if IS_ENABLED(CONFIG_IPV6)
286bool ipv6_mod_enabled(void);
287
286static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk) 288static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk)
287{ 289{
288 return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL; 290 return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL;
@@ -326,6 +328,11 @@ static inline int inet_v6_ipv6only(const struct sock *sk)
326#define ipv6_only_sock(sk) 0 328#define ipv6_only_sock(sk) 0
327#define ipv6_sk_rxinfo(sk) 0 329#define ipv6_sk_rxinfo(sk) 0
328 330
331static inline bool ipv6_mod_enabled(void)
332{
333 return false;
334}
335
329static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk) 336static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
330{ 337{
331 return NULL; 338 return NULL;
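ipv6_mod_enabled() lets always-built code ask whether the (possibly modular) IPv6 stack is actually loaded, and collapses to a constant false when CONFIG_IPV6 is off. A hedged sketch of a caller (function names hypothetical):

	/* Hypothetical caller: only attempt IPv6-specific setup when the
	 * IPv6 module is really present; otherwise stay IPv4-only.
	 */
	if (ipv6_mod_enabled())
		err = example_setup_ipv6_listener();
	else
		err = example_setup_ipv4_listener();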
diff --git a/include/linux/mdio-mux.h b/include/linux/mdio-mux.h
index a243dbba8659..61f5b21b31c7 100644
--- a/include/linux/mdio-mux.h
+++ b/include/linux/mdio-mux.h
@@ -10,11 +10,13 @@
10#ifndef __LINUX_MDIO_MUX_H 10#ifndef __LINUX_MDIO_MUX_H
11#define __LINUX_MDIO_MUX_H 11#define __LINUX_MDIO_MUX_H
12#include <linux/device.h> 12#include <linux/device.h>
13#include <linux/phy.h>
13 14
14int mdio_mux_init(struct device *dev, 15int mdio_mux_init(struct device *dev,
15 int (*switch_fn) (int cur, int desired, void *data), 16 int (*switch_fn) (int cur, int desired, void *data),
16 void **mux_handle, 17 void **mux_handle,
17 void *data); 18 void *data,
19 struct mii_bus *mux_bus);
18 20
19void mdio_mux_uninit(void *mux_handle); 21void mdio_mux_uninit(void *mux_handle);
20 22
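mdio_mux_init() now accepts the parent MDIO bus directly instead of always resolving it from the "mdio-parent-bus" device-tree phandle. A hedged sketch of a mux driver passing its bus through (the driver fields and example_select_child_bus() are hypothetical; error handling trimmed):

	/* Hypothetical switch function: steer the mux to child bus 'desired'. */
	static int example_mux_switch_fn(int cur, int desired, void *data)
	{
		struct example_mux *md = data;

		return example_select_child_bus(md, desired);
	}

	/* In probe(), with md->parent_bus already registered: */
	ret = mdio_mux_init(&pdev->dev, example_mux_switch_fn,
			    &md->mux_handle, md, md->parent_bus);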
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index d46a0e7f144d..e6f6910278f3 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -536,6 +536,7 @@ struct mlx4_caps {
536 int max_rq_desc_sz; 536 int max_rq_desc_sz;
537 int max_qp_init_rdma; 537 int max_qp_init_rdma;
538 int max_qp_dest_rdma; 538 int max_qp_dest_rdma;
539 int max_tc_eth;
539 u32 *qp0_qkey; 540 u32 *qp0_qkey;
540 u32 *qp0_proxy; 541 u32 *qp0_proxy;
541 u32 *qp1_proxy; 542 u32 *qp1_proxy;
@@ -1495,6 +1496,7 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
1495 1496
1496int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, 1497int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
1497 u16 offset, u16 size, u8 *data); 1498 u16 offset, u16 size, u8 *data);
1499int mlx4_max_tc(struct mlx4_dev *dev);
1498 1500
1499/* Returns true if running in low memory profile (kdump kernel) */ 1501/* Returns true if running in low memory profile (kdump kernel) */
1500static inline bool mlx4_low_memory_profile(void) 1502static inline bool mlx4_low_memory_profile(void)
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 73a48479892d..e0a3ed758287 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1330,6 +1330,7 @@ enum mlx5_cap_type {
1330 MLX5_CAP_ESWITCH, 1330 MLX5_CAP_ESWITCH,
1331 MLX5_CAP_RESERVED, 1331 MLX5_CAP_RESERVED,
1332 MLX5_CAP_VECTOR_CALC, 1332 MLX5_CAP_VECTOR_CALC,
1333 MLX5_CAP_QOS,
1333 /* NUM OF CAP Types */ 1334 /* NUM OF CAP Types */
1334 MLX5_CAP_NUM 1335 MLX5_CAP_NUM
1335}; 1336};
@@ -1414,6 +1415,9 @@ enum mlx5_cap_type {
1414 MLX5_GET(vector_calc_cap, \ 1415 MLX5_GET(vector_calc_cap, \
1415 mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap) 1416 mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap)
1416 1417
1418#define MLX5_CAP_QOS(mdev, cap)\
1419 MLX5_GET(qos_cap, mdev->hca_caps_cur[MLX5_CAP_QOS], cap)
1420
1417enum { 1421enum {
1418 MLX5_CMD_STAT_OK = 0x0, 1422 MLX5_CMD_STAT_OK = 0x0,
1419 MLX5_CMD_STAT_INT_ERR = 0x1, 1423 MLX5_CMD_STAT_INT_ERR = 0x1,
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 80776d0c52dc..46260fdc5305 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -481,6 +481,21 @@ struct mlx5_fc_stats {
481 481
482struct mlx5_eswitch; 482struct mlx5_eswitch;
483 483
484struct mlx5_rl_entry {
485 u32 rate;
486 u16 index;
487 u16 refcount;
488};
489
490struct mlx5_rl_table {
491 /* protect rate limit table */
492 struct mutex rl_lock;
493 u16 max_size;
494 u32 max_rate;
495 u32 min_rate;
496 struct mlx5_rl_entry *rl_entry;
497};
498
484struct mlx5_priv { 499struct mlx5_priv {
485 char name[MLX5_MAX_NAME_LEN]; 500 char name[MLX5_MAX_NAME_LEN];
486 struct mlx5_eq_table eq_table; 501 struct mlx5_eq_table eq_table;
@@ -544,6 +559,7 @@ struct mlx5_priv {
544 struct mlx5_flow_root_namespace *esw_ingress_root_ns; 559 struct mlx5_flow_root_namespace *esw_ingress_root_ns;
545 560
546 struct mlx5_fc_stats fc_stats; 561 struct mlx5_fc_stats fc_stats;
562 struct mlx5_rl_table rl_table;
547}; 563};
548 564
549enum mlx5_device_state { 565enum mlx5_device_state {
@@ -861,6 +877,12 @@ int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
861int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, 877int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
862 u8 port_num, void *out, size_t sz); 878 u8 port_num, void *out, size_t sz);
863 879
880int mlx5_init_rl_table(struct mlx5_core_dev *dev);
881void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
882int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
883void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
884bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
885
864static inline int fw_initializing(struct mlx5_core_dev *dev) 886static inline int fw_initializing(struct mlx5_core_dev *dev)
865{ 887{
866 return ioread32be(&dev->iseg->initializing) >> 31; 888 return ioread32be(&dev->iseg->initializing) >> 31;
@@ -938,6 +960,11 @@ static inline int mlx5_get_gid_table_len(u16 param)
938 return 8 * (1 << param); 960 return 8 * (1 << param);
939} 961}
940 962
963static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
964{
965 return !!(dev->priv.rl_table.max_size);
966}
967
941enum { 968enum {
942 MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, 969 MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
943}; 970};
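The new rate-limit (packet pacing) table is reference counted per rate: mlx5_rl_add_rate() reuses or programs an entry and returns its index, which a send queue then references via the packet_pacing_rate_limit_index field added below in mlx5_ifc.h. A hedged consumer sketch (hypothetical function; 'rate' is in the device's native units per packet_pacing_{min,max}_rate):

	/* Hypothetical consumer of the rate-limit table API. */
	static int example_set_tx_rate(struct mlx5_core_dev *mdev, u32 rate,
				       u16 *rl_index)
	{
		if (!mlx5_rl_is_supported(mdev))
			return -EOPNOTSUPP;
		if (!mlx5_rl_is_in_range(mdev, rate))
			return -ERANGE;

		/* Takes a reference on an existing entry or allocates a new
		 * one; drop it later with mlx5_rl_remove_rate(mdev, rate).
		 */
		return mlx5_rl_add_rate(mdev, rate, rl_index);
	}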
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index e955a2859009..152421cc6f44 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -123,6 +123,10 @@ enum {
123 MLX5_CMD_OP_DRAIN_DCT = 0x712, 123 MLX5_CMD_OP_DRAIN_DCT = 0x712,
124 MLX5_CMD_OP_QUERY_DCT = 0x713, 124 MLX5_CMD_OP_QUERY_DCT = 0x713,
125 MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714, 125 MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714,
126 MLX5_CMD_OP_CREATE_XRQ = 0x717,
127 MLX5_CMD_OP_DESTROY_XRQ = 0x718,
128 MLX5_CMD_OP_QUERY_XRQ = 0x719,
129 MLX5_CMD_OP_ARM_XRQ = 0x71a,
126 MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, 130 MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
127 MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, 131 MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
128 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752, 132 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752,
@@ -139,6 +143,8 @@ enum {
139 MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, 143 MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
140 MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, 144 MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
141 MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, 145 MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
146 MLX5_CMD_OP_SET_RATE_LIMIT = 0x780,
147 MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781,
142 MLX5_CMD_OP_ALLOC_PD = 0x800, 148 MLX5_CMD_OP_ALLOC_PD = 0x800,
143 MLX5_CMD_OP_DEALLOC_PD = 0x801, 149 MLX5_CMD_OP_DEALLOC_PD = 0x801,
144 MLX5_CMD_OP_ALLOC_UAR = 0x802, 150 MLX5_CMD_OP_ALLOC_UAR = 0x802,
@@ -362,7 +368,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
362}; 368};
363 369
364struct mlx5_ifc_fte_match_set_misc_bits { 370struct mlx5_ifc_fte_match_set_misc_bits {
365 u8 reserved_at_0[0x20]; 371 u8 reserved_at_0[0x8];
372 u8 source_sqn[0x18];
366 373
367 u8 reserved_at_20[0x10]; 374 u8 reserved_at_20[0x10];
368 u8 source_port[0x10]; 375 u8 source_port[0x10];
@@ -508,6 +515,17 @@ struct mlx5_ifc_e_switch_cap_bits {
508 u8 reserved_at_20[0x7e0]; 515 u8 reserved_at_20[0x7e0];
509}; 516};
510 517
518struct mlx5_ifc_qos_cap_bits {
519 u8 packet_pacing[0x1];
520 u8 reserved_at_1[0x1f];
521 u8 reserved_at_20[0x20];
522 u8 packet_pacing_max_rate[0x20];
523 u8 packet_pacing_min_rate[0x20];
524 u8 reserved_at_80[0x10];
525 u8 packet_pacing_rate_table_size[0x10];
526 u8 reserved_at_a0[0x760];
527};
528
511struct mlx5_ifc_per_protocol_networking_offload_caps_bits { 529struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
512 u8 csum_cap[0x1]; 530 u8 csum_cap[0x1];
513 u8 vlan_cap[0x1]; 531 u8 vlan_cap[0x1];
@@ -747,7 +765,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
747 765
748 u8 out_of_seq_cnt[0x1]; 766 u8 out_of_seq_cnt[0x1];
749 u8 vport_counters[0x1]; 767 u8 vport_counters[0x1];
750 u8 reserved_at_182[0x4]; 768 u8 retransmission_q_counters[0x1];
769 u8 reserved_at_183[0x3];
751 u8 max_qp_cnt[0xa]; 770 u8 max_qp_cnt[0xa];
752 u8 pkey_table_size[0x10]; 771 u8 pkey_table_size[0x10];
753 772
@@ -774,7 +793,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
774 u8 log_max_msg[0x5]; 793 u8 log_max_msg[0x5];
775 u8 reserved_at_1c8[0x4]; 794 u8 reserved_at_1c8[0x4];
776 u8 max_tc[0x4]; 795 u8 max_tc[0x4];
777 u8 reserved_at_1d0[0x6]; 796 u8 reserved_at_1d0[0x1];
797 u8 dcbx[0x1];
798 u8 reserved_at_1d2[0x4];
778 u8 rol_s[0x1]; 799 u8 rol_s[0x1];
779 u8 rol_g[0x1]; 800 u8 rol_g[0x1];
780 u8 reserved_at_1d8[0x1]; 801 u8 reserved_at_1d8[0x1];
@@ -806,7 +827,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
806 u8 tph[0x1]; 827 u8 tph[0x1];
807 u8 rf[0x1]; 828 u8 rf[0x1];
808 u8 dct[0x1]; 829 u8 dct[0x1];
809 u8 reserved_at_21b[0x1]; 830 u8 qos[0x1];
810 u8 eth_net_offloads[0x1]; 831 u8 eth_net_offloads[0x1];
811 u8 roce[0x1]; 832 u8 roce[0x1];
812 u8 atomic[0x1]; 833 u8 atomic[0x1];
@@ -932,7 +953,15 @@ struct mlx5_ifc_cmd_hca_cap_bits {
932 u8 cqe_compression_timeout[0x10]; 953 u8 cqe_compression_timeout[0x10];
933 u8 cqe_compression_max_num[0x10]; 954 u8 cqe_compression_max_num[0x10];
934 955
935 u8 reserved_at_5e0[0x220]; 956 u8 reserved_at_5e0[0x10];
957 u8 tag_matching[0x1];
958 u8 rndv_offload_rc[0x1];
959 u8 rndv_offload_dc[0x1];
960 u8 log_tag_matching_list_sz[0x5];
961 u8 reserved_at_5e8[0x3];
962 u8 log_max_xrq[0x5];
963
964 u8 reserved_at_5f0[0x200];
936}; 965};
937 966
938enum mlx5_flow_destination_type { 967enum mlx5_flow_destination_type {
@@ -1970,7 +1999,7 @@ struct mlx5_ifc_qpc_bits {
1970 1999
1971 u8 reserved_at_560[0x5]; 2000 u8 reserved_at_560[0x5];
1972 u8 rq_type[0x3]; 2001 u8 rq_type[0x3];
1973 u8 srqn_rmpn[0x18]; 2002 u8 srqn_rmpn_xrqn[0x18];
1974 2003
1975 u8 reserved_at_580[0x8]; 2004 u8 reserved_at_580[0x8];
1976 u8 rmsn[0x18]; 2005 u8 rmsn[0x18];
@@ -2021,6 +2050,7 @@ union mlx5_ifc_hca_cap_union_bits {
2021 struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; 2050 struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
2022 struct mlx5_ifc_e_switch_cap_bits e_switch_cap; 2051 struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
2023 struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap; 2052 struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
2053 struct mlx5_ifc_qos_cap_bits qos_cap;
2024 u8 reserved_at_0[0x8000]; 2054 u8 reserved_at_0[0x8000];
2025}; 2055};
2026 2056
@@ -2247,8 +2277,9 @@ struct mlx5_ifc_sqc_bits {
2247 u8 reserved_at_40[0x8]; 2277 u8 reserved_at_40[0x8];
2248 u8 cqn[0x18]; 2278 u8 cqn[0x18];
2249 2279
2250 u8 reserved_at_60[0xa0]; 2280 u8 reserved_at_60[0x90];
2251 2281
2282 u8 packet_pacing_rate_limit_index[0x10];
2252 u8 tis_lst_sz[0x10]; 2283 u8 tis_lst_sz[0x10];
2253 u8 reserved_at_110[0x10]; 2284 u8 reserved_at_110[0x10];
2254 2285
@@ -2596,7 +2627,7 @@ struct mlx5_ifc_dctc_bits {
2596 u8 reserved_at_98[0x8]; 2627 u8 reserved_at_98[0x8];
2597 2628
2598 u8 reserved_at_a0[0x8]; 2629 u8 reserved_at_a0[0x8];
2599 u8 srqn[0x18]; 2630 u8 srqn_xrqn[0x18];
2600 2631
2601 u8 reserved_at_c0[0x8]; 2632 u8 reserved_at_c0[0x8];
2602 u8 pd[0x18]; 2633 u8 pd[0x18];
@@ -2648,6 +2679,7 @@ enum {
2648enum { 2679enum {
2649 MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, 2680 MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
2650 MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, 2681 MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
2682 MLX5_CQ_PERIOD_NUM_MODES
2651}; 2683};
2652 2684
2653struct mlx5_ifc_cqc_bits { 2685struct mlx5_ifc_cqc_bits {
@@ -2725,6 +2757,54 @@ struct mlx5_ifc_query_adapter_param_block_bits {
2725 u8 vsd_contd_psid[16][0x8]; 2757 u8 vsd_contd_psid[16][0x8];
2726}; 2758};
2727 2759
2760enum {
2761 MLX5_XRQC_STATE_GOOD = 0x0,
2762 MLX5_XRQC_STATE_ERROR = 0x1,
2763};
2764
2765enum {
2766 MLX5_XRQC_TOPOLOGY_NO_SPECIAL_TOPOLOGY = 0x0,
2767 MLX5_XRQC_TOPOLOGY_TAG_MATCHING = 0x1,
2768};
2769
2770enum {
2771 MLX5_XRQC_OFFLOAD_RNDV = 0x1,
2772};
2773
2774struct mlx5_ifc_tag_matching_topology_context_bits {
2775 u8 log_matching_list_sz[0x4];
2776 u8 reserved_at_4[0xc];
2777 u8 append_next_index[0x10];
2778
2779 u8 sw_phase_cnt[0x10];
2780 u8 hw_phase_cnt[0x10];
2781
2782 u8 reserved_at_40[0x40];
2783};
2784
2785struct mlx5_ifc_xrqc_bits {
2786 u8 state[0x4];
2787 u8 rlkey[0x1];
2788 u8 reserved_at_5[0xf];
2789 u8 topology[0x4];
2790 u8 reserved_at_18[0x4];
2791 u8 offload[0x4];
2792
2793 u8 reserved_at_20[0x8];
2794 u8 user_index[0x18];
2795
2796 u8 reserved_at_40[0x8];
2797 u8 cqn[0x18];
2798
2799 u8 reserved_at_60[0xa0];
2800
2801 struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;
2802
2803 u8 reserved_at_180[0x180];
2804
2805 struct mlx5_ifc_wq_bits wq;
2806};
2807
2728union mlx5_ifc_modify_field_select_resize_field_select_auto_bits { 2808union mlx5_ifc_modify_field_select_resize_field_select_auto_bits {
2729 struct mlx5_ifc_modify_field_select_bits modify_field_select; 2809 struct mlx5_ifc_modify_field_select_bits modify_field_select;
2730 struct mlx5_ifc_resize_field_select_bits resize_field_select; 2810 struct mlx5_ifc_resize_field_select_bits resize_field_select;
@@ -3147,6 +3227,30 @@ struct mlx5_ifc_rst2init_qp_in_bits {
3147 u8 reserved_at_800[0x80]; 3227 u8 reserved_at_800[0x80];
3148}; 3228};
3149 3229
3230struct mlx5_ifc_query_xrq_out_bits {
3231 u8 status[0x8];
3232 u8 reserved_at_8[0x18];
3233
3234 u8 syndrome[0x20];
3235
3236 u8 reserved_at_40[0x40];
3237
3238 struct mlx5_ifc_xrqc_bits xrq_context;
3239};
3240
3241struct mlx5_ifc_query_xrq_in_bits {
3242 u8 opcode[0x10];
3243 u8 reserved_at_10[0x10];
3244
3245 u8 reserved_at_20[0x10];
3246 u8 op_mod[0x10];
3247
3248 u8 reserved_at_40[0x8];
3249 u8 xrqn[0x18];
3250
3251 u8 reserved_at_60[0x20];
3252};
3253
3150struct mlx5_ifc_query_xrc_srq_out_bits { 3254struct mlx5_ifc_query_xrc_srq_out_bits {
3151 u8 status[0x8]; 3255 u8 status[0x8];
3152 u8 reserved_at_8[0x18]; 3256 u8 reserved_at_8[0x18];
@@ -3550,7 +3654,27 @@ struct mlx5_ifc_query_q_counter_out_bits {
3550 3654
3551 u8 out_of_sequence[0x20]; 3655 u8 out_of_sequence[0x20];
3552 3656
3553 u8 reserved_at_1e0[0x620]; 3657 u8 reserved_at_1e0[0x20];
3658
3659 u8 duplicate_request[0x20];
3660
3661 u8 reserved_at_220[0x20];
3662
3663 u8 rnr_nak_retry_err[0x20];
3664
3665 u8 reserved_at_260[0x20];
3666
3667 u8 packet_seq_err[0x20];
3668
3669 u8 reserved_at_2a0[0x20];
3670
3671 u8 implied_nak_seq_err[0x20];
3672
3673 u8 reserved_at_2e0[0x20];
3674
3675 u8 local_ack_timeout_err[0x20];
3676
3677 u8 reserved_at_320[0x4e0];
3554}; 3678};
3555 3679
3556struct mlx5_ifc_query_q_counter_in_bits { 3680struct mlx5_ifc_query_q_counter_in_bits {
@@ -5004,6 +5128,28 @@ struct mlx5_ifc_detach_from_mcg_in_bits {
5004 u8 multicast_gid[16][0x8]; 5128 u8 multicast_gid[16][0x8];
5005}; 5129};
5006 5130
5131struct mlx5_ifc_destroy_xrq_out_bits {
5132 u8 status[0x8];
5133 u8 reserved_at_8[0x18];
5134
5135 u8 syndrome[0x20];
5136
5137 u8 reserved_at_40[0x40];
5138};
5139
5140struct mlx5_ifc_destroy_xrq_in_bits {
5141 u8 opcode[0x10];
5142 u8 reserved_at_10[0x10];
5143
5144 u8 reserved_at_20[0x10];
5145 u8 op_mod[0x10];
5146
5147 u8 reserved_at_40[0x8];
5148 u8 xrqn[0x18];
5149
5150 u8 reserved_at_60[0x20];
5151};
5152
5007struct mlx5_ifc_destroy_xrc_srq_out_bits { 5153struct mlx5_ifc_destroy_xrc_srq_out_bits {
5008 u8 status[0x8]; 5154 u8 status[0x8];
5009 u8 reserved_at_8[0x18]; 5155 u8 reserved_at_8[0x18];
@@ -5589,6 +5735,30 @@ struct mlx5_ifc_dealloc_flow_counter_in_bits {
5589 u8 reserved_at_60[0x20]; 5735 u8 reserved_at_60[0x20];
5590}; 5736};
5591 5737
5738struct mlx5_ifc_create_xrq_out_bits {
5739 u8 status[0x8];
5740 u8 reserved_at_8[0x18];
5741
5742 u8 syndrome[0x20];
5743
5744 u8 reserved_at_40[0x8];
5745 u8 xrqn[0x18];
5746
5747 u8 reserved_at_60[0x20];
5748};
5749
5750struct mlx5_ifc_create_xrq_in_bits {
5751 u8 opcode[0x10];
5752 u8 reserved_at_10[0x10];
5753
5754 u8 reserved_at_20[0x10];
5755 u8 op_mod[0x10];
5756
5757 u8 reserved_at_40[0x40];
5758
5759 struct mlx5_ifc_xrqc_bits xrq_context;
5760};
5761
5592struct mlx5_ifc_create_xrc_srq_out_bits { 5762struct mlx5_ifc_create_xrc_srq_out_bits {
5593 u8 status[0x8]; 5763 u8 status[0x8];
5594 u8 reserved_at_8[0x18]; 5764 u8 reserved_at_8[0x18];
@@ -6130,6 +6300,29 @@ struct mlx5_ifc_attach_to_mcg_in_bits {
6130 u8 multicast_gid[16][0x8]; 6300 u8 multicast_gid[16][0x8];
6131}; 6301};
6132 6302
6303struct mlx5_ifc_arm_xrq_out_bits {
6304 u8 status[0x8];
6305 u8 reserved_at_8[0x18];
6306
6307 u8 syndrome[0x20];
6308
6309 u8 reserved_at_40[0x40];
6310};
6311
6312struct mlx5_ifc_arm_xrq_in_bits {
6313 u8 opcode[0x10];
6314 u8 reserved_at_10[0x10];
6315
6316 u8 reserved_at_20[0x10];
6317 u8 op_mod[0x10];
6318
6319 u8 reserved_at_40[0x8];
6320 u8 xrqn[0x18];
6321
6322 u8 reserved_at_60[0x10];
6323 u8 lwm[0x10];
6324};
6325
6133struct mlx5_ifc_arm_xrc_srq_out_bits { 6326struct mlx5_ifc_arm_xrc_srq_out_bits {
6134 u8 status[0x8]; 6327 u8 status[0x8];
6135 u8 reserved_at_8[0x18]; 6328 u8 reserved_at_8[0x18];
@@ -6167,7 +6360,8 @@ struct mlx5_ifc_arm_rq_out_bits {
6167}; 6360};
6168 6361
6169enum { 6362enum {
6170 MLX5_ARM_RQ_IN_OP_MOD_SRQ_ = 0x1, 6363 MLX5_ARM_RQ_IN_OP_MOD_SRQ = 0x1,
6364 MLX5_ARM_RQ_IN_OP_MOD_XRQ = 0x2,
6171}; 6365};
6172 6366
6173struct mlx5_ifc_arm_rq_in_bits { 6367struct mlx5_ifc_arm_rq_in_bits {
@@ -6360,6 +6554,30 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
6360 u8 vxlan_udp_port[0x10]; 6554 u8 vxlan_udp_port[0x10];
6361}; 6555};
6362 6556
6557struct mlx5_ifc_set_rate_limit_out_bits {
6558 u8 status[0x8];
6559 u8 reserved_at_8[0x18];
6560
6561 u8 syndrome[0x20];
6562
6563 u8 reserved_at_40[0x40];
6564};
6565
6566struct mlx5_ifc_set_rate_limit_in_bits {
6567 u8 opcode[0x10];
6568 u8 reserved_at_10[0x10];
6569
6570 u8 reserved_at_20[0x10];
6571 u8 op_mod[0x10];
6572
6573 u8 reserved_at_40[0x10];
6574 u8 rate_limit_index[0x10];
6575
6576 u8 reserved_at_60[0x20];
6577
6578 u8 rate_limit[0x20];
6579};
6580
6363struct mlx5_ifc_access_register_out_bits { 6581struct mlx5_ifc_access_register_out_bits {
6364 u8 status[0x8]; 6582 u8 status[0x8];
6365 u8 reserved_at_8[0x18]; 6583 u8 reserved_at_8[0x18];
@@ -6484,12 +6702,15 @@ struct mlx5_ifc_pude_reg_bits {
6484}; 6702};
6485 6703
6486struct mlx5_ifc_ptys_reg_bits { 6704struct mlx5_ifc_ptys_reg_bits {
6487 u8 reserved_at_0[0x8]; 6705 u8 an_disable_cap[0x1];
6706 u8 an_disable_admin[0x1];
6707 u8 reserved_at_2[0x6];
6488 u8 local_port[0x8]; 6708 u8 local_port[0x8];
6489 u8 reserved_at_10[0xd]; 6709 u8 reserved_at_10[0xd];
6490 u8 proto_mask[0x3]; 6710 u8 proto_mask[0x3];
6491 6711
6492 u8 reserved_at_20[0x40]; 6712 u8 an_status[0x4];
6713 u8 reserved_at_24[0x3c];
6493 6714
6494 u8 eth_proto_capability[0x20]; 6715 u8 eth_proto_capability[0x20];
6495 6716
@@ -7450,4 +7671,34 @@ struct mlx5_ifc_mcia_reg_bits {
7450 u8 dword_11[0x20]; 7671 u8 dword_11[0x20];
7451}; 7672};
7452 7673
7674struct mlx5_ifc_dcbx_param_bits {
7675 u8 dcbx_cee_cap[0x1];
7676 u8 dcbx_ieee_cap[0x1];
7677 u8 dcbx_standby_cap[0x1];
7678 u8 reserved_at_3[0x5];
7679 u8 port_number[0x8];
7680 u8 reserved_at_10[0xa];
7681 u8 max_application_table_size[0x6];
7682 u8 reserved_at_20[0x15];
7683 u8 version_oper[0x3];
7684 u8 reserved_at_38[0x5];
7685 u8 version_admin[0x3];
7686 u8 willing_admin[0x1];
7687 u8 reserved_at_41[0x3];
7688 u8 pfc_cap_oper[0x4];
7689 u8 reserved_at_48[0x4];
7690 u8 pfc_cap_admin[0x4];
7691 u8 reserved_at_50[0x4];
7692 u8 num_of_tc_oper[0x4];
7693 u8 reserved_at_58[0x4];
7694 u8 num_of_tc_admin[0x4];
7695 u8 remote_willing[0x1];
7696 u8 reserved_at_61[0x3];
7697 u8 remote_pfc_cap[0x4];
7698 u8 reserved_at_68[0x14];
7699 u8 remote_num_of_tc[0x4];
7700 u8 reserved_at_80[0x18];
7701 u8 error[0x8];
7702 u8 reserved_at_a0[0x160];
7703};
7453#endif /* MLX5_IFC_H */ 7704#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index 9851862c0ec5..e3012cc64b8a 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -47,6 +47,14 @@ enum mlx5_module_id {
47 MLX5_MODULE_ID_QSFP28 = 0x11, 47 MLX5_MODULE_ID_QSFP28 = 0x11,
48}; 48};
49 49
50enum mlx5_an_status {
51 MLX5_AN_UNAVAILABLE = 0,
52 MLX5_AN_COMPLETE = 1,
53 MLX5_AN_FAILED = 2,
54 MLX5_AN_LINK_UP = 3,
55 MLX5_AN_LINK_DOWN = 4,
56};
57
50#define MLX5_EEPROM_MAX_BYTES 32 58#define MLX5_EEPROM_MAX_BYTES 32
51#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff 59#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff
52#define MLX5_I2C_ADDR_LOW 0x50 60#define MLX5_I2C_ADDR_LOW 0x50
@@ -65,13 +73,17 @@ int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
65int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, 73int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
66 u8 *proto_oper, int proto_mask, 74 u8 *proto_oper, int proto_mask,
67 u8 local_port); 75 u8 local_port);
68int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, 76int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
69 int proto_mask); 77 u32 proto_admin, int proto_mask);
78void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
70int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, 79int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
71 enum mlx5_port_status status); 80 enum mlx5_port_status status);
72int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, 81int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
73 enum mlx5_port_status *status); 82 enum mlx5_port_status *status);
74int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration); 83int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
84void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
85 u8 *an_status,
86 u8 *an_disable_cap, u8 *an_disable_admin);
75 87
76int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port); 88int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
77void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port); 89void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
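Taken together, the autoneg additions let ethtool-style code force a link speed: read the AN state, program PTYS with an_disable set plus the admin protocol mask, then bounce the link so the new setting takes effect. A hedged sketch (hypothetical caller; the MLX5_PTYS_EN proto-mask constant is assumed from this header):

	u8 an_status, an_disable_cap, an_disable_admin;
	int err;

	mlx5_query_port_autoneg(mdev, MLX5_PTYS_EN, &an_status,
				&an_disable_cap, &an_disable_admin);
	if (!an_disable_cap)
		return -EOPNOTSUPP;	/* device cannot disable autoneg */

	err = mlx5_set_port_ptys(mdev, true, proto_admin, MLX5_PTYS_EN);
	if (err)
		return err;

	mlx5_toggle_port_link(mdev);	/* apply by cycling the link */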
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index aa7b2400f98c..9c6c8ef2e9e7 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -53,8 +53,9 @@ enum {
53 * headers in software. 53 * headers in software.
54 */ 54 */
55 NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */ 55 NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
56 NETIF_F_GSO_SCTP_BIT, /* ... SCTP fragmentation */
56 /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ 57 /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
57 NETIF_F_GSO_TUNNEL_REMCSUM_BIT, 58 NETIF_F_GSO_SCTP_BIT,
58 59
59 NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ 60 NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
60 NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */ 61 NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
@@ -128,6 +129,7 @@ enum {
128#define NETIF_F_TSO_MANGLEID __NETIF_F(TSO_MANGLEID) 129#define NETIF_F_TSO_MANGLEID __NETIF_F(TSO_MANGLEID)
129#define NETIF_F_GSO_PARTIAL __NETIF_F(GSO_PARTIAL) 130#define NETIF_F_GSO_PARTIAL __NETIF_F(GSO_PARTIAL)
130#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) 131#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
132#define NETIF_F_GSO_SCTP __NETIF_F(GSO_SCTP)
131#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) 133#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
132#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) 134#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
133#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) 135#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
@@ -166,7 +168,8 @@ enum {
166 NETIF_F_FSO) 168 NETIF_F_FSO)
167 169
168/* List of features with software fallbacks. */ 170/* List of features with software fallbacks. */
169#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO) 171#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO | \
172 NETIF_F_GSO_SCTP)
170 173
171/* 174/*
172 * If one device supports one of these features, then enable them 175 * If one device supports one of these features, then enable them
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f45929ce8157..e84d9d23c2d5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -61,6 +61,8 @@ struct wireless_dev;
61/* 802.15.4 specific */ 61/* 802.15.4 specific */
62struct wpan_dev; 62struct wpan_dev;
63struct mpls_dev; 63struct mpls_dev;
64/* UDP Tunnel offloads */
65struct udp_tunnel_info;
64 66
65void netdev_set_default_ethtool_ops(struct net_device *dev, 67void netdev_set_default_ethtool_ops(struct net_device *dev,
66 const struct ethtool_ops *ops); 68 const struct ethtool_ops *ops);
@@ -90,7 +92,6 @@ void netdev_set_default_ethtool_ops(struct net_device *dev,
90#define NET_XMIT_SUCCESS 0x00 92#define NET_XMIT_SUCCESS 0x00
91#define NET_XMIT_DROP 0x01 /* skb dropped */ 93#define NET_XMIT_DROP 0x01 /* skb dropped */
92#define NET_XMIT_CN 0x02 /* congestion notification */ 94#define NET_XMIT_CN 0x02 /* congestion notification */
93#define NET_XMIT_POLICED 0x03 /* skb is shot by police */
94#define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */ 95#define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
95 96
96/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It 97/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
@@ -1025,31 +1026,18 @@ struct tc_to_netdev {
1025 * not implement this, it is assumed that the hw is not able to have 1026 * not implement this, it is assumed that the hw is not able to have
1026 * multiple net devices on single physical port. 1027 * multiple net devices on single physical port.
1027 * 1028 *
1028 * void (*ndo_add_vxlan_port)(struct net_device *dev, 1029 * void (*ndo_udp_tunnel_add)(struct net_device *dev,
1029 * sa_family_t sa_family, __be16 port); 1030 * struct udp_tunnel_info *ti);
1030 * Called by vxlan to notify a driver about the UDP port and socket 1031 * Called by UDP tunnel to notify a driver about the UDP port and socket
1031 * address family that vxlan is listening to. It is called only when 1032 * address family that a UDP tunnel is listnening to. It is called only
1032 * a new port starts listening. The operation is protected by the 1033 * when a new port starts listening. The operation is protected by the
1033 * vxlan_net->sock_lock. 1034 * RTNL.
1034 * 1035 *
1035 * void (*ndo_add_geneve_port)(struct net_device *dev, 1036 * void (*ndo_udp_tunnel_del)(struct net_device *dev,
1036 * sa_family_t sa_family, __be16 port); 1037 * struct udp_tunnel_info *ti);
1037 * Called by geneve to notify a driver about the UDP port and socket 1038 * Called by UDP tunnel to notify the driver about a UDP port and socket
1038 * address family that geneve is listnening to. It is called only when 1039 * address family that the UDP tunnel is not listening to anymore. The
1039 * a new port starts listening. The operation is protected by the 1040 * operation is protected by the RTNL.
1040 * geneve_net->sock_lock.
1041 *
1042 * void (*ndo_del_geneve_port)(struct net_device *dev,
1043 * sa_family_t sa_family, __be16 port);
1044 * Called by geneve to notify the driver about a UDP port and socket
1045 * address family that geneve is not listening to anymore. The operation
1046 * is protected by the geneve_net->sock_lock.
1047 *
1048 * void (*ndo_del_vxlan_port)(struct net_device *dev,
1049 * sa_family_t sa_family, __be16 port);
1050 * Called by vxlan to notify the driver about a UDP port and socket
1051 * address family that vxlan is not listening to anymore. The operation
1052 * is protected by the vxlan_net->sock_lock.
1053 * 1041 *
1054 * void* (*ndo_dfwd_add_station)(struct net_device *pdev, 1042 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1055 * struct net_device *dev) 1043 * struct net_device *dev)
@@ -1258,18 +1246,10 @@ struct net_device_ops {
1258 struct netdev_phys_item_id *ppid); 1246 struct netdev_phys_item_id *ppid);
1259 int (*ndo_get_phys_port_name)(struct net_device *dev, 1247 int (*ndo_get_phys_port_name)(struct net_device *dev,
1260 char *name, size_t len); 1248 char *name, size_t len);
1261 void (*ndo_add_vxlan_port)(struct net_device *dev, 1249 void (*ndo_udp_tunnel_add)(struct net_device *dev,
1262 sa_family_t sa_family, 1250 struct udp_tunnel_info *ti);
1263 __be16 port); 1251 void (*ndo_udp_tunnel_del)(struct net_device *dev,
1264 void (*ndo_del_vxlan_port)(struct net_device *dev, 1252 struct udp_tunnel_info *ti);
1265 sa_family_t sa_family,
1266 __be16 port);
1267 void (*ndo_add_geneve_port)(struct net_device *dev,
1268 sa_family_t sa_family,
1269 __be16 port);
1270 void (*ndo_del_geneve_port)(struct net_device *dev,
1271 sa_family_t sa_family,
1272 __be16 port);
1273 void* (*ndo_dfwd_add_station)(struct net_device *pdev, 1253 void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1274 struct net_device *dev); 1254 struct net_device *dev);
1275 void (*ndo_dfwd_del_station)(struct net_device *pdev, 1255 void (*ndo_dfwd_del_station)(struct net_device *pdev,
@@ -1457,6 +1437,8 @@ enum netdev_priv_flags {
1457 * @netdev_ops: Includes several pointers to callbacks, 1437 * @netdev_ops: Includes several pointers to callbacks,
1458 * if one wants to override the ndo_*() functions 1438 * if one wants to override the ndo_*() functions
1459 * @ethtool_ops: Management operations 1439 * @ethtool_ops: Management operations
1440 * @ndisc_ops: Includes callbacks for different IPv6 neighbour
1441 * discovery handling. Necessary for e.g. 6LoWPAN.
1460 * @header_ops: Includes callbacks for creating,parsing,caching,etc 1442 * @header_ops: Includes callbacks for creating,parsing,caching,etc
1461 * of Layer 2 headers. 1443 * of Layer 2 headers.
1462 * 1444 *
@@ -1484,8 +1466,7 @@ enum netdev_priv_flags {
1484 * @perm_addr: Permanent hw address 1466 * @perm_addr: Permanent hw address
1485 * @addr_assign_type: Hw address assignment type 1467 * @addr_assign_type: Hw address assignment type
1486 * @addr_len: Hardware address length 1468 * @addr_len: Hardware address length
1487 * @neigh_priv_len; Used in neigh_alloc(), 1469 * @neigh_priv_len: Used in neigh_alloc()
1488 * initialized only in atm/clip.c
1489 * @dev_id: Used to differentiate devices that share 1470 * @dev_id: Used to differentiate devices that share
1490 * the same link layer address 1471 * the same link layer address
1491 * @dev_port: Used to differentiate devices that share 1472 * @dev_port: Used to differentiate devices that share
@@ -1594,7 +1575,8 @@ enum netdev_priv_flags {
1594 * @phydev: Physical device may attach itself 1575 * @phydev: Physical device may attach itself
1595 * for hardware timestamping 1576 * for hardware timestamping
1596 * 1577 *
1597 * @qdisc_tx_busylock: XXX: need comments on this one 1578 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
1579 * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
1598 * 1580 *
1599 * @proto_down: protocol port state information can be sent to the 1581 * @proto_down: protocol port state information can be sent to the
1600 * switch driver and used to set the phys state of the 1582 * switch driver and used to set the phys state of the
@@ -1673,6 +1655,9 @@ struct net_device {
1673#ifdef CONFIG_NET_L3_MASTER_DEV 1655#ifdef CONFIG_NET_L3_MASTER_DEV
1674 const struct l3mdev_ops *l3mdev_ops; 1656 const struct l3mdev_ops *l3mdev_ops;
1675#endif 1657#endif
1658#if IS_ENABLED(CONFIG_IPV6)
1659 const struct ndisc_ops *ndisc_ops;
1660#endif
1676 1661
1677 const struct header_ops *header_ops; 1662 const struct header_ops *header_ops;
1678 1663
@@ -1862,6 +1847,7 @@ struct net_device {
1862#endif 1847#endif
1863 struct phy_device *phydev; 1848 struct phy_device *phydev;
1864 struct lock_class_key *qdisc_tx_busylock; 1849 struct lock_class_key *qdisc_tx_busylock;
1850 struct lock_class_key *qdisc_running_key;
1865 bool proto_down; 1851 bool proto_down;
1866}; 1852};
1867#define to_net_dev(d) container_of(d, struct net_device, dev) 1853#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -1944,6 +1930,23 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
1944 f(dev, &dev->_tx[i], arg); 1930 f(dev, &dev->_tx[i], arg);
1945} 1931}
1946 1932
1933#define netdev_lockdep_set_classes(dev) \
1934{ \
1935 static struct lock_class_key qdisc_tx_busylock_key; \
1936 static struct lock_class_key qdisc_running_key; \
1937 static struct lock_class_key qdisc_xmit_lock_key; \
1938 static struct lock_class_key dev_addr_list_lock_key; \
1939 unsigned int i; \
1940 \
1941 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
1942 (dev)->qdisc_running_key = &qdisc_running_key; \
1943 lockdep_set_class(&(dev)->addr_list_lock, \
1944 &dev_addr_list_lock_key); \
1945 for (i = 0; i < (dev)->num_tx_queues; i++) \
1946 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
1947 &qdisc_xmit_lock_key); \
1948}
1949
1947struct netdev_queue *netdev_pick_tx(struct net_device *dev, 1950struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1948 struct sk_buff *skb, 1951 struct sk_buff *skb,
1949 void *accel_priv); 1952 void *accel_priv);
@@ -2233,8 +2236,7 @@ struct netdev_lag_lower_state_info {
2233#define NETDEV_BONDING_INFO 0x0019 2236#define NETDEV_BONDING_INFO 0x0019
2234#define NETDEV_PRECHANGEUPPER 0x001A 2237#define NETDEV_PRECHANGEUPPER 0x001A
2235#define NETDEV_CHANGELOWERSTATE 0x001B 2238#define NETDEV_CHANGELOWERSTATE 0x001B
2236#define NETDEV_OFFLOAD_PUSH_VXLAN 0x001C 2239#define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C
2237#define NETDEV_OFFLOAD_PUSH_GENEVE 0x001D
2238 2240
2239int register_netdevice_notifier(struct notifier_block *nb); 2241int register_netdevice_notifier(struct notifier_block *nb);
2240int unregister_netdevice_notifier(struct notifier_block *nb); 2242int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2370,6 +2372,8 @@ void synchronize_net(void);
2370int init_dummy_netdev(struct net_device *dev); 2372int init_dummy_netdev(struct net_device *dev);
2371 2373
2372DECLARE_PER_CPU(int, xmit_recursion); 2374DECLARE_PER_CPU(int, xmit_recursion);
2375#define XMIT_RECURSION_LIMIT 10
2376
2373static inline int dev_recursion_level(void) 2377static inline int dev_recursion_level(void)
2374{ 2378{
2375 return this_cpu_read(xmit_recursion); 2379 return this_cpu_read(xmit_recursion);
@@ -4012,6 +4016,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
4012 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); 4016 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
4013 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); 4017 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
4014 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); 4018 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
4019 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
4015 4020
4016 return (features & feature) == feature; 4021 return (features & feature) == feature;
4017} 4022}
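netdev_lockdep_set_classes() packages the per-driver lockdep-class boilerplate that stacked devices (bonding, team, vlan, and the like) previously open-coded. A hedged sketch of a virtual driver using it (driver names hypothetical):

	/* Hypothetical stacked-device init: give this driver's instances
	 * their own lockdep classes so nesting below another device does
	 * not trigger false-positive lockdep reports.
	 */
	static int example_init(struct net_device *dev)
	{
		netdev_lockdep_set_classes(dev);
		return 0;
	}

	static const struct net_device_ops example_netdev_ops = {
		.ndo_init	= example_init,
		/* ... */
	};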
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 8f2237eb3485..4b04587d0441 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -25,6 +25,8 @@ struct phy_device *of_phy_attach(struct net_device *dev,
25 25
26extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); 26extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
27extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np); 27extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np);
28extern int of_phy_register_fixed_link(struct device_node *np);
29extern bool of_phy_is_fixed_link(struct device_node *np);
28 30
29#else /* CONFIG_OF */ 31#else /* CONFIG_OF */
30static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) 32static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
@@ -67,12 +69,6 @@ static inline int of_mdio_parse_addr(struct device *dev,
67{ 69{
68 return -ENOSYS; 70 return -ENOSYS;
69} 71}
70#endif /* CONFIG_OF */
71
72#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
73extern int of_phy_register_fixed_link(struct device_node *np);
74extern bool of_phy_is_fixed_link(struct device_node *np);
75#else
76static inline int of_phy_register_fixed_link(struct device_node *np) 72static inline int of_phy_register_fixed_link(struct device_node *np)
77{ 73{
78 return -ENOSYS; 74 return -ENOSYS;
diff --git a/include/linux/platform_data/b53.h b/include/linux/platform_data/b53.h
new file mode 100644
index 000000000000..69d279c0da96
--- /dev/null
+++ b/include/linux/platform_data/b53.h
@@ -0,0 +1,33 @@
1/*
2 * B53 platform data
3 *
4 * Copyright (C) 2013 Jonas Gorski <jogo@openwrt.org>
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#ifndef __B53_H
20#define __B53_H
21
22#include <linux/kernel.h>
23
24struct b53_platform_data {
25 u32 chip_id;
26 u16 enabled_ports;
27
28 /* only used by MMAP'd driver */
29 unsigned big_endian:1;
30 void __iomem *regs;
31};
32
33#endif
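The ptr_ring header that follows (shown in part; its allocation and cleanup helpers come after the excerpt) implements a fixed-size FIFO tuned for one producer CPU and one consumer CPU. A hedged usage sketch, assuming the ptr_ring_init()/ptr_ring_cleanup() helpers from the rest of the header:

	struct ptr_ring ring;
	struct sk_buff *skb;

	if (ptr_ring_init(&ring, 256, GFP_KERNEL))	/* assumed helper */
		return -ENOMEM;

	/* Producer CPU: a full ring reports -ENOSPC instead of blocking. */
	if (ptr_ring_produce(&ring, skb))
		kfree_skb(skb);

	/* Consumer CPU: NULL means empty; entries return in FIFO order. */
	while ((skb = ptr_ring_consume(&ring)) != NULL)
		consume_skb(skb);

	ptr_ring_cleanup(&ring, NULL);	/* assumed helper */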
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
new file mode 100644
index 000000000000..562a65e8bcc0
--- /dev/null
+++ b/include/linux/ptr_ring.h
@@ -0,0 +1,393 @@
1/*
 2 * Definitions for the 'struct ptr_ring' data structure.
3 *
4 * Author:
5 * Michael S. Tsirkin <mst@redhat.com>
6 *
7 * Copyright (C) 2016 Red Hat, Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * This is a limited-size FIFO maintaining pointers in FIFO order, with
15 * one CPU producing entries and another consuming entries from a FIFO.
16 *
17 * This implementation tries to minimize cache-contention when there is a
18 * single producer and a single consumer CPU.
19 */
20
21#ifndef _LINUX_PTR_RING_H
22#define _LINUX_PTR_RING_H 1
23
24#ifdef __KERNEL__
25#include <linux/spinlock.h>
26#include <linux/cache.h>
27#include <linux/types.h>
28#include <linux/compiler.h>
29#include <linux/cache.h>
30#include <linux/slab.h>
31#include <asm/errno.h>
32#endif
33
34struct ptr_ring {
35 int producer ____cacheline_aligned_in_smp;
36 spinlock_t producer_lock;
37 int consumer ____cacheline_aligned_in_smp;
38 spinlock_t consumer_lock;
39 /* Shared consumer/producer data */
40 /* Read-only by both the producer and the consumer */
41 int size ____cacheline_aligned_in_smp; /* max entries in queue */
42 void **queue;
43};
44
45/* Note: callers invoking this in a loop must use a compiler barrier,
46 * for example cpu_relax(). If ring is ever resized, callers must hold
47 * producer_lock - see e.g. ptr_ring_full. Otherwise, if callers don't hold
48 * producer_lock, the next call to __ptr_ring_produce may fail.
49 */
50static inline bool __ptr_ring_full(struct ptr_ring *r)
51{
52 return r->queue[r->producer];
53}
54
55static inline bool ptr_ring_full(struct ptr_ring *r)
56{
57 bool ret;
58
59 spin_lock(&r->producer_lock);
60 ret = __ptr_ring_full(r);
61 spin_unlock(&r->producer_lock);
62
63 return ret;
64}
65
66static inline bool ptr_ring_full_irq(struct ptr_ring *r)
67{
68 bool ret;
69
70 spin_lock_irq(&r->producer_lock);
71 ret = __ptr_ring_full(r);
72 spin_unlock_irq(&r->producer_lock);
73
74 return ret;
75}
76
77static inline bool ptr_ring_full_any(struct ptr_ring *r)
78{
79 unsigned long flags;
80 bool ret;
81
82 spin_lock_irqsave(&r->producer_lock, flags);
83 ret = __ptr_ring_full(r);
84 spin_unlock_irqrestore(&r->producer_lock, flags);
85
86 return ret;
87}
88
89static inline bool ptr_ring_full_bh(struct ptr_ring *r)
90{
91 bool ret;
92
93 spin_lock_bh(&r->producer_lock);
94 ret = __ptr_ring_full(r);
95 spin_unlock_bh(&r->producer_lock);
96
97 return ret;
98}
99
100/* Note: callers invoking this in a loop must use a compiler barrier,
101 * for example cpu_relax(). Callers must hold producer_lock.
102 */
103static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
104{
105 if (r->queue[r->producer])
106 return -ENOSPC;
107
108 r->queue[r->producer++] = ptr;
109 if (unlikely(r->producer >= r->size))
110 r->producer = 0;
111 return 0;
112}
113
114static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr)
115{
116 int ret;
117
118 spin_lock(&r->producer_lock);
119 ret = __ptr_ring_produce(r, ptr);
120 spin_unlock(&r->producer_lock);
121
122 return ret;
123}
124
125static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr)
126{
127 int ret;
128
129 spin_lock_irq(&r->producer_lock);
130 ret = __ptr_ring_produce(r, ptr);
131 spin_unlock_irq(&r->producer_lock);
132
133 return ret;
134}
135
136static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr)
137{
138 unsigned long flags;
139 int ret;
140
141 spin_lock_irqsave(&r->producer_lock, flags);
142 ret = __ptr_ring_produce(r, ptr);
143 spin_unlock_irqrestore(&r->producer_lock, flags);
144
145 return ret;
146}
147
148static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
149{
150 int ret;
151
152 spin_lock_bh(&r->producer_lock);
153 ret = __ptr_ring_produce(r, ptr);
154 spin_unlock_bh(&r->producer_lock);
155
156 return ret;
157}
158
159/* Note: callers invoking this in a loop must use a compiler barrier,
160 * for example cpu_relax(). Callers must take consumer_lock
161 * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
162 * If ring is never resized, and if the pointer is merely
163 * tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
164 */
165static inline void *__ptr_ring_peek(struct ptr_ring *r)
166{
167 return r->queue[r->consumer];
168}
169
170/* Note: callers invoking this in a loop must use a compiler barrier,
171 * for example cpu_relax(). Callers must take consumer_lock
172 * if the ring is ever resized - see e.g. ptr_ring_empty.
173 */
174static inline bool __ptr_ring_empty(struct ptr_ring *r)
175{
176 return !__ptr_ring_peek(r);
177}
178
179static inline bool ptr_ring_empty(struct ptr_ring *r)
180{
181 bool ret;
182
183 spin_lock(&r->consumer_lock);
184 ret = __ptr_ring_empty(r);
185 spin_unlock(&r->consumer_lock);
186
187 return ret;
188}
189
190static inline bool ptr_ring_empty_irq(struct ptr_ring *r)
191{
192 bool ret;
193
194 spin_lock_irq(&r->consumer_lock);
195 ret = __ptr_ring_empty(r);
196 spin_unlock_irq(&r->consumer_lock);
197
198 return ret;
199}
200
201static inline bool ptr_ring_empty_any(struct ptr_ring *r)
202{
203 unsigned long flags;
204 bool ret;
205
206 spin_lock_irqsave(&r->consumer_lock, flags);
207 ret = __ptr_ring_empty(r);
208 spin_unlock_irqrestore(&r->consumer_lock, flags);
209
210 return ret;
211}
212
213static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
214{
215 bool ret;
216
217 spin_lock_bh(&r->consumer_lock);
218 ret = __ptr_ring_empty(r);
219 spin_unlock_bh(&r->consumer_lock);
220
221 return ret;
222}
223
224/* Must only be called after __ptr_ring_peek returned !NULL */
225static inline void __ptr_ring_discard_one(struct ptr_ring *r)
226{
227 r->queue[r->consumer++] = NULL;
228 if (unlikely(r->consumer >= r->size))
229 r->consumer = 0;
230}
231
232static inline void *__ptr_ring_consume(struct ptr_ring *r)
233{
234 void *ptr;
235
236 ptr = __ptr_ring_peek(r);
237 if (ptr)
238 __ptr_ring_discard_one(r);
239
240 return ptr;
241}
242
243static inline void *ptr_ring_consume(struct ptr_ring *r)
244{
245 void *ptr;
246
247 spin_lock(&r->consumer_lock);
248 ptr = __ptr_ring_consume(r);
249 spin_unlock(&r->consumer_lock);
250
251 return ptr;
252}
253
254static inline void *ptr_ring_consume_irq(struct ptr_ring *r)
255{
256 void *ptr;
257
258 spin_lock_irq(&r->consumer_lock);
259 ptr = __ptr_ring_consume(r);
260 spin_unlock_irq(&r->consumer_lock);
261
262 return ptr;
263}
264
265static inline void *ptr_ring_consume_any(struct ptr_ring *r)
266{
267 unsigned long flags;
268 void *ptr;
269
270 spin_lock_irqsave(&r->consumer_lock, flags);
271 ptr = __ptr_ring_consume(r);
272 spin_unlock_irqrestore(&r->consumer_lock, flags);
273
274 return ptr;
275}
276
277static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
278{
279 void *ptr;
280
281 spin_lock_bh(&r->consumer_lock);
282 ptr = __ptr_ring_consume(r);
283 spin_unlock_bh(&r->consumer_lock);
284
285 return ptr;
286}
287
288/* Cast to structure type and call a function without discarding from FIFO.
289 * Function must return a value.
290 * Callers must take consumer_lock.
291 */
292#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r)))
293
294#define PTR_RING_PEEK_CALL(r, f) ({ \
295 typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
296 \
297 spin_lock(&(r)->consumer_lock); \
298 __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
299 spin_unlock(&(r)->consumer_lock); \
300 __PTR_RING_PEEK_CALL_v; \
301})
302
303#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \
304 typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
305 \
306 spin_lock_irq(&(r)->consumer_lock); \
307 __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
308 spin_unlock_irq(&(r)->consumer_lock); \
309 __PTR_RING_PEEK_CALL_v; \
310})
311
312#define PTR_RING_PEEK_CALL_BH(r, f) ({ \
313 typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
314 \
315 spin_lock_bh(&(r)->consumer_lock); \
316 __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
317 spin_unlock_bh(&(r)->consumer_lock); \
318 __PTR_RING_PEEK_CALL_v; \
319})
320
321#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \
322 typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
323 unsigned long __PTR_RING_PEEK_CALL_f;\
324 \
325 spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
326 __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
327 spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
328 __PTR_RING_PEEK_CALL_v; \
329})
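
/* Editor's note: a usage sketch for the PTR_RING_PEEK_CALL() family, not
 * part of the patch.  typeof((f)(NULL)) above is unevaluated and only
 * supplies the return type, but the actual call passes __ptr_ring_peek()'s
 * result, which is NULL on an empty ring -- so the callback must tolerate
 * a NULL argument.  example_peek_queued is a hypothetical helper:
 */
static inline int example_peek_queued(void *ptr)
{
	return ptr ? 1 : 0; /* report whether an entry is queued */
}
/* ... then: int queued = PTR_RING_PEEK_CALL(&ring, example_peek_queued); */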
330
331static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
332{
333 return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
334}
335
336static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
337{
338 r->queue = __ptr_ring_init_queue_alloc(size, gfp);
339 if (!r->queue)
340 return -ENOMEM;
341
342 r->size = size;
343 r->producer = r->consumer = 0;
344 spin_lock_init(&r->producer_lock);
345 spin_lock_init(&r->consumer_lock);
346
347 return 0;
348}
349
350static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
351 void (*destroy)(void *))
352{
353 unsigned long flags;
354 int producer = 0;
355 void **queue = __ptr_ring_init_queue_alloc(size, gfp);
356 void **old;
357 void *ptr;
358
359 if (!queue)
360 return -ENOMEM;
361
362 spin_lock_irqsave(&(r)->producer_lock, flags);
363
364 while ((ptr = ptr_ring_consume(r)))
365 if (producer < size)
366 queue[producer++] = ptr;
367 else if (destroy)
368 destroy(ptr);
369
370 r->size = size;
371 r->producer = producer;
372 r->consumer = 0;
373 old = r->queue;
374 r->queue = queue;
375
376 spin_unlock_irqrestore(&(r)->producer_lock, flags);
377
378 kfree(old);
379
380 return 0;
381}
382
383static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
384{
385 void *ptr;
386
387 if (destroy)
388 while ((ptr = ptr_ring_consume(r)))
389 destroy(ptr);
390 kfree(r->queue);
391}
392
393#endif /* _LINUX_PTR_RING_H */
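
A minimal end-to-end sketch of the ptr_ring API above -- an editor's
illustration, not part of the patch.  It assumes process context where
GFP_KERNEL may block; the function names are hypothetical.  Note the ring
stores non-NULL pointers only, since a NULL slot is what marks "empty":

	#include <linux/ptr_ring.h>
	#include <linux/slab.h>

	static void example_destroy(void *ptr)
	{
		kfree(ptr);
	}

	static int example_ring_demo(void)
	{
		struct ptr_ring ring;
		void *item, *out;
		int err;

		err = ptr_ring_init(&ring, 16, GFP_KERNEL); /* 16 zeroed slots */
		if (err)
			return err;

		item = kmalloc(32, GFP_KERNEL);
		err = -ENOMEM;
		if (!item)
			goto out;

		err = ptr_ring_produce(&ring, item); /* -ENOSPC if all slots busy */
		if (err)
			kfree(item);

		while ((out = ptr_ring_consume(&ring))) /* NULL once empty */
			kfree(out);
	out:
		ptr_ring_cleanup(&ring, example_destroy); /* frees leftovers + array */
		return err;
	}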
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 3f14c7efe68f..40c0ada01806 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -12,10 +12,21 @@
 #define CORE_SPQE_PAGE_SIZE_BYTES	4096
 
 #define X_FINAL_CLEANUP_AGG_INT	1
+#define NUM_OF_GLOBAL_QUEUES	128
+
+/* Queue Zone sizes in bytes */
+#define TSTORM_QZONE_SIZE	8
+#define MSTORM_QZONE_SIZE	0
+#define USTORM_QZONE_SIZE	8
+#define XSTORM_QZONE_SIZE	8
+#define YSTORM_QZONE_SIZE	0
+#define PSTORM_QZONE_SIZE	0
+
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF	16
 
 #define FW_MAJOR_VERSION	8
-#define FW_MINOR_VERSION	7
-#define FW_REVISION_VERSION	3
+#define FW_MINOR_VERSION	10
+#define FW_REVISION_VERSION	5
 #define FW_ENGINEERING_VERSION	0
 
 /***********************/
@@ -97,45 +108,86 @@
 #define DQ_XCM_AGG_VAL_SEL_REG6	7
 
 /* XCM agg val selection */
-#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \
-	DQ_XCM_AGG_VAL_SEL_WORD2
-#define DQ_XCM_ETH_TX_BD_CONS_CMD \
-	DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_CORE_TX_BD_CONS_CMD \
-	DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_ETH_TX_BD_PROD_CMD \
-	DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_CORE_TX_BD_PROD_CMD \
-	DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_CORE_SPQ_PROD_CMD \
-	DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_CORE_TX_BD_CONS_CMD	DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_CORE_TX_BD_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_SPQ_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD	DQ_XCM_AGG_VAL_SEL_WORD2
+#define DQ_XCM_ETH_TX_BD_CONS_CMD	DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ETH_TX_BD_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD	DQ_XCM_AGG_VAL_SEL_WORD5
+
+/* UCM agg val selection (HW) */
+#define DQ_UCM_AGG_VAL_SEL_WORD0	0
+#define DQ_UCM_AGG_VAL_SEL_WORD1	1
+#define DQ_UCM_AGG_VAL_SEL_WORD2	2
+#define DQ_UCM_AGG_VAL_SEL_WORD3	3
+#define DQ_UCM_AGG_VAL_SEL_REG0	4
+#define DQ_UCM_AGG_VAL_SEL_REG1	5
+#define DQ_UCM_AGG_VAL_SEL_REG2	6
+#define DQ_UCM_AGG_VAL_SEL_REG3	7
+
+/* UCM agg val selection (FW) */
+#define DQ_UCM_ETH_PMD_TX_CONS_CMD	DQ_UCM_AGG_VAL_SEL_WORD2
+#define DQ_UCM_ETH_PMD_RX_CONS_CMD	DQ_UCM_AGG_VAL_SEL_WORD3
+#define DQ_UCM_ROCE_CQ_CONS_CMD	DQ_UCM_AGG_VAL_SEL_REG0
+#define DQ_UCM_ROCE_CQ_PROD_CMD	DQ_UCM_AGG_VAL_SEL_REG2
+
+/* TCM agg val selection (HW) */
+#define DQ_TCM_AGG_VAL_SEL_WORD0	0
+#define DQ_TCM_AGG_VAL_SEL_WORD1	1
+#define DQ_TCM_AGG_VAL_SEL_WORD2	2
+#define DQ_TCM_AGG_VAL_SEL_WORD3	3
+#define DQ_TCM_AGG_VAL_SEL_REG1	4
+#define DQ_TCM_AGG_VAL_SEL_REG2	5
+#define DQ_TCM_AGG_VAL_SEL_REG6	6
+#define DQ_TCM_AGG_VAL_SEL_REG9	7
+
+/* TCM agg val selection (FW) */
+#define DQ_TCM_L2B_BD_PROD_CMD \
+	DQ_TCM_AGG_VAL_SEL_WORD1
+#define DQ_TCM_ROCE_RQ_PROD_CMD \
+	DQ_TCM_AGG_VAL_SEL_WORD0
 
 /* XCM agg counter flag selection */
 #define DQ_XCM_AGG_FLG_SHIFT_BIT14	0
 #define DQ_XCM_AGG_FLG_SHIFT_BIT15	1
 #define DQ_XCM_AGG_FLG_SHIFT_CF12	2
 #define DQ_XCM_AGG_FLG_SHIFT_CF13	3
 #define DQ_XCM_AGG_FLG_SHIFT_CF18	4
 #define DQ_XCM_AGG_FLG_SHIFT_CF19	5
 #define DQ_XCM_AGG_FLG_SHIFT_CF22	6
 #define DQ_XCM_AGG_FLG_SHIFT_CF23	7
 
 /* XCM agg counter flag selection */
-#define DQ_XCM_ETH_DQ_CF_CMD (1 << \
-	DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_CORE_DQ_CF_CMD (1 << \
-	DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_ETH_TERMINATE_CMD (1 << \
-	DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_CORE_TERMINATE_CMD (1 << \
-	DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << \
-	DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << \
-	DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_TPH_EN_CMD (1 << \
-	DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_CORE_DQ_CF_CMD		(1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_TERMINATE_CMD	(1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_SLOW_PATH_CMD	(1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_DQ_CF_CMD		(1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD	(1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD	(1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD		(1 << DQ_XCM_AGG_FLG_SHIFT_CF23)
+
+/* UCM agg counter flag selection (HW) */
+#define DQ_UCM_AGG_FLG_SHIFT_CF0	0
+#define DQ_UCM_AGG_FLG_SHIFT_CF1	1
+#define DQ_UCM_AGG_FLG_SHIFT_CF3	2
+#define DQ_UCM_AGG_FLG_SHIFT_CF4	3
+#define DQ_UCM_AGG_FLG_SHIFT_CF5	4
+#define DQ_UCM_AGG_FLG_SHIFT_CF6	5
+#define DQ_UCM_AGG_FLG_SHIFT_RULE0EN	6
+#define DQ_UCM_AGG_FLG_SHIFT_RULE1EN	7
+
+/* UCM agg counter flag selection (FW) */
+#define DQ_UCM_ETH_PMD_TX_ARM_CMD	(1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_ETH_PMD_RX_ARM_CMD	(1 << DQ_UCM_AGG_FLG_SHIFT_CF5)
+
+#define DQ_REGION_SHIFT	(12)
+
+/* DPM */
+#define DQ_DPM_WQE_BUFF_SIZE	(320)
+
+/* Conn type ranges */
+#define DQ_CONN_TYPE_RANGE_SHIFT	(4)
 
 /*****************/
 /* QM CONSTANTS */
@@ -282,8 +334,6 @@
 	(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
 	 PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
 
-#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN	12
-#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER	1024
 
 #define PXP_VF_BAR0_START_IGU	0
 #define PXP_VF_BAR0_IGU_LENGTH	0x3000
@@ -342,6 +392,9 @@
 
 #define PXP_VF_BAR0_GRC_WINDOW_LENGTH	32
 
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN	12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER	1024
+
 /* ILT Records */
 #define PXP_NUM_ILT_RECORDS_BB	7600
 #define PXP_NUM_ILT_RECORDS_K2	11000
@@ -379,6 +432,38 @@ struct async_data {
 	u8 fw_debug_param;
 };
 
+struct coalescing_timeset {
+	u8 value;
+#define COALESCING_TIMESET_TIMESET_MASK	0x7F
+#define COALESCING_TIMESET_TIMESET_SHIFT	0
+#define COALESCING_TIMESET_VALID_MASK	0x1
+#define COALESCING_TIMESET_VALID_SHIFT	7
+};
+
+struct common_prs_pf_msg_info {
+	__le32 value;
+#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_MASK	0x1
+#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_SHIFT	0
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_MASK	0x1
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_SHIFT	1
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_MASK	0x1
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_SHIFT	2
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_MASK	0x1
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_SHIFT	3
+#define COMMON_PRS_PF_MSG_INFO_RESERVED_MASK	0xFFFFFFF
+#define COMMON_PRS_PF_MSG_INFO_RESERVED_SHIFT	4
+};
+
+struct common_queue_zone {
+	__le16 ring_drv_data_consumer;
+	__le16 reserved;
+};
+
+struct eth_rx_prod_data {
+	__le16 bd_prod;
+	__le16 cqe_prod;
+};
+
 struct regpair {
 	__le32 lo;
 	__le32 hi;
@@ -388,11 +473,23 @@ struct vf_pf_channel_eqe_data {
 	struct regpair msg_addr;
 };
 
+struct malicious_vf_eqe_data {
+	u8 vf_id;
+	u8 err_id;
+	__le16 reserved[3];
+};
+
+struct initial_cleanup_eqe_data {
+	u8 vf_id;
+	u8 reserved[7];
+};
+
 /* Event Data Union */
 union event_ring_data {
 	u8 bytes[8];
 	struct vf_pf_channel_eqe_data vf_pf_channel;
-	struct async_data async_info;
+	struct malicious_vf_eqe_data malicious_vf;
+	struct initial_cleanup_eqe_data vf_init_cleanup;
 };
 
 /* Event Ring Entry */
@@ -420,9 +517,9 @@ enum mf_mode {
 
 /* Per-protocol connection types */
 enum protocol_type {
-	PROTOCOLID_RESERVED1,
+	PROTOCOLID_ISCSI,
 	PROTOCOLID_RESERVED2,
-	PROTOCOLID_RESERVED3,
+	PROTOCOLID_ROCE,
 	PROTOCOLID_CORE,
 	PROTOCOLID_ETH,
 	PROTOCOLID_RESERVED4,
@@ -433,6 +530,16 @@ enum protocol_type {
 	MAX_PROTOCOL_TYPE
 };
 
+struct ustorm_eth_queue_zone {
+	struct coalescing_timeset int_coalescing_timeset;
+	u8 reserved[3];
+};
+
+struct ustorm_queue_zone {
+	struct ustorm_eth_queue_zone eth;
+	struct common_queue_zone common;
+};
+
 /* status block structure */
 struct cau_pi_entry {
 	u32 prod;
@@ -588,7 +695,10 @@ struct parsing_and_err_flags {
 #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT	15
 };
 
-/* Concrete Function ID. */
+struct pb_context {
+	__le32 crc[4];
+};
+
 struct pxp_concrete_fid {
 	__le16 fid;
 #define PXP_CONCRETE_FID_PFID_MASK	0xF
@@ -655,6 +765,72 @@ struct pxp_ptt_entry {
 };
 
 /* RSS hash type */
+struct rdif_task_context {
+	__le32 initial_ref_tag;
+	__le16 app_tag_value;
+	__le16 app_tag_mask;
+	u8 flags0;
+#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK	0x1
+#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT	0
+#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK	0x1
+#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT	1
+#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK	0x1
+#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT	2
+#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK	0x1
+#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT	3
+#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK	0x3
+#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT	4
+#define RDIF_TASK_CONTEXT_CRC_SEED_MASK	0x1
+#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT	6
+#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK	0x1
+#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT	7
+	u8 partial_dif_data[7];
+	__le16 partial_crc_value;
+	__le16 partial_checksum_value;
+	__le32 offset_in_io;
+	__le16 flags1;
+#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK	0x1
+#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT	0
+#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK	0x1
+#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT	1
+#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK	0x1
+#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT	2
+#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK	0x1
+#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT	3
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK	0x1
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT	4
+#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK	0x1
+#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT	5
+#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK	0x7
+#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT	6
+#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK	0x3
+#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT	9
+#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK	0x1
+#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT	11
+#define RDIF_TASK_CONTEXT_RESERVED0_MASK	0x1
+#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT	12
+#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK	0x1
+#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT	13
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK	0x1
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT	14
+#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK	0x1
+#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT	15
+	__le16 state;
+#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK	0xF
+#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT	0
+#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK	0xF
+#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT	4
+#define RDIF_TASK_CONTEXT_ERRORINIO_MASK	0x1
+#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT	8
+#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK	0x1
+#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT	9
+#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK	0xF
+#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT	10
+#define RDIF_TASK_CONTEXT_RESERVED1_MASK	0x3
+#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT	14
+	__le32 reserved2;
+};
+
 enum rss_hash_type {
 	RSS_HASH_TYPE_DEFAULT = 0,
 	RSS_HASH_TYPE_IPV4 = 1,
@@ -683,19 +859,122 @@ struct status_block {
 #define STATUS_BLOCK_ZERO_PAD3_SHIFT	24
 };
 
-struct tunnel_parsing_flags {
-	u8 flags;
-#define TUNNEL_PARSING_FLAGS_TYPE_MASK	0x3
-#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT	0
-#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK	0x1
-#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT	2
-#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK	0x3
-#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT	3
-#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK	0x1
-#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT	5
-#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK	0x1
-#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT	6
-#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK	0x1
-#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT	7
+struct tdif_task_context {
+	__le32 initial_ref_tag;
+	__le16 app_tag_value;
+	__le16 app_tag_mask;
+	__le16 partial_crc_valueB;
+	__le16 partial_checksum_valueB;
+	__le16 stateB;
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK	0xF
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT	0
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK	0xF
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT	4
+#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK	0x1
+#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT	8
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK	0x1
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT	9
+#define TDIF_TASK_CONTEXT_RESERVED0_MASK	0x3F
+#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT	10
+	u8 reserved1;
+	u8 flags0;
+#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK	0x1
+#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT	0
+#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK	0x1
+#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT	1
+#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK	0x1
+#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT	2
+#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK	0x1
+#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT	3
+#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK	0x3
+#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT	4
+#define TDIF_TASK_CONTEXT_CRC_SEED_MASK	0x1
+#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT	6
+#define TDIF_TASK_CONTEXT_RESERVED2_MASK	0x1
+#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT	7
+	__le32 flags1;
+#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK	0x1
+#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT	0
+#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK	0x1
+#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT	1
+#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK	0x1
+#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT	2
+#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK	0x1
+#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT	3
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK	0x1
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT	4
+#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK	0x1
+#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT	5
+#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK	0x7
+#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT	6
+#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK	0x3
+#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT	9
+#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK	0x1
+#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT	11
+#define TDIF_TASK_CONTEXT_RESERVED3_MASK	0x1
+#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT	12
+#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK	0x1
+#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT	13
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK	0xF
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT	14
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK	0xF
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT	18
+#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK	0x1
+#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT	22
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK	0x1
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT	23
+#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK	0xF
+#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT	24
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK	0x1
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT	28
+#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK	0x1
+#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT	29
+#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK	0x1
+#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT	30
+#define TDIF_TASK_CONTEXT_RESERVED4_MASK	0x1
+#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT	31
+	__le32 offset_in_iob;
+	__le16 partial_crc_value_a;
+	__le16 partial_checksum_value_a;
+	__le32 offset_in_ioa;
+	u8 partial_dif_data_a[8];
+	u8 partial_dif_data_b[8];
+};
+
+struct timers_context {
+	__le32 logical_client0;
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK	0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT	0
+#define TIMERS_CONTEXT_VALIDLC0_MASK	0x1
+#define TIMERS_CONTEXT_VALIDLC0_SHIFT	28
+#define TIMERS_CONTEXT_ACTIVELC0_MASK	0x1
+#define TIMERS_CONTEXT_ACTIVELC0_SHIFT	29
+#define TIMERS_CONTEXT_RESERVED0_MASK	0x3
+#define TIMERS_CONTEXT_RESERVED0_SHIFT	30
+	__le32 logical_client1;
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK	0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT	0
+#define TIMERS_CONTEXT_VALIDLC1_MASK	0x1
+#define TIMERS_CONTEXT_VALIDLC1_SHIFT	28
+#define TIMERS_CONTEXT_ACTIVELC1_MASK	0x1
+#define TIMERS_CONTEXT_ACTIVELC1_SHIFT	29
+#define TIMERS_CONTEXT_RESERVED1_MASK	0x3
+#define TIMERS_CONTEXT_RESERVED1_SHIFT	30
+	__le32 logical_client2;
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK	0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT	0
+#define TIMERS_CONTEXT_VALIDLC2_MASK	0x1
+#define TIMERS_CONTEXT_VALIDLC2_SHIFT	28
+#define TIMERS_CONTEXT_ACTIVELC2_MASK	0x1
+#define TIMERS_CONTEXT_ACTIVELC2_SHIFT	29
+#define TIMERS_CONTEXT_RESERVED2_MASK	0x3
+#define TIMERS_CONTEXT_RESERVED2_SHIFT	30
+	__le32 host_expiration_fields;
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK	0xFFFFFFF
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT	0
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK	0x1
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT	28
+#define TIMERS_CONTEXT_RESERVED3_MASK	0x7
+#define TIMERS_CONTEXT_RESERVED3_SHIFT	29
 };
 #endif /* __COMMON_HSI__ */
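
Throughout common_hsi.h, subfields are packed into little-endian words and
described by the *_MASK/*_SHIFT #define pairs placed next to the field they
carve up.  A sketch of the access pattern -- the qed driver keeps helpers of
this shape, but the macro below is written out here purely for illustration:

	#define EX_GET_FIELD(value, name) \
		(((value) >> (name ## _SHIFT)) & (name ## _MASK))

	/* e.g. unpack a struct coalescing_timeset *ts as defined above */
	u8 timeset = EX_GET_FIELD(ts->value, COALESCING_TIMESET_TIMESET);
	u8 valid   = EX_GET_FIELD(ts->value, COALESCING_TIMESET_VALID);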
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 092cb0c1afcb..b5ebc697d05f 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -12,6 +12,8 @@
 /********************/
 /* ETH FW CONSTANTS */
 /********************/
+#define ETH_HSI_VER_MAJOR	3
+#define ETH_HSI_VER_MINOR	0
 #define ETH_CACHE_LINE_SIZE	64
 
 #define ETH_MAX_RAMROD_PER_CON	8
@@ -57,19 +59,6 @@
 #define ETH_TPA_CQE_CONT_LEN_LIST_SIZE	6
 #define ETH_TPA_CQE_END_LEN_LIST_SIZE	4
 
-/* Queue Zone sizes */
-#define TSTORM_QZONE_SIZE 0
-#define MSTORM_QZONE_SIZE sizeof(struct mstorm_eth_queue_zone)
-#define USTORM_QZONE_SIZE sizeof(struct ustorm_eth_queue_zone)
-#define XSTORM_QZONE_SIZE 0
-#define YSTORM_QZONE_SIZE sizeof(struct ystorm_eth_queue_zone)
-#define PSTORM_QZONE_SIZE 0
-
-/* Interrupt coalescing TimeSet */
-struct coalescing_timeset {
-	u8 timeset;
-	u8 valid;
-};
 
 struct eth_tx_1st_bd_flags {
 	u8 bitfields;
@@ -97,12 +86,12 @@ struct eth_tx_data_1st_bd {
 	u8 nbds;
 	struct eth_tx_1st_bd_flags bd_flags;
 	__le16 bitfields;
-#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_MASK	0x1
-#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT	0
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK	0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT	0
 #define ETH_TX_DATA_1ST_BD_RESERVED0_MASK	0x1
 #define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT	1
-#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_MASK	0x3FFF
-#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_SHIFT	2
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK	0x3FFF
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT	2
 };
 
@@ -136,28 +125,51 @@ struct eth_tx_data_2nd_bd {
 #define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT	13
 };
 
+struct eth_fast_path_cqe_fw_debug {
+	u8 reserved0;
+	u8 reserved1;
+	__le16 reserved2;
+};
+
+/* tunneling parsing flags */
+struct eth_tunnel_parsing_flags {
+	u8 flags;
+#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK	0x3
+#define ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT	0
+#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK	0x1
+#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT	2
+#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK	0x3
+#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT	3
+#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK	0x1
+#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT	5
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK	0x1
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT	6
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK	0x1
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT	7
+};
+
 /* Regular ETH Rx FP CQE. */
 struct eth_fast_path_rx_reg_cqe {
 	u8 type;
 	u8 bitfields;
 #define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK	0x7
 #define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT	0
 #define ETH_FAST_PATH_RX_REG_CQE_TC_MASK	0xF
 #define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT	3
 #define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK	0x1
 #define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT	7
 	__le16 pkt_len;
 	struct parsing_and_err_flags pars_flags;
 	__le16 vlan_tag;
 	__le32 rss_hash;
 	__le16 len_on_first_bd;
 	u8 placement_offset;
-	struct tunnel_parsing_flags tunnel_pars_flags;
+	struct eth_tunnel_parsing_flags tunnel_pars_flags;
 	u8 bd_num;
 	u8 reserved[7];
-	u32 fw_debug;
+	struct eth_fast_path_cqe_fw_debug fw_debug;
 	u8 reserved1[3];
 	u8 flags;
 #define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK	0x1
 #define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT	0
 #define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK	0x1
@@ -207,11 +219,11 @@ struct eth_fast_path_rx_tpa_start_cqe {
 	__le32 rss_hash;
 	__le16 len_on_first_bd;
 	u8 placement_offset;
-	struct tunnel_parsing_flags tunnel_pars_flags;
+	struct eth_tunnel_parsing_flags tunnel_pars_flags;
 	u8 tpa_agg_index;
 	u8 header_len;
 	__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
-	u32 fw_debug;
+	struct eth_fast_path_cqe_fw_debug fw_debug;
 };
 
 /* The L4 pseudo checksum mode for Ethernet */
@@ -264,12 +276,25 @@ enum eth_rx_cqe_type {
 	MAX_ETH_RX_CQE_TYPE
 };
 
-/* ETH Rx producers data */
-struct eth_rx_prod_data {
-	__le16 bd_prod;
-	__le16 cqe_prod;
-	__le16 reserved;
-	__le16 reserved1;
+enum eth_rx_tunn_type {
+	ETH_RX_NO_TUNN,
+	ETH_RX_TUNN_GENEVE,
+	ETH_RX_TUNN_GRE,
+	ETH_RX_TUNN_VXLAN,
+	MAX_ETH_RX_TUNN_TYPE
+};
+
+/* Aggregation end reason. */
+enum eth_tpa_end_reason {
+	ETH_AGG_END_UNUSED,
+	ETH_AGG_END_SP_UPDATE,
+	ETH_AGG_END_MAX_LEN,
+	ETH_AGG_END_LAST_SEG,
+	ETH_AGG_END_TIMEOUT,
+	ETH_AGG_END_NOT_CONSISTENT,
+	ETH_AGG_END_OUT_OF_ORDER,
+	ETH_AGG_END_NON_TPA_SEG,
+	MAX_ETH_TPA_END_REASON
 };
 
 /* The first tx bd of a given packet */
@@ -337,21 +362,18 @@ union eth_tx_bd_types {
 };
 
 /* Mstorm Queue Zone */
-struct mstorm_eth_queue_zone {
-	struct eth_rx_prod_data rx_producers;
-	__le32 reserved[2];
-};
-
-/* Ustorm Queue Zone */
-struct ustorm_eth_queue_zone {
-	struct coalescing_timeset int_coalescing_timeset;
-	__le16 reserved[3];
+enum eth_tx_tunn_type {
+	ETH_TX_TUNN_GENEVE,
+	ETH_TX_TUNN_TTAG,
+	ETH_TX_TUNN_GRE,
+	ETH_TX_TUNN_VXLAN,
+	MAX_ETH_TX_TUNN_TYPE
 };
 
 /* Ystorm Queue Zone */
-struct ystorm_eth_queue_zone {
+struct xstorm_eth_queue_zone {
 	struct coalescing_timeset int_coalescing_timeset;
-	__le16 reserved[3];
+	u8 reserved[7];
 };
 
 /* ETH doorbell data */
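
One behavioural note on the renames above: what used to be an opaque
FW_USE_ONLY region in the first Tx BD now carries the on-wire packet length
(PKT_LEN, 14 bits starting at bit 2 of 'bitfields').  A hedged sketch of how
a driver might fill it -- EX_SET_FIELD is written out for illustration only,
and 'skb'/'bd' are placeholders:

	#define EX_SET_FIELD(value, name, flag)					\
	do {									\
		(value) &= ~((name ## _MASK) << (name ## _SHIFT));		\
		(value) |= (((u16)(flag) & (name ## _MASK)) << (name ## _SHIFT)); \
	} while (0)

	u16 bf = 0;

	EX_SET_FIELD(bf, ETH_TX_DATA_1ST_BD_TUNN_FLAG, 0); /* no tunnel */
	EX_SET_FIELD(bf, ETH_TX_DATA_1ST_BD_PKT_LEN, skb->len);
	bd->data.bitfields = cpu_to_le16(bf);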
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
new file mode 100644
index 000000000000..b3c0feb15ae9
--- /dev/null
+++ b/include/linux/qed/iscsi_common.h
@@ -0,0 +1,1439 @@
1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8
9#ifndef __ISCSI_COMMON__
10#define __ISCSI_COMMON__
11/**********************/
12/* ISCSI FW CONSTANTS */
13/**********************/
14
15/* iSCSI HSI constants */
16#define ISCSI_DEFAULT_MTU (1500)
17
18/* Current iSCSI HSI version number composed of two fields (16 bit) */
19#define ISCSI_HSI_MAJOR_VERSION (0)
20#define ISCSI_HSI_MINOR_VERSION (0)
21
22/* KWQ (kernel work queue) layer codes */
23#define ISCSI_SLOW_PATH_LAYER_CODE (6)
24
25/* CQE completion status */
26#define ISCSI_EQE_COMPLETION_SUCCESS (0x0)
27#define ISCSI_EQE_RST_CONN_RCVD (0x1)
28
29/* iSCSI parameter defaults */
30#define ISCSI_DEFAULT_HEADER_DIGEST (0)
31#define ISCSI_DEFAULT_DATA_DIGEST (0)
32#define ISCSI_DEFAULT_INITIAL_R2T (1)
33#define ISCSI_DEFAULT_IMMEDIATE_DATA (1)
34#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000)
35#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000)
36#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000)
37#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1)
38
39/* iSCSI parameter limits */
40#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200)
41#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff)
42#define ISCSI_MIN_VAL_BURST_LENGTH (0x200)
43#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff)
44#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1)
45#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff)
46
47/* iSCSI reserved params */
48#define ISCSI_ITT_ALL_ONES (0xffffffff)
49#define ISCSI_TTT_ALL_ONES (0xffffffff)
50
51#define ISCSI_OPTION_1_OFF_CHIP_TCP 1
52#define ISCSI_OPTION_2_ON_CHIP_TCP 2
53
54#define ISCSI_INITIATOR_MODE 0
55#define ISCSI_TARGET_MODE 1
56
57/* iSCSI request op codes */
58#define ISCSI_OPCODE_NOP_OUT_NO_IMM (0)
59#define ISCSI_OPCODE_NOP_OUT ( \
60 ISCSI_OPCODE_NOP_OUT_NO_IMM | 0x40)
61#define ISCSI_OPCODE_SCSI_CMD_NO_IMM (1)
62#define ISCSI_OPCODE_SCSI_CMD ( \
63 ISCSI_OPCODE_SCSI_CMD_NO_IMM | 0x40)
64#define ISCSI_OPCODE_TMF_REQUEST_NO_IMM (2)
65#define ISCSI_OPCODE_TMF_REQUEST ( \
66 ISCSI_OPCODE_TMF_REQUEST_NO_IMM | 0x40)
67#define ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM (3)
68#define ISCSI_OPCODE_LOGIN_REQUEST ( \
69 ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM | 0x40)
70#define ISCSI_OPCODE_TEXT_REQUEST_NO_IMM (4)
71#define ISCSI_OPCODE_TEXT_REQUEST ( \
72 ISCSI_OPCODE_TEXT_REQUEST_NO_IMM | 0x40)
73#define ISCSI_OPCODE_DATA_OUT (5)
74#define ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM (6)
75#define ISCSI_OPCODE_LOGOUT_REQUEST ( \
76 ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM | 0x40)
77
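/* Editor's note (not part of the patch): the request opcodes above come in
 * _NO_IMM/plain pairs because bit 6 (0x40) of the iSCSI opcode byte is the
 * immediate-delivery flag from RFC 3720 -- compare the
 * ISCSI_COMMON_HDR_IMM_MASK/_SHIFT pair further down in this file.  So,
 * for example:
 *
 *	ISCSI_OPCODE_SCSI_CMD == (ISCSI_OPCODE_SCSI_CMD_NO_IMM | 0x40)
 *
 * i.e. the plain name is the wire opcode with the I bit set.
 */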
78/* iSCSI response/messages op codes */
79#define ISCSI_OPCODE_NOP_IN (0x20)
80#define ISCSI_OPCODE_SCSI_RESPONSE (0x21)
81#define ISCSI_OPCODE_TMF_RESPONSE (0x22)
82#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23)
83#define ISCSI_OPCODE_TEXT_RESPONSE (0x24)
84#define ISCSI_OPCODE_DATA_IN (0x25)
85#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26)
86#define ISCSI_OPCODE_R2T (0x31)
87#define ISCSI_OPCODE_ASYNC_MSG (0x32)
88#define ISCSI_OPCODE_REJECT (0x3f)
89
90/* iSCSI stages */
91#define ISCSI_STAGE_SECURITY_NEGOTIATION (0)
92#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1)
93#define ISCSI_STAGE_FULL_FEATURE_PHASE (3)
94
95/* iSCSI CQE errors */
96#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08)
97#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10)
98#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20)
99
100struct cqe_error_bitmap {
101 u8 cqe_error_status_bits;
102#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7
103#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0
104#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1
105#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3
106#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1
107#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
108#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1
109#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5
110#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1
111#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6
112#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1
113#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7
114};
115
116union cqe_error_status {
117 u8 error_status;
118 struct cqe_error_bitmap error_bits;
119};
120
121struct data_hdr {
122 __le32 data[12];
123};
124
125struct iscsi_async_msg_hdr {
126 __le16 reserved0;
127 u8 flags_attr;
128#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F
129#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0
130#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1
131#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7
132 u8 opcode;
133 __le32 hdr_second_dword;
134#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
135#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0
136#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF
137#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24
138 struct regpair lun;
139 __le32 all_ones;
140 __le32 reserved1;
141 __le32 stat_sn;
142 __le32 exp_cmd_sn;
143 __le32 max_cmd_sn;
144 __le16 param1_rsrv;
145 u8 async_vcode;
146 u8 async_event;
147 __le16 param3_rsrv;
148 __le16 param2_rsrv;
149 __le32 reserved7;
150};
151
152struct iscsi_sge {
153 struct regpair sge_addr;
154 __le16 sge_len;
155 __le16 reserved0;
156 __le32 reserved1;
157};
158
159struct iscsi_cached_sge_ctx {
160 struct iscsi_sge sge;
161 struct regpair reserved;
162 __le32 dsgl_curr_offset[2];
163};
164
165struct iscsi_cmd_hdr {
166 __le16 reserved1;
167 u8 flags_attr;
168#define ISCSI_CMD_HDR_ATTR_MASK 0x7
169#define ISCSI_CMD_HDR_ATTR_SHIFT 0
170#define ISCSI_CMD_HDR_RSRV_MASK 0x3
171#define ISCSI_CMD_HDR_RSRV_SHIFT 3
172#define ISCSI_CMD_HDR_WRITE_MASK 0x1
173#define ISCSI_CMD_HDR_WRITE_SHIFT 5
174#define ISCSI_CMD_HDR_READ_MASK 0x1
175#define ISCSI_CMD_HDR_READ_SHIFT 6
176#define ISCSI_CMD_HDR_FINAL_MASK 0x1
177#define ISCSI_CMD_HDR_FINAL_SHIFT 7
178 u8 opcode;
179 __le32 hdr_second_dword;
180#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
181#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0
182#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF
183#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24
184 struct regpair lun;
185 __le32 itt;
186 __le32 expected_transfer_length;
187 __le32 cmd_sn;
188 __le32 exp_stat_sn;
189 __le32 cdb[4];
190};
191
192struct iscsi_common_hdr {
193 u8 hdr_status;
194 u8 hdr_response;
195 u8 hdr_flags;
196 u8 hdr_first_byte;
197#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F
198#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0
199#define ISCSI_COMMON_HDR_IMM_MASK 0x1
200#define ISCSI_COMMON_HDR_IMM_SHIFT 6
201#define ISCSI_COMMON_HDR_RSRV_MASK 0x1
202#define ISCSI_COMMON_HDR_RSRV_SHIFT 7
203 __le32 hdr_second_dword;
204#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
205#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0
206#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF
207#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
208 __le32 lun_reserved[4];
209 __le32 data[6];
210};
211
212struct iscsi_conn_offload_params {
213 struct regpair sq_pbl_addr;
214 struct regpair r2tq_pbl_addr;
215 struct regpair xhq_pbl_addr;
216 struct regpair uhq_pbl_addr;
217 __le32 initial_ack;
218 __le16 physical_q0;
219 __le16 physical_q1;
220 u8 flags;
221#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
222#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
223#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
224#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
225#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x3F
226#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 2
227 u8 pbl_page_size_log;
228 u8 pbe_page_size_log;
229 u8 default_cq;
230 __le32 stat_sn;
231};
232
233struct iscsi_slow_path_hdr {
234 u8 op_code;
235 u8 flags;
236#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF
237#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0
238#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7
239#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
240#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1
241#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7
242};
243
244struct iscsi_conn_update_ramrod_params {
245 struct iscsi_slow_path_hdr hdr;
246 __le16 conn_id;
247 __le32 fw_cid;
248 u8 flags;
249#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
250#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
251#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
252#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
253#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1
254#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2
255#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1
256#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
257#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0xF
258#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 4
259 u8 reserved0[3];
260 __le32 max_seq_size;
261 __le32 max_send_pdu_length;
262 __le32 max_recv_pdu_length;
263 __le32 first_seq_length;
264 __le32 exp_stat_sn;
265};
266
267struct iscsi_ext_cdb_cmd_hdr {
268 __le16 reserved1;
269 u8 flags_attr;
270#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7
271#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0
272#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3
273#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3
274#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1
275#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5
276#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1
277#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6
278#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1
279#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7
280 u8 opcode;
281 __le32 hdr_second_dword;
282#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
283#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0
284#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF
285#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24
286 struct regpair lun;
287 __le32 itt;
288 __le32 expected_transfer_length;
289 __le32 cmd_sn;
290 __le32 exp_stat_sn;
291 struct iscsi_sge cdb_sge;
292};
293
294struct iscsi_login_req_hdr {
295 u8 version_min;
296 u8 version_max;
297 u8 flags_attr;
298#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3
299#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0
300#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3
301#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2
302#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3
303#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4
304#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1
305#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6
306#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1
307#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7
308 u8 opcode;
309 __le32 hdr_second_dword;
310#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
311#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0
312#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF
313#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
314 __le32 isid_TABC;
315 __le16 tsih;
316 __le16 isid_d;
317 __le32 itt;
318 __le16 reserved1;
319 __le16 cid;
320 __le32 cmd_sn;
321 __le32 exp_stat_sn;
322 __le32 reserved2[4];
323};
324
325struct iscsi_logout_req_hdr {
326 __le16 reserved0;
327 u8 reason_code;
328 u8 opcode;
329 __le32 reserved1;
330 __le32 reserved2[2];
331 __le32 itt;
332 __le16 reserved3;
333 __le16 cid;
334 __le32 cmd_sn;
335 __le32 exp_stat_sn;
336 __le32 reserved4[4];
337};
338
339struct iscsi_data_out_hdr {
340 __le16 reserved1;
341 u8 flags_attr;
342#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F
343#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0
344#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1
345#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7
346 u8 opcode;
347 __le32 reserved2;
348 struct regpair lun;
349 __le32 itt;
350 __le32 ttt;
351 __le32 reserved3;
352 __le32 exp_stat_sn;
353 __le32 reserved4;
354 __le32 data_sn;
355 __le32 buffer_offset;
356 __le32 reserved5;
357};
358
359struct iscsi_data_in_hdr {
360 u8 status_rsvd;
361 u8 reserved1;
362 u8 flags;
363#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1
364#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0
365#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1
366#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1
367#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1
368#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2
369#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7
370#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3
371#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1
372#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6
373#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1
374#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7
375 u8 opcode;
376 __le32 reserved2;
377 struct regpair lun;
378 __le32 itt;
379 __le32 ttt;
380 __le32 stat_sn;
381 __le32 exp_cmd_sn;
382 __le32 max_cmd_sn;
383 __le32 data_sn;
384 __le32 buffer_offset;
385 __le32 residual_count;
386};
387
388struct iscsi_r2t_hdr {
389 u8 reserved0[3];
390 u8 opcode;
391 __le32 reserved2;
392 struct regpair lun;
393 __le32 itt;
394 __le32 ttt;
395 __le32 stat_sn;
396 __le32 exp_cmd_sn;
397 __le32 max_cmd_sn;
398 __le32 r2t_sn;
399 __le32 buffer_offset;
400 __le32 desired_data_trns_len;
401};
402
403struct iscsi_nop_out_hdr {
404 __le16 reserved1;
405 u8 flags_attr;
406#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F
407#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0
408#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1
409#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7
410 u8 opcode;
411 __le32 reserved2;
412 struct regpair lun;
413 __le32 itt;
414 __le32 ttt;
415 __le32 cmd_sn;
416 __le32 exp_stat_sn;
417 __le32 reserved3;
418 __le32 reserved4;
419 __le32 reserved5;
420 __le32 reserved6;
421};
422
423struct iscsi_nop_in_hdr {
424 __le16 reserved0;
425 u8 flags_attr;
426#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F
427#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0
428#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1
429#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7
430 u8 opcode;
431 __le32 hdr_second_dword;
432#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
433#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0
434#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF
435#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24
436 struct regpair lun;
437 __le32 itt;
438 __le32 ttt;
439 __le32 stat_sn;
440 __le32 exp_cmd_sn;
441 __le32 max_cmd_sn;
442 __le32 reserved5;
443 __le32 reserved6;
444 __le32 reserved7;
445};
446
447struct iscsi_login_response_hdr {
448 u8 version_active;
449 u8 version_max;
450 u8 flags_attr;
451#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3
452#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0
453#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3
454#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2
455#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3
456#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4
457#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1
458#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6
459#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1
460#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7
461 u8 opcode;
462 __le32 hdr_second_dword;
463#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
464#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
465#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
466#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
467 __le32 isid_TABC;
468 __le16 tsih;
469 __le16 isid_d;
470 __le32 itt;
471 __le32 reserved1;
472 __le32 stat_sn;
473 __le32 exp_cmd_sn;
474 __le32 max_cmd_sn;
475 __le16 reserved2;
476 u8 status_detail;
477 u8 status_class;
478 __le32 reserved4[2];
479};
480
481struct iscsi_logout_response_hdr {
482 u8 reserved1;
483 u8 response;
484 u8 flags;
485 u8 opcode;
486 __le32 hdr_second_dword;
487#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
488#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
489#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
490#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
491 __le32 reserved2[2];
492 __le32 itt;
493 __le32 reserved3;
494 __le32 stat_sn;
495 __le32 exp_cmd_sn;
496 __le32 max_cmd_sn;
497 __le32 reserved4;
498 __le16 time2retain;
499 __le16 time2wait;
500 __le32 reserved5[1];
501};
502
503struct iscsi_text_request_hdr {
504 __le16 reserved0;
505 u8 flags_attr;
506#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F
507#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0
508#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1
509#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6
510#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1
511#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7
512 u8 opcode;
513 __le32 hdr_second_dword;
514#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
515#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
516#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
517#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
518 struct regpair lun;
519 __le32 itt;
520 __le32 ttt;
521 __le32 cmd_sn;
522 __le32 exp_stat_sn;
523 __le32 reserved4[4];
524};
525
526struct iscsi_text_response_hdr {
527 __le16 reserved1;
528 u8 flags;
529#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F
530#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0
531#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1
532#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6
533#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1
534#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7
535 u8 opcode;
536 __le32 hdr_second_dword;
537#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
538#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
539#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
540#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
541 struct regpair lun;
542 __le32 itt;
543 __le32 ttt;
544 __le32 stat_sn;
545 __le32 exp_cmd_sn;
546 __le32 max_cmd_sn;
547 __le32 reserved4[3];
548};
549
550struct iscsi_tmf_request_hdr {
551 __le16 reserved0;
552 u8 function;
553 u8 opcode;
554 __le32 hdr_second_dword;
555#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
556#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
557#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
558#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
559 struct regpair lun;
560 __le32 itt;
561 __le32 rtt;
562 __le32 cmd_sn;
563 __le32 exp_stat_sn;
564 __le32 ref_cmd_sn;
565 __le32 exp_data_sn;
566 __le32 reserved4[2];
567};
568
569struct iscsi_tmf_response_hdr {
570 u8 reserved2;
571 u8 hdr_response;
572 u8 hdr_flags;
573 u8 opcode;
574 __le32 hdr_second_dword;
575#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
576#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
577#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
578#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
579 struct regpair reserved0;
580 __le32 itt;
581 __le32 rtt;
582 __le32 stat_sn;
583 __le32 exp_cmd_sn;
584 __le32 max_cmd_sn;
585 __le32 reserved4[3];
586};
587
588struct iscsi_response_hdr {
589 u8 hdr_status;
590 u8 hdr_response;
591 u8 hdr_flags;
592 u8 opcode;
593 __le32 hdr_second_dword;
594#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
595#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
596#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
597#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
598 struct regpair lun;
599 __le32 itt;
600 __le32 snack_tag;
601 __le32 stat_sn;
602 __le32 exp_cmd_sn;
603 __le32 max_cmd_sn;
604 __le32 exp_data_sn;
605 __le32 bi_residual_count;
606 __le32 residual_count;
607};
608
609struct iscsi_reject_hdr {
610 u8 reserved4;
611 u8 hdr_reason;
612 u8 hdr_flags;
613 u8 opcode;
614 __le32 hdr_second_dword;
615#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
616#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0
617#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF
618#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
619 struct regpair reserved0;
620 __le32 reserved1;
621 __le32 reserved2;
622 __le32 stat_sn;
623 __le32 exp_cmd_sn;
624 __le32 max_cmd_sn;
625 __le32 data_sn;
626 __le32 reserved3[2];
627};
628
629union iscsi_task_hdr {
630 struct iscsi_common_hdr common;
631 struct data_hdr data;
632 struct iscsi_cmd_hdr cmd;
633 struct iscsi_ext_cdb_cmd_hdr ext_cdb_cmd;
634 struct iscsi_login_req_hdr login_req;
635 struct iscsi_logout_req_hdr logout_req;
636 struct iscsi_data_out_hdr data_out;
637 struct iscsi_data_in_hdr data_in;
638 struct iscsi_r2t_hdr r2t;
639 struct iscsi_nop_out_hdr nop_out;
640 struct iscsi_nop_in_hdr nop_in;
641 struct iscsi_login_response_hdr login_response;
642 struct iscsi_logout_response_hdr logout_response;
643 struct iscsi_text_request_hdr text_request;
644 struct iscsi_text_response_hdr text_response;
645 struct iscsi_tmf_request_hdr tmf_request;
646 struct iscsi_tmf_response_hdr tmf_response;
647 struct iscsi_response_hdr response;
648 struct iscsi_reject_hdr reject;
649 struct iscsi_async_msg_hdr async_msg;
650};
651
652struct iscsi_cqe_common {
653 __le16 conn_id;
654 u8 cqe_type;
655 union cqe_error_status error_bitmap;
656 __le32 reserved[3];
657 union iscsi_task_hdr iscsi_hdr;
658};
659
660struct iscsi_cqe_solicited {
661 __le16 conn_id;
662 u8 cqe_type;
663 union cqe_error_status error_bitmap;
664 __le16 itid;
665 u8 task_type;
666 u8 fw_dbg_field;
667 __le32 reserved1[2];
668 union iscsi_task_hdr iscsi_hdr;
669};
670
671struct iscsi_cqe_unsolicited {
672 __le16 conn_id;
673 u8 cqe_type;
674 union cqe_error_status error_bitmap;
675 __le16 reserved0;
676 u8 reserved1;
677 u8 unsol_cqe_type;
678 struct regpair rqe_opaque;
679 union iscsi_task_hdr iscsi_hdr;
680};
681
682union iscsi_cqe {
683 struct iscsi_cqe_common cqe_common;
684 struct iscsi_cqe_solicited cqe_solicited;
685 struct iscsi_cqe_unsolicited cqe_unsolicited;
686};
687
688enum iscsi_cqes_type {
689 ISCSI_CQE_TYPE_SOLICITED = 1,
690 ISCSI_CQE_TYPE_UNSOLICITED,
691 ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE,
693 ISCSI_CQE_TYPE_TASK_CLEANUP,
694 ISCSI_CQE_TYPE_DUMMY,
695 MAX_ISCSI_CQES_TYPE
696};
697
698enum iscsi_cqe_unsolicited_type {
699 ISCSI_CQE_UNSOLICITED_NONE,
700 ISCSI_CQE_UNSOLICITED_SINGLE,
701 ISCSI_CQE_UNSOLICITED_FIRST,
702 ISCSI_CQE_UNSOLICITED_MIDDLE,
703 ISCSI_CQE_UNSOLICITED_LAST,
704 MAX_ISCSI_CQE_UNSOLICITED_TYPE
705};
706
707struct iscsi_virt_sgl_ctx {
708 struct regpair sgl_base;
709 struct regpair dsgl_base;
710 __le32 sgl_initial_offset;
711 __le32 dsgl_initial_offset;
712 __le32 dsgl_curr_offset[2];
713};
714
715struct iscsi_sgl_var_params {
716 u8 sgl_ptr;
717 u8 dsgl_ptr;
718 __le16 sge_offset;
719 __le16 dsge_offset;
720};
721
722struct iscsi_phys_sgl_ctx {
723 struct regpair sgl_base;
724 struct regpair dsgl_base;
725 u8 sgl_size;
726 u8 dsgl_size;
727 __le16 reserved;
728 struct iscsi_sgl_var_params var_params[2];
729};
730
731union iscsi_data_desc_ctx {
732 struct iscsi_virt_sgl_ctx virt_sgl;
733 struct iscsi_phys_sgl_ctx phys_sgl;
734 struct iscsi_cached_sge_ctx cached_sge;
735};
736
737struct iscsi_debug_modes {
738 u8 flags;
739#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1
740#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0
741#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1
742#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1
743#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1
744#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2
745#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1
746#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3
747#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1
748#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
749#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
750#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
751#define ISCSI_DEBUG_MODES_RESERVED0_MASK 0x3
752#define ISCSI_DEBUG_MODES_RESERVED0_SHIFT 6
753};
754
755struct iscsi_dif_flags {
756 u8 flags;
757#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
758#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
759#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1
760#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4
761#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7
762#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5
763};
764
765enum iscsi_eqe_opcode {
766 ISCSI_EVENT_TYPE_INIT_FUNC = 0,
767 ISCSI_EVENT_TYPE_DESTROY_FUNC,
768 ISCSI_EVENT_TYPE_OFFLOAD_CONN,
769 ISCSI_EVENT_TYPE_UPDATE_CONN,
770 ISCSI_EVENT_TYPE_CLEAR_SQ,
771 ISCSI_EVENT_TYPE_TERMINATE_CONN,
772 ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
773 ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
774 RESERVED8,
775 RESERVED9,
776 ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
777 ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
778 ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD,
779 ISCSI_EVENT_TYPE_ASYN_SYN_RCVD,
780 ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME,
781 ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT,
782 ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT,
783 ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2,
784 ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR,
785 ISCSI_EVENT_TYPE_TCP_CONN_ERROR,
786 ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES,
787 MAX_ISCSI_EQE_OPCODE
788};
789
790enum iscsi_error_types {
791 ISCSI_STATUS_NONE = 0,
792 ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1,
793 ISCSI_CONN_ERROR_TASK_CID_MISMATCH,
794 ISCSI_CONN_ERROR_TASK_NOT_VALID,
795 ISCSI_CONN_ERROR_RQ_RING_IS_FULL,
796 ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL,
797 ISCSI_CONN_ERROR_HQE_CACHING_FAILED,
798 ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR,
799 ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR,
800 ISCSI_CONN_ERROR_DATA_OVERRUN,
801 ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
802 ISCSI_CONN_ERROR_TCP_SEG_PROC_URG_ERROR,
803 ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR,
804 ISCSI_CONN_ERROR_TCP_SEG_PROC_CONNECT_INVALID_WS_OPTION,
805 ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
806 ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN,
807 ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_TYPE,
808 ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE,
809 ISCSI_CONN_ERROR_PROTOCOL_ERR_TTT_OUT_OF_RANGE,
810 ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE,
811 ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE,
812 ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE,
813 ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL,
814 ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA,
815 ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN,
816 ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT,
817 ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_OUT_ITT,
818 ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT,
819 ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET,
820 ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO,
821 ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN,
822 ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0,
823 ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1,
824 ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2,
825 ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN,
826 ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO,
827 ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO_S_BIT_ONE,
828 ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN,
829 ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO,
830 ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL,
831 ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG,
832 ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT,
833 ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX,
834 ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
835 ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR,
836 ISCSI_ERROR_UNKNOWN,
837 MAX_ISCSI_ERROR_TYPES
838};
839
840struct iscsi_mflags {
841 u8 mflags;
842#define ISCSI_MFLAGS_SLOW_IO_MASK 0x1
843#define ISCSI_MFLAGS_SLOW_IO_SHIFT 0
844#define ISCSI_MFLAGS_SINGLE_SGE_MASK 0x1
845#define ISCSI_MFLAGS_SINGLE_SGE_SHIFT 1
846#define ISCSI_MFLAGS_RESERVED_MASK 0x3F
847#define ISCSI_MFLAGS_RESERVED_SHIFT 2
848};
849
850struct iscsi_sgl {
851 struct regpair sgl_addr;
852 __le16 updated_sge_size;
853 __le16 updated_sge_offset;
854 __le32 byte_offset;
855};
856
857union iscsi_mstorm_sgl {
858 struct iscsi_sgl sgl_struct;
859 struct iscsi_sge single_sge;
860};
861
862enum iscsi_ramrod_cmd_id {
863 ISCSI_RAMROD_CMD_ID_UNUSED = 0,
864 ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1,
865 ISCSI_RAMROD_CMD_ID_DESTROY_FUNC = 2,
866 ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN = 3,
867 ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4,
868 ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
869 ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
870 MAX_ISCSI_RAMROD_CMD_ID
871};
872
873struct iscsi_reg1 {
874 __le32 reg1_map;
875#define ISCSI_REG1_NUM_FAST_SGES_MASK 0x7
876#define ISCSI_REG1_NUM_FAST_SGES_SHIFT 0
877#define ISCSI_REG1_RESERVED1_MASK 0x1FFFFFFF
878#define ISCSI_REG1_RESERVED1_SHIFT 3
879};
880
881union iscsi_seq_num {
882 __le16 data_sn;
883 __le16 r2t_sn;
884};
885
886struct iscsi_spe_conn_offload {
887 struct iscsi_slow_path_hdr hdr;
888 __le16 conn_id;
889 __le32 fw_cid;
890 struct iscsi_conn_offload_params iscsi;
891 struct tcp_offload_params tcp;
892};
893
894struct iscsi_spe_conn_offload_option2 {
895 struct iscsi_slow_path_hdr hdr;
896 __le16 conn_id;
897 __le32 fw_cid;
898 struct iscsi_conn_offload_params iscsi;
899 struct tcp_offload_params_opt2 tcp;
900};
901
902struct iscsi_spe_conn_termination {
903 struct iscsi_slow_path_hdr hdr;
904 __le16 conn_id;
905 __le32 fw_cid;
906 u8 abortive;
907 u8 reserved0[7];
908 struct regpair queue_cnts_addr;
909 struct regpair query_params_addr;
910};
911
912struct iscsi_spe_func_dstry {
913 struct iscsi_slow_path_hdr hdr;
914 __le16 reserved0;
915 __le32 reserved1;
916};
917
918struct iscsi_spe_func_init {
919 struct iscsi_slow_path_hdr hdr;
920 __le16 half_way_close_timeout;
921 u8 num_sq_pages_in_ring;
922 u8 num_r2tq_pages_in_ring;
923 u8 num_uhq_pages_in_ring;
924 u8 ll2_rx_queue_id;
925 u8 ooo_enable;
926 struct iscsi_debug_modes debug_mode;
927 __le16 reserved1;
928 __le32 reserved2;
929 __le32 reserved3;
930 __le32 reserved4;
931 struct scsi_init_func_params func_params;
932 struct scsi_init_func_queues q_params;
933};
934
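
struct iscsi_spe_func_init is the payload of the ISCSI_RAMROD_CMD_ID_INIT_FUNC slow-path request defined earlier. A hedged sketch of how a caller might populate it before submission; kernel context is assumed for cpu_to_le16(), the numeric values are examples only, and the ramrod allocation/submission code is not shown:

/* Illustrative INIT_FUNC payload setup. */
static void iscsi_fill_init_func(struct iscsi_spe_func_init *p)
{
	memset(p, 0, sizeof(*p));

	p->half_way_close_timeout = cpu_to_le16(10);	/* example value */
	p->num_sq_pages_in_ring = 8;			/* example sizes */
	p->num_r2tq_pages_in_ring = 8;
	p->num_uhq_pages_in_ring = 4;
	p->ooo_enable = 1;
	p->debug_mode.flags = 0;	/* ISCSI_DEBUG_MODES_* all off */

	/* func_params and q_params (scsi_init_func_*) are filled by
	 * common SCSI init code, not shown here.
	 */
}
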
935struct ystorm_iscsi_task_state {
936 union iscsi_data_desc_ctx sgl_ctx_union;
937 __le32 buffer_offset[2];
938 __le16 bytes_nxt_dif;
939 __le16 rxmit_bytes_nxt_dif;
940 union iscsi_seq_num seq_num_union;
941 u8 dif_bytes_leftover;
942 u8 rxmit_dif_bytes_leftover;
943 __le16 reuse_count;
944 struct iscsi_dif_flags dif_flags;
945 u8 local_comp;
946 __le32 exp_r2t_sn;
947 __le32 sgl_offset[2];
948};
949
950struct ystorm_iscsi_task_st_ctx {
951 struct ystorm_iscsi_task_state state;
952 union iscsi_task_hdr pdu_hdr;
953};
954
955struct ystorm_iscsi_task_ag_ctx {
956 u8 reserved;
957 u8 byte1;
958 __le16 word0;
959 u8 flags0;
960#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
961#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
962#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
963#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
964#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
965#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
966#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
967#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
968#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
969#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
970 u8 flags1;
971#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
972#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
973#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
974#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
975#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
976#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
977#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
978#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
979#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
980#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
981 u8 flags2;
982#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
983#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
984#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
985#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
986#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
987#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
988#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
989#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
990#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
991#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
992#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
993#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
994#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
995#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
996#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
997#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
998 u8 byte2;
999 __le32 TTT;
1000 u8 byte3;
1001 u8 byte4;
1002 __le16 word1;
1003};
1004
1005struct mstorm_iscsi_task_ag_ctx {
1006 u8 cdu_validation;
1007 u8 byte1;
1008 __le16 task_cid;
1009 u8 flags0;
1010#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
1011#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
1012#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
1013#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
1014#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
1015#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
1016#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
1017#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
1018#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
1019#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
1020 u8 flags1;
1021#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
1022#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
1023#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
1024#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
1025#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
1026#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
1027#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
1028#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
1029#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
1030#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
1031 u8 flags2;
1032#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
1033#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
1034#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
1035#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
1036#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
1037#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
1038#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
1039#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
1040#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
1041#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
1042#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
1043#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
1044#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
1045#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
1046#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
1047#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
1048 u8 byte2;
1049 __le32 reg0;
1050 u8 byte3;
1051 u8 byte4;
1052 __le16 word1;
1053};
1054
1055struct ustorm_iscsi_task_ag_ctx {
1056 u8 reserved;
1057 u8 state;
1058 __le16 icid;
1059 u8 flags0;
1060#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
1061#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
1062#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
1063#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
1064#define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
1065#define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
1066#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
1067#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
1068 u8 flags1;
1069#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
1070#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
1071#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
1072#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
1073#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
1074#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
1075#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
1076#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
1077 u8 flags2;
1078#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
1079#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
1080#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
1081#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
1082#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
1083#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
1084#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
1085#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
1086#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
1087#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
1088#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
1089#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
1090#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
1091#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
1092#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
1093#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
1094 u8 flags3;
1095#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
1096#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
1097#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
1098#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
1099#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
1100#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
1101#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
1102#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
1103#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
1104#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
1105 __le32 dif_err_intervals;
1106 __le32 dif_error_1st_interval;
1107 __le32 rcv_cont_len;
1108 __le32 exp_cont_len;
1109 __le32 total_data_acked;
1110 __le32 exp_data_acked;
1111 u8 next_tid_valid;
1112 u8 byte3;
1113 __le16 word1;
1114 __le16 next_tid;
1115 __le16 word3;
1116 __le32 hdr_residual_count;
1117 __le32 exp_r2t_sn;
1118};
1119
1120struct mstorm_iscsi_task_st_ctx {
1121 union iscsi_mstorm_sgl sgl_union;
1122 struct iscsi_dif_flags dif_flags;
1123 struct iscsi_mflags flags;
1124 u8 sgl_size;
1125 u8 host_sge_index;
1126 __le16 dix_cur_sge_offset;
1127 __le16 dix_cur_sge_size;
1128 __le32 data_offset_rtid;
1129 u8 dif_offset;
1130 u8 dix_sgl_size;
1131 u8 dix_sge_index;
1132 u8 task_type;
1133 struct regpair sense_db;
1134 struct regpair dix_sgl_cur_sge;
1135 __le32 rem_task_size;
1136 __le16 reuse_count;
1137 __le16 dif_data_residue;
1138 u8 reserved0[4];
1139 __le32 reserved1[1];
1140};
1141
1142struct ustorm_iscsi_task_st_ctx {
1143 __le32 rem_rcv_len;
1144 __le32 exp_data_transfer_len;
1145 __le32 exp_data_sn;
1146 struct regpair lun;
1147 struct iscsi_reg1 reg1;
1148 u8 flags2;
1149#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1
1150#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0
1151#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F
1152#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1
1153 u8 reserved2;
1154 __le16 reserved3;
1155 __le32 reserved4;
1156 __le32 reserved5;
1157 __le32 reserved6;
1158 __le32 reserved7;
1159 u8 task_type;
1160 u8 error_flags;
1161#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1
1162#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0
1163#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1
1164#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
1165#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1
1166#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2
1167#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F
1168#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3
1169 u8 flags;
1170#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3
1171#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0
1172#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1
1173#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
1174#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
1175#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
1176#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_MASK 0x1
1177#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_SHIFT 4
1178#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_MASK 0x1
1179#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_SHIFT 5
1180#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
1181#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
1182#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1
1183#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7
1184 u8 cq_rss_number;
1185};
1186
1187struct iscsi_task_context {
1188 struct ystorm_iscsi_task_st_ctx ystorm_st_context;
1189 struct regpair ystorm_st_padding[2];
1190 struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
1191 struct regpair ystorm_ag_padding[2];
1192 struct tdif_task_context tdif_context;
1193 struct mstorm_iscsi_task_ag_ctx mstorm_ag_context;
1194 struct regpair mstorm_ag_padding[2];
1195 struct ustorm_iscsi_task_ag_ctx ustorm_ag_context;
1196 struct mstorm_iscsi_task_st_ctx mstorm_st_context;
1197 struct ustorm_iscsi_task_st_ctx ustorm_st_context;
1198 struct rdif_task_context rdif_context;
1199};
1200
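
struct iscsi_task_context is the firmware's fixed-layout view of one task; the explicit regpair padding exists so each per-storm sub-context lands at the offset the firmware expects. A throwaway sketch for auditing that layout against the HSI spec; it assumes the definitions above are in scope and simply prints offsets rather than asserting any particular expected values, which are not given here:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	printf("sizeof(task ctx)  = %zu\n",
	       sizeof(struct iscsi_task_context));
	printf("ystorm_ag_context @ %zu\n",
	       offsetof(struct iscsi_task_context, ystorm_ag_context));
	printf("ustorm_st_context @ %zu\n",
	       offsetof(struct iscsi_task_context, ustorm_st_context));
	printf("rdif_context      @ %zu\n",
	       offsetof(struct iscsi_task_context, rdif_context));
	return 0;
}
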
1201enum iscsi_task_type {
1202 ISCSI_TASK_TYPE_INITIATOR_WRITE,
1203 ISCSI_TASK_TYPE_INITIATOR_READ,
1204 ISCSI_TASK_TYPE_MIDPATH,
1205 ISCSI_TASK_TYPE_UNSOLIC,
1206 ISCSI_TASK_TYPE_EXCHCLEANUP,
1207 ISCSI_TASK_TYPE_IRRELEVANT,
1208 ISCSI_TASK_TYPE_TARGET_WRITE,
1209 ISCSI_TASK_TYPE_TARGET_READ,
1210 ISCSI_TASK_TYPE_TARGET_RESPONSE,
1211 ISCSI_TASK_TYPE_LOGIN_RESPONSE,
1212 MAX_ISCSI_TASK_TYPE
1213};
1214
1215union iscsi_ttt_txlen_union {
1216 __le32 desired_tx_len;
1217 __le32 ttt;
1218};
1219
1220struct iscsi_uhqe {
1221 __le32 reg1;
1222#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF
1223#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0
1224#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1
1225#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20
1226#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1
1227#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21
1228#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1
1229#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22
1230#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1
1231#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23
1232#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF
1233#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24
1234 __le32 reg2;
1235#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF
1236#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0
1237#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF
1238#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24
1239};
1240
1241struct iscsi_wqe_field {
1242 __le32 contlen_cdbsize_field;
1243#define ISCSI_WQE_FIELD_CONT_LEN_MASK 0xFFFFFF
1244#define ISCSI_WQE_FIELD_CONT_LEN_SHIFT 0
1245#define ISCSI_WQE_FIELD_CDB_SIZE_MASK 0xFF
1246#define ISCSI_WQE_FIELD_CDB_SIZE_SHIFT 24
1247};
1248
1249union iscsi_wqe_field_union {
1250 struct iscsi_wqe_field cont_field;
1251 __le32 prev_tid;
1252};
1253
1254struct iscsi_wqe {
1255 __le16 task_id;
1256 u8 flags;
1257#define ISCSI_WQE_WQE_TYPE_MASK 0x7
1258#define ISCSI_WQE_WQE_TYPE_SHIFT 0
1259#define ISCSI_WQE_NUM_FAST_SGES_MASK 0x7
1260#define ISCSI_WQE_NUM_FAST_SGES_SHIFT 3
1261#define ISCSI_WQE_PTU_INVALIDATE_MASK 0x1
1262#define ISCSI_WQE_PTU_INVALIDATE_SHIFT 6
1263#define ISCSI_WQE_RESPONSE_MASK 0x1
1264#define ISCSI_WQE_RESPONSE_SHIFT 7
1265 struct iscsi_dif_flags prot_flags;
1266 union iscsi_wqe_field_union cont_prevtid_union;
1267};
1268
1269enum iscsi_wqe_type {
1270 ISCSI_WQE_TYPE_NORMAL,
1271 ISCSI_WQE_TYPE_TASK_CLEANUP,
1272 ISCSI_WQE_TYPE_MIDDLE_PATH,
1273 ISCSI_WQE_TYPE_LOGIN,
1274 ISCSI_WQE_TYPE_FIRST_R2T_CONT,
1275 ISCSI_WQE_TYPE_NONFIRST_R2T_CONT,
1276 ISCSI_WQE_TYPE_RESPONSE,
1277 MAX_ISCSI_WQE_TYPE
1278};
1279
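
Tying the WQE pieces together: the type and fast-SGE count share the flags byte via the ISCSI_WQE_* mask/shift pairs, while a 24-bit continuation length and an 8-bit CDB size share one dword. A hedged sketch of building a normal command WQE, reusing the SET_FIELD helper from the earlier sketch (kernel context assumed; values are examples):

static void build_cmd_wqe(struct iscsi_wqe *wqe, u16 task_id, u32 cont_len)
{
	u32 dword = 0;

	memset(wqe, 0, sizeof(*wqe));
	wqe->task_id = cpu_to_le16(task_id);

	SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE, ISCSI_WQE_TYPE_NORMAL);
	SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, 0);

	/* Assemble in host order, then store little-endian as the
	 * __le32 contlen_cdbsize_field expects.
	 */
	SET_FIELD(dword, ISCSI_WQE_FIELD_CONT_LEN, cont_len);
	SET_FIELD(dword, ISCSI_WQE_FIELD_CDB_SIZE, 16);	/* 16-byte CDB */
	wqe->cont_prevtid_union.cont_field.contlen_cdbsize_field =
		cpu_to_le32(dword);
}
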
1280struct iscsi_xhqe {
1281 union iscsi_ttt_txlen_union ttt_or_txlen;
1282 __le32 exp_stat_sn;
1283 struct iscsi_dif_flags prot_flags;
1284 u8 total_ahs_length;
1285 u8 opcode;
1286 u8 flags;
1287#define ISCSI_XHQE_NUM_FAST_SGES_MASK 0x7
1288#define ISCSI_XHQE_NUM_FAST_SGES_SHIFT 0
1289#define ISCSI_XHQE_FINAL_MASK 0x1
1290#define ISCSI_XHQE_FINAL_SHIFT 3
1291#define ISCSI_XHQE_SUPER_IO_MASK 0x1
1292#define ISCSI_XHQE_SUPER_IO_SHIFT 4
1293#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
1294#define ISCSI_XHQE_STATUS_BIT_SHIFT 5
1295#define ISCSI_XHQE_RESERVED_MASK 0x3
1296#define ISCSI_XHQE_RESERVED_SHIFT 6
1297 union iscsi_seq_num seq_num_union;
1298 __le16 reserved1;
1299};
1300
1301struct mstorm_iscsi_stats_drv {
1302 struct regpair iscsi_rx_dropped_pdus_task_not_valid;
1303};
1304
1305struct ooo_opaque {
1306 __le32 cid;
1307 u8 drop_isle;
1308 u8 drop_size;
1309 u8 ooo_opcode;
1310 u8 ooo_isle;
1311};
1312
1313struct pstorm_iscsi_stats_drv {
1314 struct regpair iscsi_tx_bytes_cnt;
1315 struct regpair iscsi_tx_packet_cnt;
1316};
1317
1318struct tstorm_iscsi_stats_drv {
1319 struct regpair iscsi_rx_bytes_cnt;
1320 struct regpair iscsi_rx_packet_cnt;
1321 struct regpair iscsi_rx_new_ooo_isle_events_cnt;
1322 __le32 iscsi_cmdq_threshold_cnt;
1323 __le32 iscsi_rq_threshold_cnt;
1324 __le32 iscsi_immq_threshold_cnt;
1325};
1326
1327struct ustorm_iscsi_stats_drv {
1328 struct regpair iscsi_rx_data_pdu_cnt;
1329 struct regpair iscsi_rx_r2t_pdu_cnt;
1330 struct regpair iscsi_rx_total_pdu_cnt;
1331};
1332
1333struct xstorm_iscsi_stats_drv {
1334 struct regpair iscsi_tx_go_to_slow_start_event_cnt;
1335 struct regpair iscsi_tx_fast_retransmit_event_cnt;
1336};
1337
1338struct ystorm_iscsi_stats_drv {
1339 struct regpair iscsi_tx_data_pdu_cnt;
1340 struct regpair iscsi_tx_r2t_pdu_cnt;
1341 struct regpair iscsi_tx_total_pdu_cnt;
1342};
1343
1344struct iscsi_db_data {
1345 u8 params;
1346#define ISCSI_DB_DATA_DEST_MASK 0x3
1347#define ISCSI_DB_DATA_DEST_SHIFT 0
1348#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3
1349#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2
1350#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1
1351#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4
1352#define ISCSI_DB_DATA_RESERVED_MASK 0x1
1353#define ISCSI_DB_DATA_RESERVED_SHIFT 5
1354#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3
1355#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6
1356 u8 agg_flags;
1357 __le16 sq_prod;
1358};
1359
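
struct iscsi_db_data is the value the driver writes to its doorbell address to publish a new SQ producer. A hedged sketch of preparing one, again with the SET_FIELD helper from the first sketch; the destination and aggregation-command numbers are placeholders for qed doorbell enums defined in other headers:

static void iscsi_prep_sq_doorbell(struct iscsi_db_data *db, u16 sq_prod)
{
	db->params = 0;
	SET_FIELD(db->params, ISCSI_DB_DATA_DEST, 1);	 /* example dest */
	SET_FIELD(db->params, ISCSI_DB_DATA_AGG_CMD, 2); /* example cmd */
	SET_FIELD(db->params, ISCSI_DB_DATA_BYPASS_EN, 0);

	db->agg_flags = 0;
	db->sq_prod = cpu_to_le16(sq_prod);
}
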
1360struct tstorm_iscsi_task_ag_ctx {
1361 u8 byte0;
1362 u8 byte1;
1363 __le16 word0;
1364 u8 flags0;
1365#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
1366#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
1367#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
1368#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
1369#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
1370#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
1371#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
1372#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
1373#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
1374#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
1375 u8 flags1;
1376#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
1377#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
1378#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
1379#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
1380#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
1381#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
1382#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
1383#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
1384#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
1385#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
1386 u8 flags2;
1387#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
1388#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
1389#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
1390#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
1391#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
1392#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
1393#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
1394#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
1395 u8 flags3;
1396#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
1397#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
1398#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
1399#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
1400#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
1401#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
1402#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
1403#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
1404#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
1405#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
1406#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
1407#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
1408#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
1409#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
1410 u8 flags4;
1411#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
1412#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
1413#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
1414#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
1415#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
1416#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
1417#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
1418#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
1419#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
1420#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
1421#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
1422#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
1423#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
1424#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
1425#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
1426#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
1427 u8 byte2;
1428 __le16 word1;
1429 __le32 reg0;
1430 u8 byte3;
1431 u8 byte4;
1432 __le16 word2;
1433 __le16 word3;
1434 __le16 word4;
1435 __le32 reg1;
1436 __le32 reg2;
1437};
1438
1439#endif /* __ISCSI_COMMON__ */
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 5f8fcaaa6504..7e441bdeabdc 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -25,10 +25,9 @@
25 } while (0) 25 } while (0)
26 26
27#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) 27#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
28#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t)
29#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64) 28#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
30#define HILO_DMA_REGPAIR(regpair) (HILO_DMA(regpair.hi, regpair.lo))
31#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo)) 29#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
30#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
32 31
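
The macro reshuffle above is the substance of this hunk: the old HILO_DMA shifted the high word inside dma_addr_t, which loses (or makes undefined) the upper 32 bits whenever dma_addr_t is a 32-bit type; the replacement always combines in u64 and casts only the final result. A standalone sketch of the same idea, with the endianness conversion omitted:

#include <stdio.h>
#include <stdint.h>

struct regpair { uint32_t lo; uint32_t hi; };

/* Combine in a fixed 64-bit type first, then narrow once. */
#define HILO_64_REGPAIR(rp)  ((((uint64_t)(rp).hi) << 32) + (rp).lo)
#define HILO_DMA_REGPAIR(rp) ((uint64_t /* dma_addr_t */)HILO_64_REGPAIR(rp))

int main(void)
{
	struct regpair rp = { .lo = 0xdeadbeef, .hi = 0x1 };

	printf("addr = 0x%llx\n",
	       (unsigned long long)HILO_DMA_REGPAIR(rp)); /* 0x1deadbeef */
	return 0;
}
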
33enum qed_chain_mode { 32enum qed_chain_mode {
34 /* Each Page contains a next pointer at its end */ 33 /* Each Page contains a next pointer at its end */
@@ -47,16 +46,56 @@ enum qed_chain_use_mode {
47 QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */ 46 QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */
48}; 47};
49 48
49enum qed_chain_cnt_type {
50 /* The chain's size/prod/cons are kept in 16-bit variables */
51 QED_CHAIN_CNT_TYPE_U16,
52
53 /* The chain's size/prod/cons are kept in 32-bit variables */
54 QED_CHAIN_CNT_TYPE_U32,
55};
56
50struct qed_chain_next { 57struct qed_chain_next {
51 struct regpair next_phys; 58 struct regpair next_phys;
52 void *next_virt; 59 void *next_virt;
53}; 60};
54 61
62struct qed_chain_pbl_u16 {
63 u16 prod_page_idx;
64 u16 cons_page_idx;
65};
66
67struct qed_chain_pbl_u32 {
68 u32 prod_page_idx;
69 u32 cons_page_idx;
70};
71
55struct qed_chain_pbl { 72struct qed_chain_pbl {
73 /* Base address of a pre-allocated buffer for pbl */
56 dma_addr_t p_phys_table; 74 dma_addr_t p_phys_table;
57 void *p_virt_table; 75 void *p_virt_table;
58 u16 prod_page_idx; 76
59 u16 cons_page_idx; 77 /* Table for keeping the virtual addresses of the chain pages,
 78 * corresponding to the physical addresses in the pbl table.
79 */
80 void **pp_virt_addr_tbl;
81
82 /* Index to current used page by producer/consumer */
83 union {
84 struct qed_chain_pbl_u16 pbl16;
85 struct qed_chain_pbl_u32 pbl32;
86 } u;
87};
88
89struct qed_chain_u16 {
 90 /* Cyclic index of next element to produce/consume */
91 u16 prod_idx;
92 u16 cons_idx;
93};
94
95struct qed_chain_u32 {
 96 /* Cyclic index of next element to produce/consume */
97 u32 prod_idx;
98 u32 cons_idx;
60}; 99};
61 100
62struct qed_chain { 101struct qed_chain {
@@ -64,13 +103,25 @@ struct qed_chain {
64 dma_addr_t p_phys_addr; 103 dma_addr_t p_phys_addr;
65 void *p_prod_elem; 104 void *p_prod_elem;
66 void *p_cons_elem; 105 void *p_cons_elem;
67 u16 page_cnt; 106
68 enum qed_chain_mode mode; 107 enum qed_chain_mode mode;
69 enum qed_chain_use_mode intended_use; /* used to produce/consume */ 108 enum qed_chain_use_mode intended_use; /* used to produce/consume */
70 u16 capacity; /*< number of _usable_ elements */ 109 enum qed_chain_cnt_type cnt_type;
71 u16 size; /* number of elements */ 110
72 u16 prod_idx; 111 union {
73 u16 cons_idx; 112 struct qed_chain_u16 chain16;
113 struct qed_chain_u32 chain32;
114 } u;
115
116 u32 page_cnt;
117
118 /* Number of elements - capacity is for usable elements only,
119 * while size will contain total number of elements [for entire chain].
120 */
121 u32 capacity;
122 u32 size;
123
124 /* Elements information for fast calculations */
74 u16 elem_per_page; 125 u16 elem_per_page;
75 u16 elem_per_page_mask; 126 u16 elem_per_page_mask;
76 u16 elem_unusable; 127 u16 elem_unusable;
@@ -96,66 +147,69 @@ struct qed_chain {
96#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \ 147#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
97 DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode)) 148 DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
98 149
150#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
151#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
152
99/* Accessors */ 153/* Accessors */
100static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain) 154static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
101{ 155{
102 return p_chain->prod_idx; 156 return p_chain->u.chain16.prod_idx;
103} 157}
104 158
105static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain) 159static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
106{ 160{
107 return p_chain->cons_idx; 161 return p_chain->u.chain16.cons_idx;
162}
163
164static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
165{
166 return p_chain->u.chain32.cons_idx;
108} 167}
109 168
110static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) 169static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
111{ 170{
112 u16 used; 171 u16 used;
113 172
114 /* we don't need to truncate upon assignment, as we assign u32->u16 */ 173 used = (u16) (((u32)0x10000 +
115 used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) - 174 (u32)p_chain->u.chain16.prod_idx) -
116 (u32)p_chain->cons_idx; 175 (u32)p_chain->u.chain16.cons_idx);
117 if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) 176 if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
118 used -= p_chain->prod_idx / p_chain->elem_per_page - 177 used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
119 p_chain->cons_idx / p_chain->elem_per_page; 178 p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
120 179
121 return p_chain->capacity - used; 180 return (u16)(p_chain->capacity - used);
122} 181}
123 182
124static inline u8 qed_chain_is_full(struct qed_chain *p_chain) 183static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
125{ 184{
126 return qed_chain_get_elem_left(p_chain) == p_chain->capacity; 185 u32 used;
127}
128 186
129static inline u8 qed_chain_is_empty(struct qed_chain *p_chain) 187 used = (u32) (((u64)0x100000000ULL +
130{ 188 (u64)p_chain->u.chain32.prod_idx) -
131 return qed_chain_get_elem_left(p_chain) == 0; 189 (u64)p_chain->u.chain32.cons_idx);
132} 190 if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
191 used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
192 p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
133 193
134static inline u16 qed_chain_get_elem_per_page( 194 return p_chain->capacity - used;
135 struct qed_chain *p_chain)
136{
137 return p_chain->elem_per_page;
138} 195}
139 196
140static inline u16 qed_chain_get_usable_per_page( 197static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
141 struct qed_chain *p_chain)
142{ 198{
143 return p_chain->usable_per_page; 199 return p_chain->usable_per_page;
144} 200}
145 201
146static inline u16 qed_chain_get_unusable_per_page( 202static inline u16 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
147 struct qed_chain *p_chain)
148{ 203{
149 return p_chain->elem_unusable; 204 return p_chain->elem_unusable;
150} 205}
151 206
152static inline u16 qed_chain_get_size(struct qed_chain *p_chain) 207static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
153{ 208{
154 return p_chain->size; 209 return p_chain->page_cnt;
155} 210}
156 211
157static inline dma_addr_t 212static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
158qed_chain_get_pbl_phys(struct qed_chain *p_chain)
159{ 213{
160 return p_chain->pbl.p_phys_table; 214 return p_chain->pbl.p_phys_table;
161} 215}
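
qed_chain_get_elem_left depends on modular arithmetic: adding 0x10000 before the subtraction keeps the 16-bit producer-minus-consumer difference correct across index wraparound (the new u32 variant does the same with 0x100000000ULL), and next-pointer chains additionally subtract the per-page unusable slots. A small standalone check of the wraparound identity:

#include <stdio.h>
#include <stdint.h>

/* used = (prod - cons) mod 2^16, computed without relying on
 * unsigned-short promotion quirks; mirrors the u16 branch above,
 * next-pointer correction omitted.
 */
static uint16_t chain16_used(uint16_t prod, uint16_t cons)
{
	return (uint16_t)(((uint32_t)0x10000 + prod) - cons);
}

int main(void)
{
	printf("%u\n", chain16_used(5, 0xfffb));	/* 10: prod wrapped */
	printf("%u\n", chain16_used(100, 40));		/* 60: no wrap */
	return 0;
}
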
@@ -172,65 +226,63 @@ qed_chain_get_pbl_phys(struct qed_chain *p_chain)
172 */ 226 */
173static inline void 227static inline void
174qed_chain_advance_page(struct qed_chain *p_chain, 228qed_chain_advance_page(struct qed_chain *p_chain,
175 void **p_next_elem, 229 void **p_next_elem, void *idx_to_inc, void *page_to_inc)
176 u16 *idx_to_inc,
177 u16 *page_to_inc)
178 230
179{ 231{
232 struct qed_chain_next *p_next = NULL;
233 u32 page_index = 0;
180 switch (p_chain->mode) { 234 switch (p_chain->mode) {
181 case QED_CHAIN_MODE_NEXT_PTR: 235 case QED_CHAIN_MODE_NEXT_PTR:
182 { 236 p_next = *p_next_elem;
183 struct qed_chain_next *p_next = *p_next_elem;
184 *p_next_elem = p_next->next_virt; 237 *p_next_elem = p_next->next_virt;
185 *idx_to_inc += p_chain->elem_unusable; 238 if (is_chain_u16(p_chain))
239 *(u16 *)idx_to_inc += p_chain->elem_unusable;
240 else
241 *(u32 *)idx_to_inc += p_chain->elem_unusable;
186 break; 242 break;
187 }
188 case QED_CHAIN_MODE_SINGLE: 243 case QED_CHAIN_MODE_SINGLE:
189 *p_next_elem = p_chain->p_virt_addr; 244 *p_next_elem = p_chain->p_virt_addr;
190 break; 245 break;
191 246
192 case QED_CHAIN_MODE_PBL: 247 case QED_CHAIN_MODE_PBL:
193 /* It is assumed pages are sequential, next element needs 248 if (is_chain_u16(p_chain)) {
194 * to change only when passing going back to first from last. 249 if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
195 */ 250 *(u16 *)page_to_inc = 0;
196 if (++(*page_to_inc) == p_chain->page_cnt) { 251 page_index = *(u16 *)page_to_inc;
197 *page_to_inc = 0; 252 } else {
198 *p_next_elem = p_chain->p_virt_addr; 253 if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
254 *(u32 *)page_to_inc = 0;
255 page_index = *(u32 *)page_to_inc;
199 } 256 }
257 *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
200 } 258 }
201} 259}
202 260
203#define is_unusable_idx(p, idx) \ 261#define is_unusable_idx(p, idx) \
204 (((p)->idx & (p)->elem_per_page_mask) == (p)->usable_per_page) 262 (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
263
264#define is_unusable_idx_u32(p, idx) \
265 (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
266#define is_unusable_next_idx(p, idx) \
267 ((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
268 (p)->usable_per_page)
205 269
206#define is_unusable_next_idx(p, idx) \ 270#define is_unusable_next_idx_u32(p, idx) \
207 ((((p)->idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page) 271 ((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
272 (p)->usable_per_page)
208 273
209#define test_ans_skip(p, idx) \ 274#define test_and_skip(p, idx) \
210 do { \ 275 do { \
211 if (is_unusable_idx(p, idx)) { \ 276 if (is_chain_u16(p)) { \
212 (p)->idx += (p)->elem_unusable; \ 277 if (is_unusable_idx(p, idx)) \
278 (p)->u.chain16.idx += (p)->elem_unusable; \
279 } else { \
280 if (is_unusable_idx_u32(p, idx)) \
281 (p)->u.chain32.idx += (p)->elem_unusable; \
213 } \ 282 } \
214 } while (0) 283 } while (0)
215 284
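
test_and_skip exists because of next-pointer overhead: in QED_CHAIN_MODE_NEXT_PTR every page ends with a struct qed_chain_next, so elem_unusable index slots per page must be hopped over whenever an index reaches them. A small numeric illustration of the per-page accounting; the sizes are example values, and sizeof(struct qed_chain_next) is only roughly 16 bytes on 64-bit builds:

#include <stdio.h>

#define PAGE_SIZE_B	4096	/* example chain page size */
#define ELEM_SIZE_B	64	/* example element size */
#define NEXT_PTR_B	16	/* approx sizeof(struct qed_chain_next) */

int main(void)
{
	int elem_per_page = PAGE_SIZE_B / ELEM_SIZE_B;		/* 64 */
	int usable = (PAGE_SIZE_B - NEXT_PTR_B) / ELEM_SIZE_B;	/* 63 */

	printf("elem/page=%d usable=%d unusable=%d\n",
	       elem_per_page, usable, elem_per_page - usable);
	return 0;
}
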
216/** 285/**
217 * @brief qed_chain_return_multi_produced -
218 *
219 * A chain in which the driver "Produces" elements should use this API
220 * to indicate previous produced elements are now consumed.
221 *
222 * @param p_chain
223 * @param num
224 */
225static inline void
226qed_chain_return_multi_produced(struct qed_chain *p_chain,
227 u16 num)
228{
229 p_chain->cons_idx += num;
230 test_ans_skip(p_chain, cons_idx);
231}
232
233/**
234 * @brief qed_chain_return_produced - 286 * @brief qed_chain_return_produced -
235 * 287 *
236 * A chain in which the driver "Produces" elements should use this API 288 * A chain in which the driver "Produces" elements should use this API
@@ -240,8 +292,11 @@ qed_chain_return_multi_produced(struct qed_chain *p_chain,
240 */ 292 */
241static inline void qed_chain_return_produced(struct qed_chain *p_chain) 293static inline void qed_chain_return_produced(struct qed_chain *p_chain)
242{ 294{
243 p_chain->cons_idx++; 295 if (is_chain_u16(p_chain))
244 test_ans_skip(p_chain, cons_idx); 296 p_chain->u.chain16.cons_idx++;
297 else
298 p_chain->u.chain32.cons_idx++;
299 test_and_skip(p_chain, cons_idx);
245} 300}
246 301
247/** 302/**
@@ -257,21 +312,33 @@ static inline void qed_chain_return_produced(struct qed_chain *p_chain)
257 */ 312 */
258static inline void *qed_chain_produce(struct qed_chain *p_chain) 313static inline void *qed_chain_produce(struct qed_chain *p_chain)
259{ 314{
260 void *ret = NULL; 315 void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;
261 316
262 if ((p_chain->prod_idx & p_chain->elem_per_page_mask) == 317 if (is_chain_u16(p_chain)) {
263 p_chain->next_page_mask) { 318 if ((p_chain->u.chain16.prod_idx &
264 qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, 319 p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
265 &p_chain->prod_idx, 320 p_prod_idx = &p_chain->u.chain16.prod_idx;
266 &p_chain->pbl.prod_page_idx); 321 p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx;
322 qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
323 p_prod_idx, p_prod_page_idx);
324 }
325 p_chain->u.chain16.prod_idx++;
326 } else {
327 if ((p_chain->u.chain32.prod_idx &
328 p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
329 p_prod_idx = &p_chain->u.chain32.prod_idx;
330 p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx;
331 qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
332 p_prod_idx, p_prod_page_idx);
333 }
334 p_chain->u.chain32.prod_idx++;
267 } 335 }
268 336
269 ret = p_chain->p_prod_elem; 337 p_ret = p_chain->p_prod_elem;
270 p_chain->prod_idx++;
271 p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) + 338 p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
272 p_chain->elem_size); 339 p_chain->elem_size);
273 340
274 return ret; 341 return p_ret;
275} 342}
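
Despite the u16/u32 split, qed_chain_produce keeps its old contract: check for room, claim the next element, then publish the new producer index. A hedged sketch of a TX-path caller built only from the accessors in this file; fill_bd() and the doorbell write are placeholders for driver-specific code:

static int post_one_bd(struct qed_chain *tx_chain)
{
	void *bd;

	if (!qed_chain_get_elem_left(tx_chain))
		return -EBUSY;			/* ring is full */

	bd = qed_chain_produce(tx_chain);	/* claims one element */
	/* fill_bd(bd); */

	/* publish qed_chain_get_prod_idx(tx_chain) via doorbell here */
	return 0;
}
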
276 343
277/** 344/**
@@ -282,9 +349,9 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
282 * @param p_chain 349 * @param p_chain
283 * @param num 350 * @param num
284 * 351 *
285 * @return u16, number of unusable BDs 352 * @return number of unusable BDs
286 */ 353 */
287static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain) 354static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
288{ 355{
289 return p_chain->capacity; 356 return p_chain->capacity;
290} 357}
@@ -297,11 +364,13 @@ static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain)
297 * 364 *
298 * @param p_chain 365 * @param p_chain
299 */ 366 */
300static inline void 367static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
301qed_chain_recycle_consumed(struct qed_chain *p_chain)
302{ 368{
303 test_ans_skip(p_chain, prod_idx); 369 test_and_skip(p_chain, prod_idx);
304 p_chain->prod_idx++; 370 if (is_chain_u16(p_chain))
371 p_chain->u.chain16.prod_idx++;
372 else
373 p_chain->u.chain32.prod_idx++;
305} 374}
306 375
307/** 376/**
@@ -316,21 +385,33 @@ qed_chain_recycle_consumed(struct qed_chain *p_chain)
316 */ 385 */
317static inline void *qed_chain_consume(struct qed_chain *p_chain) 386static inline void *qed_chain_consume(struct qed_chain *p_chain)
318{ 387{
319 void *ret = NULL; 388 void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;
320 389
321 if ((p_chain->cons_idx & p_chain->elem_per_page_mask) == 390 if (is_chain_u16(p_chain)) {
322 p_chain->next_page_mask) { 391 if ((p_chain->u.chain16.cons_idx &
392 p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
393 p_cons_idx = &p_chain->u.chain16.cons_idx;
394 p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx;
395 qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
396 p_cons_idx, p_cons_page_idx);
397 }
398 p_chain->u.chain16.cons_idx++;
399 } else {
400 if ((p_chain->u.chain32.cons_idx &
401 p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
402 p_cons_idx = &p_chain->u.chain32.cons_idx;
403 p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx;
323 qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, 404 qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
324 &p_chain->cons_idx, 405 p_cons_idx, p_cons_page_idx);
325 &p_chain->pbl.cons_page_idx); 406 }
407 p_chain->u.chain32.cons_idx++;
326 } 408 }
327 409
328 ret = p_chain->p_cons_elem; 410 p_ret = p_chain->p_cons_elem;
329 p_chain->cons_idx++;
330 p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) + 411 p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
331 p_chain->elem_size); 412 p_chain->elem_size);
332 413
333 return ret; 414 return p_ret;
334} 415}
335 416
336/** 417/**
@@ -340,16 +421,33 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
340 */ 421 */
341static inline void qed_chain_reset(struct qed_chain *p_chain) 422static inline void qed_chain_reset(struct qed_chain *p_chain)
342{ 423{
343 int i; 424 u32 i;
344 425
345 p_chain->prod_idx = 0; 426 if (is_chain_u16(p_chain)) {
346 p_chain->cons_idx = 0; 427 p_chain->u.chain16.prod_idx = 0;
347 p_chain->p_cons_elem = p_chain->p_virt_addr; 428 p_chain->u.chain16.cons_idx = 0;
348 p_chain->p_prod_elem = p_chain->p_virt_addr; 429 } else {
430 p_chain->u.chain32.prod_idx = 0;
431 p_chain->u.chain32.cons_idx = 0;
432 }
433 p_chain->p_cons_elem = p_chain->p_virt_addr;
434 p_chain->p_prod_elem = p_chain->p_virt_addr;
349 435
350 if (p_chain->mode == QED_CHAIN_MODE_PBL) { 436 if (p_chain->mode == QED_CHAIN_MODE_PBL) {
351 p_chain->pbl.prod_page_idx = p_chain->page_cnt - 1; 437 /* Use (page_cnt - 1) as a reset value for the prod/cons page's
352 p_chain->pbl.cons_page_idx = p_chain->page_cnt - 1; 438 * indices, to avoid unnecessary page advancing on the first
439 * call to qed_chain_produce/consume. Instead, the indices
440 * will be advanced to page_cnt and then will be wrapped to 0.
441 */
442 u32 reset_val = p_chain->page_cnt - 1;
443
444 if (is_chain_u16(p_chain)) {
445 p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val;
446 p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val;
447 } else {
448 p_chain->pbl.u.pbl32.prod_page_idx = reset_val;
449 p_chain->pbl.u.pbl32.cons_page_idx = reset_val;
450 }
353 } 451 }
354 452
355 switch (p_chain->intended_use) { 453 switch (p_chain->intended_use) {
@@ -377,168 +475,184 @@ static inline void qed_chain_reset(struct qed_chain *p_chain)
377 * @param intended_use 475 * @param intended_use
378 * @param mode 476 * @param mode
379 */ 477 */
380static inline void qed_chain_init(struct qed_chain *p_chain, 478static inline void qed_chain_init_params(struct qed_chain *p_chain,
381 void *p_virt_addr, 479 u32 page_cnt,
382 dma_addr_t p_phys_addr, 480 u8 elem_size,
383 u16 page_cnt, 481 enum qed_chain_use_mode intended_use,
384 u8 elem_size, 482 enum qed_chain_mode mode,
385 enum qed_chain_use_mode intended_use, 483 enum qed_chain_cnt_type cnt_type)
386 enum qed_chain_mode mode)
387{ 484{
388 /* chain fixed parameters */ 485 /* chain fixed parameters */
389 p_chain->p_virt_addr = p_virt_addr; 486 p_chain->p_virt_addr = NULL;
390 p_chain->p_phys_addr = p_phys_addr; 487 p_chain->p_phys_addr = 0;
391 p_chain->elem_size = elem_size; 488 p_chain->elem_size = elem_size;
392 p_chain->page_cnt = page_cnt; 489 p_chain->intended_use = intended_use;
393 p_chain->mode = mode; 490 p_chain->mode = mode;
491 p_chain->cnt_type = cnt_type;
394 492
395 p_chain->intended_use = intended_use;
396 p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size); 493 p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
397 p_chain->usable_per_page = 494 p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
398 USABLE_ELEMS_PER_PAGE(elem_size, mode);
399 p_chain->capacity = p_chain->usable_per_page * page_cnt;
400 p_chain->size = p_chain->elem_per_page * page_cnt;
401 p_chain->elem_per_page_mask = p_chain->elem_per_page - 1; 495 p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
402
403 p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); 496 p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
404
405 p_chain->next_page_mask = (p_chain->usable_per_page & 497 p_chain->next_page_mask = (p_chain->usable_per_page &
406 p_chain->elem_per_page_mask); 498 p_chain->elem_per_page_mask);
407 499
408 if (mode == QED_CHAIN_MODE_NEXT_PTR) { 500 p_chain->page_cnt = page_cnt;
409 struct qed_chain_next *p_next; 501 p_chain->capacity = p_chain->usable_per_page * page_cnt;
410 u16 i; 502 p_chain->size = p_chain->elem_per_page * page_cnt;
411
412 for (i = 0; i < page_cnt - 1; i++) {
413 /* Increment mem_phy to the next page. */
414 p_phys_addr += QED_CHAIN_PAGE_SIZE;
415
416 /* Initialize the physical address of the next page. */
417 p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
418 elem_size *
419 p_chain->
420 usable_per_page);
421
422 p_next->next_phys.lo = DMA_LO_LE(p_phys_addr);
423 p_next->next_phys.hi = DMA_HI_LE(p_phys_addr);
424
425 /* Initialize the virtual address of the next page. */
426 p_next->next_virt = (void *)((u8 *)p_virt_addr +
427 QED_CHAIN_PAGE_SIZE);
428
429 /* Move to the next page. */
430 p_virt_addr = p_next->next_virt;
431 }
432
433 /* Last page's next should point to beginning of the chain */
434 p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
435 elem_size *
436 p_chain->usable_per_page);
437 503
438 p_next->next_phys.lo = DMA_LO_LE(p_chain->p_phys_addr); 504 p_chain->pbl.p_phys_table = 0;
439 p_next->next_phys.hi = DMA_HI_LE(p_chain->p_phys_addr); 505 p_chain->pbl.p_virt_table = NULL;
440 p_next->next_virt = p_chain->p_virt_addr; 506 p_chain->pbl.pp_virt_addr_tbl = NULL;
441 }
442 qed_chain_reset(p_chain);
443} 507}
444 508
445/** 509/**
446 * @brief qed_chain_pbl_init - Initializes a basic pbl chain 510 * @brief qed_chain_init_mem -
447 * struct 511 *
512 * Initializes a basic chain struct with its chain buffers
513 *
448 * @param p_chain 514 * @param p_chain
449 * @param p_virt_addr virtual address of allocated buffer's beginning 515 * @param p_virt_addr virtual address of allocated buffer's beginning
450 * @param p_phys_addr physical address of allocated buffer's beginning 516 * @param p_phys_addr physical address of allocated buffer's beginning
451 * @param page_cnt number of pages in the allocated buffer 517 *
452 * @param elem_size size of each element in the chain
453 * @param use_mode
454 * @param p_phys_pbl pointer to a pre-allocated side table
455 * which will hold physical page addresses.
456 * @param p_virt_pbl pointer to a pre allocated side table
457 * which will hold virtual page addresses.
458 */ 518 */
459static inline void 519static inline void qed_chain_init_mem(struct qed_chain *p_chain,
460qed_chain_pbl_init(struct qed_chain *p_chain, 520 void *p_virt_addr, dma_addr_t p_phys_addr)
461 void *p_virt_addr,
462 dma_addr_t p_phys_addr,
463 u16 page_cnt,
464 u8 elem_size,
465 enum qed_chain_use_mode use_mode,
466 dma_addr_t p_phys_pbl,
467 dma_addr_t *p_virt_pbl)
468{ 521{
469 dma_addr_t *p_pbl_dma = p_virt_pbl; 522 p_chain->p_virt_addr = p_virt_addr;
470 int i; 523 p_chain->p_phys_addr = p_phys_addr;
471 524}
472 qed_chain_init(p_chain, p_virt_addr, p_phys_addr, page_cnt,
473 elem_size, use_mode, QED_CHAIN_MODE_PBL);
474 525
526/**
527 * @brief qed_chain_init_pbl_mem -
528 *
529 * Initializes a basic chain struct with its pbl buffers
530 *
531 * @param p_chain
532 * @param p_virt_pbl pointer to a pre allocated side table which will hold
533 * virtual page addresses.
534 * @param p_phys_pbl pointer to a pre-allocated side table which will hold
535 * physical page addresses.
536 * @param pp_virt_addr_tbl
537 * pointer to a pre-allocated side table which will hold
538 * the virtual addresses of the chain pages.
539 *
540 */
541static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
542 void *p_virt_pbl,
543 dma_addr_t p_phys_pbl,
544 void **pp_virt_addr_tbl)
545{
475 p_chain->pbl.p_phys_table = p_phys_pbl; 546 p_chain->pbl.p_phys_table = p_phys_pbl;
476 p_chain->pbl.p_virt_table = p_virt_pbl; 547 p_chain->pbl.p_virt_table = p_virt_pbl;
477 548 p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
478 /* Fill the PBL with physical addresses*/
479 for (i = 0; i < page_cnt; i++) {
480 *p_pbl_dma = p_phys_addr;
481 p_phys_addr += QED_CHAIN_PAGE_SIZE;
482 p_pbl_dma++;
483 }
484} 549}
485 550
486/** 551/**
487 * @brief qed_chain_set_prod - sets the prod to the given 552 * @brief qed_chain_init_next_ptr_elem -
488 * value 553 *
554 * Initializes a next pointer element
555 *
556 * @param p_chain
557 * @param p_virt_curr virtual address of a chain page of which the next
558 * pointer element is initialized
559 * @param p_virt_next virtual address of the next chain page
560 * @param p_phys_next physical address of the next chain page
489 * 561 *
490 * @param prod_idx
491 * @param p_prod_elem
492 */ 562 */
493static inline void qed_chain_set_prod(struct qed_chain *p_chain, 563static inline void
494 u16 prod_idx, 564qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
495 void *p_prod_elem) 565 void *p_virt_curr,
566 void *p_virt_next, dma_addr_t p_phys_next)
496{ 567{
497 p_chain->prod_idx = prod_idx; 568 struct qed_chain_next *p_next;
498 p_chain->p_prod_elem = p_prod_elem; 569 u32 size;
570
571 size = p_chain->elem_size * p_chain->usable_per_page;
572 p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);
573
574 DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
575
576 p_next->next_virt = p_virt_next;
499} 577}
500 578
501/** 579/**
502 * @brief qed_chain_get_elem - 580 * @brief qed_chain_get_last_elem -
503 * 581 *
504 * get a pointer to an element represented by absolute idx 582 * Returns a pointer to the last element of the chain
505 * 583 *
506 * @param p_chain 584 * @param p_chain
507 * @assumption p_chain->size is a power of 2
508 * 585 *
509 * @return void*, a pointer to next element 586 * @return void*
510 */ 587 */
511static inline void *qed_chain_sge_get_elem(struct qed_chain *p_chain, 588static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
512 u16 idx)
513{ 589{
514 void *ret = NULL; 590 struct qed_chain_next *p_next = NULL;
515 591 void *p_virt_addr = NULL;
516 if (idx >= p_chain->size) 592 u32 size, last_page_idx;
517 return NULL;
518 593
519 ret = (u8 *)p_chain->p_virt_addr + p_chain->elem_size * idx; 594 if (!p_chain->p_virt_addr)
595 goto out;
520 596
521 return ret; 597 switch (p_chain->mode) {
598 case QED_CHAIN_MODE_NEXT_PTR:
599 size = p_chain->elem_size * p_chain->usable_per_page;
600 p_virt_addr = p_chain->p_virt_addr;
601 p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
602 while (p_next->next_virt != p_chain->p_virt_addr) {
603 p_virt_addr = p_next->next_virt;
604 p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
605 size);
606 }
607 break;
608 case QED_CHAIN_MODE_SINGLE:
609 p_virt_addr = p_chain->p_virt_addr;
610 break;
611 case QED_CHAIN_MODE_PBL:
612 last_page_idx = p_chain->page_cnt - 1;
613 p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
614 break;
615 }
616 /* p_virt_addr points at this stage to the last page of the chain */
617 size = p_chain->elem_size * (p_chain->usable_per_page - 1);
618 p_virt_addr = (u8 *)p_virt_addr + size;
619out:
620 return p_virt_addr;
522} 621}
523 622
524/** 623/**
525 * @brief qed_chain_sge_inc_cons_prod 624 * @brief qed_chain_set_prod - sets the prod to the given value
526 * 625 *
527 * for sge chains, producer isn't increased serially, the ring 626 * @param prod_idx
528 * is expected to be full at all times. Once elements are 627 * @param p_prod_elem
529 * consumed, they are immediately produced. 628 */
629static inline void qed_chain_set_prod(struct qed_chain *p_chain,
630 u32 prod_idx, void *p_prod_elem)
631{
632 if (is_chain_u16(p_chain))
633 p_chain->u.chain16.prod_idx = (u16) prod_idx;
634 else
635 p_chain->u.chain32.prod_idx = prod_idx;
636 p_chain->p_prod_elem = p_prod_elem;
637}
638
639/**
640 * @brief qed_chain_pbl_zero_mem - set chain memory to 0
530 * 641 *
531 * @param p_chain 642 * @param p_chain
532 * @param cnt
533 *
534 * @return inline void
535 */ 643 */
536static inline void 644static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
537qed_chain_sge_inc_cons_prod(struct qed_chain *p_chain,
538 u16 cnt)
539{ 645{
540 p_chain->prod_idx += cnt; 646 u32 i, page_cnt;
541 p_chain->cons_idx += cnt; 647
648 if (p_chain->mode != QED_CHAIN_MODE_PBL)
649 return;
650
651 page_cnt = qed_chain_get_page_cnt(p_chain);
652
653 for (i = 0; i < page_cnt; i++)
654 memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
655 QED_CHAIN_PAGE_SIZE);
542} 656}
543 657
544#endif 658#endif
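
The net effect of this file's rework is a staged initialization: qed_chain_init_params fixes the geometry and counter width, qed_chain_init_mem attaches the element buffer, qed_chain_init_pbl_mem attaches the PBL tables, and qed_chain_reset arms the indices. A hedged sketch of the PBL flow, with all DMA allocation left to the caller:

/* Illustrative init order for a PBL-mode chain; p_virt/p_phys,
 * pbl_virt/pbl_phys and pp_virt_tbl are assumed to be pre-allocated
 * by DMA code not shown here.
 */
static void setup_pbl_chain(struct qed_chain *chain,
			    void *p_virt, dma_addr_t p_phys,
			    void *pbl_virt, dma_addr_t pbl_phys,
			    void **pp_virt_tbl, u32 page_cnt, u8 elem_size)
{
	qed_chain_init_params(chain, page_cnt, elem_size,
			      QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			      QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16);
	qed_chain_init_mem(chain, p_virt, p_phys);
	qed_chain_init_pbl_mem(chain, pbl_virt, pbl_phys, pp_virt_tbl);
	qed_chain_reset(chain);
}
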
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 6c876a63558d..4475a9d8ae15 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -114,6 +114,7 @@ struct qed_queue_start_common_params {
114 u8 vport_id; 114 u8 vport_id;
115 u16 sb; 115 u16 sb;
116 u16 sb_idx; 116 u16 sb_idx;
117 u16 vf_qid;
117}; 118};
118 119
119struct qed_tunn_params { 120struct qed_tunn_params {
@@ -128,11 +129,73 @@ struct qed_eth_cb_ops {
128 void (*force_mac) (void *dev, u8 *mac); 129 void (*force_mac) (void *dev, u8 *mac);
129}; 130};
130 131
132#ifdef CONFIG_DCB
133/* Prototype declaration of qed_eth_dcbnl_ops should match with the declaration
134 * of dcbnl_rtnl_ops structure.
135 */
136struct qed_eth_dcbnl_ops {
137 /* IEEE 802.1Qaz std */
138 int (*ieee_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc);
139 int (*ieee_setpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc);
140 int (*ieee_getets)(struct qed_dev *cdev, struct ieee_ets *ets);
141 int (*ieee_setets)(struct qed_dev *cdev, struct ieee_ets *ets);
142 int (*ieee_peer_getets)(struct qed_dev *cdev, struct ieee_ets *ets);
143 int (*ieee_peer_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc);
144 int (*ieee_getapp)(struct qed_dev *cdev, struct dcb_app *app);
145 int (*ieee_setapp)(struct qed_dev *cdev, struct dcb_app *app);
146
147 /* CEE std */
148 u8 (*getstate)(struct qed_dev *cdev);
149 u8 (*setstate)(struct qed_dev *cdev, u8 state);
150 void (*getpgtccfgtx)(struct qed_dev *cdev, int prio, u8 *prio_type,
151 u8 *pgid, u8 *bw_pct, u8 *up_map);
152 void (*getpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 *bw_pct);
153 void (*getpgtccfgrx)(struct qed_dev *cdev, int prio, u8 *prio_type,
154 u8 *pgid, u8 *bw_pct, u8 *up_map);
155 void (*getpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 *bw_pct);
156 void (*getpfccfg)(struct qed_dev *cdev, int prio, u8 *setting);
157 void (*setpfccfg)(struct qed_dev *cdev, int prio, u8 setting);
158 u8 (*getcap)(struct qed_dev *cdev, int capid, u8 *cap);
159 int (*getnumtcs)(struct qed_dev *cdev, int tcid, u8 *num);
160 u8 (*getpfcstate)(struct qed_dev *cdev);
161 int (*getapp)(struct qed_dev *cdev, u8 idtype, u16 id);
162 u8 (*getfeatcfg)(struct qed_dev *cdev, int featid, u8 *flags);
163
164 /* DCBX configuration */
165 u8 (*getdcbx)(struct qed_dev *cdev);
166 void (*setpgtccfgtx)(struct qed_dev *cdev, int prio,
167 u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map);
168 void (*setpgtccfgrx)(struct qed_dev *cdev, int prio,
169 u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map);
170 void (*setpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 bw_pct);
171 void (*setpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 bw_pct);
172 u8 (*setall)(struct qed_dev *cdev);
173 int (*setnumtcs)(struct qed_dev *cdev, int tcid, u8 num);
174 void (*setpfcstate)(struct qed_dev *cdev, u8 state);
175 int (*setapp)(struct qed_dev *cdev, u8 idtype, u16 idval, u8 up);
176 u8 (*setdcbx)(struct qed_dev *cdev, u8 state);
177 u8 (*setfeatcfg)(struct qed_dev *cdev, int featid, u8 flags);
178
179 /* Peer apps */
180 int (*peer_getappinfo)(struct qed_dev *cdev,
181 struct dcb_peer_app_info *info,
182 u16 *app_count);
183 int (*peer_getapptable)(struct qed_dev *cdev, struct dcb_app *table);
184
185 /* CEE peer */
186 int (*cee_peer_getpfc)(struct qed_dev *cdev, struct cee_pfc *pfc);
187 int (*cee_peer_getpg)(struct qed_dev *cdev, struct cee_pg *pg);
188};
189#endif
190
131struct qed_eth_ops { 191struct qed_eth_ops {
132 const struct qed_common_ops *common; 192 const struct qed_common_ops *common;
133#ifdef CONFIG_QED_SRIOV 193#ifdef CONFIG_QED_SRIOV
134 const struct qed_iov_hv_ops *iov; 194 const struct qed_iov_hv_ops *iov;
135#endif 195#endif
196#ifdef CONFIG_DCB
197 const struct qed_eth_dcbnl_ops *dcb;
198#endif
136 199
137 int (*fill_dev_info)(struct qed_dev *cdev, 200 int (*fill_dev_info)(struct qed_dev *cdev,
138 struct qed_dev_eth_info *info); 201 struct qed_dev_eth_info *info);
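
qed_eth_dcbnl_ops intentionally mirrors the kernel's dcbnl_rtnl_ops, so the upper driver's DCB callbacks can be one-line forwarders into qed. A hedged sketch of such a shim; qede_netdev_to_cdev() and qed_ops are hypothetical names for the upper driver's plumbing:

#ifdef CONFIG_DCB
static int qede_dcbnl_ieee_getpfc(struct net_device *netdev,
				  struct ieee_pfc *pfc)
{
	/* hypothetical lookup from netdev private data to the qed_dev */
	struct qed_dev *cdev = qede_netdev_to_cdev(netdev);

	return qed_ops->dcb->ieee_getpfc(cdev, pfc);
}
#endif
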
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 4c29439f54bf..b1e3c57c7117 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -34,6 +34,96 @@ enum dcbx_protocol_type {
34 DCBX_MAX_PROTOCOL_TYPE 34 DCBX_MAX_PROTOCOL_TYPE
35}; 35};
36 36
37#ifdef CONFIG_DCB
38#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
39#define QED_LLDP_PORT_ID_STAT_LEN 4
40#define QED_DCBX_MAX_APP_PROTOCOL 32
41#define QED_MAX_PFC_PRIORITIES 8
42#define QED_DCBX_DSCP_SIZE 64
43
44struct qed_dcbx_lldp_remote {
45 u32 peer_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
46 u32 peer_port_id[QED_LLDP_PORT_ID_STAT_LEN];
47 bool enable_rx;
48 bool enable_tx;
49 u32 tx_interval;
50 u32 max_credit;
51};
52
53struct qed_dcbx_lldp_local {
54 u32 local_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
55 u32 local_port_id[QED_LLDP_PORT_ID_STAT_LEN];
56};
57
58struct qed_dcbx_app_prio {
59 u8 roce;
60 u8 roce_v2;
61 u8 fcoe;
62 u8 iscsi;
63 u8 eth;
64};
65
66struct qed_dbcx_pfc_params {
67 bool willing;
68 bool enabled;
69 u8 prio[QED_MAX_PFC_PRIORITIES];
70 u8 max_tc;
71};
72
73struct qed_app_entry {
74 bool ethtype;
75 bool enabled;
76 u8 prio;
77 u16 proto_id;
78 enum dcbx_protocol_type proto_type;
79};
80
81struct qed_dcbx_params {
82 struct qed_app_entry app_entry[QED_DCBX_MAX_APP_PROTOCOL];
83 u16 num_app_entries;
84 bool app_willing;
85 bool app_valid;
86 bool app_error;
87 bool ets_willing;
88 bool ets_enabled;
89 bool ets_cbs;
90 bool valid;
91 u8 ets_pri_tc_tbl[QED_MAX_PFC_PRIORITIES];
92 u8 ets_tc_bw_tbl[QED_MAX_PFC_PRIORITIES];
93 u8 ets_tc_tsa_tbl[QED_MAX_PFC_PRIORITIES];
94 struct qed_dbcx_pfc_params pfc;
95 u8 max_ets_tc;
96};
97
98struct qed_dcbx_admin_params {
99 struct qed_dcbx_params params;
100 bool valid;
101};
102
103struct qed_dcbx_remote_params {
104 struct qed_dcbx_params params;
105 bool valid;
106};
107
108struct qed_dcbx_operational_params {
109 struct qed_dcbx_app_prio app_prio;
110 struct qed_dcbx_params params;
111 bool valid;
112 bool enabled;
113 bool ieee;
114 bool cee;
115 u32 err;
116};
117
118struct qed_dcbx_get {
119 struct qed_dcbx_operational_params operational;
120 struct qed_dcbx_lldp_remote lldp_remote;
121 struct qed_dcbx_lldp_local lldp_local;
122 struct qed_dcbx_remote_params remote;
123 struct qed_dcbx_admin_params local;
124};
125#endif
126
37enum qed_led_mode { 127enum qed_led_mode {
38 QED_LED_MODE_OFF, 128 QED_LED_MODE_OFF,
39 QED_LED_MODE_ON, 129 QED_LED_MODE_ON,
@@ -58,8 +148,70 @@ struct qed_eth_pf_params {
58 u16 num_cons; 148 u16 num_cons;
59}; 149};
60 150
 151/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
152struct qed_iscsi_pf_params {
153 u64 glbl_q_params_addr;
154 u64 bdq_pbl_base_addr[2];
155 u32 max_cwnd;
156 u16 cq_num_entries;
157 u16 cmdq_num_entries;
158 u16 dup_ack_threshold;
159 u16 tx_sws_timer;
160 u16 min_rto;
161 u16 min_rto_rt;
162 u16 max_rto;
163
 164	/* The following parameters are used during HW-init and need to
 165	 * be passed as arguments to the update_pf_params routine
 166	 * invoked before slowpath start
 167	 */
168 u16 num_cons;
169 u16 num_tasks;
170
171 /* The following parameters are used during protocol-init */
172 u16 half_way_close_timeout;
173 u16 bdq_xoff_threshold[2];
174 u16 bdq_xon_threshold[2];
175 u16 cmdq_xoff_threshold;
176 u16 cmdq_xon_threshold;
177 u16 rq_buffer_size;
178
179 u8 num_sq_pages_in_ring;
180 u8 num_r2tq_pages_in_ring;
181 u8 num_uhq_pages_in_ring;
182 u8 num_queues;
183 u8 log_page_size;
184 u8 rqe_log_size;
185 u8 max_fin_rt;
186 u8 gl_rq_pi;
187 u8 gl_cmd_pi;
188 u8 debug_mode;
189 u8 ll2_ooo_queue_id;
190 u8 ooo_enable;
191
192 u8 is_target;
193 u8 bdq_pbl_num_entries[2];
194};
195
196struct qed_rdma_pf_params {
197 /* Supplied to QED during resource allocation (may affect the ILT and
198 * the doorbell BAR).
199 */
200 u32 min_dpis; /* number of requested DPIs */
201 u32 num_mrs; /* number of requested memory regions */
202 u32 num_qps; /* number of requested Queue Pairs */
 203	u32 num_srqs;		/* number of requested SRQs */
204 u8 roce_edpm_mode; /* see QED_ROCE_EDPM_MODE_ENABLE */
205 u8 gl_pi; /* protocol index */
206
207 /* Will allocate rate limiters to be used with QPs */
208 u8 enable_dcqcn;
209};
210
61struct qed_pf_params { 211struct qed_pf_params {
62 struct qed_eth_pf_params eth_pf_params; 212 struct qed_eth_pf_params eth_pf_params;
213 struct qed_iscsi_pf_params iscsi_pf_params;
214 struct qed_rdma_pf_params rdma_pf_params;
63}; 215};
64 216
65enum qed_int_mode { 217enum qed_int_mode {
@@ -100,6 +252,8 @@ struct qed_dev_info {
100 /* MFW version */ 252 /* MFW version */
101 u32 mfw_rev; 253 u32 mfw_rev;
102 254
255 bool rdma_supported;
256
103 u32 flash_size; 257 u32 flash_size;
104 u8 mf_mode; 258 u8 mf_mode;
105 bool tx_switching; 259 bool tx_switching;
@@ -111,6 +265,7 @@ enum qed_sb_type {
111 265
112enum qed_protocol { 266enum qed_protocol {
113 QED_PROTOCOL_ETH, 267 QED_PROTOCOL_ETH,
268 QED_PROTOCOL_ISCSI,
114}; 269};
115 270
116struct qed_link_params { 271struct qed_link_params {
@@ -325,7 +480,8 @@ struct qed_common_ops {
325 int (*chain_alloc)(struct qed_dev *cdev, 480 int (*chain_alloc)(struct qed_dev *cdev,
326 enum qed_chain_use_mode intended_use, 481 enum qed_chain_use_mode intended_use,
327 enum qed_chain_mode mode, 482 enum qed_chain_mode mode,
328 u16 num_elems, 483 enum qed_chain_cnt_type cnt_type,
484 u32 num_elems,
329 size_t elem_size, 485 size_t elem_size,
330 struct qed_chain *p_chain); 486 struct qed_chain *p_chain);
331 487
@@ -333,6 +489,30 @@ struct qed_common_ops {
333 struct qed_chain *p_chain); 489 struct qed_chain *p_chain);
334 490
335/** 491/**
492 * @brief get_coalesce - Get coalesce parameters in usec
493 *
494 * @param cdev
495 * @param rx_coal - Rx coalesce value in usec
496 * @param tx_coal - Tx coalesce value in usec
497 *
498 */
499 void (*get_coalesce)(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal);
500
501/**
502 * @brief set_coalesce - Configure Rx coalesce value in usec
503 *
504 * @param cdev
505 * @param rx_coal - Rx coalesce value in usec
506 * @param tx_coal - Tx coalesce value in usec
507 * @param qid - Queue index
508 * @param sb_id - Status Block Id
509 *
510 * @return 0 on success, error otherwise.
511 */
512 int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
513 u8 qid, u16 sb_id);
514
515/**
336 * @brief set_led - Configure LED mode 516 * @brief set_led - Configure LED mode
337 * 517 *
338 * @param cdev 518 * @param cdev
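A hedged sketch of wiring the new coalesce ops into an ethtool handler, reusing the hypothetical example_dev layout from above; error handling is elided:

/* Hedged sketch: forward ethtool's get-coalesce request to the qed
 * core. All example_* names are illustrative.
 */
static int example_get_coalesce(struct net_device *ndev,
				struct ethtool_coalesce *coal)
{
	struct example_dev *edev = netdev_priv(ndev);
	u16 rx_coal = 0, tx_coal = 0;

	edev->ops->common->get_coalesce(edev->cdev, &rx_coal, &tx_coal);
	coal->rx_coalesce_usecs = rx_coal;
	coal->tx_coalesce_usecs = tx_coal;
	return 0;
}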
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
new file mode 100644
index 000000000000..187991c1f439
--- /dev/null
+++ b/include/linux/qed/rdma_common.h
@@ -0,0 +1,44 @@
1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8
9#ifndef __RDMA_COMMON__
10#define __RDMA_COMMON__
11/************************/
12/* RDMA FW CONSTANTS */
13/************************/
14
15#define RDMA_RESERVED_LKEY (0)
16#define RDMA_RING_PAGE_SIZE (0x1000)
17
18#define RDMA_MAX_SGE_PER_SQ_WQE (4)
19#define RDMA_MAX_SGE_PER_RQ_WQE (4)
20
21#define RDMA_MAX_DATA_SIZE_IN_WQE (0x7FFFFFFF)
22
23#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50)
24#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20)
25
26#define RDMA_MAX_CQS (64 * 1024)
27#define RDMA_MAX_TIDS (128 * 1024 - 1)
28#define RDMA_MAX_PDS (64 * 1024)
29
30#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
31
32#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
33
34struct rdma_srq_id {
35 __le16 srq_idx;
36 __le16 opaque_fid;
37};
38
39struct rdma_srq_producers {
40 __le32 sge_prod;
41 __le32 wqe_prod;
42};
43
44#endif /* __RDMA_COMMON__ */
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
new file mode 100644
index 000000000000..2eeaf3dc6646
--- /dev/null
+++ b/include/linux/qed/roce_common.h
@@ -0,0 +1,17 @@
1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8
9#ifndef __ROCE_COMMON__
10#define __ROCE_COMMON__
11
12#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256)
13#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
14
15#define ROCE_MAX_QPS (32 * 1024)
16
17#endif /* __ROCE_COMMON__ */
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
new file mode 100644
index 000000000000..3b8e1efd9bc2
--- /dev/null
+++ b/include/linux/qed/storage_common.h
@@ -0,0 +1,91 @@
1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8
9#ifndef __STORAGE_COMMON__
10#define __STORAGE_COMMON__
11
12#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2)
13#define BDQ_NUM_RESOURCES (4)
14
15#define BDQ_ID_RQ (0)
16#define BDQ_ID_IMM_DATA (1)
17#define BDQ_NUM_IDS (2)
18
19#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
20
21struct scsi_bd {
22 struct regpair address;
23 struct regpair opaque;
24};
25
26struct scsi_bdq_ram_drv_data {
27 __le16 external_producer;
28 __le16 reserved0[3];
29};
30
31struct scsi_drv_cmdq {
32 __le16 cmdq_cons;
33 __le16 reserved0;
34 __le32 reserved1;
35};
36
37struct scsi_init_func_params {
38 __le16 num_tasks;
39 u8 log_page_size;
40 u8 debug_mode;
41 u8 reserved2[12];
42};
43
44struct scsi_init_func_queues {
45 struct regpair glbl_q_params_addr;
46 __le16 rq_buffer_size;
47 __le16 cq_num_entries;
48 __le16 cmdq_num_entries;
49 u8 bdq_resource_id;
50 u8 q_validity;
51#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1
52#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0
53#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1
54#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
55#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1
56#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2
57#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F
58#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3
59 u8 num_queues;
60 u8 queue_relative_offset;
61 u8 cq_sb_pi;
62 u8 cmdq_sb_pi;
63 __le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS];
64 __le16 reserved0;
65 u8 bdq_pbl_num_entries[BDQ_NUM_IDS];
66 struct regpair bdq_pbl_base_address[BDQ_NUM_IDS];
67 __le16 bdq_xoff_threshold[BDQ_NUM_IDS];
68 __le16 bdq_xon_threshold[BDQ_NUM_IDS];
69 __le16 cmdq_xoff_threshold;
70 __le16 cmdq_xon_threshold;
71 __le32 reserved1;
72};
73
74struct scsi_ram_per_bdq_resource_drv_data {
75 struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
76};
77
78struct scsi_sge {
79 struct regpair sge_addr;
80 __le16 sge_len;
81 __le16 reserved0;
82 __le32 reserved1;
83};
84
85struct scsi_terminate_extra_params {
86 __le16 unsolicited_cq_count;
87 __le16 cmdq_count;
88 u8 reserved[4];
89};
90
91#endif /* __STORAGE_COMMON__ */
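The *_VALID mask/shift pairs above follow the usual qed HSI idiom of packing flags into a byte. A minimal sketch, using only the macros defined in this header, of marking the RQ and command queue valid:

/* Hedged sketch: set the RQ-valid and CMD-valid bits in q_validity. */
static void example_mark_queues_valid(struct scsi_init_func_queues *q)
{
	q->q_validity |= SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK <<
			 SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT;
	q->q_validity |= SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK <<
			 SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT;
}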
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
new file mode 100644
index 000000000000..accba0e6b704
--- /dev/null
+++ b/include/linux/qed/tcp_common.h
@@ -0,0 +1,226 @@
1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8
9#ifndef __TCP_COMMON__
10#define __TCP_COMMON__
11
12#define TCP_INVALID_TIMEOUT_VAL -1
13
14enum tcp_connect_mode {
15 TCP_CONNECT_ACTIVE,
16 TCP_CONNECT_PASSIVE,
17 MAX_TCP_CONNECT_MODE
18};
19
20struct tcp_init_params {
21 __le32 max_cwnd;
22 __le16 dup_ack_threshold;
23 __le16 tx_sws_timer;
24 __le16 min_rto;
25 __le16 min_rto_rt;
26 __le16 max_rto;
27 u8 maxfinrt;
28 u8 reserved[1];
29};
30
31enum tcp_ip_version {
32 TCP_IPV4,
33 TCP_IPV6,
34 MAX_TCP_IP_VERSION
35};
36
37struct tcp_offload_params {
38 __le16 local_mac_addr_lo;
39 __le16 local_mac_addr_mid;
40 __le16 local_mac_addr_hi;
41 __le16 remote_mac_addr_lo;
42 __le16 remote_mac_addr_mid;
43 __le16 remote_mac_addr_hi;
44 __le16 vlan_id;
45 u8 flags;
46#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1
47#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0
48#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1
49#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1
50#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1
51#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2
52#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1
53#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3
54#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1
55#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 4
56#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1
57#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5
58#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1
59#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6
60#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1
61#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7
62 u8 ip_version;
63 __le32 remote_ip[4];
64 __le32 local_ip[4];
65 __le32 flow_label;
66 u8 ttl;
67 u8 tos_or_tc;
68 __le16 remote_port;
69 __le16 local_port;
70 __le16 mss;
71 u8 rcv_wnd_scale;
72 u8 connect_mode;
73 __le16 srtt;
74 __le32 cwnd;
75 __le32 ss_thresh;
76 __le16 reserved1;
77 u8 ka_max_probe_cnt;
78 u8 dup_ack_theshold;
79 __le32 rcv_next;
80 __le32 snd_una;
81 __le32 snd_next;
82 __le32 snd_max;
83 __le32 snd_wnd;
84 __le32 rcv_wnd;
85 __le32 snd_wl1;
86 __le32 ts_time;
87 __le32 ts_recent;
88 __le32 ts_recent_age;
89 __le32 total_rt;
90 __le32 ka_timeout_delta;
91 __le32 rt_timeout_delta;
92 u8 dup_ack_cnt;
93 u8 snd_wnd_probe_cnt;
94 u8 ka_probe_cnt;
95 u8 rt_cnt;
96 __le16 rtt_var;
97 __le16 reserved2;
98 __le32 ka_timeout;
99 __le32 ka_interval;
100 __le32 max_rt_time;
101 __le32 initial_rcv_wnd;
102 u8 snd_wnd_scale;
103 u8 ack_frequency;
104 __le16 da_timeout_value;
105 __le32 ts_ticks_per_second;
106};
107
108struct tcp_offload_params_opt2 {
109 __le16 local_mac_addr_lo;
110 __le16 local_mac_addr_mid;
111 __le16 local_mac_addr_hi;
112 __le16 remote_mac_addr_lo;
113 __le16 remote_mac_addr_mid;
114 __le16 remote_mac_addr_hi;
115 __le16 vlan_id;
116 u8 flags;
117#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1
118#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0
119#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1
120#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1
121#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1
122#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2
123#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F
124#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3
125 u8 ip_version;
126 __le32 remote_ip[4];
127 __le32 local_ip[4];
128 __le32 flow_label;
129 u8 ttl;
130 u8 tos_or_tc;
131 __le16 remote_port;
132 __le16 local_port;
133 __le16 mss;
134 u8 rcv_wnd_scale;
135 u8 connect_mode;
136 __le16 syn_ip_payload_length;
137 __le32 syn_phy_addr_lo;
138 __le32 syn_phy_addr_hi;
139 __le32 reserved1[22];
140};
141
142enum tcp_seg_placement_event {
143 TCP_EVENT_ADD_PEN,
144 TCP_EVENT_ADD_NEW_ISLE,
145 TCP_EVENT_ADD_ISLE_RIGHT,
146 TCP_EVENT_ADD_ISLE_LEFT,
147 TCP_EVENT_JOIN,
148 TCP_EVENT_NOP,
149 MAX_TCP_SEG_PLACEMENT_EVENT
150};
151
152struct tcp_update_params {
153 __le16 flags;
154#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1
155#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0
156#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1
157#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1
158#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1
159#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2
160#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1
161#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3
162#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1
163#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4
164#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1
165#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5
166#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1
167#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6
168#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1
169#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7
170#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1
171#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8
172#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1
173#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9
174#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1
175#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10
176#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1
177#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11
178#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1
179#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12
180#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1
181#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13
182#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1
183#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14
184#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1
185#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15
186 __le16 remote_mac_addr_lo;
187 __le16 remote_mac_addr_mid;
188 __le16 remote_mac_addr_hi;
189 __le16 mss;
190 u8 ttl;
191 u8 tos_or_tc;
192 __le32 ka_timeout;
193 __le32 ka_interval;
194 __le32 max_rt_time;
195 __le32 flow_label;
196 __le32 initial_rcv_wnd;
197 u8 ka_max_probe_cnt;
198 u8 reserved1[7];
199};
200
201struct tcp_upload_params {
202 __le32 rcv_next;
203 __le32 snd_una;
204 __le32 snd_next;
205 __le32 snd_max;
206 __le32 snd_wnd;
207 __le32 rcv_wnd;
208 __le32 snd_wl1;
209 __le32 cwnd;
210 __le32 ss_thresh;
211 __le16 srtt;
212 __le16 rtt_var;
213 __le32 ts_time;
214 __le32 ts_recent;
215 __le32 ts_recent_age;
216 __le32 total_rt;
217 __le32 ka_timeout_delta;
218 __le32 rt_timeout_delta;
219 u8 dup_ack_cnt;
220 u8 snd_wnd_probe_cnt;
221 u8 ka_probe_cnt;
222 u8 rt_cnt;
223 __le32 reserved;
224};
225
226#endif /* __TCP_COMMON__ */
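In tcp_update_params each value bit (e.g. KA_EN) is guarded by a companion *_CHANGED bit, so the firmware only applies fields that are explicitly flagged. A minimal sketch of requesting a keep-alive enable, assuming the usual flag idiom:

/* Hedged sketch: enable TCP keep-alive via the update params. */
static void example_enable_keepalive(struct tcp_update_params *p)
{
	u16 flags = 0;

	flags |= TCP_UPDATE_PARAMS_KA_EN_MASK <<
		 TCP_UPDATE_PARAMS_KA_EN_SHIFT;
	flags |= TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK <<
		 TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT;
	p->flags = cpu_to_le16(flags);
}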
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index c006cc900c44..2daece8979f7 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -89,8 +89,9 @@ void net_inc_egress_queue(void);
89void net_dec_egress_queue(void); 89void net_dec_egress_queue(void);
90#endif 90#endif
91 91
92extern void rtnetlink_init(void); 92void rtnetlink_init(void);
93extern void __rtnl_unlock(void); 93void __rtnl_unlock(void);
94void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail);
94 95
95#define ASSERT_RTNL() do { \ 96#define ASSERT_RTNL() do { \
96 if (unlikely(!rtnl_is_locked())) { \ 97 if (unlikely(!rtnl_is_locked())) { \
diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h
index a53915cd5581..c68307bc306f 100644
--- a/include/linux/rxrpc.h
+++ b/include/linux/rxrpc.h
@@ -35,21 +35,24 @@ struct sockaddr_rxrpc {
35 */ 35 */
36#define RXRPC_SECURITY_KEY 1 /* [clnt] set client security key */ 36#define RXRPC_SECURITY_KEY 1 /* [clnt] set client security key */
37#define RXRPC_SECURITY_KEYRING 2 /* [srvr] set ring of server security keys */ 37#define RXRPC_SECURITY_KEYRING 2 /* [srvr] set ring of server security keys */
38#define RXRPC_EXCLUSIVE_CONNECTION 3 /* [clnt] use exclusive RxRPC connection */ 38#define RXRPC_EXCLUSIVE_CONNECTION 3 /* Deprecated; use RXRPC_EXCLUSIVE_CALL instead */
39#define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */ 39#define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */
40 40
41/* 41/*
42 * RxRPC control messages 42 * RxRPC control messages
 43 * - If neither abort nor accept is specified, the message is a data message.
43 * - terminal messages mean that a user call ID tag can be recycled 44 * - terminal messages mean that a user call ID tag can be recycled
45 * - s/r/- indicate whether these are applicable to sendmsg() and/or recvmsg()
44 */ 46 */
45#define RXRPC_USER_CALL_ID 1 /* user call ID specifier */ 47#define RXRPC_USER_CALL_ID 1 /* sr: user call ID specifier */
46#define RXRPC_ABORT 2 /* abort request / notification [terminal] */ 48#define RXRPC_ABORT 2 /* sr: abort request / notification [terminal] */
47#define RXRPC_ACK 3 /* [Server] RPC op final ACK received [terminal] */ 49#define RXRPC_ACK 3 /* -r: [Service] RPC op final ACK received [terminal] */
48#define RXRPC_NET_ERROR 5 /* network error received [terminal] */ 50#define RXRPC_NET_ERROR 5 /* -r: network error received [terminal] */
49#define RXRPC_BUSY 6 /* server busy received [terminal] */ 51#define RXRPC_BUSY 6 /* -r: server busy received [terminal] */
50#define RXRPC_LOCAL_ERROR 7 /* local error generated [terminal] */ 52#define RXRPC_LOCAL_ERROR 7 /* -r: local error generated [terminal] */
51#define RXRPC_NEW_CALL 8 /* [Server] new incoming call notification */ 53#define RXRPC_NEW_CALL 8 /* -r: [Service] new incoming call notification */
52#define RXRPC_ACCEPT 9 /* [Server] accept request */ 54#define RXRPC_ACCEPT 9 /* s-: [Service] accept request */
55#define RXRPC_EXCLUSIVE_CALL 10 /* s-: Call should be on exclusive connection */
53 56
54/* 57/*
55 * RxRPC security levels 58 * RxRPC security levels
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
new file mode 100644
index 000000000000..678bfbf78ac4
--- /dev/null
+++ b/include/linux/skb_array.h
@@ -0,0 +1,169 @@
1/*
2 * Definitions for the 'struct skb_array' datastructure.
3 *
4 * Author:
5 * Michael S. Tsirkin <mst@redhat.com>
6 *
7 * Copyright (C) 2016 Red Hat, Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * Limited-size FIFO of skbs. Can be used more or less whenever
15 * sk_buff_head can be used, except you need to know the queue size in
16 * advance.
17 * Implemented as a type-safe wrapper around ptr_ring.
18 */
19
20#ifndef _LINUX_SKB_ARRAY_H
21#define _LINUX_SKB_ARRAY_H 1
22
23#ifdef __KERNEL__
24#include <linux/ptr_ring.h>
25#include <linux/skbuff.h>
26#include <linux/if_vlan.h>
27#endif
28
29struct skb_array {
30 struct ptr_ring ring;
31};
32
33/* Might be slightly faster than skb_array_full below, but callers invoking
34 * this in a loop must use a compiler barrier, for example cpu_relax().
35 */
36static inline bool __skb_array_full(struct skb_array *a)
37{
38 return __ptr_ring_full(&a->ring);
39}
40
41static inline bool skb_array_full(struct skb_array *a)
42{
43 return ptr_ring_full(&a->ring);
44}
45
46static inline int skb_array_produce(struct skb_array *a, struct sk_buff *skb)
47{
48 return ptr_ring_produce(&a->ring, skb);
49}
50
51static inline int skb_array_produce_irq(struct skb_array *a, struct sk_buff *skb)
52{
53 return ptr_ring_produce_irq(&a->ring, skb);
54}
55
56static inline int skb_array_produce_bh(struct skb_array *a, struct sk_buff *skb)
57{
58 return ptr_ring_produce_bh(&a->ring, skb);
59}
60
61static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb)
62{
63 return ptr_ring_produce_any(&a->ring, skb);
64}
65
66/* Might be slightly faster than skb_array_empty below, but only safe if the
67 * array is never resized. Also, callers invoking this in a loop must take care
68 * to use a compiler barrier, for example cpu_relax().
69 */
70static inline bool __skb_array_empty(struct skb_array *a)
71{
72 return !__ptr_ring_peek(&a->ring);
73}
74
75static inline bool skb_array_empty(struct skb_array *a)
76{
77 return ptr_ring_empty(&a->ring);
78}
79
80static inline bool skb_array_empty_bh(struct skb_array *a)
81{
82 return ptr_ring_empty_bh(&a->ring);
83}
84
85static inline bool skb_array_empty_irq(struct skb_array *a)
86{
87 return ptr_ring_empty_irq(&a->ring);
88}
89
90static inline bool skb_array_empty_any(struct skb_array *a)
91{
92 return ptr_ring_empty_any(&a->ring);
93}
94
95static inline struct sk_buff *skb_array_consume(struct skb_array *a)
96{
97 return ptr_ring_consume(&a->ring);
98}
99
100static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a)
101{
102 return ptr_ring_consume_irq(&a->ring);
103}
104
105static inline struct sk_buff *skb_array_consume_any(struct skb_array *a)
106{
107 return ptr_ring_consume_any(&a->ring);
108}
109
110static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a)
111{
112 return ptr_ring_consume_bh(&a->ring);
113}
114
115static inline int __skb_array_len_with_tag(struct sk_buff *skb)
116{
117 if (likely(skb)) {
118 int len = skb->len;
119
120 if (skb_vlan_tag_present(skb))
121 len += VLAN_HLEN;
122
123 return len;
124 } else {
125 return 0;
126 }
127}
128
129static inline int skb_array_peek_len(struct skb_array *a)
130{
131 return PTR_RING_PEEK_CALL(&a->ring, __skb_array_len_with_tag);
132}
133
134static inline int skb_array_peek_len_irq(struct skb_array *a)
135{
136 return PTR_RING_PEEK_CALL_IRQ(&a->ring, __skb_array_len_with_tag);
137}
138
139static inline int skb_array_peek_len_bh(struct skb_array *a)
140{
141 return PTR_RING_PEEK_CALL_BH(&a->ring, __skb_array_len_with_tag);
142}
143
144static inline int skb_array_peek_len_any(struct skb_array *a)
145{
146 return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag);
147}
148
149static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp)
150{
151 return ptr_ring_init(&a->ring, size, gfp);
152}
153
 154static inline void __skb_array_destroy_skb(void *ptr)
155{
156 kfree_skb(ptr);
157}
158
 159static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
160{
161 return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
162}
163
164static inline void skb_array_cleanup(struct skb_array *a)
165{
166 ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb);
167}
168
169#endif /* _LINUX_SKB_ARRAY_H */
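A hedged usage sketch of the API above: a bounded producer/consumer pair around a 256-entry ring. The drop-on-full policy and the netif_rx() consumer are illustrative choices, not mandated by the header:

/* Hedged sketch: bounded skb FIFO between a producer and a consumer. */
static int example_enqueue(struct skb_array *ring, struct sk_buff *skb)
{
	if (skb_array_produce(ring, skb)) {
		kfree_skb(skb);		/* ring full: drop the packet */
		return -ENOBUFS;
	}
	return 0;
}

static void example_drain(struct skb_array *ring)
{
	struct sk_buff *skb;

	while ((skb = skb_array_consume(ring)) != NULL)
		netif_rx(skb);		/* illustrative consumer */
}

/* Setup/teardown pair:
 *	skb_array_init(&ring, 256, GFP_KERNEL);
 *	...
 *	skb_array_cleanup(&ring);	(frees any skbs still queued)
 */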
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ee38a4127475..dc0fca747c5e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -301,6 +301,11 @@ struct sk_buff;
301#endif 301#endif
302extern int sysctl_max_skb_frags; 302extern int sysctl_max_skb_frags;
303 303
304/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
305 * segment using its current segmentation instead.
306 */
307#define GSO_BY_FRAGS 0xFFFF
308
304typedef struct skb_frag_struct skb_frag_t; 309typedef struct skb_frag_struct skb_frag_t;
305 310
306struct skb_frag_struct { 311struct skb_frag_struct {
@@ -482,6 +487,8 @@ enum {
482 SKB_GSO_PARTIAL = 1 << 13, 487 SKB_GSO_PARTIAL = 1 << 13,
483 488
484 SKB_GSO_TUNNEL_REMCSUM = 1 << 14, 489 SKB_GSO_TUNNEL_REMCSUM = 1 << 14,
490
491 SKB_GSO_SCTP = 1 << 15,
485}; 492};
486 493
487#if BITS_PER_LONG > 32 494#if BITS_PER_LONG > 32
@@ -2987,6 +2994,7 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
2987int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); 2994int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
2988void skb_scrub_packet(struct sk_buff *skb, bool xnet); 2995void skb_scrub_packet(struct sk_buff *skb, bool xnet);
2989unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); 2996unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
2997bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
2990struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); 2998struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
2991struct sk_buff *skb_vlan_untag(struct sk_buff *skb); 2999struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
2992int skb_ensure_writable(struct sk_buff *skb, int write_len); 3000int skb_ensure_writable(struct sk_buff *skb, int write_len);
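skb_gso_validate_mtu() is the GSO-aware complement to a plain length check: for a GSO skb the per-segment size, not skb->len, is what must fit the egress MTU. A hedged sketch of how a forwarding path might use it:

/* Hedged sketch: MTU check that handles both GSO and linear skbs. */
static bool example_pkt_fits_mtu(const struct sk_buff *skb,
				 unsigned int mtu)
{
	if (skb_is_gso(skb))
		return skb_gso_validate_mtu(skb, mtu);

	return skb->len <= mtu;
}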
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index ffdaca9c01af..705840e0438f 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -135,9 +135,12 @@ struct plat_stmmacenet_data {
135 void (*bus_setup)(void __iomem *ioaddr); 135 void (*bus_setup)(void __iomem *ioaddr);
136 int (*init)(struct platform_device *pdev, void *priv); 136 int (*init)(struct platform_device *pdev, void *priv);
137 void (*exit)(struct platform_device *pdev, void *priv); 137 void (*exit)(struct platform_device *pdev, void *priv);
138 void (*suspend)(struct platform_device *pdev, void *priv);
139 void (*resume)(struct platform_device *pdev, void *priv);
138 void *bsp_priv; 140 void *bsp_priv;
139 struct stmmac_axi *axi; 141 struct stmmac_axi *axi;
140 int has_gmac4; 142 int has_gmac4;
141 bool tso_en; 143 bool tso_en;
144 int mac_port_sel_speed;
142}; 145};
143#endif 146#endif
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
new file mode 100644
index 000000000000..1c912f85e041
--- /dev/null
+++ b/include/linux/virtio_net.h
@@ -0,0 +1,101 @@
1#ifndef _LINUX_VIRTIO_NET_H
2#define _LINUX_VIRTIO_NET_H
3
4#include <linux/if_vlan.h>
5#include <uapi/linux/virtio_net.h>
6
7static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
8 const struct virtio_net_hdr *hdr,
9 bool little_endian)
10{
11 unsigned short gso_type = 0;
12
13 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
14 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
15 case VIRTIO_NET_HDR_GSO_TCPV4:
16 gso_type = SKB_GSO_TCPV4;
17 break;
18 case VIRTIO_NET_HDR_GSO_TCPV6:
19 gso_type = SKB_GSO_TCPV6;
20 break;
21 case VIRTIO_NET_HDR_GSO_UDP:
22 gso_type = SKB_GSO_UDP;
23 break;
24 default:
25 return -EINVAL;
26 }
27
28 if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
29 gso_type |= SKB_GSO_TCP_ECN;
30
31 if (hdr->gso_size == 0)
32 return -EINVAL;
33 }
34
35 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
36 u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
37 u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
38
39 if (!skb_partial_csum_set(skb, start, off))
40 return -EINVAL;
41 }
42
43 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
44 u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
45
46 skb_shinfo(skb)->gso_size = gso_size;
47 skb_shinfo(skb)->gso_type = gso_type;
48
49 /* Header must be checked, and gso_segs computed. */
50 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
51 skb_shinfo(skb)->gso_segs = 0;
52 }
53
54 return 0;
55}
56
57static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
58 struct virtio_net_hdr *hdr,
59 bool little_endian)
60{
61 memset(hdr, 0, sizeof(*hdr));
62
63 if (skb_is_gso(skb)) {
64 struct skb_shared_info *sinfo = skb_shinfo(skb);
65
66 /* This is a hint as to how much should be linear. */
67 hdr->hdr_len = __cpu_to_virtio16(little_endian,
68 skb_headlen(skb));
69 hdr->gso_size = __cpu_to_virtio16(little_endian,
70 sinfo->gso_size);
71 if (sinfo->gso_type & SKB_GSO_TCPV4)
72 hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
73 else if (sinfo->gso_type & SKB_GSO_TCPV6)
74 hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
75 else if (sinfo->gso_type & SKB_GSO_UDP)
76 hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
77 else
78 return -EINVAL;
79 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
80 hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
81 } else
82 hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
83
84 if (skb->ip_summed == CHECKSUM_PARTIAL) {
85 hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
86 if (skb_vlan_tag_present(skb))
87 hdr->csum_start = __cpu_to_virtio16(little_endian,
88 skb_checksum_start_offset(skb) + VLAN_HLEN);
89 else
90 hdr->csum_start = __cpu_to_virtio16(little_endian,
91 skb_checksum_start_offset(skb));
92 hdr->csum_offset = __cpu_to_virtio16(little_endian,
93 skb->csum_offset);
94 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
95 hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
96 } /* else everything is zero */
97
98 return 0;
99}
100
 101#endif /* _LINUX_VIRTIO_NET_H */
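A hedged sketch of the transmit-side use of the helper above, in the style of a tun/macvtap backend; the little_endian flag would normally come from feature negotiation (VIRTIO_F_VERSION_1) and is assumed true here:

/* Hedged sketch: express an outgoing skb as a virtio_net_hdr. */
static int example_build_vnet_hdr(const struct sk_buff *skb,
				  struct virtio_net_hdr *hdr)
{
	/* true: header fields are little-endian (assumed negotiation) */
	if (virtio_net_hdr_from_skb(skb, hdr, true))
		return -EINVAL;	/* gso_type not expressible */

	return 0;
}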
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index da84cf920b78..5ab4c9901ccc 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -141,6 +141,16 @@ struct lowpan_dev {
141 u8 priv[0] __aligned(sizeof(void *)); 141 u8 priv[0] __aligned(sizeof(void *));
142}; 142};
143 143
144struct lowpan_802154_neigh {
145 __le16 short_addr;
146};
147
148static inline
149struct lowpan_802154_neigh *lowpan_802154_neigh(void *neigh_priv)
150{
151 return neigh_priv;
152}
153
144static inline 154static inline
145struct lowpan_dev *lowpan_dev(const struct net_device *dev) 155struct lowpan_dev *lowpan_dev(const struct net_device *dev)
146{ 156{
@@ -244,6 +254,12 @@ static inline bool lowpan_fetch_skb(struct sk_buff *skb, void *data,
244 return false; 254 return false;
245} 255}
246 256
257static inline bool lowpan_802154_is_valid_src_short_addr(__le16 addr)
258{
259 /* First bit of addr is multicast, reserved or 802.15.4 specific */
260 return !(addr & cpu_to_le16(0x8000));
261}
262
247static inline void lowpan_push_hc_data(u8 **hc_ptr, const void *data, 263static inline void lowpan_push_hc_data(u8 **hc_ptr, const void *data,
248 const size_t len) 264 const size_t len)
249{ 265{
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 9a9a8edc138f..fb82b5b5d9e7 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -2,8 +2,8 @@
2#define __NET_ACT_API_H 2#define __NET_ACT_API_H
3 3
4/* 4/*
5 * Public police action API for classifiers/qdiscs 5 * Public action API for classifiers/qdiscs
6 */ 6*/
7 7
8#include <net/sch_generic.h> 8#include <net/sch_generic.h>
9#include <net/pkt_sched.h> 9#include <net/pkt_sched.h>
@@ -76,6 +76,16 @@ static inline void tcf_lastuse_update(struct tcf_t *tm)
76 76
77 if (tm->lastuse != now) 77 if (tm->lastuse != now)
78 tm->lastuse = now; 78 tm->lastuse = now;
79 if (unlikely(!tm->firstuse))
80 tm->firstuse = now;
81}
82
83static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm)
84{
85 dtm->install = jiffies_to_clock_t(jiffies - stm->install);
86 dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse);
87 dtm->firstuse = jiffies_to_clock_t(jiffies - stm->firstuse);
88 dtm->expires = jiffies_to_clock_t(stm->expires);
79} 89}
80 90
81struct tc_action { 91struct tc_action {
@@ -97,7 +107,8 @@ struct tc_action_ops {
97 char kind[IFNAMSIZ]; 107 char kind[IFNAMSIZ];
98 __u32 type; /* TBD to match kind */ 108 __u32 type; /* TBD to match kind */
99 struct module *owner; 109 struct module *owner;
100 int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *); 110 int (*act)(struct sk_buff *, const struct tc_action *,
111 struct tcf_result *);
101 int (*dump)(struct sk_buff *, struct tc_action *, int, int); 112 int (*dump)(struct sk_buff *, struct tc_action *, int, int);
102 void (*cleanup)(struct tc_action *, int bind); 113 void (*cleanup)(struct tc_action *, int bind);
103 int (*lookup)(struct net *, struct tc_action *, u32); 114 int (*lookup)(struct net *, struct tc_action *, u32);
@@ -115,8 +126,8 @@ struct tc_action_net {
115}; 126};
116 127
117static inline 128static inline
118int tc_action_net_init(struct tc_action_net *tn, const struct tc_action_ops *ops, 129int tc_action_net_init(struct tc_action_net *tn,
119 unsigned int mask) 130 const struct tc_action_ops *ops, unsigned int mask)
120{ 131{
121 int err = 0; 132 int err = 0;
122 133
@@ -144,8 +155,8 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
144 struct tc_action *a); 155 struct tc_action *a);
145int tcf_hash_search(struct tc_action_net *tn, struct tc_action *a, u32 index); 156int tcf_hash_search(struct tc_action_net *tn, struct tc_action *a, u32 index);
146u32 tcf_hash_new_index(struct tc_action_net *tn); 157u32 tcf_hash_new_index(struct tc_action_net *tn);
147int tcf_hash_check(struct tc_action_net *tn, u32 index, struct tc_action *a, 158bool tcf_hash_check(struct tc_action_net *tn, u32 index, struct tc_action *a,
148 int bind); 159 int bind);
149int tcf_hash_create(struct tc_action_net *tn, u32 index, struct nlattr *est, 160int tcf_hash_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
150 struct tc_action *a, int size, int bind, bool cpustats); 161 struct tc_action *a, int size, int bind, bool cpustats);
151void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est); 162void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
@@ -159,7 +170,8 @@ static inline int tcf_hash_release(struct tc_action *a, bool bind)
159} 170}
160 171
161int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops); 172int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
162int tcf_unregister_action(struct tc_action_ops *a, struct pernet_operations *ops); 173int tcf_unregister_action(struct tc_action_ops *a,
174 struct pernet_operations *ops);
163int tcf_action_destroy(struct list_head *actions, int bind); 175int tcf_action_destroy(struct list_head *actions, int bind);
164int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions, 176int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
165 struct tcf_result *res); 177 struct tcf_result *res);
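tcf_tm_dump() converts the jiffies-based tcf_t timestamps into the clock_t deltas userspace expects. A hedged sketch of an action dump callback using it; the gact-style attribute names and the tcf_common layout are assumptions borrowed from existing actions:

/* Hedged sketch: emit an action's timestamps in a netlink dump. */
static int example_dump_tm(struct sk_buff *skb, struct tcf_common *p)
{
	struct tcf_t t;

	tcf_tm_dump(&t, &p->tcfc_tm);
	return nla_put_64bit(skb, TCA_GACT_TM, sizeof(t), &t,
			     TCA_GACT_PAD);
}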
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 730d856683e5..9826d3a9464c 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -94,6 +94,16 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
94void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr); 94void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
95void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr); 95void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr);
96 96
97void addrconf_add_linklocal(struct inet6_dev *idev,
98 const struct in6_addr *addr, u32 flags);
99
100int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
101 const struct prefix_info *pinfo,
102 struct inet6_dev *in6_dev,
103 const struct in6_addr *addr, int addr_type,
104 u32 addr_flags, bool sllao, bool tokenized,
105 __u32 valid_lft, u32 prefered_lft);
106
97static inline int addrconf_ifid_eui48(u8 *eui, struct net_device *dev) 107static inline int addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
98{ 108{
99 if (dev->addr_len != ETH_ALEN) 109 if (dev->addr_len != ETH_ALEN)
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 63921672bed0..7bbb00d8b2cd 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -2367,19 +2367,23 @@ struct cfg80211_qos_map {
2367 * (invoked with the wireless_dev mutex held) 2367 * (invoked with the wireless_dev mutex held)
2368 * 2368 *
2369 * @connect: Connect to the ESS with the specified parameters. When connected, 2369 * @connect: Connect to the ESS with the specified parameters. When connected,
2370 * call cfg80211_connect_result() with status code %WLAN_STATUS_SUCCESS. 2370 * call cfg80211_connect_result()/cfg80211_connect_bss() with status code
2371 * If the connection fails for some reason, call cfg80211_connect_result() 2371 * %WLAN_STATUS_SUCCESS. If the connection fails for some reason, call
2372 * with the status from the AP. The driver is allowed to roam to other 2372 * cfg80211_connect_result()/cfg80211_connect_bss() with the status code
2373 * BSSes within the ESS when the other BSS matches the connect parameters. 2373 * from the AP or cfg80211_connect_timeout() if no frame with status code
2374 * When such roaming is initiated by the driver, the driver is expected to 2374 * was received.
2375 * verify that the target matches the configured security parameters and 2375 * The driver is allowed to roam to other BSSes within the ESS when the
2376 * to use Reassociation Request frame instead of Association Request frame. 2376 * other BSS matches the connect parameters. When such roaming is initiated
2377 * The connect function can also be used to request the driver to perform 2377 * by the driver, the driver is expected to verify that the target matches
2378 * a specific roam when connected to an ESS. In that case, the prev_bssid 2378 * the configured security parameters and to use Reassociation Request
2379 * frame instead of Association Request frame.
2380 * The connect function can also be used to request the driver to perform a
2381 * specific roam when connected to an ESS. In that case, the prev_bssid
2379 * parameter is set to the BSSID of the currently associated BSS as an 2382 * parameter is set to the BSSID of the currently associated BSS as an
2380 * indication of requesting reassociation. In both the driver-initiated and 2383 * indication of requesting reassociation.
2381 * new connect() call initiated roaming cases, the result of roaming is 2384 * In both the driver-initiated and new connect() call initiated roaming
2382 * indicated with a call to cfg80211_roamed() or cfg80211_roamed_bss(). 2385 * cases, the result of roaming is indicated with a call to
2386 * cfg80211_roamed() or cfg80211_roamed_bss().
2383 * (invoked with the wireless_dev mutex held) 2387 * (invoked with the wireless_dev mutex held)
2384 * @disconnect: Disconnect from the BSS/ESS. 2388 * @disconnect: Disconnect from the BSS/ESS.
2385 * (invoked with the wireless_dev mutex held) 2389 * (invoked with the wireless_dev mutex held)
@@ -3080,6 +3084,24 @@ struct wiphy_vendor_command {
3080}; 3084};
3081 3085
3082/** 3086/**
3087 * struct wiphy_iftype_ext_capab - extended capabilities per interface type
3088 * @iftype: interface type
3089 * @extended_capabilities: extended capabilities supported by the driver,
3090 * additional capabilities might be supported by userspace; these are the
3091 * 802.11 extended capabilities ("Extended Capabilities element") and are
3092 * in the same format as in the information element. See IEEE Std
3093 * 802.11-2012 8.4.2.29 for the defined fields.
3094 * @extended_capabilities_mask: mask of the valid values
3095 * @extended_capabilities_len: length of the extended capabilities
3096 */
3097struct wiphy_iftype_ext_capab {
3098 enum nl80211_iftype iftype;
3099 const u8 *extended_capabilities;
3100 const u8 *extended_capabilities_mask;
3101 u8 extended_capabilities_len;
3102};
3103
3104/**
3083 * struct wiphy - wireless hardware description 3105 * struct wiphy - wireless hardware description
3084 * @reg_notifier: the driver's regulatory notification callback, 3106 * @reg_notifier: the driver's regulatory notification callback,
3085 * note that if your driver uses wiphy_apply_custom_regulatory() 3107 * note that if your driver uses wiphy_apply_custom_regulatory()
@@ -3199,9 +3221,14 @@ struct wiphy_vendor_command {
3199 * additional capabilities might be supported by userspace; these are 3221 * additional capabilities might be supported by userspace; these are
3200 * the 802.11 extended capabilities ("Extended Capabilities element") 3222 * the 802.11 extended capabilities ("Extended Capabilities element")
3201 * and are in the same format as in the information element. See 3223 * and are in the same format as in the information element. See
3202 * 802.11-2012 8.4.2.29 for the defined fields. 3224 * 802.11-2012 8.4.2.29 for the defined fields. These are the default
3225 * extended capabilities to be used if the capabilities are not specified
3226 * for a specific interface type in iftype_ext_capab.
3203 * @extended_capabilities_mask: mask of the valid values 3227 * @extended_capabilities_mask: mask of the valid values
3204 * @extended_capabilities_len: length of the extended capabilities 3228 * @extended_capabilities_len: length of the extended capabilities
3229 * @iftype_ext_capab: array of extended capabilities per interface type
3230 * @num_iftype_ext_capab: number of interface types for which extended
3231 * capabilities are specified separately.
3205 * @coalesce: packet coalescing support information 3232 * @coalesce: packet coalescing support information
3206 * 3233 *
3207 * @vendor_commands: array of vendor commands supported by the hardware 3234 * @vendor_commands: array of vendor commands supported by the hardware
@@ -3301,6 +3328,9 @@ struct wiphy {
3301 const u8 *extended_capabilities, *extended_capabilities_mask; 3328 const u8 *extended_capabilities, *extended_capabilities_mask;
3302 u8 extended_capabilities_len; 3329 u8 extended_capabilities_len;
3303 3330
3331 const struct wiphy_iftype_ext_capab *iftype_ext_capab;
3332 unsigned int num_iftype_ext_capab;
3333
3304 /* If multiple wiphys are registered and you're handed e.g. 3334 /* If multiple wiphys are registered and you're handed e.g.
3305 * a regular netdev with assigned ieee80211_ptr, you won't 3335 * a regular netdev with assigned ieee80211_ptr, you won't
3306 * know whether it points to a wiphy your driver has registered 3336 * know whether it points to a wiphy your driver has registered
@@ -4680,7 +4710,7 @@ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
4680void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid, 4710void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
4681 struct cfg80211_bss *bss, const u8 *req_ie, 4711 struct cfg80211_bss *bss, const u8 *req_ie,
4682 size_t req_ie_len, const u8 *resp_ie, 4712 size_t req_ie_len, const u8 *resp_ie,
4683 size_t resp_ie_len, u16 status, gfp_t gfp); 4713 size_t resp_ie_len, int status, gfp_t gfp);
4684 4714
4685/** 4715/**
4686 * cfg80211_connect_result - notify cfg80211 of connection result 4716 * cfg80211_connect_result - notify cfg80211 of connection result
@@ -4710,6 +4740,29 @@ cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
4710} 4740}
4711 4741
4712/** 4742/**
4743 * cfg80211_connect_timeout - notify cfg80211 of connection timeout
4744 *
4745 * @dev: network device
4746 * @bssid: the BSSID of the AP
 4747 * @req_ie: association request IEs (may be %NULL)
4748 * @req_ie_len: association request IEs length
4749 * @gfp: allocation flags
4750 *
4751 * It should be called by the underlying driver whenever connect() has failed
4752 * in a sequence where no explicit authentication/association rejection was
4753 * received from the AP. This could happen, e.g., due to not being able to send
4754 * out the Authentication or Association Request frame or timing out while
4755 * waiting for the response.
4756 */
4757static inline void
4758cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid,
4759 const u8 *req_ie, size_t req_ie_len, gfp_t gfp)
4760{
4761 cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, NULL, 0, -1,
4762 gfp);
4763}
4764
4765/**
4713 * cfg80211_roamed - notify cfg80211 of roaming 4766 * cfg80211_roamed - notify cfg80211 of roaming
4714 * 4767 *
4715 * @dev: network device 4768 * @dev: network device
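On the driver side, the new cfg80211_connect_timeout() wrapper covers the case where a connect attempt dies without any status frame from the AP; a hedged sketch, with a hypothetical example_vif holding the attempt state:

/* Hedged sketch: report a connect attempt that timed out with no
 * (re)association response. struct example_vif is hypothetical.
 */
struct example_vif {
	struct net_device *ndev;
	u8 target_bssid[ETH_ALEN];
	u8 *assoc_req_ie;
	size_t assoc_req_ie_len;
};

static void example_connect_expired(struct example_vif *vif)
{
	cfg80211_connect_timeout(vif->ndev, vif->target_bssid,
				 vif->assoc_req_ie, vif->assoc_req_ie_len,
				 GFP_KERNEL);
}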
diff --git a/include/net/codel_qdisc.h b/include/net/codel_qdisc.h
index 8144d9cd2908..098630f83a55 100644
--- a/include/net/codel_qdisc.h
+++ b/include/net/codel_qdisc.h
@@ -52,6 +52,7 @@
52/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */ 52/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
53struct codel_skb_cb { 53struct codel_skb_cb {
54 codel_time_t enqueue_time; 54 codel_time_t enqueue_time;
55 unsigned int mem_usage;
55}; 56};
56 57
57static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb) 58static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 17c3d37b6779..20b3087ad193 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -26,6 +26,7 @@ enum dsa_tag_protocol {
26 DSA_TAG_PROTO_TRAILER, 26 DSA_TAG_PROTO_TRAILER,
27 DSA_TAG_PROTO_EDSA, 27 DSA_TAG_PROTO_EDSA,
28 DSA_TAG_PROTO_BRCM, 28 DSA_TAG_PROTO_BRCM,
29 DSA_TAG_LAST, /* MUST BE LAST */
29}; 30};
30 31
31#define DSA_MAX_SWITCHES 4 32#define DSA_MAX_SWITCHES 4
@@ -58,12 +59,11 @@ struct dsa_chip_data {
58 struct device_node *port_dn[DSA_MAX_PORTS]; 59 struct device_node *port_dn[DSA_MAX_PORTS];
59 60
60 /* 61 /*
61 * An array (with nr_chips elements) of which element [a] 62 * An array of which element [a] indicates which port on this
62 * indicates which port on this switch should be used to 63 * switch should be used to send packets to that are destined
63 * send packets to that are destined for switch a. Can be 64 * for switch a. Can be NULL if there is only one switch chip.
64 * NULL if there is only one switch chip.
65 */ 65 */
66 s8 *rtable; 66 s8 rtable[DSA_MAX_SWITCHES];
67}; 67};
68 68
69struct dsa_platform_data { 69struct dsa_platform_data {
@@ -85,6 +85,17 @@ struct dsa_platform_data {
85struct packet_type; 85struct packet_type;
86 86
87struct dsa_switch_tree { 87struct dsa_switch_tree {
88 struct list_head list;
89
90 /* Tree identifier */
91 u32 tree;
92
93 /* Number of switches attached to this tree */
94 struct kref refcount;
95
96 /* Has this tree been applied to the hardware? */
97 bool applied;
98
88 /* 99 /*
89 * Configuration data for the platform device that owns 100 * Configuration data for the platform device that owns
90 * this dsa switch tree instance. 101 * this dsa switch tree instance.
@@ -100,12 +111,12 @@ struct dsa_switch_tree {
100 struct net_device *dev, 111 struct net_device *dev,
101 struct packet_type *pt, 112 struct packet_type *pt,
102 struct net_device *orig_dev); 113 struct net_device *orig_dev);
103 enum dsa_tag_protocol tag_protocol;
104 114
105 /* 115 /*
106 * Original copy of the master netdev ethtool_ops 116 * Original copy of the master netdev ethtool_ops
107 */ 117 */
108 struct ethtool_ops master_ethtool_ops; 118 struct ethtool_ops master_ethtool_ops;
119 const struct ethtool_ops *master_orig_ethtool_ops;
109 120
110 /* 121 /*
111 * The switch and port to which the CPU is attached. 122 * The switch and port to which the CPU is attached.
@@ -117,6 +128,17 @@ struct dsa_switch_tree {
117 * Data for the individual switch chips. 128 * Data for the individual switch chips.
118 */ 129 */
119 struct dsa_switch *ds[DSA_MAX_SWITCHES]; 130 struct dsa_switch *ds[DSA_MAX_SWITCHES];
131
132 /*
133 * Tagging protocol operations for adding and removing an
134 * encapsulation tag.
135 */
136 const struct dsa_device_ops *tag_ops;
137};
138
139struct dsa_port {
140 struct net_device *netdev;
141 struct device_node *dn;
120}; 142};
121 143
122struct dsa_switch { 144struct dsa_switch {
@@ -144,6 +166,13 @@ struct dsa_switch {
144 */ 166 */
145 struct dsa_switch_driver *drv; 167 struct dsa_switch_driver *drv;
146 168
169 /*
170 * An array of which element [a] indicates which port on this
 171	 * An array of which element [a] indicates which port on this
 172	 * switch should be used to send packets that are destined
173 */
174 s8 rtable[DSA_MAX_SWITCHES];
175
147#ifdef CONFIG_NET_DSA_HWMON 176#ifdef CONFIG_NET_DSA_HWMON
148 /* 177 /*
149 * Hardware monitoring information 178 * Hardware monitoring information
@@ -153,13 +182,19 @@ struct dsa_switch {
153#endif 182#endif
154 183
155 /* 184 /*
185 * The lower device this switch uses to talk to the host
186 */
187 struct net_device *master_netdev;
188
189 /*
156 * Slave mii_bus and devices for the individual ports. 190 * Slave mii_bus and devices for the individual ports.
157 */ 191 */
158 u32 dsa_port_mask; 192 u32 dsa_port_mask;
193 u32 cpu_port_mask;
159 u32 enabled_port_mask; 194 u32 enabled_port_mask;
160 u32 phys_mii_mask; 195 u32 phys_mii_mask;
196 struct dsa_port ports[DSA_MAX_PORTS];
161 struct mii_bus *slave_mii_bus; 197 struct mii_bus *slave_mii_bus;
162 struct net_device *ports[DSA_MAX_PORTS];
163}; 198};
164 199
165static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p) 200static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
@@ -174,7 +209,7 @@ static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
174 209
175static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p) 210static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
176{ 211{
177 return ds->enabled_port_mask & (1 << p) && ds->ports[p]; 212 return ds->enabled_port_mask & (1 << p) && ds->ports[p].netdev;
178} 213}
179 214
180static inline u8 dsa_upstream_port(struct dsa_switch *ds) 215static inline u8 dsa_upstream_port(struct dsa_switch *ds)
@@ -190,7 +225,7 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds)
190 if (dst->cpu_switch == ds->index) 225 if (dst->cpu_switch == ds->index)
191 return dst->cpu_port; 226 return dst->cpu_port;
192 else 227 else
193 return ds->cd->rtable[dst->cpu_switch]; 228 return ds->rtable[dst->cpu_switch];
194} 229}
195 230
196struct switchdev_trans; 231struct switchdev_trans;
@@ -344,4 +379,7 @@ static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
344{ 379{
345 return dst->rcv != NULL; 380 return dst->rcv != NULL;
346} 381}
382
383void dsa_unregister_switch(struct dsa_switch *ds);
384int dsa_register_switch(struct dsa_switch *ds, struct device_node *np);
347#endif 385#endif
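The newly exported dsa_register_switch()/dsa_unregister_switch() pair lets switch drivers bind themselves to a tree described in the device tree. A hedged probe/remove sketch; the allocation layout is an assumption (a real driver also fills in ds->drv and its private state first):

/* Hedged sketch: register a switch from a platform driver. */
static int example_probe(struct platform_device *pdev)
{
	struct dsa_switch *ds;

	ds = devm_kzalloc(&pdev->dev, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return -ENOMEM;

	/* a real driver sets ds->drv and its private data here */
	platform_set_drvdata(pdev, ds);

	return dsa_register_switch(ds, pdev->dev.of_node);
}

static int example_remove(struct platform_device *pdev)
{
	dsa_unregister_switch(platform_get_drvdata(pdev));
	return 0;
}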
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 59160de702b6..456e4a6006ab 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -17,7 +17,8 @@ struct fib_rule {
17 u32 flags; 17 u32 flags;
18 u32 table; 18 u32 table;
19 u8 action; 19 u8 action;
20 /* 3 bytes hole, try to use */ 20 u8 l3mdev;
21 /* 2 bytes hole, try to use */
21 u32 target; 22 u32 target;
22 __be64 tun_id; 23 __be64 tun_id;
23 struct fib_rule __rcu *ctarget; 24 struct fib_rule __rcu *ctarget;
@@ -36,6 +37,7 @@ struct fib_lookup_arg {
36 void *lookup_ptr; 37 void *lookup_ptr;
37 void *result; 38 void *result;
38 struct fib_rule *rule; 39 struct fib_rule *rule;
40 u32 table;
39 int flags; 41 int flags;
40#define FIB_LOOKUP_NOREF 1 42#define FIB_LOOKUP_NOREF 1
41#define FIB_LOOKUP_IGNORE_LINKSTATE 2 43#define FIB_LOOKUP_IGNORE_LINKSTATE 2
@@ -89,7 +91,8 @@ struct fib_rules_ops {
89 [FRA_TABLE] = { .type = NLA_U32 }, \ 91 [FRA_TABLE] = { .type = NLA_U32 }, \
90 [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \ 92 [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
91 [FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, \ 93 [FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, \
92 [FRA_GOTO] = { .type = NLA_U32 } 94 [FRA_GOTO] = { .type = NLA_U32 }, \
95 [FRA_L3MDEV] = { .type = NLA_U8 }
93 96
94static inline void fib_rule_get(struct fib_rule *rule) 97static inline void fib_rule_get(struct fib_rule *rule)
95{ 98{
@@ -102,6 +105,20 @@ static inline void fib_rule_put(struct fib_rule *rule)
102 kfree_rcu(rule, rcu); 105 kfree_rcu(rule, rcu);
103} 106}
104 107
108#ifdef CONFIG_NET_L3_MASTER_DEV
109static inline u32 fib_rule_get_table(struct fib_rule *rule,
110 struct fib_lookup_arg *arg)
111{
112 return rule->l3mdev ? arg->table : rule->table;
113}
114#else
115static inline u32 fib_rule_get_table(struct fib_rule *rule,
116 struct fib_lookup_arg *arg)
117{
118 return rule->table;
119}
120#endif
121
105static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla) 122static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla)
106{ 123{
107 if (nla[FRA_TABLE]) 124 if (nla[FRA_TABLE])
@@ -117,4 +134,7 @@ int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
117 struct fib_lookup_arg *); 134 struct fib_lookup_arg *);
118int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table, 135int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
119 u32 flags); 136 u32 flags);
137
138int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh);
139int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh);
120#endif 140#endif
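The effect of the FRA_L3MDEV bit is that the rule no longer names a fixed table; the table is resolved per lookup from the l3mdev (VRF) device and passed in through fib_lookup_arg. A minimal sketch of the accessor's semantics:

/* Hedged sketch: resolve the effective table for a matched rule. */
static u32 example_resolve_table(struct fib_rule *rule,
				 struct fib_lookup_arg *arg)
{
	/* rule->table for ordinary rules; arg->table (set from the
	 * l3mdev device) when the rule carries FRA_L3MDEV.
	 */
	return fib_rule_get_table(rule, arg);
}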
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 610cd397890e..231e121cc7d9 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -33,10 +33,12 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
33 spinlock_t *lock, struct gnet_dump *d, 33 spinlock_t *lock, struct gnet_dump *d,
34 int padattr); 34 int padattr);
35 35
36int gnet_stats_copy_basic(struct gnet_dump *d, 36int gnet_stats_copy_basic(const seqcount_t *running,
37 struct gnet_dump *d,
37 struct gnet_stats_basic_cpu __percpu *cpu, 38 struct gnet_stats_basic_cpu __percpu *cpu,
38 struct gnet_stats_basic_packed *b); 39 struct gnet_stats_basic_packed *b);
39void __gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats, 40void __gnet_stats_copy_basic(const seqcount_t *running,
41 struct gnet_stats_basic_packed *bstats,
40 struct gnet_stats_basic_cpu __percpu *cpu, 42 struct gnet_stats_basic_cpu __percpu *cpu,
41 struct gnet_stats_basic_packed *b); 43 struct gnet_stats_basic_packed *b);
42int gnet_stats_copy_rate_est(struct gnet_dump *d, 44int gnet_stats_copy_rate_est(struct gnet_dump *d,
@@ -52,13 +54,15 @@ int gnet_stats_finish_copy(struct gnet_dump *d);
52int gen_new_estimator(struct gnet_stats_basic_packed *bstats, 54int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
53 struct gnet_stats_basic_cpu __percpu *cpu_bstats, 55 struct gnet_stats_basic_cpu __percpu *cpu_bstats,
54 struct gnet_stats_rate_est64 *rate_est, 56 struct gnet_stats_rate_est64 *rate_est,
55 spinlock_t *stats_lock, struct nlattr *opt); 57 spinlock_t *stats_lock,
58 seqcount_t *running, struct nlattr *opt);
56void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, 59void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
57 struct gnet_stats_rate_est64 *rate_est); 60 struct gnet_stats_rate_est64 *rate_est);
58int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, 61int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
59 struct gnet_stats_basic_cpu __percpu *cpu_bstats, 62 struct gnet_stats_basic_cpu __percpu *cpu_bstats,
60 struct gnet_stats_rate_est64 *rate_est, 63 struct gnet_stats_rate_est64 *rate_est,
61 spinlock_t *stats_lock, struct nlattr *opt); 64 spinlock_t *stats_lock,
65 seqcount_t *running, struct nlattr *opt);
62bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, 66bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
63 const struct gnet_stats_rate_est64 *rate_est); 67 const struct gnet_stats_rate_est64 *rate_est);
64#endif 68#endif
diff --git a/include/net/geneve.h b/include/net/geneve.h
index cb544a530146..ec0327d4331b 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -1,10 +1,7 @@
1#ifndef __NET_GENEVE_H 1#ifndef __NET_GENEVE_H
2#define __NET_GENEVE_H 1 2#define __NET_GENEVE_H 1
3 3
4#ifdef CONFIG_INET
5#include <net/udp_tunnel.h> 4#include <net/udp_tunnel.h>
6#endif
7
8 5
9/* Geneve Header: 6/* Geneve Header:
10 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 7 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -62,12 +59,6 @@ struct genevehdr {
62 struct geneve_opt options[]; 59 struct geneve_opt options[];
63}; 60};
64 61
65static inline void geneve_get_rx_port(struct net_device *netdev)
66{
67 ASSERT_RTNL();
68 call_netdevice_notifiers(NETDEV_OFFLOAD_PUSH_GENEVE, netdev);
69}
70
71#ifdef CONFIG_INET 62#ifdef CONFIG_INET
72struct net_device *geneve_dev_create_fb(struct net *net, const char *name, 63struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
73 u8 name_assign_type, u16 dst_port); 64 u8 name_assign_type, u16 dst_port);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 54c779416eec..d97305d0e71f 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -18,6 +18,7 @@ struct route_info {
18 __u8 prefix[0]; /* 0,8 or 16 */ 18 __u8 prefix[0]; /* 0,8 or 16 */
19}; 19};
20 20
21#include <net/addrconf.h>
21#include <net/flow.h> 22#include <net/flow.h>
22#include <net/ip6_fib.h> 23#include <net/ip6_fib.h>
23#include <net/sock.h> 24#include <net/sock.h>
@@ -76,6 +77,8 @@ static inline struct dst_entry *ip6_route_output(struct net *net,
76 77
77struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6, 78struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
78 int flags); 79 int flags);
80struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
81 int ifindex, struct flowi6 *fl6, int flags);
79 82
80int ip6_route_init(void); 83int ip6_route_init(void);
81void ip6_route_cleanup(void); 84void ip6_route_cleanup(void);
@@ -86,9 +89,23 @@ int ip6_route_add(struct fib6_config *cfg);
86int ip6_ins_rt(struct rt6_info *); 89int ip6_ins_rt(struct rt6_info *);
87int ip6_del_rt(struct rt6_info *); 90int ip6_del_rt(struct rt6_info *);
88 91
89int ip6_route_get_saddr(struct net *net, struct rt6_info *rt, 92static inline int ip6_route_get_saddr(struct net *net, struct rt6_info *rt,
90 const struct in6_addr *daddr, unsigned int prefs, 93 const struct in6_addr *daddr,
91 struct in6_addr *saddr); 94 unsigned int prefs,
95 struct in6_addr *saddr)
96{
97 struct inet6_dev *idev =
98 rt ? ip6_dst_idev((struct dst_entry *)rt) : NULL;
99 int err = 0;
100
101 if (rt && rt->rt6i_prefsrc.plen)
102 *saddr = rt->rt6i_prefsrc.addr;
103 else
104 err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
105 daddr, prefs, saddr);
106
107 return err;
108}
92 109
93struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr, 110struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
94 const struct in6_addr *saddr, int oif, int flags); 111 const struct in6_addr *saddr, int oif, int flags);
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index dbf444428437..a5e7035fb93f 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -132,6 +132,7 @@ struct ip_tunnel {
132 int ip_tnl_net_id; 132 int ip_tnl_net_id;
133 struct gro_cells gro_cells; 133 struct gro_cells gro_cells;
134 bool collect_md; 134 bool collect_md;
135 bool ignore_df;
135}; 136};
136 137
137#define TUNNEL_CSUM __cpu_to_be16(0x01) 138#define TUNNEL_CSUM __cpu_to_be16(0x01)
@@ -156,6 +157,7 @@ struct tnl_ptk_info {
156 __be16 proto; 157 __be16 proto;
157 __be32 key; 158 __be32 key;
158 __be32 seq; 159 __be32 seq;
160 int hdr_len;
159}; 161};
160 162
161#define PACKET_RCVD 0 163#define PACKET_RCVD 0
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index 374388dc01c8..e90095091aa0 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -11,6 +11,8 @@
11#ifndef _NET_L3MDEV_H_ 11#ifndef _NET_L3MDEV_H_
12#define _NET_L3MDEV_H_ 12#define _NET_L3MDEV_H_
13 13
14#include <net/fib_rules.h>
15
14/** 16/**
15 * struct l3mdev_ops - l3mdev operations 17 * struct l3mdev_ops - l3mdev operations
16 * 18 *
@@ -36,11 +38,17 @@ struct l3mdev_ops {
36 38
37 /* IPv6 ops */ 39 /* IPv6 ops */
38 struct dst_entry * (*l3mdev_get_rt6_dst)(const struct net_device *dev, 40 struct dst_entry * (*l3mdev_get_rt6_dst)(const struct net_device *dev,
39 const struct flowi6 *fl6); 41 struct flowi6 *fl6);
42 int (*l3mdev_get_saddr6)(struct net_device *dev,
43 const struct sock *sk,
44 struct flowi6 *fl6);
40}; 45};
41 46
42#ifdef CONFIG_NET_L3_MASTER_DEV 47#ifdef CONFIG_NET_L3_MASTER_DEV
43 48
49int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
50 struct fib_lookup_arg *arg);
51
44int l3mdev_master_ifindex_rcu(const struct net_device *dev); 52int l3mdev_master_ifindex_rcu(const struct net_device *dev);
45static inline int l3mdev_master_ifindex(struct net_device *dev) 53static inline int l3mdev_master_ifindex(struct net_device *dev)
46{ 54{
@@ -71,6 +79,31 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
71 return rc; 79 return rc;
72} 80}
73 81
82static inline
83const struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
84{
85 /* netdev_master_upper_dev_get_rcu calls
86 * list_first_or_null_rcu to walk the upper dev list.
87 * list_first_or_null_rcu does not handle a const arg. We aren't
88 * making changes, just want the master device from that list so
89 * typecast to remove the const
90 */
91 struct net_device *dev = (struct net_device *)_dev;
92 const struct net_device *master;
93
94 if (!dev)
95 return NULL;
96
97 if (netif_is_l3_master(dev))
98 master = dev;
99 else if (netif_is_l3_slave(dev))
100 master = netdev_master_upper_dev_get_rcu(dev);
101 else
102 master = NULL;
103
104 return master;
105}
106
74/* get index of an interface to use for FIB lookups. For devices 107/* get index of an interface to use for FIB lookups. For devices
75 * enslaved to an L3 master device FIB lookups are based on the 108 * enslaved to an L3 master device FIB lookups are based on the
76 * master index 109 * master index
@@ -134,7 +167,9 @@ static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
134 167
135int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4); 168int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4);
136 169
137struct dst_entry *l3mdev_get_rt6_dst(struct net *net, const struct flowi6 *fl6); 170struct dst_entry *l3mdev_get_rt6_dst(struct net *net, struct flowi6 *fl6);
171int l3mdev_get_saddr6(struct net *net, const struct sock *sk,
172 struct flowi6 *fl6);
138 173
139static inline 174static inline
140struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto) 175struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
@@ -180,6 +215,12 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
180 return 0; 215 return 0;
181} 216}
182 217
218static inline
219const struct net_device *l3mdev_master_dev_rcu(const struct net_device *dev)
220{
221 return NULL;
222}
223
183static inline int l3mdev_fib_oif_rcu(struct net_device *dev) 224static inline int l3mdev_fib_oif_rcu(struct net_device *dev)
184{ 225{
185 return dev ? dev->ifindex : 0; 226 return dev ? dev->ifindex : 0;
@@ -220,11 +261,17 @@ static inline int l3mdev_get_saddr(struct net *net, int ifindex,
220} 261}
221 262
222static inline 263static inline
223struct dst_entry *l3mdev_get_rt6_dst(struct net *net, const struct flowi6 *fl6) 264struct dst_entry *l3mdev_get_rt6_dst(struct net *net, struct flowi6 *fl6)
224{ 265{
225 return NULL; 266 return NULL;
226} 267}
227 268
269static inline int l3mdev_get_saddr6(struct net *net, const struct sock *sk,
270 struct flowi6 *fl6)
271{
272 return 0;
273}
274
228static inline 275static inline
229struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb) 276struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
230{ 277{
@@ -236,6 +283,13 @@ struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
236{ 283{
237 return skb; 284 return skb;
238} 285}
286
287static inline
288int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
289 struct fib_lookup_arg *arg)
290{
291 return 1;
292}
239#endif 293#endif
240 294
241#endif /* _NET_L3MDEV_H_ */ 295#endif /* _NET_L3MDEV_H_ */
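
The new l3mdev_master_dev_rcu() helper resolves the L3 master (e.g. a VRF device) for an enslaved interface inside an RCU read-side section. A hedged usage sketch; the lookup-and-print wrapper is illustrative, not part of the API:

static int demo_print_master(struct net *net, int ifindex)
{
	const struct net_device *master;
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	master = l3mdev_master_dev_rcu(dev);	/* NULL unless l3mdev/slave */
	if (master)
		pr_info("%s: master is %s\n", dev->name, master->name);
	rcu_read_unlock();

	return master ? 0 : -ENODEV;
}
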
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index be30b0549b88..a52009ffc19f 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -21,6 +21,7 @@
21#include <linux/skbuff.h> 21#include <linux/skbuff.h>
22#include <linux/ieee80211.h> 22#include <linux/ieee80211.h>
23#include <net/cfg80211.h> 23#include <net/cfg80211.h>
24#include <net/codel.h>
24#include <asm/unaligned.h> 25#include <asm/unaligned.h>
25 26
26/** 27/**
@@ -895,7 +896,18 @@ struct ieee80211_tx_info {
895 unsigned long jiffies; 896 unsigned long jiffies;
896 }; 897 };
897 /* NB: vif can be NULL for injected frames */ 898 /* NB: vif can be NULL for injected frames */
898 struct ieee80211_vif *vif; 899 union {
900 /* NB: vif can be NULL for injected frames */
901 struct ieee80211_vif *vif;
902
903 /* When packets are enqueued on txq it's easy
904 * to re-construct the vif pointer. There's no
905 * more space in tx_info so it can be used to
906 * store the necessary enqueue time for packet
907 * sojourn time computation.
908 */
909 codel_time_t enqueue_time;
910 };
899 struct ieee80211_key_conf *hw_key; 911 struct ieee80211_key_conf *hw_key;
900 u32 flags; 912 u32 flags;
901 /* 4 bytes free */ 913 /* 4 bytes free */
@@ -2147,9 +2159,6 @@ enum ieee80211_hw_flags {
2147 * @n_cipher_schemes: a size of an array of cipher schemes definitions. 2159 * @n_cipher_schemes: a size of an array of cipher schemes definitions.
2148 * @cipher_schemes: a pointer to an array of cipher scheme definitions 2160 * @cipher_schemes: a pointer to an array of cipher scheme definitions
2149 * supported by HW. 2161 * supported by HW.
2150 *
2151 * @txq_ac_max_pending: maximum number of frames per AC pending in all txq
2152 * entries for a vif.
2153 */ 2162 */
2154struct ieee80211_hw { 2163struct ieee80211_hw {
2155 struct ieee80211_conf conf; 2164 struct ieee80211_conf conf;
@@ -2180,7 +2189,6 @@ struct ieee80211_hw {
2180 u8 uapsd_max_sp_len; 2189 u8 uapsd_max_sp_len;
2181 u8 n_cipher_schemes; 2190 u8 n_cipher_schemes;
2182 const struct ieee80211_cipher_scheme *cipher_schemes; 2191 const struct ieee80211_cipher_scheme *cipher_schemes;
2183 int txq_ac_max_pending;
2184}; 2192};
2185 2193
2186static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw, 2194static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
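
The union above reuses the control-path vif slot: once a frame sits on a txq the vif pointer can be reconstructed, so the same bytes can hold the CoDel enqueue timestamp instead. A hedged sketch of the intended use, assuming the codel_get_time()/codel_time_to_us() helpers from <net/codel.h>; the demo_* wrappers are illustrative:

static void demo_txq_stamp(struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	/* vif is recoverable from the txq while the frame is queued,
	 * so its slot is free to carry the enqueue time
	 */
	info->control.enqueue_time = codel_get_time();
}

static u32 demo_txq_sojourn_us(struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	return codel_time_to_us(codel_get_time() -
				info->control.enqueue_time);
}
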
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 2d8edaad29cb..be1fe2283254 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -35,6 +35,7 @@ enum {
35 ND_OPT_ROUTE_INFO = 24, /* RFC4191 */ 35 ND_OPT_ROUTE_INFO = 24, /* RFC4191 */
36 ND_OPT_RDNSS = 25, /* RFC5006 */ 36 ND_OPT_RDNSS = 25, /* RFC5006 */
37 ND_OPT_DNSSL = 31, /* RFC6106 */ 37 ND_OPT_DNSSL = 31, /* RFC6106 */
38 ND_OPT_6CO = 34, /* RFC6775 */
38 __ND_OPT_MAX 39 __ND_OPT_MAX
39}; 40};
40 41
@@ -53,11 +54,21 @@ enum {
53 54
54#include <net/neighbour.h> 55#include <net/neighbour.h>
55 56
57/* Set to 3 to get tracing... */
58#define ND_DEBUG 1
59
60#define ND_PRINTK(val, level, fmt, ...) \
61do { \
62 if (val <= ND_DEBUG) \
63 net_##level##_ratelimited(fmt, ##__VA_ARGS__); \
64} while (0)
65
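
ND_PRINTK gates rate-limited log output by verbosity: a message is emitted only when its level is at most ND_DEBUG, via the matching net_<level>_ratelimited() helper. Illustrative use:

	/* Printed only when ND_DEBUG >= 2; rate-limited by
	 * net_warn_ratelimited()
	 */
	ND_PRINTK(2, warn, "%s: invalid ND option, ignored\n", __func__);
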
56struct ctl_table; 66struct ctl_table;
57struct inet6_dev; 67struct inet6_dev;
58struct net_device; 68struct net_device;
59struct net_proto_family; 69struct net_proto_family;
60struct sk_buff; 70struct sk_buff;
71struct prefix_info;
61 72
62extern struct neigh_table nd_tbl; 73extern struct neigh_table nd_tbl;
63 74
@@ -99,20 +110,201 @@ struct ndisc_options {
99#endif 110#endif
100 struct nd_opt_hdr *nd_useropts; 111 struct nd_opt_hdr *nd_useropts;
101 struct nd_opt_hdr *nd_useropts_end; 112 struct nd_opt_hdr *nd_useropts_end;
113#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
114 struct nd_opt_hdr *nd_802154_opt_array[ND_OPT_TARGET_LL_ADDR + 1];
115#endif
102}; 116};
103 117
104#define nd_opts_src_lladdr nd_opt_array[ND_OPT_SOURCE_LL_ADDR] 118#define nd_opts_src_lladdr nd_opt_array[ND_OPT_SOURCE_LL_ADDR]
105#define nd_opts_tgt_lladdr nd_opt_array[ND_OPT_TARGET_LL_ADDR] 119#define nd_opts_tgt_lladdr nd_opt_array[ND_OPT_TARGET_LL_ADDR]
106#define nd_opts_pi nd_opt_array[ND_OPT_PREFIX_INFO] 120#define nd_opts_pi nd_opt_array[ND_OPT_PREFIX_INFO]
107#define nd_opts_pi_end nd_opt_array[__ND_OPT_PREFIX_INFO_END] 121#define nd_opts_pi_end nd_opt_array[__ND_OPT_PREFIX_INFO_END]
108#define nd_opts_rh nd_opt_array[ND_OPT_REDIRECT_HDR] 122#define nd_opts_rh nd_opt_array[ND_OPT_REDIRECT_HDR]
109#define nd_opts_mtu nd_opt_array[ND_OPT_MTU] 123#define nd_opts_mtu nd_opt_array[ND_OPT_MTU]
124#define nd_802154_opts_src_lladdr nd_802154_opt_array[ND_OPT_SOURCE_LL_ADDR]
125#define nd_802154_opts_tgt_lladdr nd_802154_opt_array[ND_OPT_TARGET_LL_ADDR]
110 126
111#define NDISC_OPT_SPACE(len) (((len)+2+7)&~7) 127#define NDISC_OPT_SPACE(len) (((len)+2+7)&~7)
112 128
113struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, 129struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
130 u8 *opt, int opt_len,
114 struct ndisc_options *ndopts); 131 struct ndisc_options *ndopts);
115 132
133void __ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data,
134 int data_len, int pad);
135
136#define NDISC_OPS_REDIRECT_DATA_SPACE 2
137
138/*
139 * This structure defines the hooks for IPv6 neighbour discovery.
140 * The following hooks can be defined; unless noted otherwise, they are
141 * optional and can be filled with a null pointer.
142 *
143 * int (*is_useropt)(u8 nd_opt_type):
 144 * This function is called when IPv6 decides which RA options to
 145 * hand to userspace. If this callback returns 1, the option given
 146 * by nd_opt_type is treated as a userspace option as well.
147 *
148 * int (*parse_options)(const struct net_device *dev,
149 * struct nd_opt_hdr *nd_opt,
150 * struct ndisc_options *ndopts):
 151 * This function is called while parsing ndisc options; it stores a
 152 * pointer to each recognized option in ndopts. If it returns nonzero,
 153 * the callback has handled the ndisc option; if it returns 0, the
 154 * generic IPv6 ndisc option parser handles the option instead.
155 *
156 * void (*update)(const struct net_device *dev, struct neighbour *n,
157 * u32 flags, u8 icmp6_type,
158 * const struct ndisc_options *ndopts):
159 * This function is called when IPv6 ndisc updates the neighbour cache
 160 * entry. Additional options which need updating may have been parsed
 161 * earlier by the parse_options callback and are accessible via ndopts.
162 *
163 * int (*opt_addr_space)(const struct net_device *dev, u8 icmp6_type,
164 * struct neighbour *neigh, u8 *ha_buf,
165 * u8 **ha):
 166 * This function is called to compute the necessary option space
 167 * before allocating an skb. The parameters neigh, ha_buf
 168 * and ha are available for NDISC_REDIRECT messages only.
169 *
170 * void (*fill_addr_option)(const struct net_device *dev,
171 * struct sk_buff *skb, u8 icmp6_type,
172 * const u8 *ha):
 173 * This function is called when the option fields are finally written
 174 * into the skb. NOTE: this callback should fill exactly the option
 175 * fields whose space was previously reserved by opt_addr_space.
 176 * That means the decision to add such an option must not change
 177 * between these two callbacks, e.g. it should be protected by the
 178 * interface up state.
179 *
180 * void (*prefix_rcv_add_addr)(struct net *net, struct net_device *dev,
181 * const struct prefix_info *pinfo,
182 * struct inet6_dev *in6_dev,
183 * struct in6_addr *addr,
184 * int addr_type, u32 addr_flags,
185 * bool sllao, bool tokenized,
186 * __u32 valid_lft, u32 prefered_lft,
187 * bool dev_addr_generated):
 188 * This function is called when an RA message is received with valid
 189 * PIO option fields and an IPv6 address will be added to the interface
 190 * for autoconfiguration. The parameter dev_addr_generated reports
 191 * whether the address was based on dev->dev_addr. This can be used
 192 * to add a second address if the link layer operates with two
 193 * link-layer addresses, e.g. 802.15.4 6LoWPAN.
194 */
195struct ndisc_ops {
196 int (*is_useropt)(u8 nd_opt_type);
197 int (*parse_options)(const struct net_device *dev,
198 struct nd_opt_hdr *nd_opt,
199 struct ndisc_options *ndopts);
200 void (*update)(const struct net_device *dev, struct neighbour *n,
201 u32 flags, u8 icmp6_type,
202 const struct ndisc_options *ndopts);
203 int (*opt_addr_space)(const struct net_device *dev, u8 icmp6_type,
204 struct neighbour *neigh, u8 *ha_buf,
205 u8 **ha);
206 void (*fill_addr_option)(const struct net_device *dev,
207 struct sk_buff *skb, u8 icmp6_type,
208 const u8 *ha);
209 void (*prefix_rcv_add_addr)(struct net *net, struct net_device *dev,
210 const struct prefix_info *pinfo,
211 struct inet6_dev *in6_dev,
212 struct in6_addr *addr,
213 int addr_type, u32 addr_flags,
214 bool sllao, bool tokenized,
215 __u32 valid_lft, u32 prefered_lft,
216 bool dev_addr_generated);
217};
218
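
A minimal sketch of an ndisc_ops instance wiring only the option parser, in the shape the comment block above describes. All demo_* names are illustrative; a real user such as 802.15.4 6LoWPAN fills in more hooks:

static int demo_parse_options(const struct net_device *dev,
			      struct nd_opt_hdr *nd_opt,
			      struct ndisc_options *ndopts)
{
	/* Return 0: claim nothing, let the generic parser handle it. */
	return 0;
}

static const struct ndisc_ops demo_ndisc_ops = {
	.parse_options	= demo_parse_options,
};

/* At device setup time (illustrative):
 *
 *	dev->ndisc_ops = &demo_ndisc_ops;
 */
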
219#if IS_ENABLED(CONFIG_IPV6)
220static inline int ndisc_ops_is_useropt(const struct net_device *dev,
221 u8 nd_opt_type)
222{
223 if (dev->ndisc_ops && dev->ndisc_ops->is_useropt)
224 return dev->ndisc_ops->is_useropt(nd_opt_type);
225 else
226 return 0;
227}
228
229static inline int ndisc_ops_parse_options(const struct net_device *dev,
230 struct nd_opt_hdr *nd_opt,
231 struct ndisc_options *ndopts)
232{
233 if (dev->ndisc_ops && dev->ndisc_ops->parse_options)
234 return dev->ndisc_ops->parse_options(dev, nd_opt, ndopts);
235 else
236 return 0;
237}
238
239static inline void ndisc_ops_update(const struct net_device *dev,
240 struct neighbour *n, u32 flags,
241 u8 icmp6_type,
242 const struct ndisc_options *ndopts)
243{
244 if (dev->ndisc_ops && dev->ndisc_ops->update)
245 dev->ndisc_ops->update(dev, n, flags, icmp6_type, ndopts);
246}
247
248static inline int ndisc_ops_opt_addr_space(const struct net_device *dev,
249 u8 icmp6_type)
250{
251 if (dev->ndisc_ops && dev->ndisc_ops->opt_addr_space &&
252 icmp6_type != NDISC_REDIRECT)
253 return dev->ndisc_ops->opt_addr_space(dev, icmp6_type, NULL,
254 NULL, NULL);
255 else
256 return 0;
257}
258
259static inline int ndisc_ops_redirect_opt_addr_space(const struct net_device *dev,
260 struct neighbour *neigh,
261 u8 *ha_buf, u8 **ha)
262{
263 if (dev->ndisc_ops && dev->ndisc_ops->opt_addr_space)
264 return dev->ndisc_ops->opt_addr_space(dev, NDISC_REDIRECT,
265 neigh, ha_buf, ha);
266 else
267 return 0;
268}
269
270static inline void ndisc_ops_fill_addr_option(const struct net_device *dev,
271 struct sk_buff *skb,
272 u8 icmp6_type)
273{
274 if (dev->ndisc_ops && dev->ndisc_ops->fill_addr_option &&
275 icmp6_type != NDISC_REDIRECT)
276 dev->ndisc_ops->fill_addr_option(dev, skb, icmp6_type, NULL);
277}
278
279static inline void ndisc_ops_fill_redirect_addr_option(const struct net_device *dev,
280 struct sk_buff *skb,
281 const u8 *ha)
282{
283 if (dev->ndisc_ops && dev->ndisc_ops->fill_addr_option)
284 dev->ndisc_ops->fill_addr_option(dev, skb, NDISC_REDIRECT, ha);
285}
286
287static inline void ndisc_ops_prefix_rcv_add_addr(struct net *net,
288 struct net_device *dev,
289 const struct prefix_info *pinfo,
290 struct inet6_dev *in6_dev,
291 struct in6_addr *addr,
292 int addr_type, u32 addr_flags,
293 bool sllao, bool tokenized,
294 __u32 valid_lft,
295 u32 prefered_lft,
296 bool dev_addr_generated)
297{
298 if (dev->ndisc_ops && dev->ndisc_ops->prefix_rcv_add_addr)
299 dev->ndisc_ops->prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
300 addr, addr_type,
301 addr_flags, sllao,
302 tokenized, valid_lft,
303 prefered_lft,
304 dev_addr_generated);
305}
306#endif
307
116/* 308/*
117 * Return the padding between the option length and the start of the 309 * Return the padding between the option length and the start of the
118 * link addr. Currently only IP-over-InfiniBand needs this, although 310 * link addr. Currently only IP-over-InfiniBand needs this, although
@@ -127,23 +319,48 @@ static inline int ndisc_addr_option_pad(unsigned short type)
127 } 319 }
128} 320}
129 321
130static inline int ndisc_opt_addr_space(struct net_device *dev) 322static inline int __ndisc_opt_addr_space(unsigned char addr_len, int pad)
131{ 323{
132 return NDISC_OPT_SPACE(dev->addr_len + 324 return NDISC_OPT_SPACE(addr_len + pad);
133 ndisc_addr_option_pad(dev->type));
134} 325}
135 326
136static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p, 327#if IS_ENABLED(CONFIG_IPV6)
137 struct net_device *dev) 328static inline int ndisc_opt_addr_space(struct net_device *dev, u8 icmp6_type)
329{
330 return __ndisc_opt_addr_space(dev->addr_len,
331 ndisc_addr_option_pad(dev->type)) +
332 ndisc_ops_opt_addr_space(dev, icmp6_type);
333}
334
335static inline int ndisc_redirect_opt_addr_space(struct net_device *dev,
336 struct neighbour *neigh,
337 u8 *ops_data_buf,
338 u8 **ops_data)
339{
340 return __ndisc_opt_addr_space(dev->addr_len,
341 ndisc_addr_option_pad(dev->type)) +
342 ndisc_ops_redirect_opt_addr_space(dev, neigh, ops_data_buf,
343 ops_data);
344}
345#endif
346
347static inline u8 *__ndisc_opt_addr_data(struct nd_opt_hdr *p,
348 unsigned char addr_len, int prepad)
138{ 349{
139 u8 *lladdr = (u8 *)(p + 1); 350 u8 *lladdr = (u8 *)(p + 1);
140 int lladdrlen = p->nd_opt_len << 3; 351 int lladdrlen = p->nd_opt_len << 3;
141 int prepad = ndisc_addr_option_pad(dev->type); 352 if (lladdrlen != __ndisc_opt_addr_space(addr_len, prepad))
142 if (lladdrlen != ndisc_opt_addr_space(dev))
143 return NULL; 353 return NULL;
144 return lladdr + prepad; 354 return lladdr + prepad;
145} 355}
146 356
357static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p,
358 struct net_device *dev)
359{
360 return __ndisc_opt_addr_data(p, dev->addr_len,
361 ndisc_addr_option_pad(dev->type));
362}
363
147static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, __u32 *hash_rnd) 364static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, __u32 *hash_rnd)
148{ 365{
149 const u32 *p32 = pkey; 366 const u32 *p32 = pkey;
@@ -194,6 +411,9 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target);
194int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev, 411int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev,
195 int dir); 412 int dir);
196 413
414void ndisc_update(const struct net_device *dev, struct neighbour *neigh,
415 const u8 *lladdr, u8 new, u32 flags, u8 icmp6_type,
416 struct ndisc_options *ndopts);
197 417
198/* 418/*
199 * IGMP 419 * IGMP
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index fea53f4d92ca..7caa99b482c6 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -67,12 +67,12 @@ struct qdisc_watchdog {
67}; 67};
68 68
69void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc); 69void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
70void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle); 70void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
71 71
72static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, 72static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
73 psched_time_t expires) 73 psched_time_t expires)
74{ 74{
75 qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires), true); 75 qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
76} 76}
77 77
78void qdisc_watchdog_cancel(struct qdisc_watchdog *wd); 78void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 62d553184e91..909aff2db2b3 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -26,14 +26,6 @@ struct qdisc_rate_table {
26enum qdisc_state_t { 26enum qdisc_state_t {
27 __QDISC_STATE_SCHED, 27 __QDISC_STATE_SCHED,
28 __QDISC_STATE_DEACTIVATED, 28 __QDISC_STATE_DEACTIVATED,
29 __QDISC_STATE_THROTTLED,
30};
31
32/*
33 * following bits are only changed while qdisc lock is held
34 */
35enum qdisc___state_t {
36 __QDISC___STATE_RUNNING = 1,
37}; 29};
38 30
39struct qdisc_size_table { 31struct qdisc_size_table {
@@ -45,8 +37,10 @@ struct qdisc_size_table {
45}; 37};
46 38
47struct Qdisc { 39struct Qdisc {
48 int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev); 40 int (*enqueue)(struct sk_buff *skb,
49 struct sk_buff * (*dequeue)(struct Qdisc *dev); 41 struct Qdisc *sch,
42 struct sk_buff **to_free);
43 struct sk_buff * (*dequeue)(struct Qdisc *sch);
50 unsigned int flags; 44 unsigned int flags;
51#define TCQ_F_BUILTIN 1 45#define TCQ_F_BUILTIN 1
52#define TCQ_F_INGRESS 2 46#define TCQ_F_INGRESS 2
@@ -70,31 +64,25 @@ struct Qdisc {
70 struct list_head list; 64 struct list_head list;
71 u32 handle; 65 u32 handle;
72 u32 parent; 66 u32 parent;
73 int (*reshape_fail)(struct sk_buff *skb,
74 struct Qdisc *q);
75
76 void *u32_node; 67 void *u32_node;
77 68
78 /* This field is deprecated, but it is still used by CBQ
79 * and it will live until better solution will be invented.
80 */
81 struct Qdisc *__parent;
82 struct netdev_queue *dev_queue; 69 struct netdev_queue *dev_queue;
83 70
84 struct gnet_stats_rate_est64 rate_est; 71 struct gnet_stats_rate_est64 rate_est;
85 struct gnet_stats_basic_cpu __percpu *cpu_bstats; 72 struct gnet_stats_basic_cpu __percpu *cpu_bstats;
86 struct gnet_stats_queue __percpu *cpu_qstats; 73 struct gnet_stats_queue __percpu *cpu_qstats;
87 74
88 struct Qdisc *next_sched;
89 struct sk_buff *gso_skb;
90 /* 75 /*
91 * For performance sake on SMP, we put highly modified fields at the end 76 * For performance sake on SMP, we put highly modified fields at the end
92 */ 77 */
93 unsigned long state; 78 struct sk_buff *gso_skb ____cacheline_aligned_in_smp;
94 struct sk_buff_head q; 79 struct sk_buff_head q;
95 struct gnet_stats_basic_packed bstats; 80 struct gnet_stats_basic_packed bstats;
96 unsigned int __state; 81 seqcount_t running;
97 struct gnet_stats_queue qstats; 82 struct gnet_stats_queue qstats;
83 unsigned long state;
84 struct Qdisc *next_sched;
85 struct sk_buff *skb_bad_txq;
98 struct rcu_head rcu_head; 86 struct rcu_head rcu_head;
99 int padded; 87 int padded;
100 atomic_t refcnt; 88 atomic_t refcnt;
@@ -104,20 +92,24 @@ struct Qdisc {
104 92
105static inline bool qdisc_is_running(const struct Qdisc *qdisc) 93static inline bool qdisc_is_running(const struct Qdisc *qdisc)
106{ 94{
107 return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false; 95 return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
108} 96}
109 97
110static inline bool qdisc_run_begin(struct Qdisc *qdisc) 98static inline bool qdisc_run_begin(struct Qdisc *qdisc)
111{ 99{
112 if (qdisc_is_running(qdisc)) 100 if (qdisc_is_running(qdisc))
113 return false; 101 return false;
114 qdisc->__state |= __QDISC___STATE_RUNNING; 102 /* Variant of write_seqcount_begin() telling lockdep a trylock
103 * was attempted.
104 */
105 raw_write_seqcount_begin(&qdisc->running);
106 seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
115 return true; 107 return true;
116} 108}
117 109
118static inline void qdisc_run_end(struct Qdisc *qdisc) 110static inline void qdisc_run_end(struct Qdisc *qdisc)
119{ 111{
120 qdisc->__state &= ~__QDISC___STATE_RUNNING; 112 write_seqcount_end(&qdisc->running);
121} 113}
122 114
123static inline bool qdisc_may_bulk(const struct Qdisc *qdisc) 115static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
@@ -135,21 +127,6 @@ static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
135#endif 127#endif
136} 128}
137 129
138static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
139{
140 return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
141}
142
143static inline void qdisc_throttled(struct Qdisc *qdisc)
144{
145 set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
146}
147
148static inline void qdisc_unthrottled(struct Qdisc *qdisc)
149{
150 clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
151}
152
153struct Qdisc_class_ops { 130struct Qdisc_class_ops {
154 /* Child qdisc manipulation */ 131 /* Child qdisc manipulation */
155 struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); 132 struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
@@ -186,10 +163,11 @@ struct Qdisc_ops {
186 char id[IFNAMSIZ]; 163 char id[IFNAMSIZ];
187 int priv_size; 164 int priv_size;
188 165
189 int (*enqueue)(struct sk_buff *, struct Qdisc *); 166 int (*enqueue)(struct sk_buff *skb,
167 struct Qdisc *sch,
168 struct sk_buff **to_free);
190 struct sk_buff * (*dequeue)(struct Qdisc *); 169 struct sk_buff * (*dequeue)(struct Qdisc *);
191 struct sk_buff * (*peek)(struct Qdisc *); 170 struct sk_buff * (*peek)(struct Qdisc *);
192 unsigned int (*drop)(struct Qdisc *);
193 171
194 int (*init)(struct Qdisc *, struct nlattr *arg); 172 int (*init)(struct Qdisc *, struct nlattr *arg);
195 void (*reset)(struct Qdisc *); 173 void (*reset)(struct Qdisc *);
@@ -322,6 +300,14 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
322 return qdisc_lock(root); 300 return qdisc_lock(root);
323} 301}
324 302
303static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
304{
305 struct Qdisc *root = qdisc_root_sleeping(qdisc);
306
307 ASSERT_RTNL();
308 return &root->running;
309}
310
325static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc) 311static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
326{ 312{
327 return qdisc->dev_queue->dev; 313 return qdisc->dev_queue->dev;
@@ -517,10 +503,11 @@ static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
517#endif 503#endif
518} 504}
519 505
520static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) 506static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
507 struct sk_buff **to_free)
521{ 508{
522 qdisc_calculate_pkt_len(skb, sch); 509 qdisc_calculate_pkt_len(skb, sch);
523 return sch->enqueue(skb, sch); 510 return sch->enqueue(skb, sch, to_free);
524} 511}
525 512
526static inline bool qdisc_is_percpu_stats(const struct Qdisc *q) 513static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
@@ -645,40 +632,36 @@ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
645 return __qdisc_dequeue_head(sch, &sch->q); 632 return __qdisc_dequeue_head(sch, &sch->q);
646} 633}
647 634
635/* Instead of calling kfree_skb() while root qdisc lock is held,
636 * queue the skb for future freeing at end of __dev_xmit_skb()
637 */
638static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
639{
640 skb->next = *to_free;
641 *to_free = skb;
642}
643
648static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch, 644static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
649 struct sk_buff_head *list) 645 struct sk_buff_head *list,
646 struct sk_buff **to_free)
650{ 647{
651 struct sk_buff *skb = __skb_dequeue(list); 648 struct sk_buff *skb = __skb_dequeue(list);
652 649
653 if (likely(skb != NULL)) { 650 if (likely(skb != NULL)) {
654 unsigned int len = qdisc_pkt_len(skb); 651 unsigned int len = qdisc_pkt_len(skb);
652
655 qdisc_qstats_backlog_dec(sch, skb); 653 qdisc_qstats_backlog_dec(sch, skb);
656 kfree_skb(skb); 654 __qdisc_drop(skb, to_free);
657 return len; 655 return len;
658 } 656 }
659 657
660 return 0; 658 return 0;
661} 659}
662 660
663static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch) 661static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
664{ 662 struct sk_buff **to_free)
665 return __qdisc_queue_drop_head(sch, &sch->q);
666}
667
668static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
669 struct sk_buff_head *list)
670{
671 struct sk_buff *skb = __skb_dequeue_tail(list);
672
673 if (likely(skb != NULL))
674 qdisc_qstats_backlog_dec(sch, skb);
675
676 return skb;
677}
678
679static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
680{ 663{
681 return __qdisc_dequeue_tail(sch, &sch->q); 664 return __qdisc_queue_drop_head(sch, &sch->q, to_free);
682} 665}
683 666
684static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch) 667static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
@@ -718,19 +701,21 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
718 return skb; 701 return skb;
719} 702}
720 703
721static inline void __qdisc_reset_queue(struct Qdisc *sch, 704static inline void __qdisc_reset_queue(struct sk_buff_head *list)
722 struct sk_buff_head *list)
723{ 705{
724 /* 706 /*
725 * We do not know the backlog in bytes of this list, it 707 * We do not know the backlog in bytes of this list, it
726 * is up to the caller to correct it 708 * is up to the caller to correct it
727 */ 709 */
728 __skb_queue_purge(list); 710 if (!skb_queue_empty(list)) {
711 rtnl_kfree_skbs(list->next, list->prev);
712 __skb_queue_head_init(list);
713 }
729} 714}
730 715
731static inline void qdisc_reset_queue(struct Qdisc *sch) 716static inline void qdisc_reset_queue(struct Qdisc *sch)
732{ 717{
733 __qdisc_reset_queue(sch, &sch->q); 718 __qdisc_reset_queue(&sch->q);
734 sch->qstats.backlog = 0; 719 sch->qstats.backlog = 0;
735} 720}
736 721
@@ -751,46 +736,19 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
751 return old; 736 return old;
752} 737}
753 738
754static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch, 739static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
755 struct sk_buff_head *list)
756{ 740{
757 struct sk_buff *skb = __qdisc_dequeue_tail(sch, list); 741 rtnl_kfree_skbs(skb, skb);
758
759 if (likely(skb != NULL)) {
760 unsigned int len = qdisc_pkt_len(skb);
761 kfree_skb(skb);
762 return len;
763 }
764
765 return 0;
766}
767
768static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
769{
770 return __qdisc_queue_drop(sch, &sch->q);
771}
772
773static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
774{
775 kfree_skb(skb);
776 qdisc_qstats_drop(sch); 742 qdisc_qstats_drop(sch);
777
778 return NET_XMIT_DROP;
779} 743}
780 744
781static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch) 745
746static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
747 struct sk_buff **to_free)
782{ 748{
749 __qdisc_drop(skb, to_free);
783 qdisc_qstats_drop(sch); 750 qdisc_qstats_drop(sch);
784 751
785#ifdef CONFIG_NET_CLS_ACT
786 if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
787 goto drop;
788
789 return NET_XMIT_SUCCESS;
790
791drop:
792#endif
793 kfree_skb(skb);
794 return NET_XMIT_DROP; 752 return NET_XMIT_DROP;
795} 753}
796 754
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index b392ac8382f2..632e205ca54b 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -186,6 +186,10 @@ void sctp_assocs_proc_exit(struct net *net);
186int sctp_remaddr_proc_init(struct net *net); 186int sctp_remaddr_proc_init(struct net *net);
187void sctp_remaddr_proc_exit(struct net *net); 187void sctp_remaddr_proc_exit(struct net *net);
188 188
189/*
190 * sctp/offload.c
191 */
192int sctp_offload_init(void);
189 193
190/* 194/*
191 * Module global variables 195 * Module global variables
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 16b013a6191c..83c5ec58b93a 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -566,6 +566,9 @@ struct sctp_chunk {
566 /* This points to the sk_buff containing the actual data. */ 566 /* This points to the sk_buff containing the actual data. */
567 struct sk_buff *skb; 567 struct sk_buff *skb;
568 568
569 /* In case of GSO packets, this will store the head one */
570 struct sk_buff *head_skb;
571
569 /* These are the SCTP headers by reverse order in a packet. 572 /* These are the SCTP headers by reverse order in a packet.
570 * Note that some of these may happen more than once. In that 573 * Note that some of these may happen more than once. In that
571 * case, we point at the "current" one, whatever that means 574 * case, we point at the "current" one, whatever that means
@@ -696,6 +699,8 @@ struct sctp_packet {
696 size_t overhead; 699 size_t overhead;
697 /* This is the total size of all chunks INCLUDING padding. */ 700 /* This is the total size of all chunks INCLUDING padding. */
698 size_t size; 701 size_t size;
702 /* This is the maximum size this packet may have */
703 size_t max_size;
699 704
700 /* The packet is destined for this transport address. 705 /* The packet is destined for this transport address.
701 * The function we finally use to pass down to the next lower 706 * The function we finally use to pass down to the next lower
diff --git a/include/net/tc_act/tc_defact.h b/include/net/tc_act/tc_defact.h
index 9763dcbb9bc3..ab9b5d6be67b 100644
--- a/include/net/tc_act/tc_defact.h
+++ b/include/net/tc_act/tc_defact.h
@@ -5,8 +5,8 @@
5 5
6struct tcf_defact { 6struct tcf_defact {
7 struct tcf_common common; 7 struct tcf_common common;
8 u32 tcfd_datalen; 8 u32 tcfd_datalen;
9 void *tcfd_defdata; 9 void *tcfd_defdata;
10}; 10};
11#define to_defact(a) \ 11#define to_defact(a) \
12 container_of(a->priv, struct tcf_defact, common) 12 container_of(a->priv, struct tcf_defact, common)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0bcc70f4e1fb..a79894b66726 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -767,6 +767,7 @@ struct tcp_skb_cb {
767 union { 767 union {
768 struct { 768 struct {
769 /* There is space for up to 20 bytes */ 769 /* There is space for up to 20 bytes */
770 __u32 in_flight;/* Bytes in flight when packet sent */
770 } tx; /* only used for outgoing skbs */ 771 } tx; /* only used for outgoing skbs */
771 union { 772 union {
772 struct inet_skb_parm h4; 773 struct inet_skb_parm h4;
@@ -859,6 +860,7 @@ union tcp_cc_info;
859struct ack_sample { 860struct ack_sample {
860 u32 pkts_acked; 861 u32 pkts_acked;
861 s32 rtt_us; 862 s32 rtt_us;
863 u32 in_flight;
862}; 864};
863 865
864struct tcp_congestion_ops { 866struct tcp_congestion_ops {
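
ack_sample now also carries the bytes that were in flight when the newly acked packet was sent, letting rate-based congestion controls estimate delivery rate per ACK. A hedged sketch of a pkts_acked() hook consuming it; the demo_* name and the debug print are illustrative:

static void demo_pkts_acked(struct sock *sk, const struct ack_sample *sample)
{
	if (sample->rtt_us < 0 || !sample->in_flight)
		return;		/* no usable measurement on this ACK */

	/* delivery rate ~ in_flight / rtt; the math is left to the CC */
	pr_debug("acked=%u rtt=%d us in_flight=%u bytes\n",
		 sample->pkts_acked, sample->rtt_us, sample->in_flight);
}
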
diff --git a/include/net/udp.h b/include/net/udp.h
index ae07f375370d..8894d7144189 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -160,8 +160,8 @@ void udp_set_csum(bool nocheck, struct sk_buff *skb,
160 160
161static inline void udp_csum_pull_header(struct sk_buff *skb) 161static inline void udp_csum_pull_header(struct sk_buff *skb)
162{ 162{
163 if (skb->ip_summed == CHECKSUM_NONE) 163 if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
164 skb->csum = csum_partial(udp_hdr(skb), sizeof(struct udphdr), 164 skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
165 skb->csum); 165 skb->csum);
166 skb_pull_rcsum(skb, sizeof(struct udphdr)); 166 skb_pull_rcsum(skb, sizeof(struct udphdr));
167 UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr); 167 UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index 9d14f707e534..02c5be037451 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -84,6 +84,46 @@ struct udp_tunnel_sock_cfg {
84void setup_udp_tunnel_sock(struct net *net, struct socket *sock, 84void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
85 struct udp_tunnel_sock_cfg *sock_cfg); 85 struct udp_tunnel_sock_cfg *sock_cfg);
86 86
87/* -- List of parsable UDP tunnel types --
88 *
89 * Adding to this list will result in serious debate. The main issue is
90 * that this list is essentially a list of workarounds for either poorly
91 * designed tunnels, or poorly designed device offloads.
92 *
93 * The parsing supported via these types should really be used for Rx
94 * traffic only as the network stack will have already inserted offsets for
95 * the location of the headers in the skb. In addition any ports that are
96 * pushed should be kept within the namespace without leaking to other
97 * devices such as VFs or other ports on the same device.
98 *
99 * It is strongly encouraged to use CHECKSUM_COMPLETE for Rx to avoid the
100 * need to use this for Rx checksum offload. It should not be necessary to
101 * call this function to perform Tx offloads on outgoing traffic.
102 */
103enum udp_parsable_tunnel_type {
104 UDP_TUNNEL_TYPE_VXLAN, /* RFC 7348 */
105 UDP_TUNNEL_TYPE_GENEVE, /* draft-ietf-nvo3-geneve */
106 UDP_TUNNEL_TYPE_VXLAN_GPE, /* draft-ietf-nvo3-vxlan-gpe */
107};
108
109struct udp_tunnel_info {
110 unsigned short type;
111 sa_family_t sa_family;
112 __be16 port;
113};
114
115/* Notify network devices of offloadable types */
116void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
117 unsigned short type);
118void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type);
119void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type);
120
121static inline void udp_tunnel_get_rx_info(struct net_device *dev)
122{
123 ASSERT_RTNL();
124 call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
125}
126
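
udp_tunnel_push_rx_port() and friends replace the per-protocol vxlan_get_rx_port()/geneve_get_rx_port() notifiers with a single typed udp_tunnel_info. A hedged sketch of the driver side, assuming the ndo_udp_tunnel_add-style hook introduced alongside this API; the body is illustrative:

static void demo_udp_tunnel_add(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
	case UDP_TUNNEL_TYPE_GENEVE:
		/* program ti->port (network byte order) into the NIC's
		 * tunnel parser for the given ti->sa_family
		 */
		break;
	default:
		return;		/* unknown tunnel type: ignore */
	}
}
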
87/* Transmit the skb using UDP encapsulation. */ 127/* Transmit the skb using UDP encapsulation. */
88void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, 128void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
89 __be32 src, __be32 dst, __u8 tos, __u8 ttl, 129 __be32 src, __be32 dst, __u8 tos, __u8 ttl,
@@ -105,12 +145,14 @@ struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
105 __be16 flags, __be64 tunnel_id, 145 __be16 flags, __be64 tunnel_id,
106 int md_size); 146 int md_size);
107 147
148#ifdef CONFIG_INET
108static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum) 149static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
109{ 150{
110 int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; 151 int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
111 152
112 return iptunnel_handle_offloads(skb, type); 153 return iptunnel_handle_offloads(skb, type);
113} 154}
155#endif
114 156
115static inline void udp_tunnel_encap_enable(struct socket *sock) 157static inline void udp_tunnel_encap_enable(struct socket *sock)
116{ 158{
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index b8803165df91..b96d0360c095 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -1,13 +1,10 @@
1#ifndef __NET_VXLAN_H 1#ifndef __NET_VXLAN_H
2#define __NET_VXLAN_H 1 2#define __NET_VXLAN_H 1
3 3
4#include <linux/ip.h>
5#include <linux/ipv6.h>
6#include <linux/if_vlan.h> 4#include <linux/if_vlan.h>
7#include <linux/skbuff.h> 5#include <net/udp_tunnel.h>
8#include <linux/netdevice.h>
9#include <linux/udp.h>
10#include <net/dst_metadata.h> 6#include <net/dst_metadata.h>
7#include <net/udp_tunnel.h>
11 8
12/* VXLAN protocol (RFC 7348) header: 9/* VXLAN protocol (RFC 7348) header:
13 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 10 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -392,12 +389,6 @@ static inline __be32 vxlan_compute_rco(unsigned int start, unsigned int offset)
392 return vni_field; 389 return vni_field;
393} 390}
394 391
395static inline void vxlan_get_rx_port(struct net_device *netdev)
396{
397 ASSERT_RTNL();
398 call_netdevice_notifiers(NETDEV_OFFLOAD_PUSH_VXLAN, netdev);
399}
400
401static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs) 392static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
402{ 393{
403 return vs->sock->sk->sk_family; 394 return vs->sock->sk->sk_family;
diff --git a/include/soc/fsl/qe/immap_qe.h b/include/soc/fsl/qe/immap_qe.h
index bedbff891423..c76ef30b05ba 100644
--- a/include/soc/fsl/qe/immap_qe.h
+++ b/include/soc/fsl/qe/immap_qe.h
@@ -159,10 +159,7 @@ struct spi {
159 159
160/* SI */ 160/* SI */
161struct si1 { 161struct si1 {
162 __be16 siamr1; /* SI1 TDMA mode register */ 162 __be16 sixmr1[4]; /* SI1 TDMx (x = A B C D) mode register */
163 __be16 sibmr1; /* SI1 TDMB mode register */
164 __be16 sicmr1; /* SI1 TDMC mode register */
165 __be16 sidmr1; /* SI1 TDMD mode register */
166 u8 siglmr1_h; /* SI1 global mode register high */ 163 u8 siglmr1_h; /* SI1 global mode register high */
167 u8 res0[0x1]; 164 u8 res0[0x1];
168 u8 sicmdr1_h; /* SI1 command register high */ 165 u8 sicmdr1_h; /* SI1 command register high */
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
index 33b29ead3d55..70339d7958c0 100644
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -80,6 +80,8 @@ enum qe_clock {
80 QE_CLK22, /* Clock 22 */ 80 QE_CLK22, /* Clock 22 */
81 QE_CLK23, /* Clock 23 */ 81 QE_CLK23, /* Clock 23 */
82 QE_CLK24, /* Clock 24 */ 82 QE_CLK24, /* Clock 24 */
83 QE_RSYNC_PIN, /* RSYNC from pin */
84 QE_TSYNC_PIN, /* TSYNC from pin */
83 QE_CLK_DUMMY 85 QE_CLK_DUMMY
84}; 86};
85 87
@@ -242,6 +244,22 @@ static inline int qe_alive_during_sleep(void)
242#define qe_muram_addr cpm_muram_addr 244#define qe_muram_addr cpm_muram_addr
243#define qe_muram_offset cpm_muram_offset 245#define qe_muram_offset cpm_muram_offset
244 246
247#define qe_setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr))
248#define qe_clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
249
250#define qe_setbits16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr))
251#define qe_clrbits16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr))
252
253#define qe_setbits8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr))
254#define qe_clrbits8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr))
255
256#define qe_clrsetbits32(addr, clear, set) \
257 iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr))
258#define qe_clrsetbits16(addr, clear, set) \
259 iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr))
260#define qe_clrsetbits8(addr, clear, set) \
261 iowrite8((ioread8(addr) & ~(clear)) | (set), (addr))
262
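
The qe_*bits accessors above implement read-modify-write on big-endian MMIO registers via ioread/iowrite. Illustrative use, with the register pointer passed in so the sketch stays independent of any particular QE block layout:

static void demo_enable_loopback(__be32 __iomem *gumr)
{
	/* GUMR is big-endian; qe_setbits32() does the byte swapping */
	qe_setbits32(gumr, UCC_FAST_GUMR_LOOPBACK);
}
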
245/* Structure that defines QE firmware binary files. 263/* Structure that defines QE firmware binary files.
246 * 264 *
247 * See Documentation/powerpc/qe_firmware.txt for a description of these 265 * See Documentation/powerpc/qe_firmware.txt for a description of these
@@ -639,6 +657,7 @@ struct ucc_slow_pram {
639#define UCC_SLOW_GUMR_L_MODE_QMC 0x00000002 657#define UCC_SLOW_GUMR_L_MODE_QMC 0x00000002
640 658
641/* General UCC FAST Mode Register */ 659/* General UCC FAST Mode Register */
660#define UCC_FAST_GUMR_LOOPBACK 0x40000000
642#define UCC_FAST_GUMR_TCI 0x20000000 661#define UCC_FAST_GUMR_TCI 0x20000000
643#define UCC_FAST_GUMR_TRX 0x10000000 662#define UCC_FAST_GUMR_TRX 0x10000000
644#define UCC_FAST_GUMR_TTX 0x08000000 663#define UCC_FAST_GUMR_TTX 0x08000000
diff --git a/include/soc/fsl/qe/qe_tdm.h b/include/soc/fsl/qe/qe_tdm.h
new file mode 100644
index 000000000000..a1664b635f1a
--- /dev/null
+++ b/include/soc/fsl/qe/qe_tdm.h
@@ -0,0 +1,94 @@
1/*
2 * Internal header file for QE TDM mode routines.
3 *
4 * Copyright (C) 2016 Freescale Semiconductor, Inc. All rights reserved.
5 *
6 * Authors: Zhao Qiang <qiang.zhao@nxp.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version
12 */
13
14#ifndef _QE_TDM_H_
15#define _QE_TDM_H_
16
17#include <linux/kernel.h>
18#include <linux/list.h>
19
20#include <soc/fsl/qe/immap_qe.h>
21#include <soc/fsl/qe/qe.h>
22
23#include <soc/fsl/qe/ucc.h>
24#include <soc/fsl/qe/ucc_fast.h>
25
26/* SI RAM entries */
27#define SIR_LAST 0x0001
28#define SIR_BYTE 0x0002
29#define SIR_CNT(x) ((x) << 2)
30#define SIR_CSEL(x) ((x) << 5)
31#define SIR_SGS 0x0200
32#define SIR_SWTR 0x4000
33#define SIR_MCC 0x8000
34#define SIR_IDLE 0
35
36/* SIxMR fields */
37#define SIMR_SAD(x) ((x) << 12)
38#define SIMR_SDM_NORMAL 0x0000
39#define SIMR_SDM_INTERNAL_LOOPBACK 0x0800
40#define SIMR_SDM_MASK 0x0c00
41#define SIMR_CRT 0x0040
42#define SIMR_SL 0x0020
43#define SIMR_CE 0x0010
44#define SIMR_FE 0x0008
45#define SIMR_GM 0x0004
46#define SIMR_TFSD(n) (n)
47#define SIMR_RFSD(n) ((n) << 8)
48
49enum tdm_ts_t {
50 TDM_TX_TS,
51 TDM_RX_TS
52};
53
54enum tdm_framer_t {
55 TDM_FRAMER_T1,
56 TDM_FRAMER_E1
57};
58
59enum tdm_mode_t {
60 TDM_INTERNAL_LOOPBACK,
61 TDM_NORMAL
62};
63
64struct si_mode_info {
65 u8 simr_rfsd;
66 u8 simr_tfsd;
67 u8 simr_crt;
68 u8 simr_sl;
69 u8 simr_ce;
70 u8 simr_fe;
71 u8 simr_gm;
72};
73
74struct ucc_tdm_info {
75 struct ucc_fast_info uf_info;
76 struct si_mode_info si_info;
77};
78
79struct ucc_tdm {
80 u16 tdm_port; /* port for this tdm:TDMA,TDMB */
81 u32 siram_entry_id;
82 u16 __iomem *siram;
83 struct si1 __iomem *si_regs;
84 enum tdm_framer_t tdm_framer_type;
85 enum tdm_mode_t tdm_mode;
86 u8 num_of_ts; /* the number of timeslots in this tdm frame */
87 u32 tx_ts_mask; /* tx time slot mask */
88 u32 rx_ts_mask; /* rx time slot mask */
89};
90
91int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
92 struct ucc_tdm_info *ut_info);
93void ucc_tdm_init(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info);
94#endif
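
A hedged sketch of the intended call sequence for this new TDM API: parse the device-tree description of a TDM port, then program the serial interface. Error handling and the surrounding probe() are elided; the demo_* name is illustrative:

static int demo_setup_tdm(struct device_node *np, struct ucc_tdm *utdm,
			  struct ucc_tdm_info *ut_info)
{
	int ret;

	ret = ucc_of_parse_tdm(np, utdm, ut_info);	/* fills utdm */
	if (ret)
		return ret;

	ucc_tdm_init(utdm, ut_info);	/* writes SI RAM and SIxMR */
	return 0;
}
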
diff --git a/include/soc/fsl/qe/ucc.h b/include/soc/fsl/qe/ucc.h
index 894f14cbb044..6bbbb597f2af 100644
--- a/include/soc/fsl/qe/ucc.h
+++ b/include/soc/fsl/qe/ucc.h
@@ -41,6 +41,10 @@ int ucc_set_qe_mux_mii_mng(unsigned int ucc_num);
41 41
42int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock, 42int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
43 enum comm_dir mode); 43 enum comm_dir mode);
44int ucc_set_tdm_rxtx_clk(unsigned int tdm_num, enum qe_clock clock,
45 enum comm_dir mode);
46int ucc_set_tdm_rxtx_sync(unsigned int tdm_num, enum qe_clock clock,
47 enum comm_dir mode);
44 48
45int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask); 49int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask);
46 50
diff --git a/include/soc/fsl/qe/ucc_fast.h b/include/soc/fsl/qe/ucc_fast.h
index df8ea7958c63..3ee9e7c1a7d7 100644
--- a/include/soc/fsl/qe/ucc_fast.h
+++ b/include/soc/fsl/qe/ucc_fast.h
@@ -21,19 +21,37 @@
21 21
22#include <soc/fsl/qe/ucc.h> 22#include <soc/fsl/qe/ucc.h>
23 23
 24/* Receive BD's status */ 24/* Receive BD's status and length */
25#define R_E 0x80000000 /* buffer empty */ 25#define R_E 0x80000000 /* buffer empty */
26#define R_W 0x20000000 /* wrap bit */ 26#define R_W 0x20000000 /* wrap bit */
27#define R_I 0x10000000 /* interrupt on reception */ 27#define R_I 0x10000000 /* interrupt on reception */
28#define R_L 0x08000000 /* last */ 28#define R_L 0x08000000 /* last */
29#define R_F 0x04000000 /* first */ 29#define R_F 0x04000000 /* first */
30 30
 31/* transmit BD's status */ 31/* transmit BD's status and length */
32#define T_R 0x80000000 /* ready bit */ 32#define T_R 0x80000000 /* ready bit */
33#define T_W 0x20000000 /* wrap bit */ 33#define T_W 0x20000000 /* wrap bit */
34#define T_I 0x10000000 /* interrupt on completion */ 34#define T_I 0x10000000 /* interrupt on completion */
35#define T_L 0x08000000 /* last */ 35#define T_L 0x08000000 /* last */
36 36
37/* Receive BD's status */
38#define R_E_S 0x8000 /* buffer empty */
39#define R_W_S 0x2000 /* wrap bit */
40#define R_I_S 0x1000 /* interrupt on reception */
41#define R_L_S 0x0800 /* last */
42#define R_F_S 0x0400 /* first */
43#define R_CM_S 0x0200 /* continuous mode */
44#define R_CR_S 0x0004 /* crc */
 45#define R_OV_S 0x0002 /* overrun */
46
47/* transmit BD's status */
48#define T_R_S 0x8000 /* ready bit */
49#define T_W_S 0x2000 /* wrap bit */
50#define T_I_S 0x1000 /* interrupt on completion */
51#define T_L_S 0x0800 /* last */
52#define T_TC_S 0x0400 /* crc */
53#define T_TM_S 0x0200 /* continuous mode */
54
37/* Rx Data buffer must be 4 bytes aligned in most cases */ 55/* Rx Data buffer must be 4 bytes aligned in most cases */
38#define UCC_FAST_RX_ALIGN 4 56#define UCC_FAST_RX_ALIGN 4
39#define UCC_FAST_MRBLR_ALIGNMENT 4 57#define UCC_FAST_MRBLR_ALIGNMENT 4
@@ -118,9 +136,12 @@ enum ucc_fast_transparent_tcrc {
118/* Fast UCC initialization structure */ 136/* Fast UCC initialization structure */
119struct ucc_fast_info { 137struct ucc_fast_info {
120 int ucc_num; 138 int ucc_num;
139 int tdm_num;
121 enum qe_clock rx_clock; 140 enum qe_clock rx_clock;
122 enum qe_clock tx_clock; 141 enum qe_clock tx_clock;
123 u32 regs; 142 enum qe_clock rx_sync;
143 enum qe_clock tx_sync;
144 resource_size_t regs;
124 int irq; 145 int irq;
125 u32 uccm_mask; 146 u32 uccm_mask;
126 int bd_mem_part; 147 int bd_mem_part;
diff --git a/include/uapi/linux/can/bcm.h b/include/uapi/linux/can/bcm.h
index 7a291dc1ff15..cefb304414ba 100644
--- a/include/uapi/linux/can/bcm.h
+++ b/include/uapi/linux/can/bcm.h
@@ -99,5 +99,6 @@ enum {
99#define RX_ANNOUNCE_RESUME 0x0100 99#define RX_ANNOUNCE_RESUME 0x0100
100#define TX_RESET_MULTI_IDX 0x0200 100#define TX_RESET_MULTI_IDX 0x0200
101#define RX_RTR_FRAME 0x0400 101#define RX_RTR_FRAME 0x0400
102#define CAN_FD_FRAME 0x0800
102 103
103#endif /* !_UAPI_CAN_BCM_H */ 104#endif /* !_UAPI_CAN_BCM_H */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 5f030b46cff4..b8f38e84d93a 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1362,6 +1362,7 @@ enum ethtool_link_mode_bit_indices {
1362 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, 1362 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37,
1363 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, 1363 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38,
1364 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, 1364 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39,
1365 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40,
1365 1366
1366 /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit 1367 /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
1367 * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_* 1368 * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
@@ -1370,7 +1371,7 @@ enum ethtool_link_mode_bit_indices {
1370 */ 1371 */
1371 1372
1372 __ETHTOOL_LINK_MODE_LAST 1373 __ETHTOOL_LINK_MODE_LAST
1373 = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 1374 = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1374}; 1375};
1375 1376
1376#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ 1377#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 620c8a5ddc00..14404b3ebb89 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -50,6 +50,7 @@ enum {
50 FRA_FWMASK, /* mask for netfilter mark */ 50 FRA_FWMASK, /* mask for netfilter mark */
51 FRA_OIFNAME, 51 FRA_OIFNAME,
52 FRA_PAD, 52 FRA_PAD,
53 FRA_L3MDEV, /* iif or oif is l3mdev goto its table */
53 __FRA_MAX 54 __FRA_MAX
54}; 55};
55 56
diff --git a/include/uapi/linux/icmp.h b/include/uapi/linux/icmp.h
index 16fff055f734..fddd9d736284 100644
--- a/include/uapi/linux/icmp.h
+++ b/include/uapi/linux/icmp.h
@@ -79,6 +79,7 @@ struct icmphdr {
79 __be16 __unused; 79 __be16 __unused;
80 __be16 mtu; 80 __be16 mtu;
81 } frag; 81 } frag;
82 __u8 reserved[4];
82 } un; 83 } un;
83}; 84};
84 85
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index af4de90ba27d..1046f5515174 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -113,6 +113,7 @@ enum {
113 IFLA_GRE_ENCAP_SPORT, 113 IFLA_GRE_ENCAP_SPORT,
114 IFLA_GRE_ENCAP_DPORT, 114 IFLA_GRE_ENCAP_DPORT,
115 IFLA_GRE_COLLECT_METADATA, 115 IFLA_GRE_COLLECT_METADATA,
116 IFLA_GRE_IGNORE_DF,
116 __IFLA_GRE_MAX, 117 __IFLA_GRE_MAX,
117}; 118};
118 119
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index a16643705669..abbd1dc5d683 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -72,6 +72,7 @@ enum {
72 INET_DIAG_BC_AUTO, 72 INET_DIAG_BC_AUTO,
73 INET_DIAG_BC_S_COND, 73 INET_DIAG_BC_S_COND,
74 INET_DIAG_BC_D_COND, 74 INET_DIAG_BC_D_COND,
75 INET_DIAG_BC_DEV_COND, /* u32 ifindex */
75}; 76};
76 77
77struct inet_diag_hostcond { 78struct inet_diag_hostcond {
diff --git a/include/uapi/linux/netlink_diag.h b/include/uapi/linux/netlink_diag.h
index d79399394b46..76b4d87c83a8 100644
--- a/include/uapi/linux/netlink_diag.h
+++ b/include/uapi/linux/netlink_diag.h
@@ -49,6 +49,7 @@ enum {
49#define NDIAG_SHOW_MEMINFO 0x00000001 /* show memory info of a socket */ 49#define NDIAG_SHOW_MEMINFO 0x00000001 /* show memory info of a socket */
50#define NDIAG_SHOW_GROUPS 0x00000002 /* show groups of a netlink socket */ 50#define NDIAG_SHOW_GROUPS 0x00000002 /* show groups of a netlink socket */
51#ifndef __KERNEL__ 51#ifndef __KERNEL__
52/* deprecated since 4.6 */
52#define NDIAG_SHOW_RING_CFG 0x00000004 /* show ring configuration */ 53#define NDIAG_SHOW_RING_CFG 0x00000004 /* show ring configuration */
53#endif 54#endif
54 55
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index e23d78685a01..53c8278827a0 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -493,7 +493,12 @@
  *	This attribute is ignored if driver does not support roam scan.
  *	It is also sent as an event, with the BSSID and response IEs when the
  *	connection is established or failed to be established. This can be
- *	determined by the STATUS_CODE attribute.
+ *	determined by the %NL80211_ATTR_STATUS_CODE attribute (0 = success,
+ *	non-zero = failure). If %NL80211_ATTR_TIMED_OUT is included in the
+ *	event, the connection attempt failed due to not being able to initiate
+ *	authentication/association or not receiving a response from the AP.
+ *	Non-zero %NL80211_ATTR_STATUS_CODE value is indicated in that case as
+ *	well to remain backwards compatible.
  * @NL80211_CMD_ROAM: request that the card roam (currently not implemented),
  *	sent as an event when the card/driver roamed by itself.
  * @NL80211_CMD_DISCONNECT: drop a given connection; also used to notify
@@ -1819,6 +1824,11 @@ enum nl80211_commands {
  *
  * @NL80211_ATTR_PAD: attribute used for padding for 64-bit alignment
  *
+ * @NL80211_ATTR_IFTYPE_EXT_CAPA: Nested attribute of the following attributes:
+ *	%NL80211_ATTR_IFTYPE, %NL80211_ATTR_EXT_CAPA,
+ *	%NL80211_ATTR_EXT_CAPA_MASK, to specify the extended capabilities per
+ *	interface type.
+ *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2201,6 +2211,8 @@ enum nl80211_attrs {
 
 	NL80211_ATTR_PAD,
 
+	NL80211_ATTR_IFTYPE_EXT_CAPA,
+
 	/* add attributes here, update the policy in nl80211.c */
 
 	__NL80211_ATTR_AFTER_LAST,
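The net effect: a wiphy dump can now advertise a distinct extended-capabilities bitmap per interface type instead of one global set, and userspace picks the entry matching the interface it is about to create.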
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index bb0d515b7654..d95a3018f6a1 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -166,6 +166,7 @@ enum ovs_packet_cmd {
  *	output port is actually a tunnel port. Contains the output tunnel key
  *	extracted from the packet as nested %OVS_TUNNEL_KEY_ATTR_* attributes.
  * @OVS_PACKET_ATTR_MRU: Present for an %OVS_PACKET_CMD_ACTION and
+ * @OVS_PACKET_ATTR_LEN: Packet size before truncation.
  * %OVS_PACKET_ATTR_USERSPACE action specify the Maximum received fragment
  * size.
  *
@@ -185,6 +186,7 @@ enum ovs_packet_attr {
 	OVS_PACKET_ATTR_PROBE,      /* Packet operation is a feature probe,
 				       error logging should be suppressed. */
 	OVS_PACKET_ATTR_MRU,	    /* Maximum received IP fragment size. */
+	OVS_PACKET_ATTR_LEN,	    /* Packet size before truncation. */
 	__OVS_PACKET_ATTR_MAX
 };
 
@@ -580,6 +582,10 @@ enum ovs_userspace_attr {
 
 #define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1)
 
+struct ovs_action_trunc {
+	uint32_t max_len; /* Max packet size in bytes. */
+};
+
 /**
  * struct ovs_action_push_mpls - %OVS_ACTION_ATTR_PUSH_MPLS action argument.
  * @mpls_lse: MPLS label stack entry to push.
@@ -703,6 +709,7 @@ enum ovs_nat_attr {
  * enum ovs_action_attr - Action types.
  *
  * @OVS_ACTION_ATTR_OUTPUT: Output packet to port.
+ * @OVS_ACTION_ATTR_TRUNC: Output packet to port with truncated packet size.
  * @OVS_ACTION_ATTR_USERSPACE: Send packet to userspace according to nested
  *	%OVS_USERSPACE_ATTR_* attributes.
  * @OVS_ACTION_ATTR_SET: Replaces the contents of an existing header. The
@@ -756,6 +763,7 @@ enum ovs_action_attr {
 				       * The data must be zero for the unmasked
 				       * bits. */
 	OVS_ACTION_ATTR_CT,           /* Nested OVS_CT_ATTR_* . */
+	OVS_ACTION_ATTR_TRUNC,        /* u32 struct ovs_action_trunc. */
 
 	__OVS_ACTION_ATTR_MAX,	      /* Nothing past this will be accepted
 				       * from userspace. */
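For context, a hedged kernel-style sketch of emitting the new action; the function below is invented for illustration, while nla_put() is the stock netlink helper:

	#include <linux/openvswitch.h>
	#include <linux/skbuff.h>
	#include <net/netlink.h>

	/* Append an OVS_ACTION_ATTR_TRUNC action so that the following output
	 * action forwards at most max_len bytes of the packet.
	 */
	static int put_trunc_action(struct sk_buff *skb, u32 max_len)
	{
		struct ovs_action_trunc trunc = {
			.max_len = max_len,
		};

		return nla_put(skb, OVS_ACTION_ATTR_TRUNC, sizeof(trunc), &trunc);
	}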
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index f4297c8a42fe..5702e933dc07 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -115,8 +115,8 @@ struct tc_police {
 	__u32			mtu;
 	struct tc_ratespec	rate;
 	struct tc_ratespec	peakrate;
-	int 			refcnt;
-	int 			bindcnt;
+	int			refcnt;
+	int			bindcnt;
 	__u32			capab;
 };
 
@@ -124,10 +124,11 @@ struct tcf_t {
 	__u64   install;
 	__u64   lastuse;
 	__u64   expires;
+	__u64   firstuse;
 };
 
 struct tc_cnt {
 	int                   refcnt;
 	int                   bindcnt;
 };
 
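The added firstuse field rounds out the tcf_t timestamps: together with install and lastuse it lets userspace see when an action first matched traffic, not only when it was created or last hit.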
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index ec32293a00db..fc353b518288 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -1,5 +1,5 @@
-#ifndef _LINUX_VIRTIO_NET_H
-#define _LINUX_VIRTIO_NET_H
+#ifndef _UAPI_LINUX_VIRTIO_NET_H
+#define _UAPI_LINUX_VIRTIO_NET_H
 /* This header is BSD licensed so anyone can use the definitions to implement
  * compatible drivers/servers.
  *
@@ -35,6 +35,7 @@
 #define VIRTIO_NET_F_CSUM	0	/* Host handles pkts w/ partial csum */
 #define VIRTIO_NET_F_GUEST_CSUM	1	/* Guest handles pkts w/ partial csum */
 #define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2 /* Dynamic offload configuration. */
+#define VIRTIO_NET_F_MTU	3	/* Initial MTU advice */
 #define VIRTIO_NET_F_MAC	5	/* Host has given MAC address. */
 #define VIRTIO_NET_F_GUEST_TSO4	7	/* Guest can handle TSOv4 in. */
 #define VIRTIO_NET_F_GUEST_TSO6	8	/* Guest can handle TSOv6 in. */
@@ -73,6 +74,8 @@ struct virtio_net_config {
 	 * Legal values are between 1 and 0x8000
 	 */
 	__u16 max_virtqueue_pairs;
+	/* Default maximum transmit unit advice */
+	__u16 mtu;
 } __attribute__((packed));
 
 /*
@@ -242,4 +245,4 @@ struct virtio_net_ctrl_mq {
 #define VIRTIO_NET_CTRL_GUEST_OFFLOADS	5
 #define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET	0
 
-#endif /* _LINUX_VIRTIO_NET_H */
+#endif /* _UAPI_LINUX_VIRTIO_NET_H */
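On the guest side, consuming the new MTU advice is a feature check plus a config-space read. A minimal sketch, assuming the stock virtio helpers virtio_has_feature() and virtio_cread16(); the function itself is illustrative, not part of this patch:

	#include <linux/netdevice.h>
	#include <linux/virtio_config.h>
	#include <linux/virtio_net.h>

	/* Apply the device's MTU advice if VIRTIO_NET_F_MTU was negotiated. */
	static void virtnet_apply_mtu_advice(struct virtio_device *vdev,
					     struct net_device *netdev)
	{
		if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU))
			netdev->mtu = virtio_cread16(vdev,
					offsetof(struct virtio_net_config, mtu));
	}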
diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h
index c1592e3e4036..d9ecd7c6d691 100644
--- a/include/uapi/linux/wireless.h
+++ b/include/uapi/linux/wireless.h
@@ -670,8 +670,7 @@
 /*
  * Generic format for most parameters that fit in an int
  */
-struct iw_param
-{
+struct iw_param {
 	__s32		value;		/* The value of the parameter itself */
 	__u8		fixed;		/* Hardware should not use auto select */
 	__u8		disabled;	/* Disable the feature */
@@ -682,8 +681,7 @@ struct iw_param
  * For all data larger than 16 octets, we need to use a
  * pointer to memory allocated in user space.
  */
-struct iw_point
-{
+struct iw_point {
 	void __user	*pointer;	/* Pointer to the data  (in user space) */
 	__u16		length;		/* number of fields or size in bytes */
 	__u16		flags;		/* Optional params */
@@ -698,8 +696,7 @@ struct iw_point
  * of 10 to get 'm' lower than 10^9, with 'm'= f / (10^'e')...
  * The power of 10 is in 'e', the result of the division is in 'm'.
  */
-struct iw_freq
-{
+struct iw_freq {
 	__s32		m;		/* Mantissa */
 	__s16		e;		/* Exponent */
 	__u8		i;		/* List index (when in range struct) */
@@ -709,8 +706,7 @@ struct iw_freq
 /*
  * Quality of the link
  */
-struct iw_quality
-{
+struct iw_quality {
 	__u8		qual;		/* link quality (%retries, SNR,
 					   %missed beacons or better...) */
 	__u8		level;		/* signal level (dBm) */
@@ -725,8 +721,7 @@ struct iw_quality
  * is already pretty exhaustive, and you should use that first.
  * This is only additional stats...
  */
-struct iw_discarded
-{
+struct iw_discarded {
 	__u32		nwid;		/* Rx : Wrong nwid/essid */
 	__u32		code;		/* Rx : Unable to code/decode (WEP) */
 	__u32		fragment;	/* Rx : Can't perform MAC reassembly */
@@ -738,16 +733,14 @@ struct iw_discarded
  * Packet/Time period missed in the wireless adapter due to
  * "wireless" specific problems...
  */
-struct iw_missed
-{
+struct iw_missed {
 	__u32		beacon;		/* Missed beacons/superframe */
 };
 
 /*
  * Quality range (for spy threshold)
  */
-struct iw_thrspy
-{
+struct iw_thrspy {
 	struct sockaddr		addr;		/* Source address (hw/mac) */
 	struct iw_quality	qual;		/* Quality of the link */
 	struct iw_quality	low;		/* Low threshold */
@@ -765,8 +758,7 @@ struct iw_thrspy
  * Especially, scan results are required to include an entry for the
  * current BSS if the driver is in Managed mode and associated with an AP.
  */
-struct iw_scan_req
-{
+struct iw_scan_req {
 	__u8		scan_type; /* IW_SCAN_TYPE_{ACTIVE,PASSIVE} */
 	__u8		essid_len;
 	__u8		num_channels; /* num entries in channel_list;
@@ -827,8 +819,7 @@ struct iw_scan_req
  * RX_SEQ_VALID for SIOCGIWENCODEEXT are optional, but can be useful for
  * debugging/testing.
  */
-struct iw_encode_ext
-{
+struct iw_encode_ext {
 	__u32		ext_flags; /* IW_ENCODE_EXT_* */
 	__u8		tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
 	__u8		rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
@@ -841,8 +832,7 @@ struct iw_encode_ext
 };
 
 /* SIOCSIWMLME data */
-struct iw_mlme
-{
+struct iw_mlme {
 	__u16		cmd; /* IW_MLME_* */
 	__u16		reason_code;
 	struct sockaddr	addr;
@@ -855,16 +845,14 @@ struct iw_mlme
 
 #define IW_PMKID_LEN	16
 
-struct iw_pmksa
-{
+struct iw_pmksa {
 	__u32		cmd; /* IW_PMKSA_* */
 	struct sockaddr	bssid;
 	__u8		pmkid[IW_PMKID_LEN];
 };
 
 /* IWEVMICHAELMICFAILURE data */
-struct iw_michaelmicfailure
-{
+struct iw_michaelmicfailure {
 	__u32		flags;
 	struct sockaddr	src_addr;
 	__u8		tsc[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
@@ -872,8 +860,7 @@ struct iw_michaelmicfailure
 
 /* IWEVPMKIDCAND data */
 #define IW_PMKID_CAND_PREAUTH	0x00000001	/* RNS pre-authentication enabled */
-struct iw_pmkid_cand
-{
+struct iw_pmkid_cand {
 	__u32		flags; /* IW_PMKID_CAND_* */
 	__u32		index; /* the smaller the index, the higher the
 				* priority */
@@ -884,8 +871,7 @@ struct iw_pmkid_cand
 /*
  * Wireless statistics (used for /proc/net/wireless)
  */
-struct iw_statistics
-{
+struct iw_statistics {
 	__u16		status; /* Status
 				 * - device dependent for now */
 
@@ -897,7 +883,7 @@ struct iw_statistics
 
 /* ------------------------ IOCTL REQUEST ------------------------ */
 /*
- * This structure defines the payload of an ioctl, and is used 
+ * This structure defines the payload of an ioctl, and is used
  * below.
  *
  * Note that this structure should fit on the memory footprint
@@ -906,8 +892,7 @@ struct iw_statistics
  * You should check this when increasing the structures defined
  * above in this file...
  */
-union iwreq_data
-{
+union iwreq_data {
 	/* Config - generic */
 	char		name[IFNAMSIZ];
 	/* Name : used to verify the presence of  wireless extensions.
@@ -944,15 +929,14 @@ union iwreq_data
  * convenience...
  * Do I need to remind you about structure size (32 octets) ?
  */
-struct iwreq
-{
+struct iwreq {
 	union
 	{
 		char	ifrn_name[IFNAMSIZ];	/* if name, e.g. "eth0" */
 	} ifr_ifrn;
 
 	/* Data part (defined just above) */
 	union	iwreq_data	u;
 };
 
 /* -------------------------- IOCTL DATA -------------------------- */
@@ -965,8 +949,7 @@ struct iwreq
  * Range of parameters
  */
 
-struct iw_range
-{
+struct iw_range {
 	/* Informative stuff (to choose between different interface) */
 	__u32		throughput;	/* To give an idea... */
 	/* In theory this value should be the maximum benchmarked
@@ -1069,9 +1052,8 @@ struct iw_range
 /*
  * Private ioctl interface information
  */
 
-struct iw_priv_args
-{
+struct iw_priv_args {
 	__u32		cmd;		/* Number of the ioctl to issue */
 	__u16		set_args;	/* Type and number of args */
 	__u16		get_args;	/* Type and number of args */
@@ -1088,8 +1070,7 @@ struct iw_priv_args
 /*
  * A Wireless Event. Contains basically the same data as the ioctl...
  */
-struct iw_event
-{
+struct iw_event {
 	__u16		len;			/* Real length of this stuff */
 	__u16		cmd;			/* Wireless IOCTL */
 	union iwreq_data	u;		/* IOCTL fixed payload */
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 76d5a794e426..5af30732697b 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -328,8 +328,8 @@ static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
 }
 
 /* only called from syscall */
-static int fd_array_map_update_elem(struct bpf_map *map, void *key,
-				    void *value, u64 map_flags)
+int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
+				 void *key, void *value, u64 map_flags)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	void *new_ptr, *old_ptr;
@@ -342,7 +342,7 @@ static int fd_array_map_update_elem(struct bpf_map *map, void *key,
 		return -E2BIG;
 
 	ufd = *(u32 *)value;
-	new_ptr = map->ops->map_fd_get_ptr(map, ufd);
+	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
 	if (IS_ERR(new_ptr))
 		return PTR_ERR(new_ptr);
 
@@ -371,10 +371,12 @@ static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
 	}
 }
 
-static void *prog_fd_array_get_ptr(struct bpf_map *map, int fd)
+static void *prog_fd_array_get_ptr(struct bpf_map *map,
+				   struct file *map_file, int fd)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	struct bpf_prog *prog = bpf_prog_get(fd);
+
 	if (IS_ERR(prog))
 		return prog;
 
@@ -382,6 +384,7 @@ static void *prog_fd_array_get_ptr(struct bpf_map *map, int fd)
 		bpf_prog_put(prog);
 		return ERR_PTR(-EINVAL);
 	}
+
 	return prog;
 }
 
@@ -407,7 +410,6 @@ static const struct bpf_map_ops prog_array_ops = {
 	.map_free = fd_array_map_free,
 	.map_get_next_key = array_map_get_next_key,
 	.map_lookup_elem = fd_array_map_lookup_elem,
-	.map_update_elem = fd_array_map_update_elem,
 	.map_delete_elem = fd_array_map_delete_elem,
 	.map_fd_get_ptr = prog_fd_array_get_ptr,
 	.map_fd_put_ptr = prog_fd_array_put_ptr,
@@ -425,59 +427,105 @@ static int __init register_prog_array_map(void)
 }
 late_initcall(register_prog_array_map);
 
-static void perf_event_array_map_free(struct bpf_map *map)
+static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
+						   struct file *map_file)
 {
-	bpf_fd_array_map_clear(map);
-	fd_array_map_free(map);
+	struct bpf_event_entry *ee;
+
+	ee = kzalloc(sizeof(*ee), GFP_KERNEL);
+	if (ee) {
+		ee->event = perf_file->private_data;
+		ee->perf_file = perf_file;
+		ee->map_file = map_file;
+	}
+
+	return ee;
 }
 
-static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd)
+static void __bpf_event_entry_free(struct rcu_head *rcu)
 {
-	struct perf_event *event;
-	const struct perf_event_attr *attr;
-	struct file *file;
+	struct bpf_event_entry *ee;
 
-	file = perf_event_get(fd);
-	if (IS_ERR(file))
-		return file;
+	ee = container_of(rcu, struct bpf_event_entry, rcu);
+	fput(ee->perf_file);
+	kfree(ee);
+}
 
-	event = file->private_data;
+static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
+{
+	call_rcu(&ee->rcu, __bpf_event_entry_free);
+}
 
-	attr = perf_event_attrs(event);
-	if (IS_ERR(attr))
-		goto err;
+static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
+					 struct file *map_file, int fd)
+{
+	const struct perf_event_attr *attr;
+	struct bpf_event_entry *ee;
+	struct perf_event *event;
+	struct file *perf_file;
 
-	if (attr->inherit)
-		goto err;
+	perf_file = perf_event_get(fd);
+	if (IS_ERR(perf_file))
+		return perf_file;
 
-	if (attr->type == PERF_TYPE_RAW)
-		return file;
+	event = perf_file->private_data;
+	ee = ERR_PTR(-EINVAL);
 
-	if (attr->type == PERF_TYPE_HARDWARE)
-		return file;
+	attr = perf_event_attrs(event);
+	if (IS_ERR(attr) || attr->inherit)
+		goto err_out;
+
+	switch (attr->type) {
+	case PERF_TYPE_SOFTWARE:
+		if (attr->config != PERF_COUNT_SW_BPF_OUTPUT)
+			goto err_out;
+		/* fall-through */
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+		ee = bpf_event_entry_gen(perf_file, map_file);
+		if (ee)
+			return ee;
+		ee = ERR_PTR(-ENOMEM);
+		/* fall-through */
+	default:
+		break;
+	}
 
-	if (attr->type == PERF_TYPE_SOFTWARE &&
-	    attr->config == PERF_COUNT_SW_BPF_OUTPUT)
-		return file;
-err:
-	fput(file);
-	return ERR_PTR(-EINVAL);
+err_out:
+	fput(perf_file);
+	return ee;
 }
 
 static void perf_event_fd_array_put_ptr(void *ptr)
 {
-	fput((struct file *)ptr);
+	bpf_event_entry_free_rcu(ptr);
+}
+
+static void perf_event_fd_array_release(struct bpf_map *map,
+					struct file *map_file)
+{
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	struct bpf_event_entry *ee;
+	int i;
+
+	rcu_read_lock();
+	for (i = 0; i < array->map.max_entries; i++) {
+		ee = READ_ONCE(array->ptrs[i]);
+		if (ee && ee->map_file == map_file)
+			fd_array_map_delete_elem(map, &i);
+	}
+	rcu_read_unlock();
 }
 
 static const struct bpf_map_ops perf_event_array_ops = {
 	.map_alloc = fd_array_map_alloc,
-	.map_free = perf_event_array_map_free,
+	.map_free = fd_array_map_free,
 	.map_get_next_key = array_map_get_next_key,
 	.map_lookup_elem = fd_array_map_lookup_elem,
-	.map_update_elem = fd_array_map_update_elem,
 	.map_delete_elem = fd_array_map_delete_elem,
 	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
 	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
+	.map_release = perf_event_fd_array_release,
 };
 
 static struct bpf_map_type_list perf_event_array_type __read_mostly = {
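The bpf_event_entry type used throughout comes from include/linux/bpf.h, which the same series extends (that hunk is not part of this excerpt). For reference, its shape as added by that patch is:

	struct bpf_event_entry {
		struct perf_event *event;
		struct file *perf_file;
		struct file *map_file;
		struct rcu_head rcu;
	};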
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 46ecce4b79ed..c23a4e9311b3 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -124,7 +124,12 @@ void bpf_map_put_with_uref(struct bpf_map *map)
 
 static int bpf_map_release(struct inode *inode, struct file *filp)
 {
-	bpf_map_put_with_uref(filp->private_data);
+	struct bpf_map *map = filp->private_data;
+
+	if (map->ops->map_release)
+		map->ops->map_release(map, filp);
+
+	bpf_map_put_with_uref(map);
 	return 0;
 }
 
@@ -387,6 +392,12 @@ static int map_update_elem(union bpf_attr *attr)
 		err = bpf_percpu_hash_update(map, key, value, attr->flags);
 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 		err = bpf_percpu_array_update(map, key, value, attr->flags);
+	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
+		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
+		rcu_read_lock();
+		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
+						   attr->flags);
+		rcu_read_unlock();
 	} else {
 		rcu_read_lock();
 		err = map->ops->map_update_elem(map, key, value, attr->flags);
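From userspace, this path is reached with a plain bpf(2) map update where the value is the perf event (or program) file descriptor. A minimal sketch; the wrapper name and error handling are illustrative:

	#include <linux/bpf.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Install fd-typed values (perf event or prog fds) into an fd array map. */
	static int bpf_update_elem(int map_fd, const void *key, const void *value)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.map_fd = map_fd;
		attr.key = (__u64)(unsigned long)key;
		attr.value = (__u64)(unsigned long)value;
		attr.flags = BPF_ANY;

		return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
	}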
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 26f603da7e26..3de25fbed785 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -192,18 +192,17 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
 {
 	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	struct bpf_event_entry *ee;
 	struct perf_event *event;
-	struct file *file;
 
 	if (unlikely(index >= array->map.max_entries))
 		return -E2BIG;
 
-	file = READ_ONCE(array->ptrs[index]);
-	if (unlikely(!file))
+	ee = READ_ONCE(array->ptrs[index]);
+	if (unlikely(!ee))
 		return -ENOENT;
 
-	event = file->private_data;
-
+	event = ee->event;
 	/* make sure event is local and doesn't have pmu::count */
 	if (event->oncpu != smp_processor_id() ||
 	    event->pmu->count)
@@ -237,8 +236,8 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
 	u64 index = flags & BPF_F_INDEX_MASK;
 	void *data = (void *) (long) r4;
 	struct perf_sample_data sample_data;
+	struct bpf_event_entry *ee;
 	struct perf_event *event;
-	struct file *file;
 	struct perf_raw_record raw = {
 		.size = size,
 		.data = data,
@@ -251,12 +250,11 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
 	if (unlikely(index >= array->map.max_entries))
 		return -E2BIG;
 
-	file = READ_ONCE(array->ptrs[index]);
-	if (unlikely(!file))
+	ee = READ_ONCE(array->ptrs[index]);
+	if (unlikely(!ee))
 		return -ENOENT;
 
-	event = file->private_data;
-
+	event = ee->event;
 	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
 		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
 		return -EINVAL;
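The lookup side stays lock-free: the READ_ONCE() on array->ptrs[] pairs with the call_rcu()-deferred __bpf_event_entry_free() in arraymap.c, so a BPF program running under rcu_read_lock() can keep dereferencing ee->event even while the slot is concurrently replaced or flushed by map_release.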
diff --git a/net/6lowpan/6lowpan_i.h b/net/6lowpan/6lowpan_i.h
index 97ecc27aeca6..a67caee11929 100644
--- a/net/6lowpan/6lowpan_i.h
+++ b/net/6lowpan/6lowpan_i.h
@@ -12,6 +12,10 @@ static inline bool lowpan_is_ll(const struct net_device *dev,
 	return lowpan_dev(dev)->lltype == lltype;
 }
 
+extern const struct ndisc_ops lowpan_ndisc_ops;
+
+int addrconf_ifid_802154_6lowpan(u8 *eui, struct net_device *dev);
+
 #ifdef CONFIG_6LOWPAN_DEBUGFS
 int lowpan_dev_debugfs_init(struct net_device *dev);
 void lowpan_dev_debugfs_exit(struct net_device *dev);
diff --git a/net/6lowpan/Makefile b/net/6lowpan/Makefile
index e44f3bf2dd42..12d131ab2324 100644
--- a/net/6lowpan/Makefile
+++ b/net/6lowpan/Makefile
@@ -1,6 +1,6 @@
 obj-$(CONFIG_6LOWPAN) += 6lowpan.o
 
-6lowpan-y := core.o iphc.o nhc.o
+6lowpan-y := core.o iphc.o nhc.o ndisc.o
 6lowpan-$(CONFIG_6LOWPAN_DEBUGFS) += debugfs.o
 
 #rfc6282 nhcs
diff --git a/net/6lowpan/core.c b/net/6lowpan/core.c
index 7a240b3eaed1..5945f7e19c67 100644
--- a/net/6lowpan/core.c
+++ b/net/6lowpan/core.c
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 
 #include <net/6lowpan.h>
+#include <net/addrconf.h>
 
 #include "6lowpan_i.h"
 
@@ -33,6 +34,8 @@ int lowpan_register_netdevice(struct net_device *dev,
 	for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++)
 		lowpan_dev(dev)->ctx.table[i].id = i;
 
+	dev->ndisc_ops = &lowpan_ndisc_ops;
+
 	ret = register_netdevice(dev);
 	if (ret < 0)
 		return ret;
@@ -72,16 +75,61 @@ void lowpan_unregister_netdev(struct net_device *dev)
 }
 EXPORT_SYMBOL(lowpan_unregister_netdev);
 
+int addrconf_ifid_802154_6lowpan(u8 *eui, struct net_device *dev)
+{
+	struct wpan_dev *wpan_dev = lowpan_802154_dev(dev)->wdev->ieee802154_ptr;
+
+	/* Set short_addr autoconfiguration if short_addr is present only */
+	if (!lowpan_802154_is_valid_src_short_addr(wpan_dev->short_addr))
+		return -1;
+
+	/* For either address format, all zero addresses MUST NOT be used */
+	if (wpan_dev->pan_id == cpu_to_le16(0x0000) &&
+	    wpan_dev->short_addr == cpu_to_le16(0x0000))
+		return -1;
+
+	/* Alternatively, if no PAN ID is known, 16 zero bits may be used */
+	if (wpan_dev->pan_id == cpu_to_le16(IEEE802154_PAN_ID_BROADCAST))
+		memset(eui, 0, 2);
+	else
+		ieee802154_le16_to_be16(eui, &wpan_dev->pan_id);
+
+	/* The "Universal/Local" (U/L) bit shall be set to zero */
+	eui[0] &= ~2;
+	eui[2] = 0;
+	eui[3] = 0xFF;
+	eui[4] = 0xFE;
+	eui[5] = 0;
+	ieee802154_le16_to_be16(&eui[6], &wpan_dev->short_addr);
+	return 0;
+}
+
 static int lowpan_event(struct notifier_block *unused,
 			unsigned long event, void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct inet6_dev *idev;
+	struct in6_addr addr;
 	int i;
 
 	if (dev->type != ARPHRD_6LOWPAN)
 		return NOTIFY_DONE;
 
+	idev = __in6_dev_get(dev);
+	if (!idev)
+		return NOTIFY_DONE;
+
 	switch (event) {
+	case NETDEV_UP:
+	case NETDEV_CHANGE:
+		/* (802.15.4 6LoWPAN short address slaac handling */
+		if (lowpan_is_ll(dev, LOWPAN_LLTYPE_IEEE802154) &&
+		    addrconf_ifid_802154_6lowpan(addr.s6_addr + 8, dev) == 0) {
+			__ipv6_addr_set_half(&addr.s6_addr32[0],
+					     htonl(0xFE800000), 0);
+			addrconf_add_linklocal(idev, &addr, 0);
+		}
+		break;
 	case NETDEV_DOWN:
 		for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++)
 			clear_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE,
@@ -112,8 +160,6 @@ static int __init lowpan_module_init(void)
 		return ret;
 	}
 
-	request_module_nowait("ipv6");
-
 	request_module_nowait("nhc_dest");
 	request_module_nowait("nhc_fragment");
 	request_module_nowait("nhc_hop");
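Worked example (values assumed for illustration): with pan_id 0xbeef and short_addr 0x0001, addrconf_ifid_802154_6lowpan() yields the IID bc:ef:00:ff:fe:00:00:01. The PAN ID fills the first two bytes (clearing the U/L bit turns 0xbe into 0xbc), the 00:ff:fe:00 marker fills the middle, and the short address fills the tail, so the NETDEV_UP handler above installs the link-local address fe80::bcef:ff:fe00:1.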
diff --git a/net/6lowpan/debugfs.c b/net/6lowpan/debugfs.c
index acbaa3db493b..24915e0bb9ea 100644
--- a/net/6lowpan/debugfs.c
+++ b/net/6lowpan/debugfs.c
@@ -245,6 +245,41 @@ static const struct file_operations lowpan_context_fops = {
 	.release	= single_release,
 };
 
+static int lowpan_short_addr_get(void *data, u64 *val)
+{
+	struct wpan_dev *wdev = data;
+
+	rtnl_lock();
+	*val = le16_to_cpu(wdev->short_addr);
+	rtnl_unlock();
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(lowpan_short_addr_fops, lowpan_short_addr_get,
+			NULL, "0x%04llx\n");
+
+static int lowpan_dev_debugfs_802154_init(const struct net_device *dev,
+					  struct lowpan_dev *ldev)
+{
+	struct dentry *dentry, *root;
+
+	if (!lowpan_is_ll(dev, LOWPAN_LLTYPE_IEEE802154))
+		return 0;
+
+	root = debugfs_create_dir("ieee802154", ldev->iface_debugfs);
+	if (!root)
+		return -EINVAL;
+
+	dentry = debugfs_create_file("short_addr", 0444, root,
+				     lowpan_802154_dev(dev)->wdev->ieee802154_ptr,
+				     &lowpan_short_addr_fops);
+	if (!dentry)
+		return -EINVAL;
+
+	return 0;
+}
+
 int lowpan_dev_debugfs_init(struct net_device *dev)
 {
 	struct lowpan_dev *ldev = lowpan_dev(dev);
@@ -272,6 +307,10 @@ int lowpan_dev_debugfs_init(struct net_device *dev)
 		goto remove_root;
 	}
 
+	ret = lowpan_dev_debugfs_802154_init(dev, ldev);
+	if (ret < 0)
+		goto remove_root;
+
 	return 0;
 
 remove_root:
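Usage note: with debugfs mounted, the attribute appears per interface (e.g. /sys/kernel/debug/6lowpan/lowpan0/ieee802154/short_addr, path assumed from the directory layout above), and reading it prints the short address in the 0x%04llx format set above, for example 0x0001.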
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index 8501dd532fe1..79f1fa22509a 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -761,22 +761,75 @@ static const u8 lowpan_iphc_dam_to_sam_value[] = {
 	[LOWPAN_IPHC_DAM_11] = LOWPAN_IPHC_SAM_11,
 };
 
-static u8 lowpan_compress_ctx_addr(u8 **hc_ptr, const struct in6_addr *ipaddr,
+static inline bool
+lowpan_iphc_compress_ctx_802154_lladdr(const struct in6_addr *ipaddr,
+				       const struct lowpan_iphc_ctx *ctx,
+				       const void *lladdr)
+{
+	const struct ieee802154_addr *addr = lladdr;
+	unsigned char extended_addr[EUI64_ADDR_LEN];
+	bool lladdr_compress = false;
+	struct in6_addr tmp = {};
+
+	switch (addr->mode) {
+	case IEEE802154_ADDR_LONG:
+		ieee802154_le64_to_be64(&extended_addr, &addr->extended_addr);
+		/* check for SAM/DAM = 11 */
+		memcpy(&tmp.s6_addr[8], &extended_addr, EUI64_ADDR_LEN);
+		/* second bit-flip (Universe/Local) is done according RFC2464 */
+		tmp.s6_addr[8] ^= 0x02;
+		/* context information are always used */
+		ipv6_addr_prefix_copy(&tmp, &ctx->pfx, ctx->plen);
+		if (ipv6_addr_equal(&tmp, ipaddr))
+			lladdr_compress = true;
+		break;
+	case IEEE802154_ADDR_SHORT:
+		tmp.s6_addr[11] = 0xFF;
+		tmp.s6_addr[12] = 0xFE;
+		ieee802154_le16_to_be16(&tmp.s6_addr16[7],
+					&addr->short_addr);
+		/* context information are always used */
+		ipv6_addr_prefix_copy(&tmp, &ctx->pfx, ctx->plen);
+		if (ipv6_addr_equal(&tmp, ipaddr))
+			lladdr_compress = true;
+		break;
+	default:
+		/* should never handled and filtered by 802154 6lowpan */
+		WARN_ON_ONCE(1);
+		break;
+	}
+
+	return lladdr_compress;
+}
+
+static u8 lowpan_compress_ctx_addr(u8 **hc_ptr, const struct net_device *dev,
+				   const struct in6_addr *ipaddr,
 				   const struct lowpan_iphc_ctx *ctx,
 				   const unsigned char *lladdr, bool sam)
 {
 	struct in6_addr tmp = {};
 	u8 dam;
 
-	/* check for SAM/DAM = 11 */
-	memcpy(&tmp.s6_addr[8], lladdr, 8);
-	/* second bit-flip (Universe/Local) is done according RFC2464 */
-	tmp.s6_addr[8] ^= 0x02;
-	/* context information are always used */
-	ipv6_addr_prefix_copy(&tmp, &ctx->pfx, ctx->plen);
-	if (ipv6_addr_equal(&tmp, ipaddr)) {
-		dam = LOWPAN_IPHC_DAM_11;
-		goto out;
+	switch (lowpan_dev(dev)->lltype) {
+	case LOWPAN_LLTYPE_IEEE802154:
+		if (lowpan_iphc_compress_ctx_802154_lladdr(ipaddr, ctx,
+							   lladdr)) {
+			dam = LOWPAN_IPHC_DAM_11;
+			goto out;
+		}
+		break;
+	default:
+		/* check for SAM/DAM = 11 */
+		memcpy(&tmp.s6_addr[8], lladdr, EUI64_ADDR_LEN);
+		/* second bit-flip (Universe/Local) is done according RFC2464 */
+		tmp.s6_addr[8] ^= 0x02;
+		/* context information are always used */
+		ipv6_addr_prefix_copy(&tmp, &ctx->pfx, ctx->plen);
+		if (ipv6_addr_equal(&tmp, ipaddr)) {
+			dam = LOWPAN_IPHC_DAM_11;
+			goto out;
+		}
+		break;
 	}
 
 	memset(&tmp, 0, sizeof(tmp));
@@ -813,28 +866,85 @@ out:
 	return dam;
 }
 
-static u8 lowpan_compress_addr_64(u8 **hc_ptr, const struct in6_addr *ipaddr,
+static inline bool
+lowpan_iphc_compress_802154_lladdr(const struct in6_addr *ipaddr,
+				   const void *lladdr)
+{
+	const struct ieee802154_addr *addr = lladdr;
+	unsigned char extended_addr[EUI64_ADDR_LEN];
+	bool lladdr_compress = false;
+	struct in6_addr tmp = {};
+
+	switch (addr->mode) {
+	case IEEE802154_ADDR_LONG:
+		ieee802154_le64_to_be64(&extended_addr, &addr->extended_addr);
+		if (is_addr_mac_addr_based(ipaddr, extended_addr))
+			lladdr_compress = true;
+		break;
+	case IEEE802154_ADDR_SHORT:
+		/* fe:80::ff:fe00:XXXX
+		 *                \__/
+		 *             short_addr
+		 *
+		 * Universe/Local bit is zero.
+		 */
+		tmp.s6_addr[0] = 0xFE;
+		tmp.s6_addr[1] = 0x80;
+		tmp.s6_addr[11] = 0xFF;
+		tmp.s6_addr[12] = 0xFE;
+		ieee802154_le16_to_be16(&tmp.s6_addr16[7],
+					&addr->short_addr);
+		if (ipv6_addr_equal(&tmp, ipaddr))
+			lladdr_compress = true;
+		break;
+	default:
+		/* should never handled and filtered by 802154 6lowpan */
+		WARN_ON_ONCE(1);
+		break;
+	}
+
+	return lladdr_compress;
+}
+
+static u8 lowpan_compress_addr_64(u8 **hc_ptr, const struct net_device *dev,
+				  const struct in6_addr *ipaddr,
 				  const unsigned char *lladdr, bool sam)
 {
-	u8 dam = LOWPAN_IPHC_DAM_00;
+	u8 dam = LOWPAN_IPHC_DAM_01;
 
-	if (is_addr_mac_addr_based(ipaddr, lladdr)) {
-		dam = LOWPAN_IPHC_DAM_11; /* 0-bits */
-		pr_debug("address compression 0 bits\n");
-	} else if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
+	switch (lowpan_dev(dev)->lltype) {
+	case LOWPAN_LLTYPE_IEEE802154:
+		if (lowpan_iphc_compress_802154_lladdr(ipaddr, lladdr)) {
+			dam = LOWPAN_IPHC_DAM_11; /* 0-bits */
+			pr_debug("address compression 0 bits\n");
+			goto out;
+		}
+		break;
+	default:
+		if (is_addr_mac_addr_based(ipaddr, lladdr)) {
+			dam = LOWPAN_IPHC_DAM_11; /* 0-bits */
+			pr_debug("address compression 0 bits\n");
+			goto out;
+		}
+		break;
+	}
+
+	if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
 		/* compress IID to 16 bits xxxx::XXXX */
 		lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr16[7], 2);
 		dam = LOWPAN_IPHC_DAM_10; /* 16-bits */
 		raw_dump_inline(NULL, "Compressed ipv6 addr is (16 bits)",
 				*hc_ptr - 2, 2);
-	} else {
-		/* do not compress IID => xxxx::IID */
-		lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr16[4], 8);
-		dam = LOWPAN_IPHC_DAM_01; /* 64-bits */
-		raw_dump_inline(NULL, "Compressed ipv6 addr is (64 bits)",
-				*hc_ptr - 8, 8);
+		goto out;
 	}
 
+	/* do not compress IID => xxxx::IID */
+	lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr16[4], 8);
+	raw_dump_inline(NULL, "Compressed ipv6 addr is (64 bits)",
+			*hc_ptr - 8, 8);
+
+out:
+
 	if (sam)
 		return lowpan_iphc_dam_to_sam_value[dam];
 	else
@@ -1013,9 +1123,6 @@ int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
 	iphc0 = LOWPAN_DISPATCH_IPHC;
 	iphc1 = 0;
 
-	raw_dump_inline(__func__, "saddr", saddr, EUI64_ADDR_LEN);
-	raw_dump_inline(__func__, "daddr", daddr, EUI64_ADDR_LEN);
-
 	raw_dump_table(__func__, "sending raw skb network uncompressed packet",
 		       skb->data, skb->len);
 
@@ -1088,14 +1195,15 @@ int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
 			iphc1 |= LOWPAN_IPHC_SAC;
 	} else {
 		if (sci) {
-			iphc1 |= lowpan_compress_ctx_addr(&hc_ptr, &hdr->saddr,
+			iphc1 |= lowpan_compress_ctx_addr(&hc_ptr, dev,
+							  &hdr->saddr,
 							  &sci_entry, saddr,
 							  true);
 			iphc1 |= LOWPAN_IPHC_SAC;
 		} else {
 			if (ipv6_saddr_type & IPV6_ADDR_LINKLOCAL &&
 			    lowpan_is_linklocal_zero_padded(hdr->saddr)) {
-				iphc1 |= lowpan_compress_addr_64(&hc_ptr,
+				iphc1 |= lowpan_compress_addr_64(&hc_ptr, dev,
 						&hdr->saddr,
 						saddr, true);
 				pr_debug("source address unicast link-local %pI6c iphc1 0x%02x\n",
@@ -1123,14 +1231,15 @@ int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
 		}
 	} else {
 		if (dci) {
-			iphc1 |= lowpan_compress_ctx_addr(&hc_ptr, &hdr->daddr,
+			iphc1 |= lowpan_compress_ctx_addr(&hc_ptr, dev,
+							  &hdr->daddr,
 							  &dci_entry, daddr,
 							  false);
 			iphc1 |= LOWPAN_IPHC_DAC;
 		} else {
 			if (ipv6_daddr_type & IPV6_ADDR_LINKLOCAL &&
 			    lowpan_is_linklocal_zero_padded(hdr->daddr)) {
-				iphc1 |= lowpan_compress_addr_64(&hc_ptr,
+				iphc1 |= lowpan_compress_addr_64(&hc_ptr, dev,
 						&hdr->daddr,
 						daddr, false);
 				pr_debug("dest address unicast link-local %pI6c iphc1 0x%02x\n",
diff --git a/net/6lowpan/ndisc.c b/net/6lowpan/ndisc.c
new file mode 100644
index 000000000000..ae1d4199aa4c
--- /dev/null
+++ b/net/6lowpan/ndisc.c
@@ -0,0 +1,234 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Authors:
+ * (C) 2016 Pengutronix, Alexander Aring <aar@pengutronix.de>
+ */
+
+#include <net/6lowpan.h>
+#include <net/addrconf.h>
+#include <net/ndisc.h>
+
+#include "6lowpan_i.h"
+
+static int lowpan_ndisc_is_useropt(u8 nd_opt_type)
+{
+	return nd_opt_type == ND_OPT_6CO;
+}
+
+#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
+#define NDISC_802154_SHORT_ADDR_LENGTH	1
+static int lowpan_ndisc_parse_802154_options(const struct net_device *dev,
+					     struct nd_opt_hdr *nd_opt,
+					     struct ndisc_options *ndopts)
+{
+	switch (nd_opt->nd_opt_len) {
+	case NDISC_802154_SHORT_ADDR_LENGTH:
+		if (ndopts->nd_802154_opt_array[nd_opt->nd_opt_type])
+			ND_PRINTK(2, warn,
+				  "%s: duplicated short addr ND6 option found: type=%d\n",
+				  __func__, nd_opt->nd_opt_type);
+		else
+			ndopts->nd_802154_opt_array[nd_opt->nd_opt_type] = nd_opt;
+		return 1;
+	default:
+		/* all others will be handled by ndisc IPv6 option parsing */
+		return 0;
+	}
+}
+
+static int lowpan_ndisc_parse_options(const struct net_device *dev,
+				      struct nd_opt_hdr *nd_opt,
+				      struct ndisc_options *ndopts)
+{
+	switch (nd_opt->nd_opt_type) {
+	case ND_OPT_SOURCE_LL_ADDR:
+	case ND_OPT_TARGET_LL_ADDR:
+		return lowpan_ndisc_parse_802154_options(dev, nd_opt, ndopts);
+	default:
+		return 0;
+	}
+}
+
+static void lowpan_ndisc_802154_update(struct neighbour *n, u32 flags,
+				       u8 icmp6_type,
+				       const struct ndisc_options *ndopts)
+{
+	struct lowpan_802154_neigh *neigh = lowpan_802154_neigh(neighbour_priv(n));
+	u8 *lladdr_short = NULL;
+
+	switch (icmp6_type) {
+	case NDISC_ROUTER_SOLICITATION:
+	case NDISC_ROUTER_ADVERTISEMENT:
+	case NDISC_NEIGHBOUR_SOLICITATION:
+		if (ndopts->nd_802154_opts_src_lladdr) {
+			lladdr_short = __ndisc_opt_addr_data(ndopts->nd_802154_opts_src_lladdr,
+							     IEEE802154_SHORT_ADDR_LEN, 0);
+			if (!lladdr_short) {
+				ND_PRINTK(2, warn,
+					  "NA: invalid short link-layer address length\n");
+				return;
+			}
+		}
+		break;
+	case NDISC_REDIRECT:
+	case NDISC_NEIGHBOUR_ADVERTISEMENT:
+		if (ndopts->nd_802154_opts_tgt_lladdr) {
+			lladdr_short = __ndisc_opt_addr_data(ndopts->nd_802154_opts_tgt_lladdr,
+							     IEEE802154_SHORT_ADDR_LEN, 0);
+			if (!lladdr_short) {
+				ND_PRINTK(2, warn,
+					  "NA: invalid short link-layer address length\n");
+				return;
+			}
+		}
+		break;
+	default:
+		break;
+	}
+
+	write_lock_bh(&n->lock);
+	if (lladdr_short)
+		ieee802154_be16_to_le16(&neigh->short_addr, lladdr_short);
+	else
+		neigh->short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC);
+	write_unlock_bh(&n->lock);
+}
+
+static void lowpan_ndisc_update(const struct net_device *dev,
+				struct neighbour *n, u32 flags, u8 icmp6_type,
+				const struct ndisc_options *ndopts)
+{
+	if (!lowpan_is_ll(dev, LOWPAN_LLTYPE_IEEE802154))
+		return;
+
+	/* react on overrides only. TODO check if this is really right. */
+	if (flags & NEIGH_UPDATE_F_OVERRIDE)
+		lowpan_ndisc_802154_update(n, flags, icmp6_type, ndopts);
+}
+
+static int lowpan_ndisc_opt_addr_space(const struct net_device *dev,
+				       u8 icmp6_type, struct neighbour *neigh,
+				       u8 *ha_buf, u8 **ha)
+{
+	struct lowpan_802154_neigh *n;
+	struct wpan_dev *wpan_dev;
+	int addr_space = 0;
+
+	if (!lowpan_is_ll(dev, LOWPAN_LLTYPE_IEEE802154))
+		return 0;
+
+	switch (icmp6_type) {
+	case NDISC_REDIRECT:
+		n = lowpan_802154_neigh(neighbour_priv(neigh));
+
+		read_lock_bh(&neigh->lock);
+		if (lowpan_802154_is_valid_src_short_addr(n->short_addr)) {
+			memcpy(ha_buf, &n->short_addr,
+			       IEEE802154_SHORT_ADDR_LEN);
+			read_unlock_bh(&neigh->lock);
+			addr_space += __ndisc_opt_addr_space(IEEE802154_SHORT_ADDR_LEN, 0);
+			*ha = ha_buf;
+		}
+		read_unlock_bh(&neigh->lock);
+		break;
+	case NDISC_NEIGHBOUR_ADVERTISEMENT:
+	case NDISC_NEIGHBOUR_SOLICITATION:
+	case NDISC_ROUTER_SOLICITATION:
+		wpan_dev = lowpan_802154_dev(dev)->wdev->ieee802154_ptr;
+
+		if (lowpan_802154_is_valid_src_short_addr(wpan_dev->short_addr))
+			addr_space = __ndisc_opt_addr_space(IEEE802154_SHORT_ADDR_LEN, 0);
+		break;
+	default:
+		break;
+	}
+
+	return addr_space;
+}
+
+static void lowpan_ndisc_fill_addr_option(const struct net_device *dev,
+					  struct sk_buff *skb, u8 icmp6_type,
+					  const u8 *ha)
+{
+	struct wpan_dev *wpan_dev;
+	__be16 short_addr;
+	u8 opt_type;
+
+	if (!lowpan_is_ll(dev, LOWPAN_LLTYPE_IEEE802154))
+		return;
+
+	switch (icmp6_type) {
+	case NDISC_REDIRECT:
+		if (ha) {
+			ieee802154_le16_to_be16(&short_addr, ha);
+			__ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR,
+						 &short_addr,
+						 IEEE802154_SHORT_ADDR_LEN, 0);
+		}
+		return;
+	case NDISC_NEIGHBOUR_ADVERTISEMENT:
+		opt_type = ND_OPT_TARGET_LL_ADDR;
+		break;
+	case NDISC_ROUTER_SOLICITATION:
+	case NDISC_NEIGHBOUR_SOLICITATION:
+		opt_type = ND_OPT_SOURCE_LL_ADDR;
+		break;
+	default:
+		return;
+	}
+
+	wpan_dev = lowpan_802154_dev(dev)->wdev->ieee802154_ptr;
+
+	if (lowpan_802154_is_valid_src_short_addr(wpan_dev->short_addr)) {
+		ieee802154_le16_to_be16(&short_addr,
+					&wpan_dev->short_addr);
+		__ndisc_fill_addr_option(skb, opt_type, &short_addr,
+					 IEEE802154_SHORT_ADDR_LEN, 0);
+	}
+}
+
+static void lowpan_ndisc_prefix_rcv_add_addr(struct net *net,
+					     struct net_device *dev,
+					     const struct prefix_info *pinfo,
+					     struct inet6_dev *in6_dev,
+					     struct in6_addr *addr,
+					     int addr_type, u32 addr_flags,
+					     bool sllao, bool tokenized,
+					     __u32 valid_lft,
+					     u32 prefered_lft,
+					     bool dev_addr_generated)
+{
+	int err;
+
+	/* generates short based address for RA PIO's */
+	if (lowpan_is_ll(dev, LOWPAN_LLTYPE_IEEE802154) && dev_addr_generated &&
+	    !addrconf_ifid_802154_6lowpan(addr->s6_addr + 8, dev)) {
+		err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
+						   addr, addr_type, addr_flags,
+						   sllao, tokenized, valid_lft,
+						   prefered_lft);
+		if (err)
+			ND_PRINTK(2, warn,
+				  "RA: could not add a short address based address for prefix: %pI6c\n",
+				  &pinfo->prefix);
+	}
+}
+#endif
+
+const struct ndisc_ops lowpan_ndisc_ops = {
+	.is_useropt		= lowpan_ndisc_is_useropt,
+#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
+	.parse_options		= lowpan_ndisc_parse_options,
+	.update			= lowpan_ndisc_update,
+	.opt_addr_space		= lowpan_ndisc_opt_addr_space,
+	.fill_addr_option	= lowpan_ndisc_fill_addr_option,
+	.prefix_rcv_add_addr	= lowpan_ndisc_prefix_rcv_add_addr,
+#endif
+};
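Taken together, these ops hook 6LoWPAN into the IPv6 neighbour discovery core via the ndisc_ops infrastructure this series introduces: parse_options, opt_addr_space and fill_addr_option teach ND to receive and emit the two-byte 802.15.4 short-address link-layer option next to the EUI-64 one, update() caches a neighbour's short address for later use, and prefix_rcv_add_addr() autoconfigures an additional short-address-based global address from router advertisements.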
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 6c2901a86230..396c0134c5ab 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -654,7 +654,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
654 len + ETH_HLEN); 654 len + ETH_HLEN);
655 655
656 ret = NET_RX_SUCCESS; 656 ret = NET_RX_SUCCESS;
657 } else if (res == NET_XMIT_POLICED) { 657 } else if (res == -EINPROGRESS) {
658 /* skb was buffered and consumed */ 658 /* skb was buffered and consumed */
659 ret = NET_RX_SUCCESS; 659 ret = NET_RX_SUCCESS;
660 } 660 }
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index f2f125684ed9..b1a4e8a811c8 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -156,7 +156,7 @@ int batadv_send_unicast_skb(struct sk_buff *skb,
  * attempted.
  *
  * Return: NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
- * NET_XMIT_POLICED if the skb is buffered for later transmit.
+ * -EINPROGRESS if the skb is buffered for later transmit.
  */
 int batadv_send_skb_to_orig(struct sk_buff *skb,
 			    struct batadv_orig_node *orig_node,
@@ -188,7 +188,7 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
 	 * network coding fails, then send the packet as usual.
 	 */
 	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
-		ret = NET_XMIT_POLICED;
+		ret = -EINPROGRESS;
 	} else {
 		batadv_send_unicast_skb(skb, neigh_node);
 		ret = NET_XMIT_SUCCESS;
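NET_XMIT_POLICED was removed from the core network stack in the same development cycle, so batman-adv's "skb buffered and consumed by network coding" indication moves to -EINPROGRESS, a negative value that cannot collide with the remaining NET_XMIT_* codes.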
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 780089d75915..d020299baba4 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -627,20 +627,9 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
627 return err < 0 ? NET_XMIT_DROP : err; 627 return err < 0 ? NET_XMIT_DROP : err;
628} 628}
629 629
630static struct lock_class_key bt_tx_busylock;
631static struct lock_class_key bt_netdev_xmit_lock_key;
632
633static void bt_set_lockdep_class_one(struct net_device *dev,
634 struct netdev_queue *txq,
635 void *_unused)
636{
637 lockdep_set_class(&txq->_xmit_lock, &bt_netdev_xmit_lock_key);
638}
639
640static int bt_dev_init(struct net_device *dev) 630static int bt_dev_init(struct net_device *dev)
641{ 631{
642 netdev_for_each_tx_queue(dev, bt_set_lockdep_class_one, NULL); 632 netdev_lockdep_set_classes(dev);
643 dev->qdisc_tx_busylock = &bt_tx_busylock;
644 633
645 return 0; 634 return 0;
646} 635}
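netdev_lockdep_set_classes() is a core helper added this cycle that assigns per-device-type lockdep classes to both the TX queue _xmit_lock and the qdisc busylock in one call, replacing the per-driver boilerplate deleted above.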
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 67a4a36febd1..3408ed51b611 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -13,7 +13,6 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/if_ether.h> 15#include <linux/if_ether.h>
16#include <linux/moduleparam.h>
17#include <linux/ip.h> 16#include <linux/ip.h>
18#include <linux/sched.h> 17#include <linux/sched.h>
19#include <linux/sockios.h> 18#include <linux/sockios.h>
diff --git a/net/can/Makefile b/net/can/Makefile
index cef49eb1f5c7..10936754e3f2 100644
--- a/net/can/Makefile
+++ b/net/can/Makefile
@@ -3,7 +3,8 @@
 #
 
 obj-$(CONFIG_CAN)	+= can.o
-can-y			:= af_can.o proc.o
+can-y			:= af_can.o
+can-$(CONFIG_PROC_FS)	+= proc.o
 
 obj-$(CONFIG_CAN_RAW)	+= can-raw.o
 can-raw-y		:= raw.o
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 166d436196c1..1108079d934f 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -911,14 +911,14 @@ static __init int can_init(void)
911 if (!rcv_cache) 911 if (!rcv_cache)
912 return -ENOMEM; 912 return -ENOMEM;
913 913
914 if (stats_timer) { 914 if (IS_ENABLED(CONFIG_PROC_FS)) {
915 if (stats_timer) {
915 /* the statistics are updated every second (timer triggered) */ 916 /* the statistics are updated every second (timer triggered) */
916 setup_timer(&can_stattimer, can_stat_update, 0); 917 setup_timer(&can_stattimer, can_stat_update, 0);
917 mod_timer(&can_stattimer, round_jiffies(jiffies + HZ)); 918 mod_timer(&can_stattimer, round_jiffies(jiffies + HZ));
918 } else 919 }
919 can_stattimer.function = NULL; 920 can_init_proc();
920 921 }
921 can_init_proc();
922 922
923 /* protocol register */ 923 /* protocol register */
924 sock_register(&can_family_ops); 924 sock_register(&can_family_ops);
@@ -933,10 +933,12 @@ static __exit void can_exit(void)
933{ 933{
934 struct net_device *dev; 934 struct net_device *dev;
935 935
936 if (stats_timer) 936 if (IS_ENABLED(CONFIG_PROC_FS)) {
937 del_timer_sync(&can_stattimer); 937 if (stats_timer)
938 del_timer_sync(&can_stattimer);
938 939
939 can_remove_proc(); 940 can_remove_proc();
941 }
940 942
941 /* protocol unregister */ 943 /* protocol unregister */
942 dev_remove_pack(&canfd_packet); 944 dev_remove_pack(&canfd_packet);
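
Together with the net/can/Makefile hunk above, this converts the proc support to the IS_ENABLED() idiom: the guarded branch is always compiled (so it cannot bit-rot) and is discarded as dead code when CONFIG_PROC_FS is off, which also keeps the linker happy even though proc.o is no longer built in that configuration. A minimal sketch of the idiom (foo_init_proc() is a hypothetical symbol living in a conditionally built object):

    static int __init foo_init(void)
    {
            /* IS_ENABLED() is a compile-time constant: with
             * CONFIG_PROC_FS=n the compiler eliminates the call, so
             * there is no undefined reference to foo_init_proc()
             */
            if (IS_ENABLED(CONFIG_PROC_FS))
                    foo_init_proc();

            return 0;
    }
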
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 6863310d6973..8e999ffdf28b 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content 2 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
3 * 3 *
4 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research 4 * Copyright (c) 2002-2016 Volkswagen Group Electronic Research
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
@@ -67,27 +67,31 @@
67 */ 67 */
68#define MAX_NFRAMES 256 68#define MAX_NFRAMES 256
69 69
70/* use of last_frames[index].can_dlc */ 70/* use of last_frames[index].flags */
71#define RX_RECV 0x40 /* received data for this element */ 71#define RX_RECV 0x40 /* received data for this element */
72#define RX_THR 0x80 /* element not been sent due to throttle feature */ 72#define RX_THR 0x80 /* element not been sent due to throttle feature */
73#define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */ 73#define BCM_CAN_FLAGS_MASK 0x3F /* to clean private flags after usage */
74 74
75/* get best masking value for can_rx_register() for a given single can_id */ 75/* get best masking value for can_rx_register() for a given single can_id */
76#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \ 76#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
77 (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \ 77 (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
78 (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)) 78 (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
79 79
80#define CAN_BCM_VERSION CAN_VERSION 80#define CAN_BCM_VERSION "20160617"
81 81
82MODULE_DESCRIPTION("PF_CAN broadcast manager protocol"); 82MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
83MODULE_LICENSE("Dual BSD/GPL"); 83MODULE_LICENSE("Dual BSD/GPL");
84MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>"); 84MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
85MODULE_ALIAS("can-proto-2"); 85MODULE_ALIAS("can-proto-2");
86 86
87/* easy access to can_frame payload */ 87/*
88static inline u64 GET_U64(const struct can_frame *cp) 88 * easy access to the first 64 bit of can(fd)_frame payload. cp->data is
89 * 64 bit aligned so the offset has to be multiples of 8 which is ensured
90 * by the only callers in bcm_rx_cmp_to_index() bcm_rx_handler().
91 */
92static inline u64 get_u64(const struct canfd_frame *cp, int offset)
89{ 93{
90 return *(u64 *)cp->data; 94 return *(u64 *)(cp->data + offset);
91} 95}
92 96
93struct bcm_op { 97struct bcm_op {
@@ -101,13 +105,14 @@ struct bcm_op {
101 struct tasklet_struct tsklet, thrtsklet; 105 struct tasklet_struct tsklet, thrtsklet;
102 ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg; 106 ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
103 int rx_ifindex; 107 int rx_ifindex;
108 int cfsiz;
104 u32 count; 109 u32 count;
105 u32 nframes; 110 u32 nframes;
106 u32 currframe; 111 u32 currframe;
107 struct can_frame *frames; 112 struct canfd_frame *frames;
108 struct can_frame *last_frames; 113 struct canfd_frame *last_frames;
109 struct can_frame sframe; 114 struct canfd_frame sframe;
110 struct can_frame last_sframe; 115 struct canfd_frame last_sframe;
111 struct sock *sk; 116 struct sock *sk;
112 struct net_device *rx_reg_dev; 117 struct net_device *rx_reg_dev;
113}; 118};
@@ -136,7 +141,7 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
136 return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC); 141 return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
137} 142}
138 143
139#define CFSIZ sizeof(struct can_frame) 144#define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
140#define OPSIZ sizeof(struct bcm_op) 145#define OPSIZ sizeof(struct bcm_op)
141#define MHSIZ sizeof(struct bcm_msg_head) 146#define MHSIZ sizeof(struct bcm_msg_head)
142 147
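
From here on a bcm_op carries CAN FD frames: cfsiz is fixed per operation via CFSIZ(flags) (CAN_MTU or CANFD_MTU) and payload comparison walks the up-to-64-byte data[] in 64-bit chunks. A hedged stand-alone sketch of that comparison, mirroring the loop bcm_rx_cmp_to_index() gains below (payload_changed() is a hypothetical name; data[] of struct canfd_frame is 8-byte aligned, which keeps the u64 loads legal):

    #include <linux/can.h>
    #include <stdbool.h>
    #include <stdint.h>

    static inline uint64_t get_u64(const struct canfd_frame *cp, int offset)
    {
            return *(const uint64_t *)(cp->data + offset);
    }

    /* true if any masked 64-bit chunk of the payload differs */
    static bool payload_changed(const struct canfd_frame *mask,
                                const struct canfd_frame *old,
                                const struct canfd_frame *new)
    {
            int i;

            for (i = 0; i < new->len; i += 8)
                    if ((get_u64(mask, i) & get_u64(new, i)) !=
                        (get_u64(mask, i) & get_u64(old, i)))
                            return true;

            return false;
    }
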
@@ -183,43 +188,50 @@ static int bcm_proc_show(struct seq_file *m, void *v)
183 if (!op->frames_abs) 188 if (!op->frames_abs)
184 continue; 189 continue;
185 190
186 seq_printf(m, "rx_op: %03X %-5s ", 191 seq_printf(m, "rx_op: %03X %-5s ", op->can_id,
187 op->can_id, bcm_proc_getifname(ifname, op->ifindex)); 192 bcm_proc_getifname(ifname, op->ifindex));
188 seq_printf(m, "[%u]%c ", op->nframes, 193
189 (op->flags & RX_CHECK_DLC)?'d':' '); 194 if (op->flags & CAN_FD_FRAME)
195 seq_printf(m, "(%u)", op->nframes);
196 else
197 seq_printf(m, "[%u]", op->nframes);
198
199 seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' ');
200
190 if (op->kt_ival1.tv64) 201 if (op->kt_ival1.tv64)
191 seq_printf(m, "timeo=%lld ", 202 seq_printf(m, "timeo=%lld ",
192 (long long) 203 (long long)ktime_to_us(op->kt_ival1));
193 ktime_to_us(op->kt_ival1));
194 204
195 if (op->kt_ival2.tv64) 205 if (op->kt_ival2.tv64)
196 seq_printf(m, "thr=%lld ", 206 seq_printf(m, "thr=%lld ",
197 (long long) 207 (long long)ktime_to_us(op->kt_ival2));
198 ktime_to_us(op->kt_ival2));
199 208
200 seq_printf(m, "# recv %ld (%ld) => reduction: ", 209 seq_printf(m, "# recv %ld (%ld) => reduction: ",
201 op->frames_filtered, op->frames_abs); 210 op->frames_filtered, op->frames_abs);
202 211
203 reduction = 100 - (op->frames_filtered * 100) / op->frames_abs; 212 reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
204 213
205 seq_printf(m, "%s%ld%%\n", 214 seq_printf(m, "%s%ld%%\n",
206 (reduction == 100)?"near ":"", reduction); 215 (reduction == 100) ? "near " : "", reduction);
207 } 216 }
208 217
209 list_for_each_entry(op, &bo->tx_ops, list) { 218 list_for_each_entry(op, &bo->tx_ops, list) {
210 219
211 seq_printf(m, "tx_op: %03X %s [%u] ", 220 seq_printf(m, "tx_op: %03X %s ", op->can_id,
212 op->can_id, 221 bcm_proc_getifname(ifname, op->ifindex));
213 bcm_proc_getifname(ifname, op->ifindex), 222
214 op->nframes); 223 if (op->flags & CAN_FD_FRAME)
224 seq_printf(m, "(%u) ", op->nframes);
225 else
226 seq_printf(m, "[%u] ", op->nframes);
215 227
216 if (op->kt_ival1.tv64) 228 if (op->kt_ival1.tv64)
217 seq_printf(m, "t1=%lld ", 229 seq_printf(m, "t1=%lld ",
218 (long long) ktime_to_us(op->kt_ival1)); 230 (long long)ktime_to_us(op->kt_ival1));
219 231
220 if (op->kt_ival2.tv64) 232 if (op->kt_ival2.tv64)
221 seq_printf(m, "t2=%lld ", 233 seq_printf(m, "t2=%lld ",
222 (long long) ktime_to_us(op->kt_ival2)); 234 (long long)ktime_to_us(op->kt_ival2));
223 235
224 seq_printf(m, "# sent %ld\n", op->frames_abs); 236 seq_printf(m, "# sent %ld\n", op->frames_abs);
225 } 237 }
@@ -248,7 +260,7 @@ static void bcm_can_tx(struct bcm_op *op)
248{ 260{
249 struct sk_buff *skb; 261 struct sk_buff *skb;
250 struct net_device *dev; 262 struct net_device *dev;
251 struct can_frame *cf = &op->frames[op->currframe]; 263 struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe;
252 264
253 /* no target device? => exit */ 265 /* no target device? => exit */
254 if (!op->ifindex) 266 if (!op->ifindex)
@@ -260,7 +272,7 @@ static void bcm_can_tx(struct bcm_op *op)
260 return; 272 return;
261 } 273 }
262 274
263 skb = alloc_skb(CFSIZ + sizeof(struct can_skb_priv), gfp_any()); 275 skb = alloc_skb(op->cfsiz + sizeof(struct can_skb_priv), gfp_any());
264 if (!skb) 276 if (!skb)
265 goto out; 277 goto out;
266 278
@@ -268,7 +280,7 @@ static void bcm_can_tx(struct bcm_op *op)
268 can_skb_prv(skb)->ifindex = dev->ifindex; 280 can_skb_prv(skb)->ifindex = dev->ifindex;
269 can_skb_prv(skb)->skbcnt = 0; 281 can_skb_prv(skb)->skbcnt = 0;
270 282
271 memcpy(skb_put(skb, CFSIZ), cf, CFSIZ); 283 memcpy(skb_put(skb, op->cfsiz), cf, op->cfsiz);
272 284
273 /* send with loopback */ 285 /* send with loopback */
274 skb->dev = dev; 286 skb->dev = dev;
@@ -282,7 +294,7 @@ static void bcm_can_tx(struct bcm_op *op)
282 /* reached last frame? */ 294 /* reached last frame? */
283 if (op->currframe >= op->nframes) 295 if (op->currframe >= op->nframes)
284 op->currframe = 0; 296 op->currframe = 0;
285 out: 297out:
286 dev_put(dev); 298 dev_put(dev);
287} 299}
288 300
@@ -291,13 +303,13 @@ static void bcm_can_tx(struct bcm_op *op)
291 * (consisting of bcm_msg_head + x CAN frames) 303 * (consisting of bcm_msg_head + x CAN frames)
292 */ 304 */
293static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head, 305static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
294 struct can_frame *frames, int has_timestamp) 306 struct canfd_frame *frames, int has_timestamp)
295{ 307{
296 struct sk_buff *skb; 308 struct sk_buff *skb;
297 struct can_frame *firstframe; 309 struct canfd_frame *firstframe;
298 struct sockaddr_can *addr; 310 struct sockaddr_can *addr;
299 struct sock *sk = op->sk; 311 struct sock *sk = op->sk;
300 unsigned int datalen = head->nframes * CFSIZ; 312 unsigned int datalen = head->nframes * op->cfsiz;
301 int err; 313 int err;
302 314
303 skb = alloc_skb(sizeof(*head) + datalen, gfp_any()); 315 skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
@@ -307,19 +319,19 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
307 memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head)); 319 memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head));
308 320
309 if (head->nframes) { 321 if (head->nframes) {
310 /* can_frames starting here */ 322 /* CAN frames starting here */
311 firstframe = (struct can_frame *)skb_tail_pointer(skb); 323 firstframe = (struct canfd_frame *)skb_tail_pointer(skb);
312 324
313 memcpy(skb_put(skb, datalen), frames, datalen); 325 memcpy(skb_put(skb, datalen), frames, datalen);
314 326
315 /* 327 /*
316 * the BCM uses the can_dlc-element of the can_frame 328 * the BCM uses the flags-element of the canfd_frame
317 * structure for internal purposes. This is only 329 * structure for internal purposes. This is only
318 * relevant for updates that are generated by the 330 * relevant for updates that are generated by the
319 * BCM, where nframes is 1 331 * BCM, where nframes is 1
320 */ 332 */
321 if (head->nframes == 1) 333 if (head->nframes == 1)
322 firstframe->can_dlc &= BCM_CAN_DLC_MASK; 334 firstframe->flags &= BCM_CAN_FLAGS_MASK;
323 } 335 }
324 336
325 if (has_timestamp) { 337 if (has_timestamp) {
@@ -406,7 +418,7 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
406/* 418/*
407 * bcm_rx_changed - create a RX_CHANGED notification due to changed content 419 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
408 */ 420 */
409static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data) 421static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
410{ 422{
411 struct bcm_msg_head head; 423 struct bcm_msg_head head;
412 424
@@ -418,7 +430,7 @@ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
418 op->frames_filtered = op->frames_abs = 0; 430 op->frames_filtered = op->frames_abs = 0;
419 431
420 /* this element is not throttled anymore */ 432 /* this element is not throttled anymore */
421 data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV); 433 data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
422 434
423 head.opcode = RX_CHANGED; 435 head.opcode = RX_CHANGED;
424 head.flags = op->flags; 436 head.flags = op->flags;
@@ -437,13 +449,13 @@ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
437 * 2. send a notification to the user (if possible) 449 * 2. send a notification to the user (if possible)
438 */ 450 */
439static void bcm_rx_update_and_send(struct bcm_op *op, 451static void bcm_rx_update_and_send(struct bcm_op *op,
440 struct can_frame *lastdata, 452 struct canfd_frame *lastdata,
441 const struct can_frame *rxdata) 453 const struct canfd_frame *rxdata)
442{ 454{
443 memcpy(lastdata, rxdata, CFSIZ); 455 memcpy(lastdata, rxdata, op->cfsiz);
444 456
445 /* mark as used and throttled by default */ 457 /* mark as used and throttled by default */
446 lastdata->can_dlc |= (RX_RECV|RX_THR); 458 lastdata->flags |= (RX_RECV|RX_THR);
447 459
448 /* throttling mode inactive ? */ 460 /* throttling mode inactive ? */
449 if (!op->kt_ival2.tv64) { 461 if (!op->kt_ival2.tv64) {
@@ -481,33 +493,36 @@ rx_changed_settime:
481 * received data stored in op->last_frames[] 493 * received data stored in op->last_frames[]
482 */ 494 */
483static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index, 495static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
484 const struct can_frame *rxdata) 496 const struct canfd_frame *rxdata)
485{ 497{
498 struct canfd_frame *cf = op->frames + op->cfsiz * index;
499 struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
500 int i;
501
486 /* 502 /*
487 * no one uses the MSBs of can_dlc for comparison, 503 * no one uses the MSBs of flags for comparison,
488 * so we use it here to detect the first time of reception 504 * so we use it here to detect the first time of reception
489 */ 505 */
490 506
491 if (!(op->last_frames[index].can_dlc & RX_RECV)) { 507 if (!(lcf->flags & RX_RECV)) {
492 /* received data for the first time => send update to user */ 508 /* received data for the first time => send update to user */
493 bcm_rx_update_and_send(op, &op->last_frames[index], rxdata); 509 bcm_rx_update_and_send(op, lcf, rxdata);
494 return; 510 return;
495 } 511 }
496 512
497 /* do a real check in can_frame data section */ 513 /* do a real check in CAN frame data section */
498 514 for (i = 0; i < rxdata->len; i += 8) {
499 if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) != 515 if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
500 (GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) { 516 (get_u64(cf, i) & get_u64(lcf, i))) {
501 bcm_rx_update_and_send(op, &op->last_frames[index], rxdata); 517 bcm_rx_update_and_send(op, lcf, rxdata);
502 return; 518 return;
519 }
503 } 520 }
504 521
505 if (op->flags & RX_CHECK_DLC) { 522 if (op->flags & RX_CHECK_DLC) {
506 /* do a real check in can_frame dlc */ 523 /* do a real check in CAN frame length */
507 if (rxdata->can_dlc != (op->last_frames[index].can_dlc & 524 if (rxdata->len != lcf->len) {
508 BCM_CAN_DLC_MASK)) { 525 bcm_rx_update_and_send(op, lcf, rxdata);
509 bcm_rx_update_and_send(op, &op->last_frames[index],
510 rxdata);
511 return; 526 return;
512 } 527 }
513 } 528 }
@@ -556,8 +571,8 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
556 571
557 /* if user wants to be informed, when cyclic CAN-Messages come back */ 572 /* if user wants to be informed, when cyclic CAN-Messages come back */
558 if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) { 573 if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
559 /* clear received can_frames to indicate 'nothing received' */ 574 /* clear received CAN frames to indicate 'nothing received' */
560 memset(op->last_frames, 0, op->nframes * CFSIZ); 575 memset(op->last_frames, 0, op->nframes * op->cfsiz);
561 } 576 }
562 577
563 return HRTIMER_NORESTART; 578 return HRTIMER_NORESTART;
@@ -569,9 +584,11 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
569static inline int bcm_rx_do_flush(struct bcm_op *op, int update, 584static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
570 unsigned int index) 585 unsigned int index)
571{ 586{
572 if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) { 587 struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
588
589 if ((op->last_frames) && (lcf->flags & RX_THR)) {
573 if (update) 590 if (update)
574 bcm_rx_changed(op, &op->last_frames[index]); 591 bcm_rx_changed(op, lcf);
575 return 1; 592 return 1;
576 } 593 }
577 return 0; 594 return 0;
@@ -636,15 +653,19 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
636static void bcm_rx_handler(struct sk_buff *skb, void *data) 653static void bcm_rx_handler(struct sk_buff *skb, void *data)
637{ 654{
638 struct bcm_op *op = (struct bcm_op *)data; 655 struct bcm_op *op = (struct bcm_op *)data;
639 const struct can_frame *rxframe = (struct can_frame *)skb->data; 656 const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
640 unsigned int i; 657 unsigned int i;
641 658
642 /* disable timeout */
643 hrtimer_cancel(&op->timer);
644
645 if (op->can_id != rxframe->can_id) 659 if (op->can_id != rxframe->can_id)
646 return; 660 return;
647 661
662 /* make sure to handle the correct frame type (CAN / CAN FD) */
663 if (skb->len != op->cfsiz)
664 return;
665
666 /* disable timeout */
667 hrtimer_cancel(&op->timer);
668
648 /* save rx timestamp */ 669 /* save rx timestamp */
649 op->rx_stamp = skb->tstamp; 670 op->rx_stamp = skb->tstamp;
650 /* save originator for recvfrom() */ 671 /* save originator for recvfrom() */
@@ -675,13 +696,14 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
675 * multiplex compare 696 * multiplex compare
676 * 697 *
677 * find the first multiplex mask that fits. 698 * find the first multiplex mask that fits.
678 * Remark: The MUX-mask is stored in index 0 699 * Remark: The MUX-mask is stored in index 0 - but only the
700 * first 64 bits of the frame data[] are relevant (CAN FD)
679 */ 701 */
680 702
681 for (i = 1; i < op->nframes; i++) { 703 for (i = 1; i < op->nframes; i++) {
682 if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) == 704 if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
683 (GET_U64(&op->frames[0]) & 705 (get_u64(op->frames, 0) &
684 GET_U64(&op->frames[i]))) { 706 get_u64(op->frames + op->cfsiz * i, 0))) {
685 bcm_rx_cmp_to_index(op, i, rxframe); 707 bcm_rx_cmp_to_index(op, i, rxframe);
686 break; 708 break;
687 } 709 }
@@ -695,13 +717,14 @@ rx_starttimer:
695/* 717/*
696 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements 718 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
697 */ 719 */
698static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id, 720static struct bcm_op *bcm_find_op(struct list_head *ops,
699 int ifindex) 721 struct bcm_msg_head *mh, int ifindex)
700{ 722{
701 struct bcm_op *op; 723 struct bcm_op *op;
702 724
703 list_for_each_entry(op, ops, list) { 725 list_for_each_entry(op, ops, list) {
704 if ((op->can_id == can_id) && (op->ifindex == ifindex)) 726 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
727 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME))
705 return op; 728 return op;
706 } 729 }
707 730
@@ -744,12 +767,14 @@ static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
744/* 767/*
745 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops) 768 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
746 */ 769 */
747static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex) 770static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
771 int ifindex)
748{ 772{
749 struct bcm_op *op, *n; 773 struct bcm_op *op, *n;
750 774
751 list_for_each_entry_safe(op, n, ops, list) { 775 list_for_each_entry_safe(op, n, ops, list) {
752 if ((op->can_id == can_id) && (op->ifindex == ifindex)) { 776 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
777 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
753 778
754 /* 779 /*
755 * Don't care if we're bound or not (due to netdev 780 * Don't care if we're bound or not (due to netdev
@@ -789,12 +814,14 @@ static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex)
789/* 814/*
790 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops) 815 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
791 */ 816 */
792static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex) 817static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
818 int ifindex)
793{ 819{
794 struct bcm_op *op, *n; 820 struct bcm_op *op, *n;
795 821
796 list_for_each_entry_safe(op, n, ops, list) { 822 list_for_each_entry_safe(op, n, ops, list) {
797 if ((op->can_id == can_id) && (op->ifindex == ifindex)) { 823 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
824 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
798 list_del(&op->list); 825 list_del(&op->list);
799 bcm_remove_op(op); 826 bcm_remove_op(op);
800 return 1; /* done */ 827 return 1; /* done */
@@ -810,7 +837,7 @@ static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex)
810static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head, 837static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
811 int ifindex) 838 int ifindex)
812{ 839{
813 struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex); 840 struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex);
814 841
815 if (!op) 842 if (!op)
816 return -EINVAL; 843 return -EINVAL;
@@ -835,6 +862,7 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
835{ 862{
836 struct bcm_sock *bo = bcm_sk(sk); 863 struct bcm_sock *bo = bcm_sk(sk);
837 struct bcm_op *op; 864 struct bcm_op *op;
865 struct canfd_frame *cf;
838 unsigned int i; 866 unsigned int i;
839 int err; 867 int err;
840 868
@@ -842,39 +870,46 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
842 if (!ifindex) 870 if (!ifindex)
843 return -ENODEV; 871 return -ENODEV;
844 872
845 /* check nframes boundaries - we need at least one can_frame */ 873 /* check nframes boundaries - we need at least one CAN frame */
846 if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES) 874 if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
847 return -EINVAL; 875 return -EINVAL;
848 876
849 /* check the given can_id */ 877 /* check the given can_id */
850 op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex); 878 op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
851
852 if (op) { 879 if (op) {
853 /* update existing BCM operation */ 880 /* update existing BCM operation */
854 881
855 /* 882 /*
856 * Do we need more space for the can_frames than currently 883 * Do we need more space for the CAN frames than currently
857 * allocated? -> This is a _really_ unusual use-case and 884 * allocated? -> This is a _really_ unusual use-case and
858 * therefore (complexity / locking) it is not supported. 885 * therefore (complexity / locking) it is not supported.
859 */ 886 */
860 if (msg_head->nframes > op->nframes) 887 if (msg_head->nframes > op->nframes)
861 return -E2BIG; 888 return -E2BIG;
862 889
863 /* update can_frames content */ 890 /* update CAN frames content */
864 for (i = 0; i < msg_head->nframes; i++) { 891 for (i = 0; i < msg_head->nframes; i++) {
865 err = memcpy_from_msg((u8 *)&op->frames[i], msg, CFSIZ);
866 892
867 if (op->frames[i].can_dlc > 8) 893 cf = op->frames + op->cfsiz * i;
868 err = -EINVAL; 894 err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
895
896 if (op->flags & CAN_FD_FRAME) {
897 if (cf->len > 64)
898 err = -EINVAL;
899 } else {
900 if (cf->len > 8)
901 err = -EINVAL;
902 }
869 903
870 if (err < 0) 904 if (err < 0)
871 return err; 905 return err;
872 906
873 if (msg_head->flags & TX_CP_CAN_ID) { 907 if (msg_head->flags & TX_CP_CAN_ID) {
874 /* copy can_id into frame */ 908 /* copy can_id into frame */
875 op->frames[i].can_id = msg_head->can_id; 909 cf->can_id = msg_head->can_id;
876 } 910 }
877 } 911 }
912 op->flags = msg_head->flags;
878 913
879 } else { 914 } else {
880 /* insert new BCM operation for the given can_id */ 915 /* insert new BCM operation for the given can_id */
@@ -883,11 +918,13 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
883 if (!op) 918 if (!op)
884 return -ENOMEM; 919 return -ENOMEM;
885 920
886 op->can_id = msg_head->can_id; 921 op->can_id = msg_head->can_id;
922 op->cfsiz = CFSIZ(msg_head->flags);
923 op->flags = msg_head->flags;
887 924
888 /* create array for can_frames and copy the data */ 925 /* create array for CAN frames and copy the data */
889 if (msg_head->nframes > 1) { 926 if (msg_head->nframes > 1) {
890 op->frames = kmalloc(msg_head->nframes * CFSIZ, 927 op->frames = kmalloc(msg_head->nframes * op->cfsiz,
891 GFP_KERNEL); 928 GFP_KERNEL);
892 if (!op->frames) { 929 if (!op->frames) {
893 kfree(op); 930 kfree(op);
@@ -897,10 +934,17 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
897 op->frames = &op->sframe; 934 op->frames = &op->sframe;
898 935
899 for (i = 0; i < msg_head->nframes; i++) { 936 for (i = 0; i < msg_head->nframes; i++) {
900 err = memcpy_from_msg((u8 *)&op->frames[i], msg, CFSIZ);
901 937
902 if (op->frames[i].can_dlc > 8) 938 cf = op->frames + op->cfsiz * i;
903 err = -EINVAL; 939 err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
940
941 if (op->flags & CAN_FD_FRAME) {
942 if (cf->len > 64)
943 err = -EINVAL;
944 } else {
945 if (cf->len > 8)
946 err = -EINVAL;
947 }
904 948
905 if (err < 0) { 949 if (err < 0) {
906 if (op->frames != &op->sframe) 950 if (op->frames != &op->sframe)
@@ -911,7 +955,7 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
911 955
912 if (msg_head->flags & TX_CP_CAN_ID) { 956 if (msg_head->flags & TX_CP_CAN_ID) {
913 /* copy can_id into frame */ 957 /* copy can_id into frame */
914 op->frames[i].can_id = msg_head->can_id; 958 cf->can_id = msg_head->can_id;
915 } 959 }
916 } 960 }
917 961
@@ -946,8 +990,6 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
946 990
947 /* check flags */ 991 /* check flags */
948 992
949 op->flags = msg_head->flags;
950
951 if (op->flags & TX_RESET_MULTI_IDX) { 993 if (op->flags & TX_RESET_MULTI_IDX) {
952 /* start multiple frame transmission with index 0 */ 994 /* start multiple frame transmission with index 0 */
953 op->currframe = 0; 995 op->currframe = 0;
@@ -968,7 +1010,7 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
968 1010
969 if (op->flags & STARTTIMER) { 1011 if (op->flags & STARTTIMER) {
970 hrtimer_cancel(&op->timer); 1012 hrtimer_cancel(&op->timer);
971 /* spec: send can_frame when starting timer */ 1013 /* spec: send CAN frame when starting timer */
972 op->flags |= TX_ANNOUNCE; 1014 op->flags |= TX_ANNOUNCE;
973 } 1015 }
974 1016
@@ -981,7 +1023,7 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
981 if (op->flags & STARTTIMER) 1023 if (op->flags & STARTTIMER)
982 bcm_tx_start_timer(op); 1024 bcm_tx_start_timer(op);
983 1025
984 return msg_head->nframes * CFSIZ + MHSIZ; 1026 return msg_head->nframes * op->cfsiz + MHSIZ;
985} 1027}
986 1028
987/* 1029/*
@@ -1012,12 +1054,12 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1012 return -EINVAL; 1054 return -EINVAL;
1013 1055
1014 /* check the given can_id */ 1056 /* check the given can_id */
1015 op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex); 1057 op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
1016 if (op) { 1058 if (op) {
1017 /* update existing BCM operation */ 1059 /* update existing BCM operation */
1018 1060
1019 /* 1061 /*
1020 * Do we need more space for the can_frames than currently 1062 * Do we need more space for the CAN frames than currently
1021 * allocated? -> This is a _really_ unusual use-case and 1063 * allocated? -> This is a _really_ unusual use-case and
1022 * therefore (complexity / locking) it is not supported. 1064 * therefore (complexity / locking) it is not supported.
1023 */ 1065 */
@@ -1025,17 +1067,18 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1025 return -E2BIG; 1067 return -E2BIG;
1026 1068
1027 if (msg_head->nframes) { 1069 if (msg_head->nframes) {
1028 /* update can_frames content */ 1070 /* update CAN frames content */
1029 err = memcpy_from_msg((u8 *)op->frames, msg, 1071 err = memcpy_from_msg((u8 *)op->frames, msg,
1030 msg_head->nframes * CFSIZ); 1072 msg_head->nframes * op->cfsiz);
1031 if (err < 0) 1073 if (err < 0)
1032 return err; 1074 return err;
1033 1075
1034 /* clear last_frames to indicate 'nothing received' */ 1076 /* clear last_frames to indicate 'nothing received' */
1035 memset(op->last_frames, 0, msg_head->nframes * CFSIZ); 1077 memset(op->last_frames, 0, msg_head->nframes * op->cfsiz);
1036 } 1078 }
1037 1079
1038 op->nframes = msg_head->nframes; 1080 op->nframes = msg_head->nframes;
1081 op->flags = msg_head->flags;
1039 1082
1040 /* Only an update -> do not call can_rx_register() */ 1083 /* Only an update -> do not call can_rx_register() */
1041 do_rx_register = 0; 1084 do_rx_register = 0;
@@ -1046,20 +1089,22 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1046 if (!op) 1089 if (!op)
1047 return -ENOMEM; 1090 return -ENOMEM;
1048 1091
1049 op->can_id = msg_head->can_id; 1092 op->can_id = msg_head->can_id;
1050 op->nframes = msg_head->nframes; 1093 op->nframes = msg_head->nframes;
1094 op->cfsiz = CFSIZ(msg_head->flags);
1095 op->flags = msg_head->flags;
1051 1096
1052 if (msg_head->nframes > 1) { 1097 if (msg_head->nframes > 1) {
1053 /* create array for can_frames and copy the data */ 1098 /* create array for CAN frames and copy the data */
1054 op->frames = kmalloc(msg_head->nframes * CFSIZ, 1099 op->frames = kmalloc(msg_head->nframes * op->cfsiz,
1055 GFP_KERNEL); 1100 GFP_KERNEL);
1056 if (!op->frames) { 1101 if (!op->frames) {
1057 kfree(op); 1102 kfree(op);
1058 return -ENOMEM; 1103 return -ENOMEM;
1059 } 1104 }
1060 1105
1061 /* create and init array for received can_frames */ 1106 /* create and init array for received CAN frames */
1062 op->last_frames = kzalloc(msg_head->nframes * CFSIZ, 1107 op->last_frames = kzalloc(msg_head->nframes * op->cfsiz,
1063 GFP_KERNEL); 1108 GFP_KERNEL);
1064 if (!op->last_frames) { 1109 if (!op->last_frames) {
1065 kfree(op->frames); 1110 kfree(op->frames);
@@ -1074,7 +1119,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1074 1119
1075 if (msg_head->nframes) { 1120 if (msg_head->nframes) {
1076 err = memcpy_from_msg((u8 *)op->frames, msg, 1121 err = memcpy_from_msg((u8 *)op->frames, msg,
1077 msg_head->nframes * CFSIZ); 1122 msg_head->nframes * op->cfsiz);
1078 if (err < 0) { 1123 if (err < 0) {
1079 if (op->frames != &op->sframe) 1124 if (op->frames != &op->sframe)
1080 kfree(op->frames); 1125 kfree(op->frames);
@@ -1116,7 +1161,6 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1116 } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */ 1161 } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */
1117 1162
1118 /* check flags */ 1163 /* check flags */
1119 op->flags = msg_head->flags;
1120 1164
1121 if (op->flags & RX_RTR_FRAME) { 1165 if (op->flags & RX_RTR_FRAME) {
1122 1166
@@ -1188,13 +1232,14 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1188 } 1232 }
1189 } 1233 }
1190 1234
1191 return msg_head->nframes * CFSIZ + MHSIZ; 1235 return msg_head->nframes * op->cfsiz + MHSIZ;
1192} 1236}
1193 1237
1194/* 1238/*
1195 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg) 1239 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
1196 */ 1240 */
1197static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk) 1241static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk,
1242 int cfsiz)
1198{ 1243{
1199 struct sk_buff *skb; 1244 struct sk_buff *skb;
1200 struct net_device *dev; 1245 struct net_device *dev;
@@ -1204,13 +1249,13 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
1204 if (!ifindex) 1249 if (!ifindex)
1205 return -ENODEV; 1250 return -ENODEV;
1206 1251
1207 skb = alloc_skb(CFSIZ + sizeof(struct can_skb_priv), GFP_KERNEL); 1252 skb = alloc_skb(cfsiz + sizeof(struct can_skb_priv), GFP_KERNEL);
1208 if (!skb) 1253 if (!skb)
1209 return -ENOMEM; 1254 return -ENOMEM;
1210 1255
1211 can_skb_reserve(skb); 1256 can_skb_reserve(skb);
1212 1257
1213 err = memcpy_from_msg(skb_put(skb, CFSIZ), msg, CFSIZ); 1258 err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz);
1214 if (err < 0) { 1259 if (err < 0) {
1215 kfree_skb(skb); 1260 kfree_skb(skb);
1216 return err; 1261 return err;
@@ -1232,7 +1277,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
1232 if (err) 1277 if (err)
1233 return err; 1278 return err;
1234 1279
1235 return CFSIZ + MHSIZ; 1280 return cfsiz + MHSIZ;
1236} 1281}
1237 1282
1238/* 1283/*
@@ -1244,13 +1289,23 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
1244 struct bcm_sock *bo = bcm_sk(sk); 1289 struct bcm_sock *bo = bcm_sk(sk);
1245 int ifindex = bo->ifindex; /* default ifindex for this bcm_op */ 1290 int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
1246 struct bcm_msg_head msg_head; 1291 struct bcm_msg_head msg_head;
1292 int cfsiz;
1247 int ret; /* read bytes or error codes as return value */ 1293 int ret; /* read bytes or error codes as return value */
1248 1294
1249 if (!bo->bound) 1295 if (!bo->bound)
1250 return -ENOTCONN; 1296 return -ENOTCONN;
1251 1297
1252 /* check for valid message length from userspace */ 1298 /* check for valid message length from userspace */
1253 if (size < MHSIZ || (size - MHSIZ) % CFSIZ) 1299 if (size < MHSIZ)
1300 return -EINVAL;
1301
1302 /* read message head information */
1303 ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
1304 if (ret < 0)
1305 return ret;
1306
1307 cfsiz = CFSIZ(msg_head.flags);
1308 if ((size - MHSIZ) % cfsiz)
1254 return -EINVAL; 1309 return -EINVAL;
1255 1310
1256 /* check for alternative ifindex for this bcm_op */ 1311 /* check for alternative ifindex for this bcm_op */
@@ -1284,12 +1339,6 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
1284 } 1339 }
1285 } 1340 }
1286 1341
1287 /* read message head information */
1288
1289 ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
1290 if (ret < 0)
1291 return ret;
1292
1293 lock_sock(sk); 1342 lock_sock(sk);
1294 1343
1295 switch (msg_head.opcode) { 1344 switch (msg_head.opcode) {
@@ -1303,14 +1352,14 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
1303 break; 1352 break;
1304 1353
1305 case TX_DELETE: 1354 case TX_DELETE:
1306 if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex)) 1355 if (bcm_delete_tx_op(&bo->tx_ops, &msg_head, ifindex))
1307 ret = MHSIZ; 1356 ret = MHSIZ;
1308 else 1357 else
1309 ret = -EINVAL; 1358 ret = -EINVAL;
1310 break; 1359 break;
1311 1360
1312 case RX_DELETE: 1361 case RX_DELETE:
1313 if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex)) 1362 if (bcm_delete_rx_op(&bo->rx_ops, &msg_head, ifindex))
1314 ret = MHSIZ; 1363 ret = MHSIZ;
1315 else 1364 else
1316 ret = -EINVAL; 1365 ret = -EINVAL;
@@ -1329,11 +1378,11 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
1329 break; 1378 break;
1330 1379
1331 case TX_SEND: 1380 case TX_SEND:
1332 /* we need exactly one can_frame behind the msg head */ 1381 /* we need exactly one CAN frame behind the msg head */
1333 if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ)) 1382 if ((msg_head.nframes != 1) || (size != cfsiz + MHSIZ))
1334 ret = -EINVAL; 1383 ret = -EINVAL;
1335 else 1384 else
1336 ret = bcm_tx_send(msg, ifindex, sk); 1385 ret = bcm_tx_send(msg, ifindex, sk, cfsiz);
1337 break; 1386 break;
1338 1387
1339 default: 1388 default:
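
The reordered bcm_sendmsg() above makes the CAN FD extension visible to userspace: the bcm_msg_head is read first, CFSIZ(msg_head.flags) picks 16- or 72-byte frame slots, and only then is the trailing payload length validated. A hedged, runnable userspace sketch of the new API (the CAN_FD_FRAME flag is introduced by this patch; "can0", the CAN ID and the 100 ms cycle are placeholders, error handling trimmed):

    #include <linux/can.h>
    #include <linux/can/bcm.h>
    #include <net/if.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            struct sockaddr_can addr = { .can_family = AF_CAN };
            struct {
                    struct bcm_msg_head head;
                    struct canfd_frame frame;   /* nframes == 1 */
            } msg;
            int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);

            addr.can_ifindex = if_nametoindex("can0");
            connect(s, (struct sockaddr *)&addr, sizeof(addr));

            memset(&msg, 0, sizeof(msg));
            msg.head.opcode = TX_SETUP;
            msg.head.flags = SETTIMER | STARTTIMER | CAN_FD_FRAME;
            msg.head.ival2.tv_usec = 100000;    /* 100 ms cycle time */
            msg.head.can_id = 0x123;
            msg.head.nframes = 1;
            msg.frame.can_id = 0x123;
            msg.frame.len = 64;                 /* CAN FD: up to 64 data bytes */

            write(s, &msg, sizeof(msg));        /* MHSIZ + CANFD_MTU bytes */
            pause();                            /* kernel keeps cycling the frame */

            return 0;
    }
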
diff --git a/net/can/proc.c b/net/can/proc.c
index 1a19b985a868..85ef7bb0f176 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -517,8 +517,7 @@ void can_init_proc(void)
517 can_dir = proc_mkdir("can", init_net.proc_net); 517 can_dir = proc_mkdir("can", init_net.proc_net);
518 518
519 if (!can_dir) { 519 if (!can_dir) {
520 printk(KERN_INFO "can: failed to create /proc/net/can . " 520 pr_info("can: failed to create /proc/net/can.\n");
521 "CONFIG_PROC_FS missing?\n");
522 return; 521 return;
523 } 522 }
524 523
diff --git a/net/core/dev.c b/net/core/dev.c
index 904ff431d570..aba10d2a8bc3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -139,6 +139,7 @@
139#include <linux/hrtimer.h> 139#include <linux/hrtimer.h>
140#include <linux/netfilter_ingress.h> 140#include <linux/netfilter_ingress.h>
141#include <linux/sctp.h> 141#include <linux/sctp.h>
142#include <linux/crash_dump.h>
142 143
143#include "net-sysfs.h" 144#include "net-sysfs.h"
144 145
@@ -2249,11 +2250,12 @@ EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2249 */ 2250 */
2250int netif_get_num_default_rss_queues(void) 2251int netif_get_num_default_rss_queues(void)
2251{ 2252{
2252 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus()); 2253 return is_kdump_kernel() ?
2254 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2253} 2255}
2254EXPORT_SYMBOL(netif_get_num_default_rss_queues); 2256EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2255 2257
2256static inline void __netif_reschedule(struct Qdisc *q) 2258static void __netif_reschedule(struct Qdisc *q)
2257{ 2259{
2258 struct softnet_data *sd; 2260 struct softnet_data *sd;
2259 unsigned long flags; 2261 unsigned long flags;
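
netif_get_num_default_rss_queues() above is the generic form of a kdump idiom: a capture kernel boots with minimal memory, so multi-queue defaults are collapsed to one. A hedged sketch of the same pattern for a driver-private default (foo_default_rings() is hypothetical):

    #include <linux/cpumask.h>
    #include <linux/crash_dump.h>
    #include <linux/kernel.h>

    static unsigned int foo_default_rings(void)
    {
            /* under kdump, one ring is enough to write out the crash dump */
            if (is_kdump_kernel())
                    return 1;

            return min_t(unsigned int, 8, num_online_cpus());
    }
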
@@ -2420,7 +2422,7 @@ EXPORT_SYMBOL(__skb_tx_hash);
2420 2422
2421static void skb_warn_bad_offload(const struct sk_buff *skb) 2423static void skb_warn_bad_offload(const struct sk_buff *skb)
2422{ 2424{
2423 static const netdev_features_t null_features = 0; 2425 static const netdev_features_t null_features;
2424 struct net_device *dev = skb->dev; 2426 struct net_device *dev = skb->dev;
2425 const char *name = ""; 2427 const char *name = "";
2426 2428
@@ -3068,6 +3070,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3068 struct netdev_queue *txq) 3070 struct netdev_queue *txq)
3069{ 3071{
3070 spinlock_t *root_lock = qdisc_lock(q); 3072 spinlock_t *root_lock = qdisc_lock(q);
3073 struct sk_buff *to_free = NULL;
3071 bool contended; 3074 bool contended;
3072 int rc; 3075 int rc;
3073 3076
@@ -3075,7 +3078,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3075 /* 3078 /*
3076 * Heuristic to force contended enqueues to serialize on a 3079 * Heuristic to force contended enqueues to serialize on a
3077 * separate lock before trying to get qdisc main lock. 3080 * separate lock before trying to get qdisc main lock.
3078 * This permits __QDISC___STATE_RUNNING owner to get the lock more 3081 * This permits qdisc->running owner to get the lock more
3079 * often and dequeue packets faster. 3082 * often and dequeue packets faster.
3080 */ 3083 */
3081 contended = qdisc_is_running(q); 3084 contended = qdisc_is_running(q);
@@ -3084,7 +3087,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3084 3087
3085 spin_lock(root_lock); 3088 spin_lock(root_lock);
3086 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 3089 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3087 kfree_skb(skb); 3090 __qdisc_drop(skb, &to_free);
3088 rc = NET_XMIT_DROP; 3091 rc = NET_XMIT_DROP;
3089 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 3092 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3090 qdisc_run_begin(q)) { 3093 qdisc_run_begin(q)) {
@@ -3107,7 +3110,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3107 3110
3108 rc = NET_XMIT_SUCCESS; 3111 rc = NET_XMIT_SUCCESS;
3109 } else { 3112 } else {
3110 rc = q->enqueue(skb, q) & NET_XMIT_MASK; 3113 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3111 if (qdisc_run_begin(q)) { 3114 if (qdisc_run_begin(q)) {
3112 if (unlikely(contended)) { 3115 if (unlikely(contended)) {
3113 spin_unlock(&q->busylock); 3116 spin_unlock(&q->busylock);
@@ -3117,6 +3120,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3117 } 3120 }
3118 } 3121 }
3119 spin_unlock(root_lock); 3122 spin_unlock(root_lock);
3123 if (unlikely(to_free))
3124 kfree_skb_list(to_free);
3120 if (unlikely(contended)) 3125 if (unlikely(contended))
3121 spin_unlock(&q->busylock); 3126 spin_unlock(&q->busylock);
3122 return rc; 3127 return rc;
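
The to_free list above shortens the qdisc root-lock hold time: packets dropped by ->enqueue() are chained onto a local list and freed only after the lock is released. A hedged sketch of the pattern in isolation (xmit_one_locked() is a hypothetical name):

    static int xmit_one_locked(struct sk_buff *skb, struct Qdisc *q,
                               spinlock_t *root_lock)
    {
            struct sk_buff *to_free = NULL;
            int rc;

            spin_lock(root_lock);
            rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
            spin_unlock(root_lock);

            /* the kfree_skb() work happens outside the hot lock, so CPUs
             * contending on root_lock are not serialized behind frees
             */
            if (unlikely(to_free))
                    kfree_skb_list(to_free);

            return rc;
    }
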
@@ -3142,8 +3147,6 @@ static void skb_update_prio(struct sk_buff *skb)
3142DEFINE_PER_CPU(int, xmit_recursion); 3147DEFINE_PER_CPU(int, xmit_recursion);
3143EXPORT_SYMBOL(xmit_recursion); 3148EXPORT_SYMBOL(xmit_recursion);
3144 3149
3145#define RECURSION_LIMIT 10
3146
3147/** 3150/**
3148 * dev_loopback_xmit - loop back @skb 3151 * dev_loopback_xmit - loop back @skb
3149 * @net: network namespace this loopback is happening in 3152 * @net: network namespace this loopback is happening in
@@ -3386,8 +3389,8 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
3386 int cpu = smp_processor_id(); /* ok because BHs are off */ 3389 int cpu = smp_processor_id(); /* ok because BHs are off */
3387 3390
3388 if (txq->xmit_lock_owner != cpu) { 3391 if (txq->xmit_lock_owner != cpu) {
3389 3392 if (unlikely(__this_cpu_read(xmit_recursion) >
3390 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) 3393 XMIT_RECURSION_LIMIT))
3391 goto recursion_alert; 3394 goto recursion_alert;
3392 3395
3393 skb = validate_xmit_skb(skb, dev); 3396 skb = validate_xmit_skb(skb, dev);
@@ -3898,22 +3901,14 @@ static void net_tx_action(struct softirq_action *h)
3898 head = head->next_sched; 3901 head = head->next_sched;
3899 3902
3900 root_lock = qdisc_lock(q); 3903 root_lock = qdisc_lock(q);
3901 if (spin_trylock(root_lock)) { 3904 spin_lock(root_lock);
3902 smp_mb__before_atomic(); 3905 /* We need to make sure head->next_sched is read
3903 clear_bit(__QDISC_STATE_SCHED, 3906 * before clearing __QDISC_STATE_SCHED
3904 &q->state); 3907 */
3905 qdisc_run(q); 3908 smp_mb__before_atomic();
3906 spin_unlock(root_lock); 3909 clear_bit(__QDISC_STATE_SCHED, &q->state);
3907 } else { 3910 qdisc_run(q);
3908 if (!test_bit(__QDISC_STATE_DEACTIVATED, 3911 spin_unlock(root_lock);
3909 &q->state)) {
3910 __netif_reschedule(q);
3911 } else {
3912 smp_mb__before_atomic();
3913 clear_bit(__QDISC_STATE_SCHED,
3914 &q->state);
3915 }
3916 }
3917 } 3912 }
3918 } 3913 }
3919} 3914}
@@ -5919,7 +5914,7 @@ static void netdev_adjacent_add_links(struct net_device *dev)
5919 struct net *net = dev_net(dev); 5914 struct net *net = dev_net(dev);
5920 5915
5921 list_for_each_entry(iter, &dev->adj_list.upper, list) { 5916 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5922 if (!net_eq(net,dev_net(iter->dev))) 5917 if (!net_eq(net, dev_net(iter->dev)))
5923 continue; 5918 continue;
5924 netdev_adjacent_sysfs_add(iter->dev, dev, 5919 netdev_adjacent_sysfs_add(iter->dev, dev,
5925 &iter->dev->adj_list.lower); 5920 &iter->dev->adj_list.lower);
@@ -5928,7 +5923,7 @@ static void netdev_adjacent_add_links(struct net_device *dev)
5928 } 5923 }
5929 5924
5930 list_for_each_entry(iter, &dev->adj_list.lower, list) { 5925 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5931 if (!net_eq(net,dev_net(iter->dev))) 5926 if (!net_eq(net, dev_net(iter->dev)))
5932 continue; 5927 continue;
5933 netdev_adjacent_sysfs_add(iter->dev, dev, 5928 netdev_adjacent_sysfs_add(iter->dev, dev,
5934 &iter->dev->adj_list.upper); 5929 &iter->dev->adj_list.upper);
@@ -5944,7 +5939,7 @@ static void netdev_adjacent_del_links(struct net_device *dev)
5944 struct net *net = dev_net(dev); 5939 struct net *net = dev_net(dev);
5945 5940
5946 list_for_each_entry(iter, &dev->adj_list.upper, list) { 5941 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5947 if (!net_eq(net,dev_net(iter->dev))) 5942 if (!net_eq(net, dev_net(iter->dev)))
5948 continue; 5943 continue;
5949 netdev_adjacent_sysfs_del(iter->dev, dev->name, 5944 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5950 &iter->dev->adj_list.lower); 5945 &iter->dev->adj_list.lower);
@@ -5953,7 +5948,7 @@ static void netdev_adjacent_del_links(struct net_device *dev)
5953 } 5948 }
5954 5949
5955 list_for_each_entry(iter, &dev->adj_list.lower, list) { 5950 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5956 if (!net_eq(net,dev_net(iter->dev))) 5951 if (!net_eq(net, dev_net(iter->dev)))
5957 continue; 5952 continue;
5958 netdev_adjacent_sysfs_del(iter->dev, dev->name, 5953 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5959 &iter->dev->adj_list.upper); 5954 &iter->dev->adj_list.upper);
@@ -5969,7 +5964,7 @@ void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
5969 struct net *net = dev_net(dev); 5964 struct net *net = dev_net(dev);
5970 5965
5971 list_for_each_entry(iter, &dev->adj_list.upper, list) { 5966 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5972 if (!net_eq(net,dev_net(iter->dev))) 5967 if (!net_eq(net, dev_net(iter->dev)))
5973 continue; 5968 continue;
5974 netdev_adjacent_sysfs_del(iter->dev, oldname, 5969 netdev_adjacent_sysfs_del(iter->dev, oldname,
5975 &iter->dev->adj_list.lower); 5970 &iter->dev->adj_list.lower);
@@ -5978,7 +5973,7 @@ void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
5978 } 5973 }
5979 5974
5980 list_for_each_entry(iter, &dev->adj_list.lower, list) { 5975 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5981 if (!net_eq(net,dev_net(iter->dev))) 5976 if (!net_eq(net, dev_net(iter->dev)))
5982 continue; 5977 continue;
5983 netdev_adjacent_sysfs_del(iter->dev, oldname, 5978 netdev_adjacent_sysfs_del(iter->dev, oldname,
5984 &iter->dev->adj_list.upper); 5979 &iter->dev->adj_list.upper);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index f4034817d255..977489820eb9 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -89,6 +89,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
89 [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation", 89 [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation",
90 [NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation", 90 [NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation",
91 [NETIF_F_GSO_PARTIAL_BIT] = "tx-gso-partial", 91 [NETIF_F_GSO_PARTIAL_BIT] = "tx-gso-partial",
92 [NETIF_F_GSO_SCTP_BIT] = "tx-sctp-segmentation",
92 93
93 [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", 94 [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc",
94 [NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp", 95 [NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp",
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 840acebbb80c..98298b11f534 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -173,7 +173,8 @@ void fib_rules_unregister(struct fib_rules_ops *ops)
173EXPORT_SYMBOL_GPL(fib_rules_unregister); 173EXPORT_SYMBOL_GPL(fib_rules_unregister);
174 174
175static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops, 175static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
176 struct flowi *fl, int flags) 176 struct flowi *fl, int flags,
177 struct fib_lookup_arg *arg)
177{ 178{
178 int ret = 0; 179 int ret = 0;
179 180
@@ -189,6 +190,9 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
189 if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id)) 190 if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
190 goto out; 191 goto out;
191 192
193 if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg))
194 goto out;
195
192 ret = ops->match(rule, fl, flags); 196 ret = ops->match(rule, fl, flags);
193out: 197out:
194 return (rule->flags & FIB_RULE_INVERT) ? !ret : ret; 198 return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
@@ -204,7 +208,7 @@ int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
204 208
205 list_for_each_entry_rcu(rule, &ops->rules_list, list) { 209 list_for_each_entry_rcu(rule, &ops->rules_list, list) {
206jumped: 210jumped:
207 if (!fib_rule_match(rule, ops, fl, flags)) 211 if (!fib_rule_match(rule, ops, fl, flags, arg))
208 continue; 212 continue;
209 213
210 if (rule->action == FR_ACT_GOTO) { 214 if (rule->action == FR_ACT_GOTO) {
@@ -265,7 +269,7 @@ errout:
265 return err; 269 return err;
266} 270}
267 271
268static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh) 272int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh)
269{ 273{
270 struct net *net = sock_net(skb->sk); 274 struct net *net = sock_net(skb->sk);
271 struct fib_rule_hdr *frh = nlmsg_data(nlh); 275 struct fib_rule_hdr *frh = nlmsg_data(nlh);
@@ -336,6 +340,14 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
336 if (tb[FRA_TUN_ID]) 340 if (tb[FRA_TUN_ID])
337 rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]); 341 rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);
338 342
343 if (tb[FRA_L3MDEV]) {
344#ifdef CONFIG_NET_L3_MASTER_DEV
345 rule->l3mdev = nla_get_u8(tb[FRA_L3MDEV]);
346 if (rule->l3mdev != 1)
347#endif
348 goto errout_free;
349 }
350
339 rule->action = frh->action; 351 rule->action = frh->action;
340 rule->flags = frh->flags; 352 rule->flags = frh->flags;
341 rule->table = frh_get_table(frh, tb); 353 rule->table = frh_get_table(frh, tb);
@@ -371,6 +383,9 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
371 } else if (rule->action == FR_ACT_GOTO) 383 } else if (rule->action == FR_ACT_GOTO)
372 goto errout_free; 384 goto errout_free;
373 385
386 if (rule->l3mdev && rule->table)
387 goto errout_free;
388
374 err = ops->configure(rule, skb, frh, tb); 389 err = ops->configure(rule, skb, frh, tb);
375 if (err < 0) 390 if (err < 0)
376 goto errout_free; 391 goto errout_free;
@@ -424,8 +439,9 @@ errout:
424 rules_ops_put(ops); 439 rules_ops_put(ops);
425 return err; 440 return err;
426} 441}
442EXPORT_SYMBOL_GPL(fib_nl_newrule);
427 443
428static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh) 444int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh)
429{ 445{
430 struct net *net = sock_net(skb->sk); 446 struct net *net = sock_net(skb->sk);
431 struct fib_rule_hdr *frh = nlmsg_data(nlh); 447 struct fib_rule_hdr *frh = nlmsg_data(nlh);
@@ -483,6 +499,10 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
483 (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID]))) 499 (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
484 continue; 500 continue;
485 501
502 if (tb[FRA_L3MDEV] &&
503 (rule->l3mdev != nla_get_u8(tb[FRA_L3MDEV])))
504 continue;
505
486 if (!ops->compare(rule, frh, tb)) 506 if (!ops->compare(rule, frh, tb))
487 continue; 507 continue;
488 508
@@ -536,6 +556,7 @@ errout:
536 rules_ops_put(ops); 556 rules_ops_put(ops);
537 return err; 557 return err;
538} 558}
559EXPORT_SYMBOL_GPL(fib_nl_delrule);
539 560
540static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops, 561static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
541 struct fib_rule *rule) 562 struct fib_rule *rule)
@@ -607,7 +628,9 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
607 (rule->target && 628 (rule->target &&
608 nla_put_u32(skb, FRA_GOTO, rule->target)) || 629 nla_put_u32(skb, FRA_GOTO, rule->target)) ||
609 (rule->tun_id && 630 (rule->tun_id &&
610 nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD))) 631 nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) ||
632 (rule->l3mdev &&
633 nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)))
611 goto nla_put_failure; 634 goto nla_put_failure;
612 635
613 if (rule->suppress_ifgroup != -1) { 636 if (rule->suppress_ifgroup != -1) {
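
The FRA_L3MDEV handling above shows a common netlink validation shape: parse the attribute unconditionally, constrain it to the single currently defined value, and fall through to the error path when the backing config option is absent. A hedged sketch of just that step (foo_parse_l3mdev() is hypothetical):

    static int foo_parse_l3mdev(struct fib_rule *rule, const struct nlattr *attr)
    {
            if (!attr)
                    return 0;

    #ifdef CONFIG_NET_L3_MASTER_DEV
            rule->l3mdev = nla_get_u8(attr);
            if (rule->l3mdev == 1)      /* only value with defined semantics */
                    return 0;
    #endif
            /* unsupported value, or l3mdev support not built in */
            return -EINVAL;
    }
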
diff --git a/net/core/filter.c b/net/core/filter.c
index c4b330c85c02..cb9fc16cac46 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -748,6 +748,17 @@ static bool chk_code_allowed(u16 code_to_probe)
748 return codes[code_to_probe]; 748 return codes[code_to_probe];
749} 749}
750 750
751static bool bpf_check_basics_ok(const struct sock_filter *filter,
752 unsigned int flen)
753{
754 if (filter == NULL)
755 return false;
756 if (flen == 0 || flen > BPF_MAXINSNS)
757 return false;
758
759 return true;
760}
761
751/** 762/**
752 * bpf_check_classic - verify socket filter code 763 * bpf_check_classic - verify socket filter code
753 * @filter: filter to verify 764 * @filter: filter to verify
@@ -768,9 +779,6 @@ static int bpf_check_classic(const struct sock_filter *filter,
768 bool anc_found; 779 bool anc_found;
769 int pc; 780 int pc;
770 781
771 if (flen == 0 || flen > BPF_MAXINSNS)
772 return -EINVAL;
773
774 /* Check the filter code now */ 782 /* Check the filter code now */
775 for (pc = 0; pc < flen; pc++) { 783 for (pc = 0; pc < flen; pc++) {
776 const struct sock_filter *ftest = &filter[pc]; 784 const struct sock_filter *ftest = &filter[pc];
@@ -1065,7 +1073,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
1065 struct bpf_prog *fp; 1073 struct bpf_prog *fp;
1066 1074
1067 /* Make sure new filter is there and in the right amounts. */ 1075 /* Make sure new filter is there and in the right amounts. */
1068 if (fprog->filter == NULL) 1076 if (!bpf_check_basics_ok(fprog->filter, fprog->len))
1069 return -EINVAL; 1077 return -EINVAL;
1070 1078
1071 fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); 1079 fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
@@ -1112,7 +1120,7 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
1112 int err; 1120 int err;
1113 1121
1114 /* Make sure new filter is there and in the right amounts. */ 1122 /* Make sure new filter is there and in the right amounts. */
1115 if (fprog->filter == NULL) 1123 if (!bpf_check_basics_ok(fprog->filter, fprog->len))
1116 return -EINVAL; 1124 return -EINVAL;
1117 1125
1118 fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); 1126 fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
@@ -1207,7 +1215,6 @@ static
1207struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk) 1215struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
1208{ 1216{
1209 unsigned int fsize = bpf_classic_proglen(fprog); 1217 unsigned int fsize = bpf_classic_proglen(fprog);
1210 unsigned int bpf_fsize = bpf_prog_size(fprog->len);
1211 struct bpf_prog *prog; 1218 struct bpf_prog *prog;
1212 int err; 1219 int err;
1213 1220
@@ -1215,10 +1222,10 @@ struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
1215 return ERR_PTR(-EPERM); 1222 return ERR_PTR(-EPERM);
1216 1223
1217 /* Make sure new filter is there and in the right amounts. */ 1224 /* Make sure new filter is there and in the right amounts. */
1218 if (fprog->filter == NULL) 1225 if (!bpf_check_basics_ok(fprog->filter, fprog->len))
1219 return ERR_PTR(-EINVAL); 1226 return ERR_PTR(-EINVAL);
1220 1227
1221 prog = bpf_prog_alloc(bpf_fsize, 0); 1228 prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
1222 if (!prog) 1229 if (!prog)
1223 return ERR_PTR(-ENOMEM); 1230 return ERR_PTR(-ENOMEM);
1224 1231
@@ -1603,9 +1610,36 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
1603 .arg5_type = ARG_ANYTHING, 1610 .arg5_type = ARG_ANYTHING,
1604}; 1611};
1605 1612
1613static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
1614{
1615 if (skb_at_tc_ingress(skb))
1616 skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
1617
1618 return dev_forward_skb(dev, skb);
1619}
1620
1621static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
1622{
1623 int ret;
1624
1625 if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
1626 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
1627 kfree_skb(skb);
1628 return -ENETDOWN;
1629 }
1630
1631 skb->dev = dev;
1632
1633 __this_cpu_inc(xmit_recursion);
1634 ret = dev_queue_xmit(skb);
1635 __this_cpu_dec(xmit_recursion);
1636
1637 return ret;
1638}
1639
1606static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5) 1640static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
1607{ 1641{
1608 struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2; 1642 struct sk_buff *skb = (struct sk_buff *) (long) r1;
1609 struct net_device *dev; 1643 struct net_device *dev;
1610 1644
1611 if (unlikely(flags & ~(BPF_F_INGRESS))) 1645 if (unlikely(flags & ~(BPF_F_INGRESS)))
@@ -1615,19 +1649,12 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
1615 if (unlikely(!dev)) 1649 if (unlikely(!dev))
1616 return -EINVAL; 1650 return -EINVAL;
1617 1651
1618 skb2 = skb_clone(skb, GFP_ATOMIC); 1652 skb = skb_clone(skb, GFP_ATOMIC);
1619 if (unlikely(!skb2)) 1653 if (unlikely(!skb))
1620 return -ENOMEM; 1654 return -ENOMEM;
1621 1655
1622 if (flags & BPF_F_INGRESS) { 1656 return flags & BPF_F_INGRESS ?
1623 if (skb_at_tc_ingress(skb2)) 1657 __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
1624 skb_postpush_rcsum(skb2, skb_mac_header(skb2),
1625 skb2->mac_len);
1626 return dev_forward_skb(dev, skb2);
1627 }
1628
1629 skb2->dev = dev;
1630 return dev_queue_xmit(skb2);
1631} 1658}
1632 1659
1633static const struct bpf_func_proto bpf_clone_redirect_proto = { 1660static const struct bpf_func_proto bpf_clone_redirect_proto = {
@@ -1671,15 +1698,8 @@ int skb_do_redirect(struct sk_buff *skb)
1671 return -EINVAL; 1698 return -EINVAL;
1672 } 1699 }
1673 1700
1674 if (ri->flags & BPF_F_INGRESS) { 1701 return ri->flags & BPF_F_INGRESS ?
1675 if (skb_at_tc_ingress(skb)) 1702 __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
1676 skb_postpush_rcsum(skb, skb_mac_header(skb),
1677 skb->mac_len);
1678 return dev_forward_skb(dev, skb);
1679 }
1680
1681 skb->dev = dev;
1682 return dev_queue_xmit(skb);
1683} 1703}
1684 1704
1685static const struct bpf_func_proto bpf_redirect_proto = { 1705static const struct bpf_func_proto bpf_redirect_proto = {
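
Note: the new __bpf_rx_skb()/__bpf_tx_skb() helpers factor the common redirect path out of bpf_clone_redirect() and skb_do_redirect(), and the tx side adds a per-CPU recursion counter so a program that redirects to a device whose egress path runs another redirecting program cannot recurse without bound. A minimal standalone model of that guard, assuming the kernel's XMIT_RECURSION_LIMIT of 8 and using C11 thread-local storage in place of the per-CPU counter:

    #include <stdio.h>

    #define XMIT_RECURSION_LIMIT 8           /* same bound as the kernel */

    static _Thread_local int xmit_recursion; /* stands in for the per-CPU var */

    static int queue_xmit(int depth);

    /* Mirrors __bpf_tx_skb(): refuse to transmit once the nesting depth
     * exceeds the limit, otherwise bump the counter around the xmit call.
     */
    static int bpf_tx(int depth)
    {
        if (xmit_recursion > XMIT_RECURSION_LIMIT) {
            fprintf(stderr, "recursion limit reached, dropping\n");
            return -1;                       /* kernel returns -ENETDOWN */
        }
        xmit_recursion++;
        queue_xmit(depth);
        xmit_recursion--;
        return 0;
    }

    /* A pathological egress path that always redirects back to itself. */
    static int queue_xmit(int depth)
    {
        printf("xmit at depth %d\n", depth);
        return bpf_tx(depth + 1);
    }

    int main(void)
    {
        bpf_tx(0);                           /* stops after ~8 levels */
        return 0;
    }
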
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 4573d81093fe..cad8e791f28e 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -84,6 +84,7 @@ struct gen_estimator
84 struct gnet_stats_basic_packed *bstats; 84 struct gnet_stats_basic_packed *bstats;
85 struct gnet_stats_rate_est64 *rate_est; 85 struct gnet_stats_rate_est64 *rate_est;
86 spinlock_t *stats_lock; 86 spinlock_t *stats_lock;
87 seqcount_t *running;
87 int ewma_log; 88 int ewma_log;
88 u32 last_packets; 89 u32 last_packets;
89 unsigned long avpps; 90 unsigned long avpps;
@@ -121,26 +122,28 @@ static void est_timer(unsigned long arg)
121 unsigned long rate; 122 unsigned long rate;
122 u64 brate; 123 u64 brate;
123 124
124 spin_lock(e->stats_lock); 125 if (e->stats_lock)
126 spin_lock(e->stats_lock);
125 read_lock(&est_lock); 127 read_lock(&est_lock);
126 if (e->bstats == NULL) 128 if (e->bstats == NULL)
127 goto skip; 129 goto skip;
128 130
129 __gnet_stats_copy_basic(&b, e->cpu_bstats, e->bstats); 131 __gnet_stats_copy_basic(e->running, &b, e->cpu_bstats, e->bstats);
130 132
131 brate = (b.bytes - e->last_bytes)<<(7 - idx); 133 brate = (b.bytes - e->last_bytes)<<(7 - idx);
132 e->last_bytes = b.bytes; 134 e->last_bytes = b.bytes;
133 e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log); 135 e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
134 e->rate_est->bps = (e->avbps+0xF)>>5; 136 WRITE_ONCE(e->rate_est->bps, (e->avbps + 0xF) >> 5);
135 137
136 rate = b.packets - e->last_packets; 138 rate = b.packets - e->last_packets;
137 rate <<= (7 - idx); 139 rate <<= (7 - idx);
138 e->last_packets = b.packets; 140 e->last_packets = b.packets;
139 e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log); 141 e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
140 e->rate_est->pps = (e->avpps + 0xF) >> 5; 142 WRITE_ONCE(e->rate_est->pps, (e->avpps + 0xF) >> 5);
141skip: 143skip:
142 read_unlock(&est_lock); 144 read_unlock(&est_lock);
143 spin_unlock(e->stats_lock); 145 if (e->stats_lock)
146 spin_unlock(e->stats_lock);
144 } 147 }
145 148
146 if (!list_empty(&elist[idx].list)) 149 if (!list_empty(&elist[idx].list))
@@ -194,6 +197,7 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
194 * @cpu_bstats: bstats per cpu 197 * @cpu_bstats: bstats per cpu
195 * @rate_est: rate estimator statistics 198 * @rate_est: rate estimator statistics
196 * @stats_lock: statistics lock 199 * @stats_lock: statistics lock
200 * @running: qdisc running seqcount
197 * @opt: rate estimator configuration TLV 201 * @opt: rate estimator configuration TLV
198 * 202 *
199 * Creates a new rate estimator with &bstats as source and &rate_est 203 * Creates a new rate estimator with &bstats as source and &rate_est
@@ -209,6 +213,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
209 struct gnet_stats_basic_cpu __percpu *cpu_bstats, 213 struct gnet_stats_basic_cpu __percpu *cpu_bstats,
210 struct gnet_stats_rate_est64 *rate_est, 214 struct gnet_stats_rate_est64 *rate_est,
211 spinlock_t *stats_lock, 215 spinlock_t *stats_lock,
216 seqcount_t *running,
212 struct nlattr *opt) 217 struct nlattr *opt)
213{ 218{
214 struct gen_estimator *est; 219 struct gen_estimator *est;
@@ -226,12 +231,13 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
226 if (est == NULL) 231 if (est == NULL)
227 return -ENOBUFS; 232 return -ENOBUFS;
228 233
229 __gnet_stats_copy_basic(&b, cpu_bstats, bstats); 234 __gnet_stats_copy_basic(running, &b, cpu_bstats, bstats);
230 235
231 idx = parm->interval + 2; 236 idx = parm->interval + 2;
232 est->bstats = bstats; 237 est->bstats = bstats;
233 est->rate_est = rate_est; 238 est->rate_est = rate_est;
234 est->stats_lock = stats_lock; 239 est->stats_lock = stats_lock;
240 est->running = running;
235 est->ewma_log = parm->ewma_log; 241 est->ewma_log = parm->ewma_log;
236 est->last_bytes = b.bytes; 242 est->last_bytes = b.bytes;
237 est->avbps = rate_est->bps<<5; 243 est->avbps = rate_est->bps<<5;
@@ -291,6 +297,7 @@ EXPORT_SYMBOL(gen_kill_estimator);
291 * @cpu_bstats: bstats per cpu 297 * @cpu_bstats: bstats per cpu
292 * @rate_est: rate estimator statistics 298 * @rate_est: rate estimator statistics
293 * @stats_lock: statistics lock 299 * @stats_lock: statistics lock
300 * @running: qdisc running seqcount (might be NULL)
294 * @opt: rate estimator configuration TLV 301 * @opt: rate estimator configuration TLV
295 * 302 *
296 * Replaces the configuration of a rate estimator by calling 303 * Replaces the configuration of a rate estimator by calling
@@ -301,10 +308,11 @@ EXPORT_SYMBOL(gen_kill_estimator);
301int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, 308int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
302 struct gnet_stats_basic_cpu __percpu *cpu_bstats, 309 struct gnet_stats_basic_cpu __percpu *cpu_bstats,
303 struct gnet_stats_rate_est64 *rate_est, 310 struct gnet_stats_rate_est64 *rate_est,
304 spinlock_t *stats_lock, struct nlattr *opt) 311 spinlock_t *stats_lock,
312 seqcount_t *running, struct nlattr *opt)
305{ 313{
306 gen_kill_estimator(bstats, rate_est); 314 gen_kill_estimator(bstats, rate_est);
307 return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, opt); 315 return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, running, opt);
308} 316}
309EXPORT_SYMBOL(gen_replace_estimator); 317EXPORT_SYMBOL(gen_replace_estimator);
310 318
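
Note: with a NULL stats_lock now allowed, est_timer() can race with lockless readers of the estimate, so the bps/pps stores are wrapped in WRITE_ONCE() to rule out store tearing or fusing by the compiler. A hedged standalone model of that publish/consume pairing, using C11 relaxed atomics (which give the same single-copy atomicity the kernel macros provide); the struct and field names are illustrative:

    #include <stdatomic.h>
    #include <stdint.h>

    struct rate_est_model { _Atomic uint64_t bps; };

    /* Writer side, as in est_timer():
     * WRITE_ONCE(e->rate_est->bps, (e->avbps + 0xF) >> 5);
     */
    static void est_publish(struct rate_est_model *e, uint64_t avbps)
    {
        atomic_store_explicit(&e->bps, (avbps + 0xF) >> 5,
                              memory_order_relaxed);
    }

    /* Reader side: a single untorn load, the READ_ONCE() counterpart. */
    static uint64_t est_read(struct rate_est_model *e)
    {
        return atomic_load_explicit(&e->bps, memory_order_relaxed);
    }
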
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index be873e4e3125..508e051304fb 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -32,10 +32,11 @@ gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
32 return 0; 32 return 0;
33 33
34nla_put_failure: 34nla_put_failure:
35 if (d->lock)
36 spin_unlock_bh(d->lock);
35 kfree(d->xstats); 37 kfree(d->xstats);
36 d->xstats = NULL; 38 d->xstats = NULL;
37 d->xstats_len = 0; 39 d->xstats_len = 0;
38 spin_unlock_bh(d->lock);
39 return -1; 40 return -1;
40} 41}
41 42
@@ -66,15 +67,16 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
66{ 67{
67 memset(d, 0, sizeof(*d)); 68 memset(d, 0, sizeof(*d));
68 69
69 spin_lock_bh(lock);
70 d->lock = lock;
71 if (type) 70 if (type)
72 d->tail = (struct nlattr *)skb_tail_pointer(skb); 71 d->tail = (struct nlattr *)skb_tail_pointer(skb);
73 d->skb = skb; 72 d->skb = skb;
74 d->compat_tc_stats = tc_stats_type; 73 d->compat_tc_stats = tc_stats_type;
75 d->compat_xstats = xstats_type; 74 d->compat_xstats = xstats_type;
76 d->padattr = padattr; 75 d->padattr = padattr;
77 76 if (lock) {
77 d->lock = lock;
78 spin_lock_bh(lock);
79 }
78 if (d->tail) 80 if (d->tail)
79 return gnet_stats_copy(d, type, NULL, 0, padattr); 81 return gnet_stats_copy(d, type, NULL, 0, padattr);
80 82
@@ -128,21 +130,29 @@ __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
128} 130}
129 131
130void 132void
131__gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats, 133__gnet_stats_copy_basic(const seqcount_t *running,
134 struct gnet_stats_basic_packed *bstats,
132 struct gnet_stats_basic_cpu __percpu *cpu, 135 struct gnet_stats_basic_cpu __percpu *cpu,
133 struct gnet_stats_basic_packed *b) 136 struct gnet_stats_basic_packed *b)
134{ 137{
138 unsigned int seq;
139
135 if (cpu) { 140 if (cpu) {
136 __gnet_stats_copy_basic_cpu(bstats, cpu); 141 __gnet_stats_copy_basic_cpu(bstats, cpu);
137 } else { 142 return;
143 }
144 do {
145 if (running)
146 seq = read_seqcount_begin(running);
138 bstats->bytes = b->bytes; 147 bstats->bytes = b->bytes;
139 bstats->packets = b->packets; 148 bstats->packets = b->packets;
140 } 149 } while (running && read_seqcount_retry(running, seq));
141} 150}
142EXPORT_SYMBOL(__gnet_stats_copy_basic); 151EXPORT_SYMBOL(__gnet_stats_copy_basic);
143 152
144/** 153/**
145 * gnet_stats_copy_basic - copy basic statistics into statistic TLV 154 * gnet_stats_copy_basic - copy basic statistics into statistic TLV
155 * @running: seqcount_t pointer
146 * @d: dumping handle 156 * @d: dumping handle
147 * @cpu: copy statistic per cpu 157 * @cpu: copy statistic per cpu
148 * @b: basic statistics 158 * @b: basic statistics
@@ -154,13 +164,14 @@ EXPORT_SYMBOL(__gnet_stats_copy_basic);
154 * if the room in the socket buffer was not sufficient. 164 * if the room in the socket buffer was not sufficient.
155 */ 165 */
156int 166int
157gnet_stats_copy_basic(struct gnet_dump *d, 167gnet_stats_copy_basic(const seqcount_t *running,
168 struct gnet_dump *d,
158 struct gnet_stats_basic_cpu __percpu *cpu, 169 struct gnet_stats_basic_cpu __percpu *cpu,
159 struct gnet_stats_basic_packed *b) 170 struct gnet_stats_basic_packed *b)
160{ 171{
161 struct gnet_stats_basic_packed bstats = {0}; 172 struct gnet_stats_basic_packed bstats = {0};
162 173
163 __gnet_stats_copy_basic(&bstats, cpu, b); 174 __gnet_stats_copy_basic(running, &bstats, cpu, b);
164 175
165 if (d->compat_tc_stats) { 176 if (d->compat_tc_stats) {
166 d->tc_stats.bytes = bstats.bytes; 177 d->tc_stats.bytes = bstats.bytes;
@@ -330,8 +341,9 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
330 return 0; 341 return 0;
331 342
332err_out: 343err_out:
344 if (d->lock)
345 spin_unlock_bh(d->lock);
333 d->xstats_len = 0; 346 d->xstats_len = 0;
334 spin_unlock_bh(d->lock);
335 return -1; 347 return -1;
336} 348}
337EXPORT_SYMBOL(gnet_stats_copy_app); 349EXPORT_SYMBOL(gnet_stats_copy_app);
@@ -365,10 +377,11 @@ gnet_stats_finish_copy(struct gnet_dump *d)
365 return -1; 377 return -1;
366 } 378 }
367 379
380 if (d->lock)
381 spin_unlock_bh(d->lock);
368 kfree(d->xstats); 382 kfree(d->xstats);
369 d->xstats = NULL; 383 d->xstats = NULL;
370 d->xstats_len = 0; 384 d->xstats_len = 0;
371 spin_unlock_bh(d->lock);
372 return 0; 385 return 0;
373} 386}
374EXPORT_SYMBOL(gnet_stats_finish_copy); 387EXPORT_SYMBOL(gnet_stats_finish_copy);
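
Note: __gnet_stats_copy_basic() now brackets the bytes/packets snapshot with read_seqcount_begin()/read_seqcount_retry() so that, when a running seqcount is supplied, a reader retries instead of observing a half-updated pair. A simplified standalone model of that retry loop (the kernel primitives also insert the memory barriers this sketch glosses over):

    #include <stdatomic.h>
    #include <stdint.h>

    struct basic_stats { uint64_t bytes, packets; };

    static _Atomic unsigned int running_seq;  /* plays the seqcount_t role */
    static struct basic_stats live;

    /* Writer: the counter is odd while an update is in flight, even when
     * it is done.
     */
    static void stats_update(uint64_t bytes, uint64_t packets)
    {
        atomic_fetch_add_explicit(&running_seq, 1, memory_order_acquire);
        live.bytes += bytes;
        live.packets += packets;
        atomic_fetch_add_explicit(&running_seq, 1, memory_order_release);
    }

    /* Reader: retry while the writer is mid-update or the count moved,
     * mirroring the do/while added to __gnet_stats_copy_basic().
     */
    static struct basic_stats stats_snapshot(void)
    {
        struct basic_stats snap;
        unsigned int seq;

        do {
            seq = atomic_load_explicit(&running_seq, memory_order_acquire);
            snap = live;
        } while ((seq & 1) ||
                 seq != atomic_load_explicit(&running_seq,
                                             memory_order_acquire));
        return snap;
    }
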
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 8b02df0d354d..f74ab9c3b38f 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3463,7 +3463,6 @@ xmit_more:
3463 break; 3463 break;
3464 case NET_XMIT_DROP: 3464 case NET_XMIT_DROP:
3465 case NET_XMIT_CN: 3465 case NET_XMIT_CN:
3466 case NET_XMIT_POLICED:
3467 /* skb has been consumed */ 3466 /* skb has been consumed */
3468 pkt_dev->errors++; 3467 pkt_dev->errors++;
3469 break; 3468 break;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d69c4644f8f2..eb49ca24274a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -71,9 +71,31 @@ void rtnl_lock(void)
71} 71}
72EXPORT_SYMBOL(rtnl_lock); 72EXPORT_SYMBOL(rtnl_lock);
73 73
74static struct sk_buff *defer_kfree_skb_list;
75void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
76{
77 if (head && tail) {
78 tail->next = defer_kfree_skb_list;
79 defer_kfree_skb_list = head;
80 }
81}
82EXPORT_SYMBOL(rtnl_kfree_skbs);
83
74void __rtnl_unlock(void) 84void __rtnl_unlock(void)
75{ 85{
86 struct sk_buff *head = defer_kfree_skb_list;
87
88 defer_kfree_skb_list = NULL;
89
76 mutex_unlock(&rtnl_mutex); 90 mutex_unlock(&rtnl_mutex);
91
92 while (head) {
93 struct sk_buff *next = head->next;
94
95 kfree_skb(head);
96 cond_resched();
97 head = next;
98 }
77} 99}
78 100
79void rtnl_unlock(void) 101void rtnl_unlock(void)
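
Note: rtnl_kfree_skbs() gives RTNL holders (large qdisc resets in particular) a way to defer skb frees until __rtnl_unlock(), where the backlog is released with cond_resched() between frees instead of stalling every task waiting on the mutex. A hedged usage sketch; my_purge_under_rtnl() is illustrative, not a kernel function, and must be called with the RTNL held:

    #include <linux/skbuff.h>
    #include <linux/rtnetlink.h>

    /* Drain a queue under RTNL, chaining the skbs so head and tail can
     * be handed to rtnl_kfree_skbs() in one call.
     */
    static void my_purge_under_rtnl(struct sk_buff_head *q)
    {
        struct sk_buff *head = NULL, *tail = NULL, *skb;

        while ((skb = __skb_dequeue(q)) != NULL) {
            skb->next = head;       /* prepend onto the chain */
            if (!tail)
                tail = skb;         /* first dequeued skb ends the chain */
            head = skb;
        }

        if (head)
            rtnl_kfree_skbs(head, tail);  /* freed after the unlock */
    }
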
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f2b77e549c03..e7ec6d3ad5f0 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -49,6 +49,7 @@
49#include <linux/slab.h> 49#include <linux/slab.h>
50#include <linux/tcp.h> 50#include <linux/tcp.h>
51#include <linux/udp.h> 51#include <linux/udp.h>
52#include <linux/sctp.h>
52#include <linux/netdevice.h> 53#include <linux/netdevice.h>
53#ifdef CONFIG_NET_CLS_ACT 54#ifdef CONFIG_NET_CLS_ACT
54#include <net/pkt_sched.h> 55#include <net/pkt_sched.h>
@@ -3116,9 +3117,13 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
3116 int hsize; 3117 int hsize;
3117 int size; 3118 int size;
3118 3119
3119 len = head_skb->len - offset; 3120 if (unlikely(mss == GSO_BY_FRAGS)) {
3120 if (len > mss) 3121 len = list_skb->len;
3121 len = mss; 3122 } else {
3123 len = head_skb->len - offset;
3124 if (len > mss)
3125 len = mss;
3126 }
3122 3127
3123 hsize = skb_headlen(head_skb) - offset; 3128 hsize = skb_headlen(head_skb) - offset;
3124 if (hsize < 0) 3129 if (hsize < 0)
@@ -3438,6 +3443,7 @@ done:
3438 NAPI_GRO_CB(skb)->same_flow = 1; 3443 NAPI_GRO_CB(skb)->same_flow = 1;
3439 return 0; 3444 return 0;
3440} 3445}
3446EXPORT_SYMBOL_GPL(skb_gro_receive);
3441 3447
3442void __init skb_init(void) 3448void __init skb_init(void)
3443{ 3449{
@@ -4378,6 +4384,8 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4378 thlen += inner_tcp_hdrlen(skb); 4384 thlen += inner_tcp_hdrlen(skb);
4379 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 4385 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
4380 thlen = tcp_hdrlen(skb); 4386 thlen = tcp_hdrlen(skb);
4387 } else if (unlikely(shinfo->gso_type & SKB_GSO_SCTP)) {
4388 thlen = sizeof(struct sctphdr);
4381 } 4389 }
4382 /* UFO sets gso_size to the size of the fragmentation 4390 /* UFO sets gso_size to the size of the fragmentation
4383 * payload, i.e. the size of the L4 (UDP) header is already 4391 * payload, i.e. the size of the L4 (UDP) header is already
@@ -4387,6 +4395,38 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4387} 4395}
4388EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); 4396EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
4389 4397
4398/**
4399 * skb_gso_validate_mtu - Check whether a GSO skb fits a given MTU once split
4400 *
4401 * @skb: GSO skb
4402 * @mtu: MTU to validate against
4403 *
4404 * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU
4405 * once split.
4406 */
4407bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu)
4408{
4409 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4410 const struct sk_buff *iter;
4411 unsigned int hlen;
4412
4413 hlen = skb_gso_network_seglen(skb);
4414
4415 if (shinfo->gso_size != GSO_BY_FRAGS)
4416 return hlen <= mtu;
4417
4418 /* Undo this so we can re-use header sizes */
4419 hlen -= GSO_BY_FRAGS;
4420
4421 skb_walk_frags(skb, iter) {
4422 if (hlen + skb_headlen(iter) > mtu)
4423 return false;
4424 }
4425
4426 return true;
4427}
4428EXPORT_SYMBOL_GPL(skb_gso_validate_mtu);
4429
4390static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 4430static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
4391{ 4431{
4392 if (skb_cow(skb, skb_headroom(skb)) < 0) { 4432 if (skb_cow(skb, skb_headroom(skb)) < 0) {
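
Note: skb_gso_validate_mtu() generalizes the old "network segment length <= MTU" test: for regular GSO it still compares skb_gso_network_seglen() against the MTU, while GSO_BY_FRAGS skbs (the new SCTP case) are checked frag-list entry by frag-list entry. A hedged sketch of the forwarding-path check this enables; the helper name is illustrative and the non-GSO branch is simplified:

    #include <linux/skbuff.h>

    /* Decide whether a (possibly GSO) skb can leave a device with the
     * given MTU without being fragmented.
     */
    static bool my_skb_fits_mtu(const struct sk_buff *skb, unsigned int mtu)
    {
        if (!skb_is_gso(skb))
            return skb->len <= mtu;

        /* Handles both fixed-size segments and GSO_BY_FRAGS, where every
         * frag-list entry must fit on its own.
         */
        return skb_gso_validate_mtu(skb, mtu);
    }
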
diff --git a/net/core/utils.c b/net/core/utils.c
index 3d17ca8b4744..cf5622b9ccc4 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -133,7 +133,7 @@ int in4_pton(const char *src, int srclen,
133 s = src; 133 s = src;
134 d = dbuf; 134 d = dbuf;
135 i = 0; 135 i = 0;
136 while(1) { 136 while (1) {
137 int c; 137 int c;
138 c = xdigit2bin(srclen > 0 ? *s : '\0', delim); 138 c = xdigit2bin(srclen > 0 ? *s : '\0', delim);
139 if (!(c & (IN6PTON_DIGIT | IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK))) { 139 if (!(c & (IN6PTON_DIGIT | IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK))) {
@@ -283,11 +283,11 @@ cont:
283 i = 15; d--; 283 i = 15; d--;
284 284
285 if (dc) { 285 if (dc) {
286 while(d >= dc) 286 while (d >= dc)
287 dst[i--] = *d--; 287 dst[i--] = *d--;
288 while(i >= dc - dbuf) 288 while (i >= dc - dbuf)
289 dst[i--] = 0; 289 dst[i--] = 0;
290 while(i >= 0) 290 while (i >= 0)
291 dst[i--] = *d--; 291 dst[i--] = *d--;
292 } else 292 } else
293 memcpy(dst, dbuf, sizeof(dbuf)); 293 memcpy(dst, dbuf, sizeof(dbuf));
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index da06ed1df620..8af4ded70f1c 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -1,6 +1,6 @@
1# the core 1# the core
2obj-$(CONFIG_NET_DSA) += dsa_core.o 2obj-$(CONFIG_NET_DSA) += dsa_core.o
3dsa_core-y += dsa.o slave.o 3dsa_core-y += dsa.o slave.o dsa2.o
4 4
5# tagging formats 5# tagging formats
6dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o 6dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index eff5dfc2e33f..766d2a525ada 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -29,6 +29,33 @@
29 29
30char dsa_driver_version[] = "0.1"; 30char dsa_driver_version[] = "0.1";
31 31
32static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
33 struct net_device *dev)
34{
35 /* Just return the original SKB */
36 return skb;
37}
38
39static const struct dsa_device_ops none_ops = {
40 .xmit = dsa_slave_notag_xmit,
41 .rcv = NULL,
42};
43
44const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
45#ifdef CONFIG_NET_DSA_TAG_DSA
46 [DSA_TAG_PROTO_DSA] = &dsa_netdev_ops,
47#endif
48#ifdef CONFIG_NET_DSA_TAG_EDSA
49 [DSA_TAG_PROTO_EDSA] = &edsa_netdev_ops,
50#endif
51#ifdef CONFIG_NET_DSA_TAG_TRAILER
52 [DSA_TAG_PROTO_TRAILER] = &trailer_netdev_ops,
53#endif
54#ifdef CONFIG_NET_DSA_TAG_BRCM
55 [DSA_TAG_PROTO_BRCM] = &brcm_netdev_ops,
56#endif
57 [DSA_TAG_PROTO_NONE] = &none_ops,
58};
32 59
33/* switch driver registration ***********************************************/ 60/* switch driver registration ***********************************************/
34static DEFINE_MUTEX(dsa_switch_drivers_mutex); 61static DEFINE_MUTEX(dsa_switch_drivers_mutex);
@@ -180,41 +207,100 @@ __ATTRIBUTE_GROUPS(dsa_hwmon);
180#endif /* CONFIG_NET_DSA_HWMON */ 207#endif /* CONFIG_NET_DSA_HWMON */
181 208
182/* basic switch operations **************************************************/ 209/* basic switch operations **************************************************/
183static int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct net_device *master) 210int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev,
211 struct device_node *port_dn, int port)
184{ 212{
185 struct dsa_chip_data *cd = ds->cd;
186 struct device_node *port_dn;
187 struct phy_device *phydev; 213 struct phy_device *phydev;
188 int ret, port, mode; 214 int ret, mode;
215
216 if (of_phy_is_fixed_link(port_dn)) {
217 ret = of_phy_register_fixed_link(port_dn);
218 if (ret) {
219 dev_err(dev, "failed to register fixed PHY\n");
220 return ret;
221 }
222 phydev = of_phy_find_device(port_dn);
223
224 mode = of_get_phy_mode(port_dn);
225 if (mode < 0)
226 mode = PHY_INTERFACE_MODE_NA;
227 phydev->interface = mode;
228
229 genphy_config_init(phydev);
230 genphy_read_status(phydev);
231 if (ds->drv->adjust_link)
232 ds->drv->adjust_link(ds, port, phydev);
233 }
234
235 return 0;
236}
237
238static int dsa_cpu_dsa_setups(struct dsa_switch *ds, struct device *dev)
239{
240 struct device_node *port_dn;
241 int ret, port;
189 242
190 for (port = 0; port < DSA_MAX_PORTS; port++) { 243 for (port = 0; port < DSA_MAX_PORTS; port++) {
191 if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))) 244 if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
192 continue; 245 continue;
193 246
194 port_dn = cd->port_dn[port]; 247 port_dn = ds->ports[port].dn;
195 if (of_phy_is_fixed_link(port_dn)) { 248 ret = dsa_cpu_dsa_setup(ds, dev, port_dn, port);
196 ret = of_phy_register_fixed_link(port_dn); 249 if (ret)
197 if (ret) { 250 return ret;
198 netdev_err(master, 251 }
199 "failed to register fixed PHY\n"); 252 return 0;
200 return ret; 253}
201 }
202 phydev = of_phy_find_device(port_dn);
203 254
204 mode = of_get_phy_mode(port_dn); 255const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol)
205 if (mode < 0) 256{
206 mode = PHY_INTERFACE_MODE_NA; 257 const struct dsa_device_ops *ops;
207 phydev->interface = mode; 258
259 if (tag_protocol >= DSA_TAG_LAST)
260 return ERR_PTR(-EINVAL);
261 ops = dsa_device_ops[tag_protocol];
262
263 if (!ops)
264 return ERR_PTR(-ENOPROTOOPT);
265
266 return ops;
267}
268
269int dsa_cpu_port_ethtool_setup(struct dsa_switch *ds)
270{
271 struct net_device *master;
272 struct ethtool_ops *cpu_ops;
273
274 master = ds->dst->master_netdev;
275 if (ds->master_netdev)
276 master = ds->master_netdev;
277
278 cpu_ops = devm_kzalloc(ds->dev, sizeof(*cpu_ops), GFP_KERNEL);
279 if (!cpu_ops)
280 return -ENOMEM;
281
282 memcpy(&ds->dst->master_ethtool_ops, master->ethtool_ops,
283 sizeof(struct ethtool_ops));
284 ds->dst->master_orig_ethtool_ops = master->ethtool_ops;
285 memcpy(cpu_ops, &ds->dst->master_ethtool_ops,
286 sizeof(struct ethtool_ops));
287 dsa_cpu_port_ethtool_init(cpu_ops);
288 master->ethtool_ops = cpu_ops;
208 289
209 genphy_config_init(phydev);
210 genphy_read_status(phydev);
211 if (ds->drv->adjust_link)
212 ds->drv->adjust_link(ds, port, phydev);
213 }
214 }
215 return 0; 290 return 0;
216} 291}
217 292
293void dsa_cpu_port_ethtool_restore(struct dsa_switch *ds)
294{
295 struct net_device *master;
296
297 master = ds->dst->master_netdev;
298 if (ds->master_netdev)
299 master = ds->master_netdev;
300
301 master->ethtool_ops = ds->dst->master_orig_ethtool_ops;
302}
303
218static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) 304static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
219{ 305{
220 struct dsa_switch_driver *drv = ds->drv; 306 struct dsa_switch_driver *drv = ds->drv;
@@ -243,6 +329,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
243 } 329 }
244 dst->cpu_switch = index; 330 dst->cpu_switch = index;
245 dst->cpu_port = i; 331 dst->cpu_port = i;
332 ds->cpu_port_mask |= 1 << i;
246 } else if (!strcmp(name, "dsa")) { 333 } else if (!strcmp(name, "dsa")) {
247 ds->dsa_port_mask |= 1 << i; 334 ds->dsa_port_mask |= 1 << i;
248 } else { 335 } else {
@@ -267,37 +354,17 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
267 * switch. 354 * switch.
268 */ 355 */
269 if (dst->cpu_switch == index) { 356 if (dst->cpu_switch == index) {
270 switch (drv->tag_protocol) { 357 dst->tag_ops = dsa_resolve_tag_protocol(drv->tag_protocol);
271#ifdef CONFIG_NET_DSA_TAG_DSA 358 if (IS_ERR(dst->tag_ops)) {
272 case DSA_TAG_PROTO_DSA: 359 ret = PTR_ERR(dst->tag_ops);
273 dst->rcv = dsa_netdev_ops.rcv;
274 break;
275#endif
276#ifdef CONFIG_NET_DSA_TAG_EDSA
277 case DSA_TAG_PROTO_EDSA:
278 dst->rcv = edsa_netdev_ops.rcv;
279 break;
280#endif
281#ifdef CONFIG_NET_DSA_TAG_TRAILER
282 case DSA_TAG_PROTO_TRAILER:
283 dst->rcv = trailer_netdev_ops.rcv;
284 break;
285#endif
286#ifdef CONFIG_NET_DSA_TAG_BRCM
287 case DSA_TAG_PROTO_BRCM:
288 dst->rcv = brcm_netdev_ops.rcv;
289 break;
290#endif
291 case DSA_TAG_PROTO_NONE:
292 break;
293 default:
294 ret = -ENOPROTOOPT;
295 goto out; 360 goto out;
296 } 361 }
297 362
298 dst->tag_protocol = drv->tag_protocol; 363 dst->rcv = dst->tag_ops->rcv;
299 } 364 }
300 365
366 memcpy(ds->rtable, cd->rtable, sizeof(ds->rtable));
367
301 /* 368 /*
302 * Do basic register setup. 369 * Do basic register setup.
303 */ 370 */
@@ -309,22 +376,25 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
309 if (ret < 0) 376 if (ret < 0)
310 goto out; 377 goto out;
311 378
312 ds->slave_mii_bus = devm_mdiobus_alloc(parent); 379 if (!ds->slave_mii_bus && drv->phy_read) {
313 if (ds->slave_mii_bus == NULL) { 380 ds->slave_mii_bus = devm_mdiobus_alloc(parent);
314 ret = -ENOMEM; 381 if (!ds->slave_mii_bus) {
315 goto out; 382 ret = -ENOMEM;
316 } 383 goto out;
317 dsa_slave_mii_bus_init(ds); 384 }
318 385 dsa_slave_mii_bus_init(ds);
319 ret = mdiobus_register(ds->slave_mii_bus);
320 if (ret < 0)
321 goto out;
322 386
387 ret = mdiobus_register(ds->slave_mii_bus);
388 if (ret < 0)
389 goto out;
390 }
323 391
324 /* 392 /*
325 * Create network devices for physical switch ports. 393 * Create network devices for physical switch ports.
326 */ 394 */
327 for (i = 0; i < DSA_MAX_PORTS; i++) { 395 for (i = 0; i < DSA_MAX_PORTS; i++) {
396 ds->ports[i].dn = cd->port_dn[i];
397
328 if (!(ds->enabled_port_mask & (1 << i))) 398 if (!(ds->enabled_port_mask & (1 << i)))
329 continue; 399 continue;
330 400
@@ -337,13 +407,17 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
337 } 407 }
338 408
339 /* Perform configuration of the CPU and DSA ports */ 409 /* Perform configuration of the CPU and DSA ports */
340 ret = dsa_cpu_dsa_setup(ds, dst->master_netdev); 410 ret = dsa_cpu_dsa_setups(ds, parent);
341 if (ret < 0) { 411 if (ret < 0) {
342 netdev_err(dst->master_netdev, "[%d] : can't configure CPU and DSA ports\n", 412 netdev_err(dst->master_netdev, "[%d] : can't configure CPU and DSA ports\n",
343 index); 413 index);
344 ret = 0; 414 ret = 0;
345 } 415 }
346 416
417 ret = dsa_cpu_port_ethtool_setup(ds);
418 if (ret)
419 return ret;
420
347#ifdef CONFIG_NET_DSA_HWMON 421#ifdef CONFIG_NET_DSA_HWMON
348 /* If the switch provides a temperature sensor, 422 /* If the switch provides a temperature sensor,
349 * register with hardware monitoring subsystem. 423 * register with hardware monitoring subsystem.
@@ -420,11 +494,21 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
420 return ds; 494 return ds;
421} 495}
422 496
423static void dsa_switch_destroy(struct dsa_switch *ds) 497void dsa_cpu_dsa_destroy(struct device_node *port_dn)
424{ 498{
425 struct device_node *port_dn;
426 struct phy_device *phydev; 499 struct phy_device *phydev;
427 struct dsa_chip_data *cd = ds->cd; 500
501 if (of_phy_is_fixed_link(port_dn)) {
502 phydev = of_phy_find_device(port_dn);
503 if (phydev) {
504 phy_device_free(phydev);
505 fixed_phy_unregister(phydev);
506 }
507 }
508}
509
510static void dsa_switch_destroy(struct dsa_switch *ds)
511{
428 int port; 512 int port;
429 513
430#ifdef CONFIG_NET_DSA_HWMON 514#ifdef CONFIG_NET_DSA_HWMON
@@ -437,26 +521,25 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
437 if (!(ds->enabled_port_mask & (1 << port))) 521 if (!(ds->enabled_port_mask & (1 << port)))
438 continue; 522 continue;
439 523
440 if (!ds->ports[port]) 524 if (!ds->ports[port].netdev)
441 continue; 525 continue;
442 526
443 dsa_slave_destroy(ds->ports[port]); 527 dsa_slave_destroy(ds->ports[port].netdev);
444 } 528 }
445 529
446 /* Remove any fixed link PHYs */ 530 /* Disable configuration of the CPU and DSA ports */
447 for (port = 0; port < DSA_MAX_PORTS; port++) { 531 for (port = 0; port < DSA_MAX_PORTS; port++) {
448 port_dn = cd->port_dn[port]; 532 if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
449 if (of_phy_is_fixed_link(port_dn)) { 533 continue;
450 phydev = of_phy_find_device(port_dn); 534 dsa_cpu_dsa_destroy(ds->ports[port].dn);
451 if (phydev) { 535
452 phy_device_free(phydev); 536 /* Clearing a bit which is not set does no harm */
453 of_node_put(port_dn); 537 ds->cpu_port_mask &= ~(1 << port);
454 fixed_phy_unregister(phydev); 538 ds->dsa_port_mask &= ~(1 << port);
455 }
456 }
457 } 539 }
458 540
459 mdiobus_unregister(ds->slave_mii_bus); 541 if (ds->slave_mii_bus && ds->drv->phy_read)
542 mdiobus_unregister(ds->slave_mii_bus);
460} 543}
461 544
462#ifdef CONFIG_PM_SLEEP 545#ifdef CONFIG_PM_SLEEP
@@ -469,7 +552,7 @@ static int dsa_switch_suspend(struct dsa_switch *ds)
469 if (!dsa_is_port_initialized(ds, i)) 552 if (!dsa_is_port_initialized(ds, i))
470 continue; 553 continue;
471 554
472 ret = dsa_slave_suspend(ds->ports[i]); 555 ret = dsa_slave_suspend(ds->ports[i].netdev);
473 if (ret) 556 if (ret)
474 return ret; 557 return ret;
475 } 558 }
@@ -495,7 +578,7 @@ static int dsa_switch_resume(struct dsa_switch *ds)
495 if (!dsa_is_port_initialized(ds, i)) 578 if (!dsa_is_port_initialized(ds, i))
496 continue; 579 continue;
497 580
498 ret = dsa_slave_resume(ds->ports[i]); 581 ret = dsa_slave_resume(ds->ports[i].netdev);
499 if (ret) 582 if (ret)
500 return ret; 583 return ret;
501 } 584 }
@@ -587,17 +670,6 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
587 if (link_sw_addr >= pd->nr_chips) 670 if (link_sw_addr >= pd->nr_chips)
588 return -EINVAL; 671 return -EINVAL;
589 672
590 /* First time routing table allocation */
591 if (!cd->rtable) {
592 cd->rtable = kmalloc_array(pd->nr_chips, sizeof(s8),
593 GFP_KERNEL);
594 if (!cd->rtable)
595 return -ENOMEM;
596
597 /* default to no valid uplink/downlink */
598 memset(cd->rtable, -1, pd->nr_chips * sizeof(s8));
599 }
600
601 cd->rtable[link_sw_addr] = port_index; 673 cd->rtable[link_sw_addr] = port_index;
602 674
603 return 0; 675 return 0;
@@ -639,7 +711,6 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
639 kfree(pd->chip[i].port_names[port_index]); 711 kfree(pd->chip[i].port_names[port_index]);
640 port_index++; 712 port_index++;
641 } 713 }
642 kfree(pd->chip[i].rtable);
643 714
644 /* Drop our reference to the MDIO bus device */ 715 /* Drop our reference to the MDIO bus device */
645 if (pd->chip[i].host_dev) 716 if (pd->chip[i].host_dev)
@@ -931,6 +1002,8 @@ static void dsa_remove_dst(struct dsa_switch_tree *dst)
931 dsa_switch_destroy(ds); 1002 dsa_switch_destroy(ds);
932 } 1003 }
933 1004
1005 dsa_cpu_port_ethtool_restore(dst->ds[0]);
1006
934 dev_put(dst->master_netdev); 1007 dev_put(dst->master_netdev);
935} 1008}
936 1009
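
Note: replacing the per-protocol switch statement with the dsa_device_ops table means the receive hook and the slave transmit hook now come from a single dsa_resolve_tag_protocol() lookup. A hedged sketch of how a caller consumes the resolver, following the error handling used in dsa_switch_setup_one() above; the wrapper name is illustrative:

    #include <linux/err.h>
    #include <net/dsa.h>

    /* Resolve the tagger once and wire up both traffic directions. */
    static int my_wire_up_tagger(struct dsa_switch_tree *dst,
                                 struct dsa_switch *ds)
    {
        const struct dsa_device_ops *ops;

        ops = dsa_resolve_tag_protocol(ds->drv->tag_protocol);
        if (IS_ERR(ops))
            return PTR_ERR(ops);    /* -EINVAL or -ENOPROTOOPT */

        dst->tag_ops = ops;
        dst->rcv = ops->rcv;        /* NULL for DSA_TAG_PROTO_NONE */
        return 0;
    }
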
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
new file mode 100644
index 000000000000..83b95fc4cede
--- /dev/null
+++ b/net/dsa/dsa2.c
@@ -0,0 +1,690 @@
1/*
2 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
3 * Copyright (c) 2008-2009 Marvell Semiconductor
4 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
5 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#include <linux/device.h>
14#include <linux/err.h>
15#include <linux/list.h>
16#include <linux/slab.h>
17#include <linux/rtnetlink.h>
18#include <net/dsa.h>
19#include <linux/of.h>
20#include <linux/of_net.h>
21#include "dsa_priv.h"
22
23static LIST_HEAD(dsa_switch_trees);
24static DEFINE_MUTEX(dsa2_mutex);
25
26static struct dsa_switch_tree *dsa_get_dst(u32 tree)
27{
28 struct dsa_switch_tree *dst;
29
30 list_for_each_entry(dst, &dsa_switch_trees, list)
31 if (dst->tree == tree)
32 return dst;
33 return NULL;
34}
35
36static void dsa_free_dst(struct kref *ref)
37{
38 struct dsa_switch_tree *dst = container_of(ref, struct dsa_switch_tree,
39 refcount);
40
41 list_del(&dst->list);
42 kfree(dst);
43}
44
45static void dsa_put_dst(struct dsa_switch_tree *dst)
46{
47 kref_put(&dst->refcount, dsa_free_dst);
48}
49
50static struct dsa_switch_tree *dsa_add_dst(u32 tree)
51{
52 struct dsa_switch_tree *dst;
53
54 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
55 if (!dst)
56 return NULL;
57 dst->tree = tree;
58 dst->cpu_switch = -1;
59 INIT_LIST_HEAD(&dst->list);
60 list_add_tail(&dst->list, &dsa_switch_trees);
61 kref_init(&dst->refcount);
62
63 return dst;
64}
65
66static void dsa_dst_add_ds(struct dsa_switch_tree *dst,
67 struct dsa_switch *ds, u32 index)
68{
69 kref_get(&dst->refcount);
70 dst->ds[index] = ds;
71}
72
73static void dsa_dst_del_ds(struct dsa_switch_tree *dst,
74 struct dsa_switch *ds, u32 index)
75{
76 dst->ds[index] = NULL;
77 kref_put(&dst->refcount, dsa_free_dst);
78}
79
80static bool dsa_port_is_dsa(struct device_node *port)
81{
82 const char *name;
83
84 name = of_get_property(port, "label", NULL);
85 if (!name)
86 return false;
87
88 if (!strcmp(name, "dsa"))
89 return true;
90
91 return false;
92}
93
94static bool dsa_port_is_cpu(struct device_node *port)
95{
96 const char *name;
97
98 name = of_get_property(port, "label", NULL);
99 if (!name)
100 return false;
101
102 if (!strcmp(name, "cpu"))
103 return true;
104
105 return false;
106}
107
108static bool dsa_ds_find_port(struct dsa_switch *ds,
109 struct device_node *port)
110{
111 u32 index;
112
113 for (index = 0; index < DSA_MAX_PORTS; index++)
114 if (ds->ports[index].dn == port)
115 return true;
116 return false;
117}
118
119static struct dsa_switch *dsa_dst_find_port(struct dsa_switch_tree *dst,
120 struct device_node *port)
121{
122 struct dsa_switch *ds;
123 u32 index;
124
125 for (index = 0; index < DSA_MAX_SWITCHES; index++) {
126 ds = dst->ds[index];
127 if (!ds)
128 continue;
129
130 if (dsa_ds_find_port(ds, port))
131 return ds;
132 }
133
134 return NULL;
135}
136
137static int dsa_port_complete(struct dsa_switch_tree *dst,
138 struct dsa_switch *src_ds,
139 struct device_node *port,
140 u32 src_port)
141{
142 struct device_node *link;
143 int index;
144 struct dsa_switch *dst_ds;
145
146 for (index = 0;; index++) {
147 link = of_parse_phandle(port, "link", index);
148 if (!link)
149 break;
150
151 dst_ds = dsa_dst_find_port(dst, link);
152 of_node_put(link);
153
154 if (!dst_ds)
155 return 1;
156
157 src_ds->rtable[dst_ds->index] = src_port;
158 }
159
160 return 0;
161}
162
163/* A switch is complete if all the DSA ports' link phandles point to ports
164 * known in the tree. A return value of 1 means the tree is not
165 * complete. This is not an error condition. A value of 0 is
166 * success.
167 */
168static int dsa_ds_complete(struct dsa_switch_tree *dst, struct dsa_switch *ds)
169{
170 struct device_node *port;
171 u32 index;
172 int err;
173
174 for (index = 0; index < DSA_MAX_PORTS; index++) {
175 port = ds->ports[index].dn;
176 if (!port)
177 continue;
178
179 if (!dsa_port_is_dsa(port))
180 continue;
181
182 err = dsa_port_complete(dst, ds, port, index);
183 if (err != 0)
184 return err;
185
186 ds->dsa_port_mask |= BIT(index);
187 }
188
189 return 0;
190}
191
192/* A tree is complete if all the DSA ports' link phandles point to ports
193 * known in the tree. A return value of 1 means the tree is not
194 * complete. This is not an error condition. A value of 0 is
195 * success.
196 */
197static int dsa_dst_complete(struct dsa_switch_tree *dst)
198{
199 struct dsa_switch *ds;
200 u32 index;
201 int err;
202
203 for (index = 0; index < DSA_MAX_SWITCHES; index++) {
204 ds = dst->ds[index];
205 if (!ds)
206 continue;
207
208 err = dsa_ds_complete(dst, ds);
209 if (err != 0)
210 return err;
211 }
212
213 return 0;
214}
215
216static int dsa_dsa_port_apply(struct device_node *port, u32 index,
217 struct dsa_switch *ds)
218{
219 int err;
220
221 err = dsa_cpu_dsa_setup(ds, ds->dev, port, index);
222 if (err) {
223 dev_warn(ds->dev, "Failed to setup dsa port %d: %d\n",
224 index, err);
225 return err;
226 }
227
228 return 0;
229}
230
231static void dsa_dsa_port_unapply(struct device_node *port, u32 index,
232 struct dsa_switch *ds)
233{
234 dsa_cpu_dsa_destroy(port);
235}
236
237static int dsa_cpu_port_apply(struct device_node *port, u32 index,
238 struct dsa_switch *ds)
239{
240 int err;
241
242 err = dsa_cpu_dsa_setup(ds, ds->dev, port, index);
243 if (err) {
244 dev_warn(ds->dev, "Failed to setup cpu port %d: %d\n",
245 index, err);
246 return err;
247 }
248
249 ds->cpu_port_mask |= BIT(index);
250
251 return 0;
252}
253
254static void dsa_cpu_port_unapply(struct device_node *port, u32 index,
255 struct dsa_switch *ds)
256{
257 dsa_cpu_dsa_destroy(port);
258 ds->cpu_port_mask &= ~BIT(index);
259
260}
261
262static int dsa_user_port_apply(struct device_node *port, u32 index,
263 struct dsa_switch *ds)
264{
265 const char *name;
266 int err;
267
268 name = of_get_property(port, "label", NULL);
269
270 err = dsa_slave_create(ds, ds->dev, index, name);
271 if (err) {
272 dev_warn(ds->dev, "Failed to create slave %d: %d\n",
273 index, err);
274 return err;
275 }
276
277 return 0;
278}
279
280static void dsa_user_port_unapply(struct device_node *port, u32 index,
281 struct dsa_switch *ds)
282{
283 if (ds->ports[index].netdev) {
284 dsa_slave_destroy(ds->ports[index].netdev);
285 ds->ports[index].netdev = NULL;
286 ds->enabled_port_mask &= ~(1 << index);
287 }
288}
289
290static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
291{
292 struct device_node *port;
293 u32 index;
294 int err;
295
296 /* Initialize ds->phys_mii_mask before registering the slave MDIO bus
297 * driver and before drv->setup() has run, since the switch drivers and
298 * the slave MDIO bus driver rely on these values for probing PHY
299 * devices or not
300 */
301 ds->phys_mii_mask = ds->enabled_port_mask;
302
303 err = ds->drv->setup(ds);
304 if (err < 0)
305 return err;
306
307 err = ds->drv->set_addr(ds, dst->master_netdev->dev_addr);
308 if (err < 0)
309 return err;
310
315 if (!ds->slave_mii_bus && ds->drv->phy_read) {
316 ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
317 if (!ds->slave_mii_bus)
318 return -ENOMEM;
319
320 dsa_slave_mii_bus_init(ds);
321
322 err = mdiobus_register(ds->slave_mii_bus);
323 if (err < 0)
324 return err;
325 }
326
327 for (index = 0; index < DSA_MAX_PORTS; index++) {
328 port = ds->ports[index].dn;
329 if (!port)
330 continue;
331
332 if (dsa_port_is_dsa(port)) {
333 err = dsa_dsa_port_apply(port, index, ds);
334 if (err)
335 return err;
336 continue;
337 }
338
339 if (dsa_port_is_cpu(port)) {
340 err = dsa_cpu_port_apply(port, index, ds);
341 if (err)
342 return err;
343 continue;
344 }
345
346 err = dsa_user_port_apply(port, index, ds);
347 if (err)
348 continue;
349 }
350
351 return 0;
352}
353
354static void dsa_ds_unapply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
355{
356 struct device_node *port;
357 u32 index;
358
359 for (index = 0; index < DSA_MAX_PORTS; index++) {
360 port = ds->ports[index].dn;
361 if (!port)
362 continue;
363
364 if (dsa_port_is_dsa(port)) {
365 dsa_dsa_port_unapply(port, index, ds);
366 continue;
367 }
368
369 if (dsa_port_is_cpu(port)) {
370 dsa_cpu_port_unapply(port, index, ds);
371 continue;
372 }
373
374 dsa_user_port_unapply(port, index, ds);
375 }
376
377 if (ds->slave_mii_bus && ds->drv->phy_read)
378 mdiobus_unregister(ds->slave_mii_bus);
379}
380
381static int dsa_dst_apply(struct dsa_switch_tree *dst)
382{
383 struct dsa_switch *ds;
384 u32 index;
385 int err;
386
387 for (index = 0; index < DSA_MAX_SWITCHES; index++) {
388 ds = dst->ds[index];
389 if (!ds)
390 continue;
391
392 err = dsa_ds_apply(dst, ds);
393 if (err)
394 return err;
395 }
396
397 err = dsa_cpu_port_ethtool_setup(dst->ds[0]);
398 if (err)
399 return err;
400
401 /* If we use a tagging format that doesn't have an ethertype
402 * field, make sure that all packets from this point on get
403 * sent to the tag format's receive function.
404 */
405 wmb();
406 dst->master_netdev->dsa_ptr = (void *)dst;
407 dst->applied = true;
408
409 return 0;
410}
411
412static void dsa_dst_unapply(struct dsa_switch_tree *dst)
413{
414 struct dsa_switch *ds;
415 u32 index;
416
417 if (!dst->applied)
418 return;
419
420 dst->master_netdev->dsa_ptr = NULL;
421
422 /* If we used a tagging format that doesn't have an ethertype
423 * field, make sure that all packets from this point get sent
424 * without the tag and go through the regular receive path.
425 */
426 wmb();
427
428 for (index = 0; index < DSA_MAX_SWITCHES; index++) {
429 ds = dst->ds[index];
430 if (!ds)
431 continue;
432
433 dsa_ds_unapply(dst, ds);
434 }
435
436 dsa_cpu_port_ethtool_restore(dst->ds[0]);
437
438 pr_info("DSA: tree %d unapplied\n", dst->tree);
439 dst->applied = false;
440}
441
442static int dsa_cpu_parse(struct device_node *port, u32 index,
443 struct dsa_switch_tree *dst,
444 struct dsa_switch *ds)
445{
446 struct net_device *ethernet_dev;
447 struct device_node *ethernet;
448
449 ethernet = of_parse_phandle(port, "ethernet", 0);
450 if (!ethernet)
451 return -EINVAL;
452
453 ethernet_dev = of_find_net_device_by_node(ethernet);
454 if (!ethernet_dev)
455 return -EPROBE_DEFER;
456
457 if (!ds->master_netdev)
458 ds->master_netdev = ethernet_dev;
459
460 if (!dst->master_netdev)
461 dst->master_netdev = ethernet_dev;
462
463 if (dst->cpu_switch == -1) {
464 dst->cpu_switch = ds->index;
465 dst->cpu_port = index;
466 }
467
468 dst->tag_ops = dsa_resolve_tag_protocol(ds->drv->tag_protocol);
469 if (IS_ERR(dst->tag_ops)) {
470 dev_warn(ds->dev, "No tagger for this switch\n");
471 return PTR_ERR(dst->tag_ops);
472 }
473
474 dst->rcv = dst->tag_ops->rcv;
475
476 return 0;
477}
478
479static int dsa_ds_parse(struct dsa_switch_tree *dst, struct dsa_switch *ds)
480{
481 struct device_node *port;
482 u32 index;
483 int err;
484
485 for (index = 0; index < DSA_MAX_PORTS; index++) {
486 port = ds->ports[index].dn;
487 if (!port)
488 continue;
489
490 if (dsa_port_is_cpu(port)) {
491 err = dsa_cpu_parse(port, index, dst, ds);
492 if (err)
493 return err;
494 }
495 }
496
497 pr_info("DSA: switch %d %d parsed\n", dst->tree, ds->index);
498
499 return 0;
500}
501
502static int dsa_dst_parse(struct dsa_switch_tree *dst)
503{
504 struct dsa_switch *ds;
505 u32 index;
506 int err;
507
508 for (index = 0; index < DSA_MAX_SWITCHES; index++) {
509 ds = dst->ds[index];
510 if (!ds)
511 continue;
512
513 err = dsa_ds_parse(dst, ds);
514 if (err)
515 return err;
516 }
517
518 if (!dst->master_netdev) {
519 pr_warn("Tree has no master device\n");
520 return -EINVAL;
521 }
522
523 pr_info("DSA: tree %d parsed\n", dst->tree);
524
525 return 0;
526}
527
528static int dsa_parse_ports_dn(struct device_node *ports, struct dsa_switch *ds)
529{
530 struct device_node *port;
531 int err;
532 u32 reg;
533
534 for_each_available_child_of_node(ports, port) {
535 err = of_property_read_u32(port, "reg", &reg);
536 if (err)
537 return err;
538
539 if (reg >= DSA_MAX_PORTS)
540 return -EINVAL;
541
542 ds->ports[reg].dn = port;
543
544 /* Initialize enabled_port_mask now for drv->setup()
545 * to have access to a correct value, just like what
546 * net/dsa/dsa.c::dsa_switch_setup_one does.
547 */
548 if (!dsa_port_is_cpu(port))
549 ds->enabled_port_mask |= 1 << reg;
550 }
551
552 return 0;
553}
554
555static int dsa_parse_member(struct device_node *np, u32 *tree, u32 *index)
556{
557 int err;
558
559 *tree = *index = 0;
560
561 err = of_property_read_u32_index(np, "dsa,member", 0, tree);
562 if (err) {
563 /* Does not exist, but it is optional */
564 if (err == -EINVAL)
565 return 0;
566 return err;
567 }
568
569 err = of_property_read_u32_index(np, "dsa,member", 1, index);
570 if (err)
571 return err;
572
573 if (*index >= DSA_MAX_SWITCHES)
574 return -EINVAL;
575
576 return 0;
577}
578
579static struct device_node *dsa_get_ports(struct dsa_switch *ds,
580 struct device_node *np)
581{
582 struct device_node *ports;
583
584 ports = of_get_child_by_name(np, "ports");
585 if (!ports) {
586 dev_err(ds->dev, "no ports child node found\n");
587 return ERR_PTR(-EINVAL);
588 }
589
590 return ports;
591}
592
593static int _dsa_register_switch(struct dsa_switch *ds, struct device_node *np)
594{
595 struct device_node *ports = dsa_get_ports(ds, np);
596 struct dsa_switch_tree *dst;
597 u32 tree, index;
598 int err;
599
600 err = dsa_parse_member(np, &tree, &index);
601 if (err)
602 return err;
603
604 if (IS_ERR(ports))
605 return PTR_ERR(ports);
606
607 err = dsa_parse_ports_dn(ports, ds);
608 if (err)
609 return err;
610
611 dst = dsa_get_dst(tree);
612 if (!dst) {
613 dst = dsa_add_dst(tree);
614 if (!dst)
615 return -ENOMEM;
616 }
617
618 if (dst->ds[index]) {
619 err = -EBUSY;
620 goto out;
621 }
622
623 ds->dst = dst;
624 ds->index = index;
625 dsa_dst_add_ds(dst, ds, index);
626
627 err = dsa_dst_complete(dst);
628 if (err < 0)
629 goto out_del_dst;
630
631 if (err == 1) {
632 /* Not all switches registered yet */
633 err = 0;
634 goto out;
635 }
636
637 if (dst->applied) {
638 pr_info("DSA: Disjoint trees?\n");
639 err = -EINVAL;
640 goto out_del_dst;
641 }
641
642 err = dsa_dst_parse(dst);
643 if (err)
644 goto out_del_dst;
645
646 err = dsa_dst_apply(dst);
647 if (err) {
648 dsa_dst_unapply(dst);
649 goto out_del_dst;
650 }
651
652 dsa_put_dst(dst);
653 return 0;
654
655out_del_dst:
656 dsa_dst_del_ds(dst, ds, ds->index);
657out:
658 dsa_put_dst(dst);
659
660 return err;
661}
662
663int dsa_register_switch(struct dsa_switch *ds, struct device_node *np)
664{
665 int err;
666
667 mutex_lock(&dsa2_mutex);
668 err = _dsa_register_switch(ds, np);
669 mutex_unlock(&dsa2_mutex);
670
671 return err;
672}
673EXPORT_SYMBOL_GPL(dsa_register_switch);
674
675static void _dsa_unregister_switch(struct dsa_switch *ds)
676{
677 struct dsa_switch_tree *dst = ds->dst;
678
679 dsa_dst_unapply(dst);
680
681 dsa_dst_del_ds(dst, ds, ds->index);
682}
683
684void dsa_unregister_switch(struct dsa_switch *ds)
685{
686 mutex_lock(&dsa2_mutex);
687 _dsa_unregister_switch(ds);
688 mutex_unlock(&dsa2_mutex);
689}
690EXPORT_SYMBOL_GPL(dsa_unregister_switch);
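
Note: under the new binding a switch driver needs no platform data: it fills in a dsa_switch, points ds->dev and ds->drv at itself, and hands its device node to dsa_register_switch(); the core groups switches into a tree via the "dsa,member" property and applies the tree once every member has registered. A hedged probe sketch; the driver name and my_switch_driver_ops are illustrative:

    #include <linux/platform_device.h>
    #include <net/dsa.h>

    extern struct dsa_switch_driver my_switch_driver_ops; /* assumed defined */

    static int my_switch_probe(struct platform_device *pdev)
    {
        struct dsa_switch *ds;

        ds = devm_kzalloc(&pdev->dev, sizeof(*ds), GFP_KERNEL);
        if (!ds)
            return -ENOMEM;

        ds->dev = &pdev->dev;
        ds->drv = &my_switch_driver_ops;
        platform_set_drvdata(pdev, ds);

        /* Returns -EPROBE_DEFER until the master netdev is available. */
        return dsa_register_switch(ds, pdev->dev.of_node);
    }

    static int my_switch_remove(struct platform_device *pdev)
    {
        dsa_unregister_switch(platform_get_drvdata(pdev));
        return 0;
    }
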
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index dfa33779d49c..00077a9c97f4 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -50,12 +50,19 @@ struct dsa_slave_priv {
50 50
51/* dsa.c */ 51/* dsa.c */
52extern char dsa_driver_version[]; 52extern char dsa_driver_version[];
53int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev,
54 struct device_node *port_dn, int port);
55void dsa_cpu_dsa_destroy(struct device_node *port_dn);
56const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol);
57int dsa_cpu_port_ethtool_setup(struct dsa_switch *ds);
58void dsa_cpu_port_ethtool_restore(struct dsa_switch *ds);
53 59
54/* slave.c */ 60/* slave.c */
55extern const struct dsa_device_ops notag_netdev_ops; 61extern const struct dsa_device_ops notag_netdev_ops;
56void dsa_slave_mii_bus_init(struct dsa_switch *ds); 62void dsa_slave_mii_bus_init(struct dsa_switch *ds);
63void dsa_cpu_port_ethtool_init(struct ethtool_ops *ops);
57int dsa_slave_create(struct dsa_switch *ds, struct device *parent, 64int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
58 int port, char *name); 65 int port, const char *name);
59void dsa_slave_destroy(struct net_device *slave_dev); 66void dsa_slave_destroy(struct net_device *slave_dev);
60int dsa_slave_suspend(struct net_device *slave_dev); 67int dsa_slave_suspend(struct net_device *slave_dev);
61int dsa_slave_resume(struct net_device *slave_dev); 68int dsa_slave_resume(struct net_device *slave_dev);
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 152436cdab30..7236eb26dc97 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -49,8 +49,8 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
49 ds->slave_mii_bus->name = "dsa slave smi"; 49 ds->slave_mii_bus->name = "dsa slave smi";
50 ds->slave_mii_bus->read = dsa_slave_phy_read; 50 ds->slave_mii_bus->read = dsa_slave_phy_read;
51 ds->slave_mii_bus->write = dsa_slave_phy_write; 51 ds->slave_mii_bus->write = dsa_slave_phy_write;
52 snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x", 52 snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
53 ds->index, ds->cd->sw_addr); 53 ds->dst->tree, ds->index);
54 ds->slave_mii_bus->parent = ds->dev; 54 ds->slave_mii_bus->parent = ds->dev;
55 ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask; 55 ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
56} 56}
@@ -522,14 +522,6 @@ static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
522 return NETDEV_TX_OK; 522 return NETDEV_TX_OK;
523} 523}
524 524
525static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
526 struct net_device *dev)
527{
528 /* Just return the original SKB */
529 return skb;
530}
531
532
533/* ethtool operations *******************************************************/ 525/* ethtool operations *******************************************************/
534static int 526static int
535dsa_slave_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 527dsa_slave_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -615,7 +607,7 @@ static int dsa_slave_get_eeprom_len(struct net_device *dev)
615 struct dsa_slave_priv *p = netdev_priv(dev); 607 struct dsa_slave_priv *p = netdev_priv(dev);
616 struct dsa_switch *ds = p->parent; 608 struct dsa_switch *ds = p->parent;
617 609
618 if (ds->cd->eeprom_len) 610 if (ds->cd && ds->cd->eeprom_len)
619 return ds->cd->eeprom_len; 611 return ds->cd->eeprom_len;
620 612
621 if (ds->drv->get_eeprom_len) 613 if (ds->drv->get_eeprom_len)
@@ -873,6 +865,13 @@ static void dsa_slave_poll_controller(struct net_device *dev)
873} 865}
874#endif 866#endif
875 867
868void dsa_cpu_port_ethtool_init(struct ethtool_ops *ops)
869{
870 ops->get_sset_count = dsa_cpu_port_get_sset_count;
871 ops->get_ethtool_stats = dsa_cpu_port_get_ethtool_stats;
872 ops->get_strings = dsa_cpu_port_get_strings;
873}
874
876static const struct ethtool_ops dsa_slave_ethtool_ops = { 875static const struct ethtool_ops dsa_slave_ethtool_ops = {
877 .get_settings = dsa_slave_get_settings, 876 .get_settings = dsa_slave_get_settings,
878 .set_settings = dsa_slave_set_settings, 877 .set_settings = dsa_slave_set_settings,
@@ -893,8 +892,6 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
893 .get_eee = dsa_slave_get_eee, 892 .get_eee = dsa_slave_get_eee,
894}; 893};
895 894
896static struct ethtool_ops dsa_cpu_port_ethtool_ops;
897
898static const struct net_device_ops dsa_slave_netdev_ops = { 895static const struct net_device_ops dsa_slave_netdev_ops = {
899 .ndo_open = dsa_slave_open, 896 .ndo_open = dsa_slave_open,
900 .ndo_stop = dsa_slave_close, 897 .ndo_stop = dsa_slave_close,
@@ -999,13 +996,12 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
999 struct net_device *slave_dev) 996 struct net_device *slave_dev)
1000{ 997{
1001 struct dsa_switch *ds = p->parent; 998 struct dsa_switch *ds = p->parent;
1002 struct dsa_chip_data *cd = ds->cd;
1003 struct device_node *phy_dn, *port_dn; 999 struct device_node *phy_dn, *port_dn;
1004 bool phy_is_fixed = false; 1000 bool phy_is_fixed = false;
1005 u32 phy_flags = 0; 1001 u32 phy_flags = 0;
1006 int mode, ret; 1002 int mode, ret;
1007 1003
1008 port_dn = cd->port_dn[p->port]; 1004 port_dn = ds->ports[p->port].dn;
1009 mode = of_get_phy_mode(port_dn); 1005 mode = of_get_phy_mode(port_dn);
1010 if (mode < 0) 1006 if (mode < 0)
1011 mode = PHY_INTERFACE_MODE_NA; 1007 mode = PHY_INTERFACE_MODE_NA;
@@ -1109,14 +1105,18 @@ int dsa_slave_resume(struct net_device *slave_dev)
1109} 1105}
1110 1106
1111int dsa_slave_create(struct dsa_switch *ds, struct device *parent, 1107int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
1112 int port, char *name) 1108 int port, const char *name)
1113{ 1109{
1114 struct net_device *master = ds->dst->master_netdev;
1115 struct dsa_switch_tree *dst = ds->dst; 1110 struct dsa_switch_tree *dst = ds->dst;
1111 struct net_device *master;
1116 struct net_device *slave_dev; 1112 struct net_device *slave_dev;
1117 struct dsa_slave_priv *p; 1113 struct dsa_slave_priv *p;
1118 int ret; 1114 int ret;
1119 1115
1116 master = ds->dst->master_netdev;
1117 if (ds->master_netdev)
1118 master = ds->master_netdev;
1119
1120 slave_dev = alloc_netdev(sizeof(struct dsa_slave_priv), name, 1120 slave_dev = alloc_netdev(sizeof(struct dsa_slave_priv), name,
1121 NET_NAME_UNKNOWN, ether_setup); 1121 NET_NAME_UNKNOWN, ether_setup);
1122 if (slave_dev == NULL) 1122 if (slave_dev == NULL)
@@ -1124,19 +1124,6 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
1124 1124
1125 slave_dev->features = master->vlan_features; 1125 slave_dev->features = master->vlan_features;
1126 slave_dev->ethtool_ops = &dsa_slave_ethtool_ops; 1126 slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
1127 if (master->ethtool_ops != &dsa_cpu_port_ethtool_ops) {
1128 memcpy(&dst->master_ethtool_ops, master->ethtool_ops,
1129 sizeof(struct ethtool_ops));
1130 memcpy(&dsa_cpu_port_ethtool_ops, &dst->master_ethtool_ops,
1131 sizeof(struct ethtool_ops));
1132 dsa_cpu_port_ethtool_ops.get_sset_count =
1133 dsa_cpu_port_get_sset_count;
1134 dsa_cpu_port_ethtool_ops.get_ethtool_stats =
1135 dsa_cpu_port_get_ethtool_stats;
1136 dsa_cpu_port_ethtool_ops.get_strings =
1137 dsa_cpu_port_get_strings;
1138 master->ethtool_ops = &dsa_cpu_port_ethtool_ops;
1139 }
1140 eth_hw_addr_inherit(slave_dev, master); 1127 eth_hw_addr_inherit(slave_dev, master);
1141 slave_dev->priv_flags |= IFF_NO_QUEUE; 1128 slave_dev->priv_flags |= IFF_NO_QUEUE;
1142 slave_dev->netdev_ops = &dsa_slave_netdev_ops; 1129 slave_dev->netdev_ops = &dsa_slave_netdev_ops;
@@ -1147,49 +1134,24 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
1147 NULL); 1134 NULL);
1148 1135
1149 SET_NETDEV_DEV(slave_dev, parent); 1136 SET_NETDEV_DEV(slave_dev, parent);
1150 slave_dev->dev.of_node = ds->cd->port_dn[port]; 1137 slave_dev->dev.of_node = ds->ports[port].dn;
1151 slave_dev->vlan_features = master->vlan_features; 1138 slave_dev->vlan_features = master->vlan_features;
1152 1139
1153 p = netdev_priv(slave_dev); 1140 p = netdev_priv(slave_dev);
1154 p->parent = ds; 1141 p->parent = ds;
1155 p->port = port; 1142 p->port = port;
1156 1143 p->xmit = dst->tag_ops->xmit;
1157 switch (ds->dst->tag_protocol) {
1158#ifdef CONFIG_NET_DSA_TAG_DSA
1159 case DSA_TAG_PROTO_DSA:
1160 p->xmit = dsa_netdev_ops.xmit;
1161 break;
1162#endif
1163#ifdef CONFIG_NET_DSA_TAG_EDSA
1164 case DSA_TAG_PROTO_EDSA:
1165 p->xmit = edsa_netdev_ops.xmit;
1166 break;
1167#endif
1168#ifdef CONFIG_NET_DSA_TAG_TRAILER
1169 case DSA_TAG_PROTO_TRAILER:
1170 p->xmit = trailer_netdev_ops.xmit;
1171 break;
1172#endif
1173#ifdef CONFIG_NET_DSA_TAG_BRCM
1174 case DSA_TAG_PROTO_BRCM:
1175 p->xmit = brcm_netdev_ops.xmit;
1176 break;
1177#endif
1178 default:
1179 p->xmit = dsa_slave_notag_xmit;
1180 break;
1181 }
1182 1144
1183 p->old_pause = -1; 1145 p->old_pause = -1;
1184 p->old_link = -1; 1146 p->old_link = -1;
1185 p->old_duplex = -1; 1147 p->old_duplex = -1;
1186 1148
1187 ds->ports[port] = slave_dev; 1149 ds->ports[port].netdev = slave_dev;
1188 ret = register_netdev(slave_dev); 1150 ret = register_netdev(slave_dev);
1189 if (ret) { 1151 if (ret) {
1190 netdev_err(master, "error %d registering interface %s\n", 1152 netdev_err(master, "error %d registering interface %s\n",
1191 ret, slave_dev->name); 1153 ret, slave_dev->name);
1192 ds->ports[port] = NULL; 1154 ds->ports[port].netdev = NULL;
1193 free_netdev(slave_dev); 1155 free_netdev(slave_dev);
1194 return ret; 1156 return ret;
1195 } 1157 }
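
Note: binding p->xmit once at slave creation means every tagger just implements the dsa_device_ops contract: .xmit receives the slave's skb and returns the tagged skb, or the original one for tag-less setups as dsa_slave_notag_xmit above shows. A hedged sketch of a 4-byte tagger honoring that contract; the tag layout is illustrative and dsa_slave_priv comes from dsa_priv.h:

    #include <linux/etherdevice.h>
    #include "dsa_priv.h"

    static struct sk_buff *my_tag_xmit(struct sk_buff *skb,
                                       struct net_device *dev)
    {
        struct dsa_slave_priv *p = netdev_priv(dev);
        u8 *tag;

        if (skb_cow_head(skb, 4) < 0) {
            kfree_skb(skb);
            return NULL;            /* NULL tells the caller to drop */
        }

        /* Open room for the tag between the MAC addresses and ethertype. */
        skb_push(skb, 4);
        memmove(skb->data, skb->data + 4, 2 * ETH_ALEN);

        tag = skb->data + 2 * ETH_ALEN;
        tag[0] = 0;
        tag[1] = p->port;           /* egress port for the switch */
        tag[2] = 0;
        tag[3] = 0;

        return skb;
    }

    static const struct dsa_device_ops my_tag_netdev_ops = {
        .xmit = my_tag_xmit,
        .rcv  = NULL,               /* receive hook omitted in this sketch */
    };
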
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index e2aadb73111d..21bffde6e4bf 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -127,7 +127,7 @@ static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
127 source_port = brcm_tag[3] & BRCM_EG_PID_MASK; 127 source_port = brcm_tag[3] & BRCM_EG_PID_MASK;
128 128
129 /* Validate port against switch setup */ 129 /* Validate port against switch setup */
130 if (source_port >= DSA_MAX_PORTS || ds->ports[source_port] == NULL) 130 if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev)
131 goto out_drop; 131 goto out_drop;
132 132
133 /* Remove Broadcom tag and update checksum */ 133 /* Remove Broadcom tag and update checksum */
@@ -140,7 +140,7 @@ static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
140 140
141 skb_push(skb, ETH_HLEN); 141 skb_push(skb, ETH_HLEN);
142 skb->pkt_type = PACKET_HOST; 142 skb->pkt_type = PACKET_HOST;
143 skb->dev = ds->ports[source_port]; 143 skb->dev = ds->ports[source_port].netdev;
144 skb->protocol = eth_type_trans(skb, skb->dev); 144 skb->protocol = eth_type_trans(skb, skb->dev);
145 145
146 skb->dev->stats.rx_packets++; 146 skb->dev->stats.rx_packets++;
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index aa780e4ac0bd..bce79ffe342b 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -107,10 +107,14 @@ static int dsa_rcv(struct sk_buff *skb, struct net_device *dev,
 	 * Check that the source device exists and that the source
 	 * port is a registered DSA port.
 	 */
-	if (source_device >= dst->pd->nr_chips)
+	if (source_device >= DSA_MAX_SWITCHES)
 		goto out_drop;
+
 	ds = dst->ds[source_device];
-	if (source_port >= DSA_MAX_PORTS || ds->ports[source_port] == NULL)
+	if (!ds)
+		goto out_drop;
+
+	if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev)
 		goto out_drop;
 
 	/*
@@ -159,7 +163,7 @@ static int dsa_rcv(struct sk_buff *skb, struct net_device *dev,
 			2 * ETH_ALEN);
 	}
 
-	skb->dev = ds->ports[source_port];
+	skb->dev = ds->ports[source_port].netdev;
 	skb_push(skb, ETH_HLEN);
 	skb->pkt_type = PACKET_HOST;
 	skb->protocol = eth_type_trans(skb, skb->dev);
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 2288c8098c42..6c1720e88537 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -120,10 +120,14 @@ static int edsa_rcv(struct sk_buff *skb, struct net_device *dev,
 	 * Check that the source device exists and that the source
 	 * port is a registered DSA port.
 	 */
-	if (source_device >= dst->pd->nr_chips)
+	if (source_device >= DSA_MAX_SWITCHES)
 		goto out_drop;
+
 	ds = dst->ds[source_device];
-	if (source_port >= DSA_MAX_PORTS || ds->ports[source_port] == NULL)
+	if (!ds)
+		goto out_drop;
+
+	if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev)
 		goto out_drop;
 
 	/*
@@ -178,7 +182,7 @@ static int edsa_rcv(struct sk_buff *skb, struct net_device *dev,
 			2 * ETH_ALEN);
 	}
 
-	skb->dev = ds->ports[source_port];
+	skb->dev = ds->ports[source_port].netdev;
 	skb_push(skb, ETH_HLEN);
 	skb->pkt_type = PACKET_HOST;
 	skb->protocol = eth_type_trans(skb, skb->dev);
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index b6ca0890d018..5e3903eb1afa 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -82,12 +82,12 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
 		goto out_drop;
 
 	source_port = trailer[1] & 7;
-	if (source_port >= DSA_MAX_PORTS || ds->ports[source_port] == NULL)
+	if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev)
 		goto out_drop;
 
 	pskb_trim_rcsum(skb, skb->len - 4);
 
-	skb->dev = ds->ports[source_port];
+	skb->dev = ds->ports[source_port].netdev;
 	skb_push(skb, ETH_HLEN);
 	skb->pkt_type = PACKET_HOST;
 	skb->protocol = eth_type_trans(skb, skb->dev);
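The four tagger hunks above are one mechanical substitution: ds->ports[] is no longer a bare array of struct net_device pointers but an array of per-port structures whose netdev member holds the slave interface. A minimal sketch of the shape this implies; only the netdev member is visible in these hunks, everything else is an assumption:

struct dsa_port {
	struct net_device *netdev;	/* slave interface, NULL when the port is unused */
	/* further per-port state introduced by the dsa2 rework (assumed) */
};

struct dsa_switch {
	/* ... */
	struct dsa_port ports[DSA_MAX_PORTS];
};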
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index dd085db8580e..8c004a0c8d64 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -58,21 +58,10 @@ static struct header_ops lowpan_header_ops = {
 	.create		= lowpan_header_create,
 };
 
-static struct lock_class_key lowpan_tx_busylock;
-static struct lock_class_key lowpan_netdev_xmit_lock_key;
-
-static void lowpan_set_lockdep_class_one(struct net_device *ldev,
-					 struct netdev_queue *txq,
-					 void *_unused)
-{
-	lockdep_set_class(&txq->_xmit_lock,
-			  &lowpan_netdev_xmit_lock_key);
-}
-
 static int lowpan_dev_init(struct net_device *ldev)
 {
-	netdev_for_each_tx_queue(ldev, lowpan_set_lockdep_class_one, NULL);
-	ldev->qdisc_tx_busylock = &lowpan_tx_busylock;
+	netdev_lockdep_set_classes(ldev);
+
 	return 0;
 }
 
@@ -92,11 +81,21 @@ static int lowpan_stop(struct net_device *dev)
 	return 0;
 }
 
+static int lowpan_neigh_construct(struct neighbour *n)
+{
+	struct lowpan_802154_neigh *neigh = lowpan_802154_neigh(neighbour_priv(n));
+
+	/* default no short_addr is available for a neighbour */
+	neigh->short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC);
+	return 0;
+}
+
 static const struct net_device_ops lowpan_netdev_ops = {
 	.ndo_init		= lowpan_dev_init,
 	.ndo_start_xmit		= lowpan_xmit,
 	.ndo_open		= lowpan_open,
 	.ndo_stop		= lowpan_stop,
+	.ndo_neigh_construct	= lowpan_neigh_construct,
 };
 
 static void lowpan_setup(struct net_device *ldev)
@@ -161,6 +160,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *ldev,
 			      wdev->needed_headroom;
 	ldev->needed_tailroom = wdev->needed_tailroom;
 
+	ldev->neigh_priv_len = sizeof(struct lowpan_802154_neigh);
+
 	ret = lowpan_register_netdevice(ldev, LOWPAN_LLTYPE_IEEE802154);
 	if (ret < 0) {
 		dev_put(wdev);
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index e459afd16bb3..dbb476d7d38f 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -9,6 +9,7 @@
  */
 
 #include <net/6lowpan.h>
+#include <net/ndisc.h>
 #include <net/ieee802154_netdev.h>
 #include <net/mac802154.h>
 
@@ -17,19 +18,9 @@
 #define LOWPAN_FRAG1_HEAD_SIZE	0x4
 #define LOWPAN_FRAGN_HEAD_SIZE	0x5
 
-/* don't save pan id, it's intra pan */
-struct lowpan_addr {
-	u8 mode;
-	union {
-		/* IPv6 needs big endian here */
-		__be64 extended_addr;
-		__be16 short_addr;
-	} u;
-};
-
 struct lowpan_addr_info {
-	struct lowpan_addr daddr;
-	struct lowpan_addr saddr;
+	struct ieee802154_addr daddr;
+	struct ieee802154_addr saddr;
 };
 
 static inline struct
@@ -48,12 +39,14 @@ lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb)
  * RAW/DGRAM sockets.
  */
 int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
-			 unsigned short type, const void *_daddr,
-			 const void *_saddr, unsigned int len)
+			 unsigned short type, const void *daddr,
+			 const void *saddr, unsigned int len)
 {
-	const u8 *saddr = _saddr;
-	const u8 *daddr = _daddr;
-	struct lowpan_addr_info *info;
+	struct wpan_dev *wpan_dev = lowpan_802154_dev(ldev)->wdev->ieee802154_ptr;
+	struct lowpan_addr_info *info = lowpan_skb_priv(skb);
+	struct lowpan_802154_neigh *llneigh = NULL;
+	const struct ipv6hdr *hdr = ipv6_hdr(skb);
+	struct neighbour *n;
 
 	/* TODO:
 	 * if this package isn't ipv6 one, where should it be routed?
@@ -61,21 +54,50 @@ int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
 	if (type != ETH_P_IPV6)
 		return 0;
 
-	if (!saddr)
-		saddr = ldev->dev_addr;
+	/* intra-pan communication */
+	info->saddr.pan_id = wpan_dev->pan_id;
+	info->daddr.pan_id = info->saddr.pan_id;
 
-	raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
-	raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
+	if (!memcmp(daddr, ldev->broadcast, EUI64_ADDR_LEN)) {
+		info->daddr.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
+		info->daddr.mode = IEEE802154_ADDR_SHORT;
+	} else {
+		__le16 short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC);
+
+		n = neigh_lookup(&nd_tbl, &hdr->daddr, ldev);
+		if (n) {
+			llneigh = lowpan_802154_neigh(neighbour_priv(n));
+			read_lock_bh(&n->lock);
+			short_addr = llneigh->short_addr;
+			read_unlock_bh(&n->lock);
+		}
 
-	info = lowpan_skb_priv(skb);
+		if (llneigh &&
+		    lowpan_802154_is_valid_src_short_addr(short_addr)) {
+			info->daddr.short_addr = short_addr;
+			info->daddr.mode = IEEE802154_ADDR_SHORT;
+		} else {
+			info->daddr.mode = IEEE802154_ADDR_LONG;
+			ieee802154_be64_to_le64(&info->daddr.extended_addr,
+						daddr);
+		}
 
-	/* TODO: Currently we only support extended_addr */
-	info->daddr.mode = IEEE802154_ADDR_LONG;
-	memcpy(&info->daddr.u.extended_addr, daddr,
-	       sizeof(info->daddr.u.extended_addr));
-	info->saddr.mode = IEEE802154_ADDR_LONG;
-	memcpy(&info->saddr.u.extended_addr, saddr,
-	       sizeof(info->daddr.u.extended_addr));
+		if (n)
+			neigh_release(n);
+	}
+
+	if (!saddr) {
+		if (lowpan_802154_is_valid_src_short_addr(wpan_dev->short_addr)) {
+			info->saddr.mode = IEEE802154_ADDR_SHORT;
+			info->saddr.short_addr = wpan_dev->short_addr;
+		} else {
+			info->saddr.mode = IEEE802154_ADDR_LONG;
+			info->saddr.extended_addr = wpan_dev->extended_addr;
+		}
+	} else {
+		info->saddr.mode = IEEE802154_ADDR_LONG;
+		ieee802154_be64_to_le64(&info->saddr.extended_addr, saddr);
+	}
 
 	return 0;
 }
@@ -209,47 +231,26 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
 			 u16 *dgram_size, u16 *dgram_offset)
 {
 	struct wpan_dev *wpan_dev = lowpan_802154_dev(ldev)->wdev->ieee802154_ptr;
-	struct ieee802154_addr sa, da;
 	struct ieee802154_mac_cb *cb = mac_cb_init(skb);
 	struct lowpan_addr_info info;
-	void *daddr, *saddr;
 
 	memcpy(&info, lowpan_skb_priv(skb), sizeof(info));
 
-	/* TODO: Currently we only support extended_addr */
-	daddr = &info.daddr.u.extended_addr;
-	saddr = &info.saddr.u.extended_addr;
-
 	*dgram_size = skb->len;
-	lowpan_header_compress(skb, ldev, daddr, saddr);
+	lowpan_header_compress(skb, ldev, &info.daddr, &info.saddr);
 	/* dgram_offset = (saved bytes after compression) + lowpan header len */
 	*dgram_offset = (*dgram_size - skb->len) + skb_network_header_len(skb);
 
 	cb->type = IEEE802154_FC_TYPE_DATA;
 
-	/* prepare wpan address data */
-	sa.mode = IEEE802154_ADDR_LONG;
-	sa.pan_id = wpan_dev->pan_id;
-	sa.extended_addr = ieee802154_devaddr_from_raw(saddr);
-
-	/* intra-PAN communications */
-	da.pan_id = sa.pan_id;
-
-	/* if the destination address is the broadcast address, use the
-	 * corresponding short address
-	 */
-	if (!memcmp(daddr, ldev->broadcast, EUI64_ADDR_LEN)) {
-		da.mode = IEEE802154_ADDR_SHORT;
-		da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
+	if (info.daddr.mode == IEEE802154_ADDR_SHORT &&
+	    ieee802154_is_broadcast_short_addr(info.daddr.short_addr))
 		cb->ackreq = false;
-	} else {
-		da.mode = IEEE802154_ADDR_LONG;
-		da.extended_addr = ieee802154_devaddr_from_raw(daddr);
+	else
 		cb->ackreq = wpan_dev->ackreq;
-	}
 
-	return wpan_dev_hard_header(skb, lowpan_802154_dev(ldev)->wdev, &da,
-				    &sa, 0);
+	return wpan_dev_hard_header(skb, lowpan_802154_dev(ldev)->wdev,
+				    &info.daddr, &info.saddr, 0);
 }
 
 netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 238225b0c970..50d6a9b49f6c 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -532,6 +532,22 @@ config TCP_CONG_VEGAS
 	  window. TCP Vegas should provide less packet loss, but it is
 	  not as aggressive as TCP Reno.
 
+config TCP_CONG_NV
+	tristate "TCP NV"
+	default n
+	---help---
+	TCP NV is a follow-up to TCP Vegas. It has been modified to deal with
+	10G networks, measurement noise introduced by LRO, GRO and interrupt
+	coalescence. In addition, it will decrease its cwnd multiplicatively
+	instead of linearly.
+
+	Note that in general congestion avoidance (cwnd decreased when # packets
+	queued grows) cannot coexist with congestion control (cwnd decreased only
+	when there is packet loss) due to fairness issues. One scenario when they
+	can coexist safely is when the CA flows have RTTs << CC flows RTTs.
+
+	For further details see http://www.brakmo.org/networking/tcp-nv/
+
 config TCP_CONG_SCALABLE
 	tristate "Scalable TCP"
 	default n
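The Kconfig entry only builds the module; selecting it is a runtime decision. A minimal sketch (plain C, assuming CONFIG_TCP_CONG_NV=m and tcp_nv already loaded, e.g. via modprobe tcp_nv) that makes "nv" the system-wide default by writing the usual sysctl file:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_congestion_control", "w");

	if (!f)
		return 1;		/* no permission, or sysctl missing */
	fputs("nv", f);			/* name registered by tcp_nv.c below */
	return fclose(f) ? 1 : 0;
}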
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index bfa133691cde..24629b6f57cc 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_TCP_CONG_HSTCP) += tcp_highspeed.o
 obj-$(CONFIG_TCP_CONG_HYBLA) += tcp_hybla.o
 obj-$(CONFIG_TCP_CONG_HTCP) += tcp_htcp.o
 obj-$(CONFIG_TCP_CONG_VEGAS) += tcp_vegas.o
+obj-$(CONFIG_TCP_CONG_NV) += tcp_nv.o
 obj-$(CONFIG_TCP_CONG_VENO) += tcp_veno.o
 obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
 obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index f2bda9e89c61..6e9ea69e5f75 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -76,6 +76,7 @@ static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
 {
 	int err = -EAGAIN;
 	struct fib_table *tbl;
+	u32 tb_id;
 
 	switch (rule->action) {
 	case FR_ACT_TO_TBL:
@@ -94,7 +95,8 @@ static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
 
 	rcu_read_lock();
 
-	tbl = fib_get_table(rule->fr_net, rule->table);
+	tb_id = fib_rule_get_table(rule, arg);
+	tbl = fib_get_table(rule->fr_net, tb_id);
 	if (tbl)
 		err = fib_table_lookup(tbl, &flp->u.ip4,
 				       (struct fib_result *)arg->result,
@@ -180,7 +182,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
 	if (err)
 		goto errout;
 
-	if (rule->table == RT_TABLE_UNSPEC) {
+	if (rule->table == RT_TABLE_UNSPEC && !rule->l3mdev) {
 		if (rule->action == FR_ACT_TO_TBL) {
 			struct fib_table *table;
 
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 5f9207c039e7..321d57f825ce 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -129,6 +129,36 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
 
 	guehdr = (struct guehdr *)&udp_hdr(skb)[1];
 
+	switch (guehdr->version) {
+	case 0: /* Full GUE header present */
+		break;
+
+	case 1: {
+		/* Direct encapsulation of IPv4 or IPv6 */
+
+		int prot;
+
+		switch (((struct iphdr *)guehdr)->version) {
+		case 4:
+			prot = IPPROTO_IPIP;
+			break;
+		case 6:
+			prot = IPPROTO_IPV6;
+			break;
+		default:
+			goto drop;
+		}
+
+		if (fou_recv_pull(skb, fou, sizeof(struct udphdr)))
+			goto drop;
+
+		return -prot;
+	}
+
+	default: /* Undefined version */
+		goto drop;
+	}
+
 	optlen = guehdr->hlen << 2;
 	len += optlen;
 
@@ -289,6 +319,7 @@ static struct sk_buff **gue_gro_receive(struct sock *sk,
 	int flush = 1;
 	struct fou *fou = fou_from_sock(sk);
 	struct gro_remcsum grc;
+	u8 proto;
 
 	skb_gro_remcsum_init(&grc);
 
@@ -302,6 +333,25 @@ static struct sk_buff **gue_gro_receive(struct sock *sk,
 		goto out;
 	}
 
+	switch (guehdr->version) {
+	case 0:
+		break;
+	case 1:
+		switch (((struct iphdr *)guehdr)->version) {
+		case 4:
+			proto = IPPROTO_IPIP;
+			break;
+		case 6:
+			proto = IPPROTO_IPV6;
+			break;
+		default:
+			goto out;
+		}
+		goto next_proto;
+	default:
+		goto out;
+	}
+
 	optlen = guehdr->hlen << 2;
 	len += optlen;
 
@@ -370,6 +420,10 @@ static struct sk_buff **gue_gro_receive(struct sock *sk,
 		}
 	}
 
+	proto = guehdr->proto_ctype;
+
+next_proto:
+
 	/* We can clear the encap_mark for GUE as we are essentially doing
 	 * one of two possible things. We are either adding an L4 tunnel
 	 * header to the outer L3 tunnel header, or we are simply
@@ -383,7 +437,7 @@ static struct sk_buff **gue_gro_receive(struct sock *sk,
 
 	rcu_read_lock();
 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
-	ops = rcu_dereference(offloads[guehdr->proto_ctype]);
+	ops = rcu_dereference(offloads[proto]);
 	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
 		goto out_unlock;
 
@@ -404,13 +458,30 @@ static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
 	const struct net_offload **offloads;
 	struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
 	const struct net_offload *ops;
-	unsigned int guehlen;
+	unsigned int guehlen = 0;
 	u8 proto;
 	int err = -ENOENT;
 
-	proto = guehdr->proto_ctype;
-
-	guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);
+	switch (guehdr->version) {
+	case 0:
+		proto = guehdr->proto_ctype;
+		guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);
+		break;
+	case 1:
+		switch (((struct iphdr *)guehdr)->version) {
+		case 4:
+			proto = IPPROTO_IPIP;
+			break;
+		case 6:
+			proto = IPPROTO_IPV6;
+			break;
+		default:
+			return err;
+		}
+		break;
+	default:
+		return err;
+	}
 
 	rcu_read_lock();
 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
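The version-1 paths above exploit a deliberate overlap in the GUE encoding: the two most-significant bits of the first octet after the UDP header carry the GUE version, and a raw inner IPv4 header (0x4 in the high nibble) or IPv6 header (0x6) starts with binary 01, i.e. GUE version 1, so direct encapsulation needs no GUE header at all. A self-contained sketch of that classification (illustrative, not from the patch):

#include <stdint.h>

/* Returns 0 for a full GUE header, 4/6 for directly encapsulated
 * IPv4/IPv6, and -1 for anything undefined or malformed.
 */
static int gue_classify(uint8_t first_octet)
{
	switch (first_octet >> 6) {		/* GUE version bits */
	case 0:
		return 0;			/* full GUE header present */
	case 1:
		switch (first_octet >> 4) {	/* inner IP version nibble */
		case 4: return 4;
		case 6: return 6;
		default: return -1;
		}
	default:
		return -1;
	}
}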
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index de1d119a4497..b798862b6be5 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -117,6 +117,7 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 		if ((*(u8 *)options & 0xF0) != 0x40)
 			hdr_len += 4;
 	}
+	tpi->hdr_len = hdr_len;
 	return hdr_len;
 }
 EXPORT_SYMBOL(gre_parse_header);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 25af1243649b..38c2c47fe0e8 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -44,6 +44,7 @@ struct inet_diag_entry {
 	u16 dport;
 	u16 family;
 	u16 userlocks;
+	u32 ifindex;
 };
 
 static DEFINE_MUTEX(inet_diag_table_mutex);
@@ -571,6 +572,14 @@ static int inet_diag_bc_run(const struct nlattr *_bc,
 			yes = 0;
 			break;
 		}
+		case INET_DIAG_BC_DEV_COND: {
+			u32 ifindex;
+
+			ifindex = *((const u32 *)(op + 1));
+			if (ifindex != entry->ifindex)
+				yes = 0;
+			break;
+		}
 		}
 
 		if (yes) {
@@ -613,6 +622,7 @@ int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
 	entry_fill_addrs(&entry, sk);
 	entry.sport = inet->inet_num;
 	entry.dport = ntohs(inet->inet_dport);
+	entry.ifindex = sk->sk_bound_dev_if;
 	entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;
 
 	return inet_diag_bc_run(bc, &entry);
@@ -636,6 +646,17 @@ static int valid_cc(const void *bc, int len, int cc)
 	return 0;
 }
 
+/* data is u32 ifindex */
+static bool valid_devcond(const struct inet_diag_bc_op *op, int len,
+			  int *min_len)
+{
+	/* Check ifindex space. */
+	*min_len += sizeof(u32);
+	if (len < *min_len)
+		return false;
+
+	return true;
+}
 /* Validate an inet_diag_hostcond. */
 static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
 			   int *min_len)
@@ -700,6 +721,10 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
 		case INET_DIAG_BC_AUTO:
 			if (!valid_hostcond(bc, len, &min_len))
 				return -EINVAL;
 			break;
+		case INET_DIAG_BC_DEV_COND:
+			if (!valid_devcond(bc, len, &min_len))
+				return -EINVAL;
+			break;
 		case INET_DIAG_BC_S_GE:
 		case INET_DIAG_BC_S_LE:
 		case INET_DIAG_BC_D_GE:
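The new INET_DIAG_BC_DEV_COND op is driven by userspace bytecode, and the kernel side above reads a u32 ifindex placed immediately after the op header (op + 1). A hedged userspace sketch of encoding that condition, assuming the usual inet_diag convention that a failing condition jumps 4 bytes past the end of the program (no = len + 4); verify both the constant and the convention against your installed UAPI headers:

#include <linux/inet_diag.h>
#include <net/if.h>
#include <stdint.h>
#include <string.h>

/* Build a one-op program: "socket is bound to ifname". Returns the
 * program length in bytes, or 0 if the interface does not exist.
 */
static int build_dev_cond(unsigned char *buf, const char *ifname)
{
	struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)buf;
	uint32_t ifindex = if_nametoindex(ifname);
	int len = sizeof(*op) + sizeof(ifindex);

	if (!ifindex)
		return 0;

	op->code = INET_DIAG_BC_DEV_COND;
	op->yes = len;			/* match: continue after this op */
	op->no = len + 4;		/* mismatch: jump out of the program */
	memcpy(op + 1, &ifindex, sizeof(ifindex));
	return len;
}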
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 3a88b0c73797..b5e9317eaf9e 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -355,7 +355,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 {
 	struct inet_frag_queue *q;
 
-	if (frag_mem_limit(nf) > nf->high_thresh) {
+	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
 		inet_frag_schedule_worker(f);
 		return NULL;
 	}
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index cbfb1808fcc4..9f0a7b96646f 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -54,7 +54,7 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 	if (skb->ignore_df)
 		return false;
 
-	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
 		return false;
 
 	return true;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 1d000af7f561..5b1481be0282 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -138,6 +138,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info,
 	const struct iphdr *iph;
 	const int type = icmp_hdr(skb)->type;
 	const int code = icmp_hdr(skb)->code;
+	unsigned int data_len = 0;
 	struct ip_tunnel *t;
 
 	switch (type) {
@@ -163,6 +164,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info,
 	case ICMP_TIME_EXCEEDED:
 		if (code != ICMP_EXC_TTL)
 			return;
+		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
 		break;
 
 	case ICMP_REDIRECT:
@@ -181,6 +183,13 @@ static void ipgre_err(struct sk_buff *skb, u32 info,
 	if (!t)
 		return;
 
+#if IS_ENABLED(CONFIG_IPV6)
+	if (tpi->proto == htons(ETH_P_IPV6) &&
+	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
+					type, data_len))
+		return;
+#endif
+
 	if (t->parms.iph.daddr == 0 ||
 	    ipv4_is_multicast(t->parms.iph.daddr))
 		return;
@@ -837,17 +846,19 @@ out:
 	return ipgre_tunnel_validate(tb, data);
 }
 
-static void ipgre_netlink_parms(struct net_device *dev,
-				struct nlattr *data[],
-				struct nlattr *tb[],
-				struct ip_tunnel_parm *parms)
+static int ipgre_netlink_parms(struct net_device *dev,
+			       struct nlattr *data[],
+			       struct nlattr *tb[],
+			       struct ip_tunnel_parm *parms)
 {
+	struct ip_tunnel *t = netdev_priv(dev);
+
 	memset(parms, 0, sizeof(*parms));
 
 	parms->iph.protocol = IPPROTO_GRE;
 
 	if (!data)
-		return;
+		return 0;
 
 	if (data[IFLA_GRE_LINK])
 		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
@@ -876,16 +887,26 @@ static void ipgre_netlink_parms(struct net_device *dev,
 	if (data[IFLA_GRE_TOS])
 		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
 
-	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
+	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
+		if (t->ignore_df)
+			return -EINVAL;
 		parms->iph.frag_off = htons(IP_DF);
+	}
 
 	if (data[IFLA_GRE_COLLECT_METADATA]) {
-		struct ip_tunnel *t = netdev_priv(dev);
-
 		t->collect_md = true;
 		if (dev->type == ARPHRD_IPGRE)
 			dev->type = ARPHRD_NONE;
 	}
+
+	if (data[IFLA_GRE_IGNORE_DF]) {
+		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
+		    && (parms->iph.frag_off & htons(IP_DF)))
+			return -EINVAL;
+		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
+	}
+
+	return 0;
 }
 
 /* This function returns true when ENCAP attributes are present in the nl msg */
@@ -956,16 +977,19 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev,
 {
 	struct ip_tunnel_parm p;
 	struct ip_tunnel_encap ipencap;
+	int err;
 
 	if (ipgre_netlink_encap_parms(data, &ipencap)) {
 		struct ip_tunnel *t = netdev_priv(dev);
-		int err = ip_tunnel_encap_setup(t, &ipencap);
+		err = ip_tunnel_encap_setup(t, &ipencap);
 
 		if (err < 0)
 			return err;
 	}
 
-	ipgre_netlink_parms(dev, data, tb, &p);
+	err = ipgre_netlink_parms(dev, data, tb, &p);
+	if (err < 0)
+		return err;
 	return ip_tunnel_newlink(dev, tb, &p);
 }
 
@@ -974,16 +998,19 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
 {
 	struct ip_tunnel_parm p;
 	struct ip_tunnel_encap ipencap;
+	int err;
 
 	if (ipgre_netlink_encap_parms(data, &ipencap)) {
 		struct ip_tunnel *t = netdev_priv(dev);
-		int err = ip_tunnel_encap_setup(t, &ipencap);
+		err = ip_tunnel_encap_setup(t, &ipencap);
 
 		if (err < 0)
 			return err;
 	}
 
-	ipgre_netlink_parms(dev, data, tb, &p);
+	err = ipgre_netlink_parms(dev, data, tb, &p);
+	if (err < 0)
+		return err;
 	return ip_tunnel_changelink(dev, tb, &p);
 }
 
@@ -1020,6 +1047,8 @@ static size_t ipgre_get_size(const struct net_device *dev)
 		nla_total_size(2) +
 		/* IFLA_GRE_COLLECT_METADATA */
 		nla_total_size(0) +
+		/* IFLA_GRE_IGNORE_DF */
+		nla_total_size(1) +
 		0;
 }
 
@@ -1053,6 +1082,9 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 			t->encap.flags))
 		goto nla_put_failure;
 
+	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
+		goto nla_put_failure;
+
 	if (t->collect_md) {
 		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
 			goto nla_put_failure;
@@ -1080,6 +1112,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
 	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
 	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
 	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
+	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
 };
 
 static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 124bf0a66328..cbac493c913a 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -225,7 +225,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
 
 	/* common case: locally created skb or seglen is <= mtu */
 	if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
-	    skb_gso_network_seglen(skb) <= mtu)
+	    skb_gso_validate_mtu(skb, mtu))
 		return ip_finish_output2(net, sk, skb);
 
 	/* Slowpath - GSO segment length is exceeding the dst MTU.
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index d8f5e0a269f5..95649ebd2874 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -682,7 +682,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	}
 
 	df = tnl_params->frag_off;
-	if (skb->protocol == htons(ETH_P_IP))
+	if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
 		df |= (inner_iph->frag_off&htons(IP_DF));
 
 	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 7e538f71f5fb..10d728b6804c 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -293,7 +293,7 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
 	 */
 	if (ext & (1 << (INET_DIAG_DCTCPINFO - 1)) ||
 	    ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
-		memset(info, 0, sizeof(struct tcp_dctcp_info));
+		memset(&info->dctcp, 0, sizeof(info->dctcp));
 		if (inet_csk(sk)->icsk_ca_ops != &dctcp_reno) {
 			info->dctcp.dctcp_enabled = 1;
 			info->dctcp.dctcp_ce_state = (u16) ca->ce_state;
@@ -303,7 +303,7 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
 		}
 
 		*attr = INET_DIAG_DCTCPINFO;
-		return sizeof(*info);
+		return sizeof(info->dctcp);
 	}
 	return 0;
 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d6c8f4cd0800..94d4aff97523 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3115,6 +3115,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 	long ca_rtt_us = -1L;
 	struct sk_buff *skb;
 	u32 pkts_acked = 0;
+	u32 last_in_flight = 0;
 	bool rtt_update;
 	int flag = 0;
 
@@ -3154,6 +3155,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 			if (!first_ackt.v64)
 				first_ackt = last_ackt;
 
+			last_in_flight = TCP_SKB_CB(skb)->tx.in_flight;
 			reord = min(pkts_acked, reord);
 			if (!after(scb->end_seq, tp->high_seq))
 				flag |= FLAG_ORIG_SACK_ACKED;
@@ -3250,7 +3252,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
 	if (icsk->icsk_ca_ops->pkts_acked) {
 		struct ack_sample sample = { .pkts_acked = pkts_acked,
-					     .rtt_us = ca_rtt_us };
+					     .rtt_us = ca_rtt_us,
+					     .in_flight = last_in_flight };
 
 		icsk->icsk_ca_ops->pkts_acked(sk, &sample);
 	}
@@ -5159,6 +5162,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 				  const struct tcphdr *th, int syn_inerr)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	bool rst_seq_match = false;
 
 	/* RFC1323: H1. Apply PAWS check first. */
 	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
@@ -5195,13 +5199,32 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 
 	/* Step 2: check RST bit */
 	if (th->rst) {
-		/* RFC 5961 3.2 :
-		 * If sequence number exactly matches RCV.NXT, then
+		/* RFC 5961 3.2 (extend to match against SACK too if available):
+		 * If seq num matches RCV.NXT or the right-most SACK block,
+		 * then
 		 *     RESET the connection
 		 * else
 		 *     Send a challenge ACK
 		 */
-		if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt)
+		if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
+			rst_seq_match = true;
+		} else if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) {
+			struct tcp_sack_block *sp = &tp->selective_acks[0];
+			int max_sack = sp[0].end_seq;
+			int this_sack;
+
+			for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;
+			     ++this_sack) {
+				max_sack = after(sp[this_sack].end_seq,
+						 max_sack) ?
+					sp[this_sack].end_seq : max_sack;
+			}
+
+			if (TCP_SKB_CB(skb)->seq == max_sack)
+				rst_seq_match = true;
+		}
+
+		if (rst_seq_match)
 			tcp_reset(sk);
 		else
 			tcp_send_challenge_ack(sk, skb);
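The acceptance rule implemented above can be stated in isolation: honour a RST when its sequence number equals rcv_nxt or the right-most (highest) SACK edge the receiver advertised; anything else gets a challenge ACK per RFC 5961. A standalone sketch of that predicate, with the wraparound compare modelled on the kernel's after():

#include <stdbool.h>
#include <stdint.h>

static bool seq_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;	/* 32-bit wraparound-safe "a > b" */
}

static bool rst_seq_matches(uint32_t rst_seq, uint32_t rcv_nxt,
			    const uint32_t *sack_end_seq, int num_sacks)
{
	uint32_t max_sack;
	int i;

	if (rst_seq == rcv_nxt)
		return true;
	if (num_sacks <= 0)
		return false;

	max_sack = sack_end_seq[0];
	for (i = 1; i < num_sacks; i++)
		if (seq_after(sack_end_seq[i], max_sack))
			max_sack = sack_end_seq[i];

	return rst_seq == max_sack;
}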
diff --git a/net/ipv4/tcp_nv.c b/net/ipv4/tcp_nv.c
new file mode 100644
index 000000000000..5de82a8d4d87
--- /dev/null
+++ b/net/ipv4/tcp_nv.c
@@ -0,0 +1,476 @@
+/*
+ * TCP NV: TCP with Congestion Avoidance
+ *
+ * TCP-NV is a successor of TCP-Vegas that has been developed to
+ * deal with the issues that occur in modern networks.
+ * Like TCP-Vegas, TCP-NV supports true congestion avoidance,
+ * the ability to detect congestion before packet losses occur.
+ * When congestion (queue buildup) starts to occur, TCP-NV
+ * predicts what the cwnd size should be for the current
+ * throughput and it reduces the cwnd proportionally to
+ * the difference between the current cwnd and the predicted cwnd.
+ *
+ * NV is only recommended for traffic within a data center, and when
+ * all the flows are NV (at least those within the data center). This
+ * is due to the inherent unfairness between flows using losses to
+ * detect congestion (congestion control) and those that use queue
+ * buildup to detect congestion (congestion avoidance).
+ *
+ * Note: High NIC coalescence values may lower the performance of NV
+ * due to the increased noise in RTT values. In particular, we have
+ * seen issues with rx-frames values greater than 8.
+ *
+ * TODO:
+ * 1) Add mechanism to deal with reverse congestion.
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/math64.h>
+#include <net/tcp.h>
+#include <linux/inet_diag.h>
+
+/* TCP NV parameters
+ *
+ * nv_pad		Max number of queued packets allowed in network
+ * nv_pad_buffer	Do not grow cwnd if this close to nv_pad
+ * nv_reset_period	How often (in seconds) to reset min_rtt
+ * nv_min_cwnd		Don't decrease cwnd below this if there are no losses
+ * nv_cong_dec_mult	Decrease cwnd by X% (30%) of congestion when detected
+ * nv_ssthresh_factor	On congestion set ssthresh to this * <desired cwnd> / 8
+ * nv_rtt_factor	RTT averaging factor
+ * nv_loss_dec_factor	Decrease cwnd by this (50%) when losses occur
+ * nv_dec_eval_min_calls	Wait this many RTT measurements before dec cwnd
+ * nv_inc_eval_min_calls	Wait this many RTT measurements before inc cwnd
+ * nv_ssthresh_eval_min_calls	Wait this many RTT measurements before stopping
+ *				slow-start due to congestion
+ * nv_stop_rtt_cnt	Only grow cwnd for this many RTTs after non-congestion
+ * nv_rtt_min_cnt	Wait this many RTTs before making a congestion decision
+ * nv_cwnd_growth_rate_neg
+ * nv_cwnd_growth_rate_pos
+ *	How quickly to double growth rate (not rate) of cwnd when not
+ *	congested. One value (nv_cwnd_growth_rate_neg) for when
+ *	rate < 1 pkt/RTT (after losses). The other (nv_cwnd_growth_rate_pos)
+ *	otherwise.
+ */
+
+static int nv_pad __read_mostly = 10;
+static int nv_pad_buffer __read_mostly = 2;
+static int nv_reset_period __read_mostly = 5; /* in seconds */
+static int nv_min_cwnd __read_mostly = 2;
+static int nv_cong_dec_mult __read_mostly = 30 * 128 / 100; /* = 30% */
+static int nv_ssthresh_factor __read_mostly = 8; /* = 1 */
+static int nv_rtt_factor __read_mostly = 128; /* = 1/2*old + 1/2*new */
+static int nv_loss_dec_factor __read_mostly = 512; /* => 50% */
+static int nv_cwnd_growth_rate_neg __read_mostly = 8;
+static int nv_cwnd_growth_rate_pos __read_mostly; /* 0 => fixed like Reno */
+static int nv_dec_eval_min_calls __read_mostly = 60;
+static int nv_inc_eval_min_calls __read_mostly = 20;
+static int nv_ssthresh_eval_min_calls __read_mostly = 30;
+static int nv_stop_rtt_cnt __read_mostly = 10;
+static int nv_rtt_min_cnt __read_mostly = 2;
+
+module_param(nv_pad, int, 0644);
+MODULE_PARM_DESC(nv_pad, "max queued packets allowed in network");
+module_param(nv_reset_period, int, 0644);
+MODULE_PARM_DESC(nv_reset_period, "nv_min_rtt reset period (secs)");
+module_param(nv_min_cwnd, int, 0644);
+MODULE_PARM_DESC(nv_min_cwnd, "NV will not decrease cwnd below this value"
+		 " without losses");
+
+/* TCP NV Parameters */
+struct tcpnv {
+	unsigned long nv_min_rtt_reset_jiffies;	/* when to switch to
+						 * nv_min_rtt_new */
+	s8  cwnd_growth_factor;	/* Current cwnd growth factor,
+				 * < 0 => less than 1 packet/RTT */
+	u8  available8;
+	u16 available16;
+	u32 loss_cwnd;	/* cwnd at last loss */
+	u8  nv_allow_cwnd_growth:1,	/* whether cwnd can grow */
+	    nv_reset:1,			/* whether to reset values */
+	    nv_catchup:1;		/* whether we are growing because
+					 * of temporary cwnd decrease */
+	u8  nv_eval_call_cnt;	/* call count since last eval */
+	u8  nv_min_cwnd;	/* nv won't make a ca decision if cwnd is
+				 * smaller than this. It may grow to handle
+				 * TSO, LRO and interrupt coalescence because
+				 * with these a small cwnd cannot saturate
+				 * the link. Note that this is different from
+				 * the file local nv_min_cwnd */
+	u8  nv_rtt_cnt;		/* RTTs without making ca decision */
+	u32 nv_last_rtt;	/* last rtt */
+	u32 nv_min_rtt;		/* active min rtt. Used to determine slope */
+	u32 nv_min_rtt_new;	/* min rtt for future use */
+	u32 nv_rtt_max_rate;	/* max rate seen during current RTT */
+	u32 nv_rtt_start_seq;	/* current RTT ends when packet arrives
+				 * acking beyond nv_rtt_start_seq */
+	u32 nv_last_snd_una;	/* Previous value of tp->snd_una. It is
+				 * used to determine bytes acked since last
+				 * call to bictcp_acked */
+	u32 nv_no_cong_cnt;	/* Consecutive no congestion decisions */
+};
+
+#define NV_INIT_RTT	  U32_MAX
+#define NV_MIN_CWND	  4
+#define NV_MIN_CWND_GROW  2
+#define NV_TSO_CWND_BOUND 80
+
+static inline void tcpnv_reset(struct tcpnv *ca, struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	ca->nv_reset = 0;
+	ca->loss_cwnd = 0;
+	ca->nv_no_cong_cnt = 0;
+	ca->nv_rtt_cnt = 0;
+	ca->nv_last_rtt = 0;
+	ca->nv_rtt_max_rate = 0;
+	ca->nv_rtt_start_seq = tp->snd_una;
+	ca->nv_eval_call_cnt = 0;
+	ca->nv_last_snd_una = tp->snd_una;
+}
+
+static void tcpnv_init(struct sock *sk)
+{
+	struct tcpnv *ca = inet_csk_ca(sk);
+
+	tcpnv_reset(ca, sk);
+
+	ca->nv_allow_cwnd_growth = 1;
+	ca->nv_min_rtt_reset_jiffies = jiffies + 2 * HZ;
+	ca->nv_min_rtt = NV_INIT_RTT;
+	ca->nv_min_rtt_new = NV_INIT_RTT;
+	ca->nv_min_cwnd = NV_MIN_CWND;
+	ca->nv_catchup = 0;
+	ca->cwnd_growth_factor = 0;
+}
+
+static void tcpnv_cong_avoid(struct sock *sk, u32 ack, u32 acked)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpnv *ca = inet_csk_ca(sk);
+	u32 cnt;
+
+	if (!tcp_is_cwnd_limited(sk))
+		return;
+
+	/* Only grow cwnd if NV has not detected congestion */
+	if (!ca->nv_allow_cwnd_growth)
+		return;
+
+	if (tcp_in_slow_start(tp)) {
+		acked = tcp_slow_start(tp, acked);
+		if (!acked)
+			return;
+	}
+
+	if (ca->cwnd_growth_factor < 0) {
+		cnt = tp->snd_cwnd << -ca->cwnd_growth_factor;
+		tcp_cong_avoid_ai(tp, cnt, acked);
+	} else {
+		cnt = max(4U, tp->snd_cwnd >> ca->cwnd_growth_factor);
+		tcp_cong_avoid_ai(tp, cnt, acked);
+	}
+}
+
+static u32 tcpnv_recalc_ssthresh(struct sock *sk)
+{
+	const struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpnv *ca = inet_csk_ca(sk);
+
+	ca->loss_cwnd = tp->snd_cwnd;
+	return max((tp->snd_cwnd * nv_loss_dec_factor) >> 10, 2U);
+}
+
+static u32 tcpnv_undo_cwnd(struct sock *sk)
+{
+	struct tcpnv *ca = inet_csk_ca(sk);
+
+	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
+}
+
+static void tcpnv_state(struct sock *sk, u8 new_state)
+{
+	struct tcpnv *ca = inet_csk_ca(sk);
+
+	if (new_state == TCP_CA_Open && ca->nv_reset) {
+		tcpnv_reset(ca, sk);
+	} else if (new_state == TCP_CA_Loss || new_state == TCP_CA_CWR ||
+		new_state == TCP_CA_Recovery) {
+		ca->nv_reset = 1;
+		ca->nv_allow_cwnd_growth = 0;
+		if (new_state == TCP_CA_Loss) {
+			/* Reset cwnd growth factor to Reno value */
+			if (ca->cwnd_growth_factor > 0)
+				ca->cwnd_growth_factor = 0;
+			/* Decrease growth rate if allowed */
+			if (nv_cwnd_growth_rate_neg > 0 &&
+			    ca->cwnd_growth_factor > -8)
+				ca->cwnd_growth_factor--;
+		}
+	}
+}
+
+/* Do congestion avoidance calculations for TCP-NV
+ */
+static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
+{
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpnv *ca = inet_csk_ca(sk);
+	unsigned long now = jiffies;
+	s64 rate64 = 0;
+	u32 rate, max_win, cwnd_by_slope;
+	u32 avg_rtt;
+	u32 bytes_acked = 0;
+
+	/* Some calls are for duplicates without timestamps */
+	if (sample->rtt_us < 0)
+		return;
+
+	/* If not in TCP_CA_Open or TCP_CA_Disorder states, skip. */
+	if (icsk->icsk_ca_state != TCP_CA_Open &&
+	    icsk->icsk_ca_state != TCP_CA_Disorder)
+		return;
+
+	/* Stop cwnd growth if we were in catch up mode */
+	if (ca->nv_catchup && tp->snd_cwnd >= nv_min_cwnd) {
+		ca->nv_catchup = 0;
+		ca->nv_allow_cwnd_growth = 0;
+	}
+
+	bytes_acked = tp->snd_una - ca->nv_last_snd_una;
+	ca->nv_last_snd_una = tp->snd_una;
+
+	if (sample->in_flight == 0)
+		return;
+
+	/* Calculate moving average of RTT */
+	if (nv_rtt_factor > 0) {
+		if (ca->nv_last_rtt > 0) {
+			avg_rtt = (((u64)sample->rtt_us) * nv_rtt_factor +
+				   ((u64)ca->nv_last_rtt)
+				   * (256 - nv_rtt_factor)) >> 8;
+		} else {
+			avg_rtt = sample->rtt_us;
+			ca->nv_min_rtt = avg_rtt << 1;
+		}
+		ca->nv_last_rtt = avg_rtt;
+	} else {
+		avg_rtt = sample->rtt_us;
+	}
+
+	/* rate in 100's bits per second */
+	rate64 = ((u64)sample->in_flight) * 8000000;
+	rate = (u32)div64_u64(rate64, (u64)(avg_rtt * 100));
+
+	/* Remember the maximum rate seen during this RTT
+	 * Note: It may be more than one RTT. This function should be
+	 * called at least nv_dec_eval_min_calls times.
+	 */
+	if (ca->nv_rtt_max_rate < rate)
+		ca->nv_rtt_max_rate = rate;
+
+	/* We have valid information, increment counter */
+	if (ca->nv_eval_call_cnt < 255)
+		ca->nv_eval_call_cnt++;
+
+	/* update min rtt if necessary */
+	if (avg_rtt < ca->nv_min_rtt)
+		ca->nv_min_rtt = avg_rtt;
+
+	/* update future min_rtt if necessary */
+	if (avg_rtt < ca->nv_min_rtt_new)
+		ca->nv_min_rtt_new = avg_rtt;
+
+	/* nv_min_rtt is updated with the minimum (possibly averaged) rtt
+	 * seen in the last sysctl_tcp_nv_reset_period seconds (i.e. a
+	 * warm reset). This new nv_min_rtt will continue to be updated
+	 * and be used for another sysctl_tcp_nv_reset_period seconds,
+	 * when it will be updated again.
+	 * In practice we introduce some randomness, so the actual period used
+	 * is chosen randomly from the range:
+	 * [sysctl_tcp_nv_reset_period*3/4, sysctl_tcp_nv_reset_period*5/4)
+	 */
+	if (time_after_eq(now, ca->nv_min_rtt_reset_jiffies)) {
+		unsigned char rand;
+
+		ca->nv_min_rtt = ca->nv_min_rtt_new;
+		ca->nv_min_rtt_new = NV_INIT_RTT;
+		get_random_bytes(&rand, 1);
+		ca->nv_min_rtt_reset_jiffies =
+			now + ((nv_reset_period * (384 + rand) * HZ) >> 9);
+		/* Every so often we decrease ca->nv_min_cwnd in case previous
+		 * value is no longer accurate.
+		 */
+		ca->nv_min_cwnd = max(ca->nv_min_cwnd / 2, NV_MIN_CWND);
+	}
+
+	/* Once per RTT check if we need to do congestion avoidance */
+	if (before(ca->nv_rtt_start_seq, tp->snd_una)) {
+		ca->nv_rtt_start_seq = tp->snd_nxt;
+		if (ca->nv_rtt_cnt < 0xff)
+			/* Increase counter for RTTs without CA decision */
+			ca->nv_rtt_cnt++;
+
+		/* If this function is only called once within an RTT
+		 * the cwnd is probably too small (in some cases due to
+		 * tso, lro or interrupt coalescence), so we increase
+		 * ca->nv_min_cwnd.
+		 */
+		if (ca->nv_eval_call_cnt == 1 &&
+		    bytes_acked >= (ca->nv_min_cwnd - 1) * tp->mss_cache &&
+		    ca->nv_min_cwnd < (NV_TSO_CWND_BOUND + 1)) {
+			ca->nv_min_cwnd = min(ca->nv_min_cwnd
+					      + NV_MIN_CWND_GROW,
+					      NV_TSO_CWND_BOUND + 1);
+			ca->nv_rtt_start_seq = tp->snd_nxt +
+				ca->nv_min_cwnd * tp->mss_cache;
+			ca->nv_eval_call_cnt = 0;
+			ca->nv_allow_cwnd_growth = 1;
+			return;
+		}
+
+		/* Find the ideal cwnd for current rate from slope
+		 * slope = 80000.0 * mss / nv_min_rtt
+		 * cwnd_by_slope = nv_rtt_max_rate / slope
+		 */
+		cwnd_by_slope = (u32)
+			div64_u64(((u64)ca->nv_rtt_max_rate) * ca->nv_min_rtt,
+				  (u64)(80000 * tp->mss_cache));
+		max_win = cwnd_by_slope + nv_pad;
+
+		/* If cwnd > max_win, decrease cwnd
+		 * if cwnd < max_win, grow cwnd
+		 * else leave the same
+		 */
+		if (tp->snd_cwnd > max_win) {
+			/* there is congestion, check that it is ok
+			 * to make a CA decision
+			 * 1. We should have at least nv_dec_eval_min_calls
+			 *    data points before making a CA decision
+			 * 2. We only make a congestion decision after
+			 *    nv_rtt_min_cnt RTTs
+			 */
+			if (ca->nv_rtt_cnt < nv_rtt_min_cnt) {
+				return;
+			} else if (tp->snd_ssthresh == TCP_INFINITE_SSTHRESH) {
+				if (ca->nv_eval_call_cnt <
+				    nv_ssthresh_eval_min_calls)
+					return;
+				/* otherwise we will decrease cwnd */
+			} else if (ca->nv_eval_call_cnt <
+				   nv_dec_eval_min_calls) {
+				if (ca->nv_allow_cwnd_growth &&
+				    ca->nv_rtt_cnt > nv_stop_rtt_cnt)
+					ca->nv_allow_cwnd_growth = 0;
+				return;
+			}
+
+			/* We have enough data to determine we are congested */
+			ca->nv_allow_cwnd_growth = 0;
+			tp->snd_ssthresh =
+				(nv_ssthresh_factor * max_win) >> 3;
+			if (tp->snd_cwnd - max_win > 2) {
+				/* gap > 2, we do exponential cwnd decrease */
+				int dec;
+
+				dec = max(2U, ((tp->snd_cwnd - max_win) *
+					       nv_cong_dec_mult) >> 7);
+				tp->snd_cwnd -= dec;
+			} else if (nv_cong_dec_mult > 0) {
+				tp->snd_cwnd = max_win;
+			}
+			if (ca->cwnd_growth_factor > 0)
+				ca->cwnd_growth_factor = 0;
+			ca->nv_no_cong_cnt = 0;
+		} else if (tp->snd_cwnd <= max_win - nv_pad_buffer) {
+			/* There is no congestion, grow cwnd if allowed */
+			if (ca->nv_eval_call_cnt < nv_inc_eval_min_calls)
+				return;
+
+			ca->nv_allow_cwnd_growth = 1;
+			ca->nv_no_cong_cnt++;
+			if (ca->cwnd_growth_factor < 0 &&
+			    nv_cwnd_growth_rate_neg > 0 &&
+			    ca->nv_no_cong_cnt > nv_cwnd_growth_rate_neg) {
+				ca->cwnd_growth_factor++;
+				ca->nv_no_cong_cnt = 0;
+			} else if (ca->cwnd_growth_factor >= 0 &&
+				   nv_cwnd_growth_rate_pos > 0 &&
+				   ca->nv_no_cong_cnt >
+				   nv_cwnd_growth_rate_pos) {
+				ca->cwnd_growth_factor++;
+				ca->nv_no_cong_cnt = 0;
+			}
+		} else {
+			/* cwnd is in-between, so do nothing */
+			return;
+		}
+
+		/* update state */
+		ca->nv_eval_call_cnt = 0;
+		ca->nv_rtt_cnt = 0;
+		ca->nv_rtt_max_rate = 0;
+
+		/* Don't want to make cwnd < nv_min_cwnd
+		 * (it wasn't before, if it is now is because nv
+		 * decreased it).
+		 */
+		if (tp->snd_cwnd < nv_min_cwnd)
+			tp->snd_cwnd = nv_min_cwnd;
+	}
+}
+
+/* Extract info for TCP socket info provided via netlink */
+size_t tcpnv_get_info(struct sock *sk, u32 ext, int *attr,
+		      union tcp_cc_info *info)
+{
+	const struct tcpnv *ca = inet_csk_ca(sk);
+
+	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
+		info->vegas.tcpv_enabled = 1;
+		info->vegas.tcpv_rttcnt = ca->nv_rtt_cnt;
+		info->vegas.tcpv_rtt = ca->nv_last_rtt;
+		info->vegas.tcpv_minrtt = ca->nv_min_rtt;
+
+		*attr = INET_DIAG_VEGASINFO;
+		return sizeof(struct tcpvegas_info);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tcpnv_get_info);
+
+static struct tcp_congestion_ops tcpnv __read_mostly = {
+	.init		= tcpnv_init,
+	.ssthresh	= tcpnv_recalc_ssthresh,
+	.cong_avoid	= tcpnv_cong_avoid,
+	.set_state	= tcpnv_state,
+	.undo_cwnd	= tcpnv_undo_cwnd,
+	.pkts_acked	= tcpnv_acked,
+	.get_info	= tcpnv_get_info,
+
+	.owner		= THIS_MODULE,
+	.name		= "nv",
+};
+
+static int __init tcpnv_register(void)
+{
+	BUILD_BUG_ON(sizeof(struct tcpnv) > ICSK_CA_PRIV_SIZE);
+
+	return tcp_register_congestion_control(&tcpnv);
+}
+
+static void __exit tcpnv_unregister(void)
+{
+	tcp_unregister_congestion_control(&tcpnv);
+}
+
+module_init(tcpnv_register);
+module_exit(tcpnv_unregister);
+
+MODULE_AUTHOR("Lawrence Brakmo");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TCP NV");
+MODULE_VERSION("1.0");
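The module registers under the name "nv", so it can also be picked per connection with the standard TCP_CONGESTION socket option. A minimal user-space example (setsockopt fails with ENOENT when the module is not available on the running kernel):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	const char *ca = "nv";

	if (fd < 0)
		return 1;
	/* ask for TCP-NV on this socket only */
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, ca, strlen(ca)) < 0)
		perror("setsockopt(TCP_CONGESTION)");
	return 0;
}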
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e00e972c4e6a..b26aa870adc0 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -911,9 +911,12 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	int err;
 
 	BUG_ON(!skb || !tcp_skb_pcount(skb));
+	tp = tcp_sk(sk);
 
 	if (clone_it) {
 		skb_mstamp_get(&skb->skb_mstamp);
+		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
+			- tp->snd_una;
 
 		if (unlikely(skb_cloned(skb)))
 			skb = pskb_copy(skb, gfp_mask);
@@ -924,7 +927,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	}
 
 	inet = inet_sk(sk);
-	tp = tcp_sk(sk);
 	tcb = TCP_SKB_CB(skb);
 	memset(&opts, 0, sizeof(opts));
 
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index 47f12c73d959..58bd39fb14b4 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -76,6 +76,67 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
 }
 EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);
 
+void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
+			     unsigned short type)
+{
+	struct sock *sk = sock->sk;
+	struct udp_tunnel_info ti;
+
+	if (!dev->netdev_ops->ndo_udp_tunnel_add)
+		return;
+
+	ti.type = type;
+	ti.sa_family = sk->sk_family;
+	ti.port = inet_sk(sk)->inet_sport;
+
+	dev->netdev_ops->ndo_udp_tunnel_add(dev, &ti);
+}
+EXPORT_SYMBOL_GPL(udp_tunnel_push_rx_port);
+
+/* Notify netdevs that UDP port started listening */
+void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type)
+{
+	struct sock *sk = sock->sk;
+	struct net *net = sock_net(sk);
+	struct udp_tunnel_info ti;
+	struct net_device *dev;
+
+	ti.type = type;
+	ti.sa_family = sk->sk_family;
+	ti.port = inet_sk(sk)->inet_sport;
+
+	rcu_read_lock();
+	for_each_netdev_rcu(net, dev) {
+		if (!dev->netdev_ops->ndo_udp_tunnel_add)
+			continue;
+		dev->netdev_ops->ndo_udp_tunnel_add(dev, &ti);
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(udp_tunnel_notify_add_rx_port);
+
+/* Notify netdevs that UDP port is no longer listening */
+void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type)
+{
+	struct sock *sk = sock->sk;
+	struct net *net = sock_net(sk);
+	struct udp_tunnel_info ti;
+	struct net_device *dev;
+
+	ti.type = type;
+	ti.sa_family = sk->sk_family;
+	ti.port = inet_sk(sk)->inet_sport;
+
+	rcu_read_lock();
+	for_each_netdev_rcu(net, dev) {
+		if (!dev->netdev_ops->ndo_udp_tunnel_del)
+			continue;
+		dev->netdev_ops->ndo_udp_tunnel_del(dev, &ti);
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(udp_tunnel_notify_del_rx_port);
+
 void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
 			 __be32 src, __be32 dst, __u8 tos, __u8 ttl,
 			 __be16 df, __be16 src_port, __be16 dst_port,
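On the receiving end of these notifications a NIC driver implements ndo_udp_tunnel_add/ndo_udp_tunnel_del and gets the udp_tunnel_info built above: the tunnel type, the socket's address family, and the listening port still in network byte order (taken from inet_sport). A hypothetical driver-side stub; every foodrv_* name is invented for illustration:

static void foodrv_udp_tunnel_add(struct net_device *dev,
				  struct udp_tunnel_info *ti)
{
	struct foodrv_priv *priv = netdev_priv(dev);

	if (ti->sa_family != AF_INET && ti->sa_family != AF_INET6)
		return;

	/* ti->port is big-endian, exactly as taken from inet_sport */
	foodrv_hw_add_rx_port(priv, ti->type, ti->port);
}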
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 7b0edb37a115..b644a23c3db0 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -295,7 +295,7 @@ static struct ctl_table xfrm4_policy_table[] = {
 	{ }
 };
 
-static int __net_init xfrm4_net_sysctl_init(struct net *net)
+static __net_init int xfrm4_net_sysctl_init(struct net *net)
 {
 	struct ctl_table *table;
 	struct ctl_table_header *hdr;
@@ -323,7 +323,7 @@ err_alloc:
 	return -ENOMEM;
 }
 
-static void __net_exit xfrm4_net_sysctl_exit(struct net *net)
+static __net_exit void xfrm4_net_sysctl_exit(struct net *net)
 {
 	struct ctl_table *table;
 
@@ -336,12 +336,12 @@ static void __net_exit xfrm4_net_sysctl_exit(struct net *net)
 	kfree(table);
 }
 #else /* CONFIG_SYSCTL */
-static int inline xfrm4_net_sysctl_init(struct net *net)
+static inline int xfrm4_net_sysctl_init(struct net *net)
 {
 	return 0;
 }
 
-static void inline xfrm4_net_sysctl_exit(struct net *net)
+static inline void xfrm4_net_sysctl_exit(struct net *net)
 {
 }
 #endif
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 47f837a58e0a..a1f6b7b31531 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1524,6 +1524,28 @@ out:
1524 return hiscore_idx; 1524 return hiscore_idx;
1525} 1525}
1526 1526
1527static int ipv6_get_saddr_master(struct net *net,
1528 const struct net_device *dst_dev,
1529 const struct net_device *master,
1530 struct ipv6_saddr_dst *dst,
1531 struct ipv6_saddr_score *scores,
1532 int hiscore_idx)
1533{
1534 struct inet6_dev *idev;
1535
1536 idev = __in6_dev_get(dst_dev);
1537 if (idev)
1538 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1539 scores, hiscore_idx);
1540
1541 idev = __in6_dev_get(master);
1542 if (idev)
1543 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1544 scores, hiscore_idx);
1545
1546 return hiscore_idx;
1547}
1548
1527int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev, 1549int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
1528 const struct in6_addr *daddr, unsigned int prefs, 1550 const struct in6_addr *daddr, unsigned int prefs,
1529 struct in6_addr *saddr) 1551 struct in6_addr *saddr)
@@ -1577,13 +1599,39 @@ int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
1577 if (idev) 1599 if (idev)
1578 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx); 1600 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1579 } else { 1601 } else {
1602 const struct net_device *master;
1603 int master_idx = 0;
1604
1605 /* if dst_dev exists and is enslaved to an L3 device, then
1606 * prefer addresses from dst_dev and then the master over
1607 * any other enslaved devices in the L3 domain.
1608 */
1609 master = l3mdev_master_dev_rcu(dst_dev);
1610 if (master) {
1611 master_idx = master->ifindex;
1612
1613 hiscore_idx = ipv6_get_saddr_master(net, dst_dev,
1614 master, &dst,
1615 scores, hiscore_idx);
1616
1617 if (scores[hiscore_idx].ifa)
1618 goto out;
1619 }
1620
1580 for_each_netdev_rcu(net, dev) { 1621 for_each_netdev_rcu(net, dev) {
1622 /* only consider addresses on devices in the
1623 * same L3 domain
1624 */
1625 if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1626 continue;
1581 idev = __in6_dev_get(dev); 1627 idev = __in6_dev_get(dev);
1582 if (!idev) 1628 if (!idev)
1583 continue; 1629 continue;
1584 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx); 1630 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1585 } 1631 }
1586 } 1632 }
1633
1634out:
1587 rcu_read_unlock(); 1635 rcu_read_unlock();
1588 1636
1589 hiscore = &scores[hiscore_idx]; 1637 hiscore = &scores[hiscore_idx];
@@ -2254,7 +2302,7 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2254 return ERR_PTR(-EACCES); 2302 return ERR_PTR(-EACCES);
2255 2303
2256 /* Add default multicast route */ 2304 /* Add default multicast route */
2257 if (!(dev->flags & IFF_LOOPBACK)) 2305 if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev))
2258 addrconf_add_mroute(dev); 2306 addrconf_add_mroute(dev);
2259 2307
2260 return idev; 2308 return idev;
@@ -2333,12 +2381,109 @@ static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
2333 idev->addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM; 2381 idev->addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
2334} 2382}
2335 2383
2384int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
2385 const struct prefix_info *pinfo,
2386 struct inet6_dev *in6_dev,
2387 const struct in6_addr *addr, int addr_type,
2388 u32 addr_flags, bool sllao, bool tokenized,
2389 __u32 valid_lft, u32 prefered_lft)
2390{
2391 struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
2392 int create = 0, update_lft = 0;
2393
2394 if (!ifp && valid_lft) {
2395 int max_addresses = in6_dev->cnf.max_addresses;
2396
2397#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2398 if (in6_dev->cnf.optimistic_dad &&
2399 !net->ipv6.devconf_all->forwarding && sllao)
2400 addr_flags |= IFA_F_OPTIMISTIC;
2401#endif
2402
2403 /* Do not allow creation of too many autoconfigured
2404 * addresses; that would be an easy way to crash the kernel.
2405 */
2406 if (!max_addresses ||
2407 ipv6_count_addresses(in6_dev) < max_addresses)
2408 ifp = ipv6_add_addr(in6_dev, addr, NULL,
2409 pinfo->prefix_len,
2410 addr_type&IPV6_ADDR_SCOPE_MASK,
2411 addr_flags, valid_lft,
2412 prefered_lft);
2413
2414 if (IS_ERR_OR_NULL(ifp))
2415 return -1;
2416
2417 update_lft = 0;
2418 create = 1;
2419 spin_lock_bh(&ifp->lock);
2420 ifp->flags |= IFA_F_MANAGETEMPADDR;
2421 ifp->cstamp = jiffies;
2422 ifp->tokenized = tokenized;
2423 spin_unlock_bh(&ifp->lock);
2424 addrconf_dad_start(ifp);
2425 }
2426
2427 if (ifp) {
2428 u32 flags;
2429 unsigned long now;
2430 u32 stored_lft;
2431
2432 /* update lifetime (RFC2462 5.5.3 e) */
2433 spin_lock_bh(&ifp->lock);
2434 now = jiffies;
2435 if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
2436 stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
2437 else
2438 stored_lft = 0;
2439 if (!update_lft && !create && stored_lft) {
2440 const u32 minimum_lft = min_t(u32,
2441 stored_lft, MIN_VALID_LIFETIME);
2442 valid_lft = max(valid_lft, minimum_lft);
2443
2444 /* RFC4862 Section 5.5.3e:
2445 * "Note that the preferred lifetime of the
2446 * corresponding address is always reset to
2447 * the Preferred Lifetime in the received
2448 * Prefix Information option, regardless of
2449 * whether the valid lifetime is also reset or
2450 * ignored."
2451 *
2452 * So we should always update prefered_lft here.
2453 */
2454 update_lft = 1;
2455 }
2456
2457 if (update_lft) {
2458 ifp->valid_lft = valid_lft;
2459 ifp->prefered_lft = prefered_lft;
2460 ifp->tstamp = now;
2461 flags = ifp->flags;
2462 ifp->flags &= ~IFA_F_DEPRECATED;
2463 spin_unlock_bh(&ifp->lock);
2464
2465 if (!(flags&IFA_F_TENTATIVE))
2466 ipv6_ifa_notify(0, ifp);
2467 } else
2468 spin_unlock_bh(&ifp->lock);
2469
2470 manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
2471 create, now);
2472
2473 in6_ifa_put(ifp);
2474 addrconf_verify();
2475 }
2476
2477 return 0;
2478}
2479EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
2480
2336void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) 2481void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2337{ 2482{
2338 struct prefix_info *pinfo; 2483 struct prefix_info *pinfo;
2339 __u32 valid_lft; 2484 __u32 valid_lft;
2340 __u32 prefered_lft; 2485 __u32 prefered_lft;
2341 int addr_type; 2486 int addr_type, err;
2342 u32 addr_flags = 0; 2487 u32 addr_flags = 0;
2343 struct inet6_dev *in6_dev; 2488 struct inet6_dev *in6_dev;
2344 struct net *net = dev_net(dev); 2489 struct net *net = dev_net(dev);
@@ -2432,10 +2577,8 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2432 /* Try to figure out our local address for this prefix */ 2577 /* Try to figure out our local address for this prefix */
2433 2578
2434 if (pinfo->autoconf && in6_dev->cnf.autoconf) { 2579 if (pinfo->autoconf && in6_dev->cnf.autoconf) {
2435 struct inet6_ifaddr *ifp;
2436 struct in6_addr addr; 2580 struct in6_addr addr;
2437 int create = 0, update_lft = 0; 2581 bool tokenized = false, dev_addr_generated = false;
2438 bool tokenized = false;
2439 2582
2440 if (pinfo->prefix_len == 64) { 2583 if (pinfo->prefix_len == 64) {
2441 memcpy(&addr, &pinfo->prefix, 8); 2584 memcpy(&addr, &pinfo->prefix, 8);
@@ -2453,106 +2596,36 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2453 goto ok; 2596 goto ok;
2454 } else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) && 2597 } else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
2455 ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) { 2598 ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
2456 in6_dev_put(in6_dev); 2599 goto put;
2457 return; 2600 } else {
2601 dev_addr_generated = true;
2458 } 2602 }
2459 goto ok; 2603 goto ok;
2460 } 2604 }
2461 net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n", 2605 net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
2462 pinfo->prefix_len); 2606 pinfo->prefix_len);
2463 in6_dev_put(in6_dev); 2607 goto put;
2464 return;
2465 2608
2466ok: 2609ok:
2610 err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
2611 &addr, addr_type,
2612 addr_flags, sllao,
2613 tokenized, valid_lft,
2614 prefered_lft);
2615 if (err)
2616 goto put;
2467 2617
2468 ifp = ipv6_get_ifaddr(net, &addr, dev, 1); 2618 /* Ignore errors here because the earlier prefix add-addr
2469 2619 * succeeded and will be notified.
2470 if (!ifp && valid_lft) { 2620 */
2471 int max_addresses = in6_dev->cnf.max_addresses; 2621 ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr,
2472 2622 addr_type, addr_flags, sllao,
2473#ifdef CONFIG_IPV6_OPTIMISTIC_DAD 2623 tokenized, valid_lft,
2474 if (in6_dev->cnf.optimistic_dad && 2624 prefered_lft,
2475 !net->ipv6.devconf_all->forwarding && sllao) 2625 dev_addr_generated);
2476 addr_flags |= IFA_F_OPTIMISTIC;
2477#endif
2478
2479 /* Do not allow to create too much of autoconfigured
2480 * addresses; this would be too easy way to crash kernel.
2481 */
2482 if (!max_addresses ||
2483 ipv6_count_addresses(in6_dev) < max_addresses)
2484 ifp = ipv6_add_addr(in6_dev, &addr, NULL,
2485 pinfo->prefix_len,
2486 addr_type&IPV6_ADDR_SCOPE_MASK,
2487 addr_flags, valid_lft,
2488 prefered_lft);
2489
2490 if (IS_ERR_OR_NULL(ifp)) {
2491 in6_dev_put(in6_dev);
2492 return;
2493 }
2494
2495 update_lft = 0;
2496 create = 1;
2497 spin_lock_bh(&ifp->lock);
2498 ifp->flags |= IFA_F_MANAGETEMPADDR;
2499 ifp->cstamp = jiffies;
2500 ifp->tokenized = tokenized;
2501 spin_unlock_bh(&ifp->lock);
2502 addrconf_dad_start(ifp);
2503 }
2504
2505 if (ifp) {
2506 u32 flags;
2507 unsigned long now;
2508 u32 stored_lft;
2509
2510 /* update lifetime (RFC2462 5.5.3 e) */
2511 spin_lock_bh(&ifp->lock);
2512 now = jiffies;
2513 if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
2514 stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
2515 else
2516 stored_lft = 0;
2517 if (!update_lft && !create && stored_lft) {
2518 const u32 minimum_lft = min_t(u32,
2519 stored_lft, MIN_VALID_LIFETIME);
2520 valid_lft = max(valid_lft, minimum_lft);
2521
2522 /* RFC4862 Section 5.5.3e:
2523 * "Note that the preferred lifetime of the
2524 * corresponding address is always reset to
2525 * the Preferred Lifetime in the received
2526 * Prefix Information option, regardless of
2527 * whether the valid lifetime is also reset or
2528 * ignored."
2529 *
2530 * So we should always update prefered_lft here.
2531 */
2532 update_lft = 1;
2533 }
2534
2535 if (update_lft) {
2536 ifp->valid_lft = valid_lft;
2537 ifp->prefered_lft = prefered_lft;
2538 ifp->tstamp = now;
2539 flags = ifp->flags;
2540 ifp->flags &= ~IFA_F_DEPRECATED;
2541 spin_unlock_bh(&ifp->lock);
2542
2543 if (!(flags&IFA_F_TENTATIVE))
2544 ipv6_ifa_notify(0, ifp);
2545 } else
2546 spin_unlock_bh(&ifp->lock);
2547
2548 manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
2549 create, now);
2550
2551 in6_ifa_put(ifp);
2552 addrconf_verify();
2553 }
2554 } 2626 }
2555 inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo); 2627 inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
2628put:
2556 in6_dev_put(in6_dev); 2629 in6_dev_put(in6_dev);
2557} 2630}
2558 2631
@@ -2947,8 +3020,8 @@ static void init_loopback(struct net_device *dev)
2947 } 3020 }
2948} 3021}
2949 3022
2950static void addrconf_add_linklocal(struct inet6_dev *idev, 3023void addrconf_add_linklocal(struct inet6_dev *idev,
2951 const struct in6_addr *addr, u32 flags) 3024 const struct in6_addr *addr, u32 flags)
2952{ 3025{
2953 struct inet6_ifaddr *ifp; 3026 struct inet6_ifaddr *ifp;
2954 u32 addr_flags = flags | IFA_F_PERMANENT; 3027 u32 addr_flags = flags | IFA_F_PERMANENT;
@@ -2967,6 +3040,7 @@ static void addrconf_add_linklocal(struct inet6_dev *idev,
2967 in6_ifa_put(ifp); 3040 in6_ifa_put(ifp);
2968 } 3041 }
2969} 3042}
3043EXPORT_SYMBOL_GPL(addrconf_add_linklocal);
2970 3044
2971static bool ipv6_reserved_interfaceid(struct in6_addr address) 3045static bool ipv6_reserved_interfaceid(struct in6_addr address)
2972{ 3046{
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index bfa86f040c16..2076c21107d0 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -92,6 +92,12 @@ MODULE_PARM_DESC(disable_ipv6, "Disable IPv6 on all interfaces");
92module_param_named(autoconf, ipv6_defaults.autoconf, int, 0444); 92module_param_named(autoconf, ipv6_defaults.autoconf, int, 0444);
93MODULE_PARM_DESC(autoconf, "Enable IPv6 address autoconfiguration on all interfaces"); 93MODULE_PARM_DESC(autoconf, "Enable IPv6 address autoconfiguration on all interfaces");
94 94
95bool ipv6_mod_enabled(void)
96{
97 return disable_ipv6_mod == 0;
98}
99EXPORT_SYMBOL_GPL(ipv6_mod_enabled);
100
95static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk) 101static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
96{ 102{
97 const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo); 103 const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index ed33abf57abd..5857c1fc8b67 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -67,6 +67,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
67 struct net *net = rule->fr_net; 67 struct net *net = rule->fr_net;
68 pol_lookup_t lookup = arg->lookup_ptr; 68 pol_lookup_t lookup = arg->lookup_ptr;
69 int err = 0; 69 int err = 0;
70 u32 tb_id;
70 71
71 switch (rule->action) { 72 switch (rule->action) {
72 case FR_ACT_TO_TBL: 73 case FR_ACT_TO_TBL:
@@ -86,7 +87,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
86 goto discard_pkt; 87 goto discard_pkt;
87 } 88 }
88 89
89 table = fib6_get_table(net, rule->table); 90 tb_id = fib_rule_get_table(rule, arg);
91 table = fib6_get_table(net, tb_id);
90 if (!table) { 92 if (!table) {
91 err = -EAGAIN; 93 err = -EAGAIN;
92 goto out; 94 goto out;
@@ -199,7 +201,7 @@ static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
199 struct net *net = sock_net(skb->sk); 201 struct net *net = sock_net(skb->sk);
200 struct fib6_rule *rule6 = (struct fib6_rule *) rule; 202 struct fib6_rule *rule6 = (struct fib6_rule *) rule;
201 203
202 if (rule->action == FR_ACT_TO_TBL) { 204 if (rule->action == FR_ACT_TO_TBL && !rule->l3mdev) {
203 if (rule->table == RT6_TABLE_UNSPEC) 205 if (rule->table == RT6_TABLE_UNSPEC)
204 goto errout; 206 goto errout;
205 207
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index a4fa84076969..bd59c343d35f 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -388,7 +388,8 @@ relookup_failed:
388/* 388/*
389 * Send an ICMP message in response to a packet in error 389 * Send an ICMP message in response to a packet in error
390 */ 390 */
391static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) 391static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
392 const struct in6_addr *force_saddr)
392{ 393{
393 struct net *net = dev_net(skb->dev); 394 struct net *net = dev_net(skb->dev);
394 struct inet6_dev *idev = NULL; 395 struct inet6_dev *idev = NULL;
@@ -475,6 +476,8 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
475 memset(&fl6, 0, sizeof(fl6)); 476 memset(&fl6, 0, sizeof(fl6));
476 fl6.flowi6_proto = IPPROTO_ICMPV6; 477 fl6.flowi6_proto = IPPROTO_ICMPV6;
477 fl6.daddr = hdr->saddr; 478 fl6.daddr = hdr->saddr;
479 if (force_saddr)
480 saddr = force_saddr;
478 if (saddr) 481 if (saddr)
479 fl6.saddr = *saddr; 482 fl6.saddr = *saddr;
480 fl6.flowi6_mark = mark; 483 fl6.flowi6_mark = mark;
@@ -502,12 +505,14 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
502 else if (!fl6.flowi6_oif) 505 else if (!fl6.flowi6_oif)
503 fl6.flowi6_oif = np->ucast_oif; 506 fl6.flowi6_oif = np->ucast_oif;
504 507
508 ipc6.tclass = np->tclass;
509 fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
510
505 dst = icmpv6_route_lookup(net, skb, sk, &fl6); 511 dst = icmpv6_route_lookup(net, skb, sk, &fl6);
506 if (IS_ERR(dst)) 512 if (IS_ERR(dst))
507 goto out; 513 goto out;
508 514
509 ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); 515 ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
510 ipc6.tclass = np->tclass;
511 ipc6.dontfrag = np->dontfrag; 516 ipc6.dontfrag = np->dontfrag;
512 ipc6.opt = NULL; 517 ipc6.opt = NULL;
513 518
@@ -549,10 +554,75 @@ out:
549 */ 554 */
550void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) 555void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
551{ 556{
552 icmp6_send(skb, ICMPV6_PARAMPROB, code, pos); 557 icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL);
553 kfree_skb(skb); 558 kfree_skb(skb);
554} 559}
555 560
561/* Generate an ICMPv6 error of type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH
562 * if sufficient data bytes are available.
563 * @nhs is the size of the tunnel header(s):
564 * either an IPv4 header for SIT encap,
565 * or an IPv4 header + GRE header for GRE encap.
566 */
567int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
568 unsigned int data_len)
569{
570 struct in6_addr temp_saddr;
571 struct rt6_info *rt;
572 struct sk_buff *skb2;
573 u32 info = 0;
574
575 if (!pskb_may_pull(skb, nhs + sizeof(struct ipv6hdr) + 8))
576 return 1;
577
578 /* RFC 4884 (partial) support for ICMP extensions */
579 if (data_len < 128 || (data_len & 7) || skb->len < data_len)
580 data_len = 0;
581
582 skb2 = data_len ? skb_copy(skb, GFP_ATOMIC) : skb_clone(skb, GFP_ATOMIC);
583
584 if (!skb2)
585 return 1;
586
587 skb_dst_drop(skb2);
588 skb_pull(skb2, nhs);
589 skb_reset_network_header(skb2);
590
591 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0);
592
593 if (rt && rt->dst.dev)
594 skb2->dev = rt->dst.dev;
595
596 ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, &temp_saddr);
597
598 if (data_len) {
599 /* RFC 4884 (partial) support:
600 * insert 0 padding at the end, before the extensions
601 */
602 __skb_push(skb2, nhs);
603 skb_reset_network_header(skb2);
604 memmove(skb2->data, skb2->data + nhs, data_len - nhs);
605 memset(skb2->data + data_len - nhs, 0, nhs);
606 /* RFC 4884 4.5: Length is measured in 64-bit words,
607 * and stored in reserved[0]
608 */
609 info = (data_len/8) << 24;
610 }
611 if (type == ICMP_TIME_EXCEEDED)
612 icmp6_send(skb2, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
613 info, &temp_saddr);
614 else
615 icmp6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH,
616 info, &temp_saddr);
617 if (rt)
618 ip6_rt_put(rt);
619
620 kfree_skb(skb2);
621
622 return 0;
623}
624EXPORT_SYMBOL(ip6_err_gen_icmpv6_unreach);
625
556static void icmpv6_echo_reply(struct sk_buff *skb) 626static void icmpv6_echo_reply(struct sk_buff *skb)
557{ 627{
558 struct net *net = dev_net(skb->dev); 628 struct net *net = dev_net(skb->dev);
@@ -585,7 +655,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
585 fl6.daddr = ipv6_hdr(skb)->saddr; 655 fl6.daddr = ipv6_hdr(skb)->saddr;
586 if (saddr) 656 if (saddr)
587 fl6.saddr = *saddr; 657 fl6.saddr = *saddr;
588 fl6.flowi6_oif = l3mdev_fib_oif(skb->dev); 658 fl6.flowi6_oif = skb->dev->ifindex;
589 fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY; 659 fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
590 fl6.flowi6_mark = mark; 660 fl6.flowi6_mark = mark;
591 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); 661 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
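ip6_err_gen_icmpv6_unreach() only honours RFC 4884 extension data when the original-datagram portion covers at least 128 bytes, is 8-byte aligned and fits inside the packet; the length then travels in the top byte of the ICMPv6 info word, expressed in 64-bit words. A small standalone rendering of that computation (a sketch mirroring the checks above, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors the RFC 4884 handling in ip6_err_gen_icmpv6_unreach():
     * reject short, unaligned or oversized lengths, then store the
     * length in 64-bit words in the top byte (RFC 4884 section 4.5). */
    static uint32_t rfc4884_info(unsigned int data_len, unsigned int pkt_len)
    {
            if (data_len < 128 || (data_len & 7) || pkt_len < data_len)
                    return 0;

            return (uint32_t)(data_len / 8) << 24;
    }

    int main(void)
    {
            printf("0x%08x\n", rfc4884_info(128, 1500)); /* 0x10000000 */
            printf("0x%08x\n", rfc4884_info(132, 1500)); /* 0: not aligned */
            return 0;
    }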
diff --git a/net/ipv6/ila/ila.h b/net/ipv6/ila/ila.h
index d08fd2d48a78..e0170f62bc39 100644
--- a/net/ipv6/ila/ila.h
+++ b/net/ipv6/ila/ila.h
@@ -109,7 +109,8 @@ static inline bool ila_csum_neutral_set(struct ila_identifier ident)
109 return !!(ident.csum_neutral); 109 return !!(ident.csum_neutral);
110} 110}
111 111
112void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p); 112void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p,
113 bool set_csum_neutral);
113 114
114void ila_init_saved_csum(struct ila_params *p); 115void ila_init_saved_csum(struct ila_params *p);
115 116
diff --git a/net/ipv6/ila/ila_common.c b/net/ipv6/ila/ila_common.c
index 0e94042d1289..ec9efbcdad35 100644
--- a/net/ipv6/ila/ila_common.c
+++ b/net/ipv6/ila/ila_common.c
@@ -34,12 +34,12 @@ static void ila_csum_do_neutral(struct ila_addr *iaddr,
34 if (p->locator_match.v64) { 34 if (p->locator_match.v64) {
35 diff = p->csum_diff; 35 diff = p->csum_diff;
36 } else { 36 } else {
37 diff = compute_csum_diff8((__be32 *)iaddr, 37 diff = compute_csum_diff8((__be32 *)&p->locator,
38 (__be32 *)&p->locator); 38 (__be32 *)iaddr);
39 } 39 }
40 40
41 fval = (__force __wsum)(ila_csum_neutral_set(iaddr->ident) ? 41 fval = (__force __wsum)(ila_csum_neutral_set(iaddr->ident) ?
42 ~CSUM_NEUTRAL_FLAG : CSUM_NEUTRAL_FLAG); 42 CSUM_NEUTRAL_FLAG : ~CSUM_NEUTRAL_FLAG);
43 43
44 diff = csum_add(diff, fval); 44 diff = csum_add(diff, fval);
45 45
@@ -103,7 +103,8 @@ static void ila_csum_adjust_transport(struct sk_buff *skb,
103 iaddr->loc = p->locator; 103 iaddr->loc = p->locator;
104} 104}
105 105
106void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p) 106void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p,
107 bool set_csum_neutral)
107{ 108{
108 struct ipv6hdr *ip6h = ipv6_hdr(skb); 109 struct ipv6hdr *ip6h = ipv6_hdr(skb);
109 struct ila_addr *iaddr = ila_a2i(&ip6h->daddr); 110 struct ila_addr *iaddr = ila_a2i(&ip6h->daddr);
@@ -114,7 +115,8 @@ void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p)
114 * is a locator being translated to a SIR address. 115 * is a locator being translated to a SIR address.
115 * Perform (receiver) checksum-neutral translation. 116 * Perform (receiver) checksum-neutral translation.
116 */ 117 */
117 ila_csum_do_neutral(iaddr, p); 118 if (!set_csum_neutral)
119 ila_csum_do_neutral(iaddr, p);
118 } else { 120 } else {
119 switch (p->csum_mode) { 121 switch (p->csum_mode) {
120 case ILA_CSUM_ADJUST_TRANSPORT: 122 case ILA_CSUM_ADJUST_TRANSPORT:
@@ -138,8 +140,8 @@ void ila_init_saved_csum(struct ila_params *p)
138 return; 140 return;
139 141
140 p->csum_diff = compute_csum_diff8( 142 p->csum_diff = compute_csum_diff8(
141 (__be32 *)&p->locator_match, 143 (__be32 *)&p->locator,
142 (__be32 *)&p->locator); 144 (__be32 *)&p->locator_match);
143} 145}
144 146
145static int __init ila_init(void) 147static int __init ila_init(void)
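The argument-order fix above matters because ILA relies on ones'-complement arithmetic: rewriting the 64-bit locator and folding in the precomputed difference sum(~old) + sum(new) leaves any 16-bit Internet checksum over the packet unchanged, which is the whole point of checksum-neutral translation. A self-contained userspace demonstration of that invariant (arbitrary example bytes, not kernel code):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* 16-bit ones'-complement accumulate over an even number of bytes. */
    static uint32_t acc(const uint8_t *p, size_t len, uint32_t sum)
    {
            size_t i;

            for (i = 0; i < len; i += 2)
                    sum += (uint32_t)(p[i] << 8 | p[i + 1]);
            return sum;
    }

    static uint16_t fold(uint32_t sum)
    {
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)sum;
    }

    int main(void)
    {
            uint8_t pkt[16] = { 0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 4,
                                5, 6, 7, 8, 9, 10, 11, 12 };
            uint8_t loc_new[8] = { 0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 1 };
            uint8_t not_old[8];
            uint32_t diff;
            uint16_t before, after;
            int i;

            before = fold(acc(pkt, sizeof(pkt), 0));

            /* difference from the old 8-byte block to the new one:
             * sum(~old) + sum(new), since -x == ~x in ones' complement */
            for (i = 0; i < 8; i++)
                    not_old[i] = ~pkt[i];
            diff = acc(not_old, 8, 0);
            diff = acc(loc_new, 8, diff);

            memcpy(pkt, loc_new, 8);        /* rewrite the "locator" */
            after = fold(acc(pkt, sizeof(pkt), 0));

            /* old sum plus diff reproduces the new sum: prints "4dde 4dde" */
            printf("%04x %04x\n", after, fold((uint32_t)before + diff));
            return 0;
    }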
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index 1dfb64166d7d..c8314c6b6154 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -26,7 +26,7 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
26 if (skb->protocol != htons(ETH_P_IPV6)) 26 if (skb->protocol != htons(ETH_P_IPV6))
27 goto drop; 27 goto drop;
28 28
29 ila_update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate)); 29 ila_update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate), true);
30 30
31 return dst->lwtstate->orig_output(net, sk, skb); 31 return dst->lwtstate->orig_output(net, sk, skb);
32 32
@@ -42,7 +42,7 @@ static int ila_input(struct sk_buff *skb)
42 if (skb->protocol != htons(ETH_P_IPV6)) 42 if (skb->protocol != htons(ETH_P_IPV6))
43 goto drop; 43 goto drop;
44 44
45 ila_update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate)); 45 ila_update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate), false);
46 46
47 return dst->lwtstate->orig_input(skb); 47 return dst->lwtstate->orig_input(skb);
48 48
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index a90e57229c6c..e6eca5fdf4c9 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -210,14 +210,14 @@ static void ila_free_cb(void *ptr, void *arg)
210 } 210 }
211} 211}
212 212
213static int ila_xlat_addr(struct sk_buff *skb); 213static int ila_xlat_addr(struct sk_buff *skb, bool set_csum_neutral);
214 214
215static unsigned int 215static unsigned int
216ila_nf_input(void *priv, 216ila_nf_input(void *priv,
217 struct sk_buff *skb, 217 struct sk_buff *skb,
218 const struct nf_hook_state *state) 218 const struct nf_hook_state *state)
219{ 219{
220 ila_xlat_addr(skb); 220 ila_xlat_addr(skb, false);
221 return NF_ACCEPT; 221 return NF_ACCEPT;
222} 222}
223 223
@@ -597,7 +597,7 @@ static struct pernet_operations ila_net_ops = {
597 .size = sizeof(struct ila_net), 597 .size = sizeof(struct ila_net),
598}; 598};
599 599
600static int ila_xlat_addr(struct sk_buff *skb) 600static int ila_xlat_addr(struct sk_buff *skb, bool set_csum_neutral)
601{ 601{
602 struct ila_map *ila; 602 struct ila_map *ila;
603 struct ipv6hdr *ip6h = ipv6_hdr(skb); 603 struct ipv6hdr *ip6h = ipv6_hdr(skb);
@@ -616,7 +616,7 @@ static int ila_xlat_addr(struct sk_buff *skb)
616 616
617 ila = ila_lookup_wildcards(iaddr, skb->dev->ifindex, ilan); 617 ila = ila_lookup_wildcards(iaddr, skb->dev->ifindex, ilan);
618 if (ila) 618 if (ila)
619 ila_update_ipv6_locator(skb, &ila->xp.ip); 619 ila_update_ipv6_locator(skb, &ila->xp.ip, set_csum_neutral);
620 620
621 rcu_read_unlock(); 621 rcu_read_unlock();
622 622
diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c
index 14dacc544c3e..713676f14a0e 100644
--- a/net/ipv6/ip6_icmp.c
+++ b/net/ipv6/ip6_icmp.c
@@ -39,7 +39,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
39 39
40 if (!send) 40 if (!send)
41 goto out; 41 goto out;
42 send(skb, type, code, info); 42 send(skb, type, code, info, NULL);
43out: 43out:
44 rcu_read_unlock(); 44 rcu_read_unlock();
45} 45}
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 94611e450ec9..aacfb4bce153 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -323,6 +323,7 @@ int ip6_input(struct sk_buff *skb)
323 dev_net(skb->dev), NULL, skb, skb->dev, NULL, 323 dev_net(skb->dev), NULL, skb, skb->dev, NULL,
324 ip6_input_finish); 324 ip6_input_finish);
325} 325}
326EXPORT_SYMBOL_GPL(ip6_input);
326 327
327int ip6_mc_input(struct sk_buff *skb) 328int ip6_mc_input(struct sk_buff *skb)
328{ 329{
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 635b8d340cdb..1dfc402d9ad1 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -368,7 +368,7 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
368 if (skb->ignore_df) 368 if (skb->ignore_df)
369 return false; 369 return false;
370 370
371 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) 371 if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
372 return false; 372 return false;
373 373
374 return true; 374 return true;
@@ -910,6 +910,13 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
910 int err; 910 int err;
911 int flags = 0; 911 int flags = 0;
912 912
913 if (ipv6_addr_any(&fl6->saddr) && fl6->flowi6_oif &&
914 (!*dst || !(*dst)->error)) {
915 err = l3mdev_get_saddr6(net, sk, fl6);
916 if (err)
917 goto out_err;
918 }
919
913 /* The correct way to handle this would be to do 920 /* The correct way to handle this would be to do
914 * ip6_route_get_saddr, and then ip6_route_output; however, 921 * ip6_route_get_saddr, and then ip6_route_output; however,
915 * the route-specific preferred source forces the 922 * the route-specific preferred source forces the
@@ -999,10 +1006,11 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
999 return 0; 1006 return 0;
1000 1007
1001out_err_release: 1008out_err_release:
1002 if (err == -ENETUNREACH)
1003 IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
1004 dst_release(*dst); 1009 dst_release(*dst);
1005 *dst = NULL; 1010 *dst = NULL;
1011out_err:
1012 if (err == -ENETUNREACH)
1013 IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
1006 return err; 1014 return err;
1007} 1015}
1008 1016
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index c245895a3d41..fe65cdc28a45 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -73,15 +73,6 @@
73#include <linux/netfilter.h> 73#include <linux/netfilter.h>
74#include <linux/netfilter_ipv6.h> 74#include <linux/netfilter_ipv6.h>
75 75
76/* Set to 3 to get tracing... */
77#define ND_DEBUG 1
78
79#define ND_PRINTK(val, level, fmt, ...) \
80do { \
81 if (val <= ND_DEBUG) \
82 net_##level##_ratelimited(fmt, ##__VA_ARGS__); \
83} while (0)
84
85static u32 ndisc_hash(const void *pkey, 76static u32 ndisc_hash(const void *pkey,
86 const struct net_device *dev, 77 const struct net_device *dev,
87 __u32 *hash_rnd); 78 __u32 *hash_rnd);
@@ -150,11 +141,10 @@ struct neigh_table nd_tbl = {
150}; 141};
151EXPORT_SYMBOL_GPL(nd_tbl); 142EXPORT_SYMBOL_GPL(nd_tbl);
152 143
153static void ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data) 144void __ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data,
145 int data_len, int pad)
154{ 146{
155 int pad = ndisc_addr_option_pad(skb->dev->type); 147 int space = __ndisc_opt_addr_space(data_len, pad);
156 int data_len = skb->dev->addr_len;
157 int space = ndisc_opt_addr_space(skb->dev);
158 u8 *opt = skb_put(skb, space); 148 u8 *opt = skb_put(skb, space);
159 149
160 opt[0] = type; 150 opt[0] = type;
@@ -171,6 +161,23 @@ static void ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data)
171 if (space > 0) 161 if (space > 0)
172 memset(opt, 0, space); 162 memset(opt, 0, space);
173} 163}
164EXPORT_SYMBOL_GPL(__ndisc_fill_addr_option);
165
166static inline void ndisc_fill_addr_option(struct sk_buff *skb, int type,
167 void *data, u8 icmp6_type)
168{
169 __ndisc_fill_addr_option(skb, type, data, skb->dev->addr_len,
170 ndisc_addr_option_pad(skb->dev->type));
171 ndisc_ops_fill_addr_option(skb->dev, skb, icmp6_type);
172}
173
174static inline void ndisc_fill_redirect_addr_option(struct sk_buff *skb,
175 void *ha,
176 const u8 *ops_data)
177{
178 ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR, ha, NDISC_REDIRECT);
179 ndisc_ops_fill_redirect_addr_option(skb->dev, skb, ops_data);
180}
174 181
175static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur, 182static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur,
176 struct nd_opt_hdr *end) 183 struct nd_opt_hdr *end)
@@ -185,24 +192,28 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur,
185 return cur <= end && cur->nd_opt_type == type ? cur : NULL; 192 return cur <= end && cur->nd_opt_type == type ? cur : NULL;
186} 193}
187 194
188static inline int ndisc_is_useropt(struct nd_opt_hdr *opt) 195static inline int ndisc_is_useropt(const struct net_device *dev,
196 struct nd_opt_hdr *opt)
189{ 197{
190 return opt->nd_opt_type == ND_OPT_RDNSS || 198 return opt->nd_opt_type == ND_OPT_RDNSS ||
191 opt->nd_opt_type == ND_OPT_DNSSL; 199 opt->nd_opt_type == ND_OPT_DNSSL ||
200 ndisc_ops_is_useropt(dev, opt->nd_opt_type);
192} 201}
193 202
194static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur, 203static struct nd_opt_hdr *ndisc_next_useropt(const struct net_device *dev,
204 struct nd_opt_hdr *cur,
195 struct nd_opt_hdr *end) 205 struct nd_opt_hdr *end)
196{ 206{
197 if (!cur || !end || cur >= end) 207 if (!cur || !end || cur >= end)
198 return NULL; 208 return NULL;
199 do { 209 do {
200 cur = ((void *)cur) + (cur->nd_opt_len << 3); 210 cur = ((void *)cur) + (cur->nd_opt_len << 3);
201 } while (cur < end && !ndisc_is_useropt(cur)); 211 } while (cur < end && !ndisc_is_useropt(dev, cur));
202 return cur <= end && ndisc_is_useropt(cur) ? cur : NULL; 212 return cur <= end && ndisc_is_useropt(dev, cur) ? cur : NULL;
203} 213}
204 214
205struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, 215struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
216 u8 *opt, int opt_len,
206 struct ndisc_options *ndopts) 217 struct ndisc_options *ndopts)
207{ 218{
208 struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)opt; 219 struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)opt;
@@ -217,6 +228,8 @@ struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
217 l = nd_opt->nd_opt_len << 3; 228 l = nd_opt->nd_opt_len << 3;
218 if (opt_len < l || l == 0) 229 if (opt_len < l || l == 0)
219 return NULL; 230 return NULL;
231 if (ndisc_ops_parse_options(dev, nd_opt, ndopts))
232 goto next_opt;
220 switch (nd_opt->nd_opt_type) { 233 switch (nd_opt->nd_opt_type) {
221 case ND_OPT_SOURCE_LL_ADDR: 234 case ND_OPT_SOURCE_LL_ADDR:
222 case ND_OPT_TARGET_LL_ADDR: 235 case ND_OPT_TARGET_LL_ADDR:
@@ -243,7 +256,7 @@ struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
243 break; 256 break;
244#endif 257#endif
245 default: 258 default:
246 if (ndisc_is_useropt(nd_opt)) { 259 if (ndisc_is_useropt(dev, nd_opt)) {
247 ndopts->nd_useropts_end = nd_opt; 260 ndopts->nd_useropts_end = nd_opt;
248 if (!ndopts->nd_useropts) 261 if (!ndopts->nd_useropts)
249 ndopts->nd_useropts = nd_opt; 262 ndopts->nd_useropts = nd_opt;
@@ -260,6 +273,7 @@ struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
260 nd_opt->nd_opt_len); 273 nd_opt->nd_opt_len);
261 } 274 }
262 } 275 }
276next_opt:
263 opt_len -= l; 277 opt_len -= l;
264 nd_opt = ((void *)nd_opt) + l; 278 nd_opt = ((void *)nd_opt) + l;
265 } 279 }
@@ -509,7 +523,8 @@ void ndisc_send_na(struct net_device *dev, const struct in6_addr *daddr,
509 if (!dev->addr_len) 523 if (!dev->addr_len)
510 inc_opt = 0; 524 inc_opt = 0;
511 if (inc_opt) 525 if (inc_opt)
512 optlen += ndisc_opt_addr_space(dev); 526 optlen += ndisc_opt_addr_space(dev,
527 NDISC_NEIGHBOUR_ADVERTISEMENT);
513 528
514 skb = ndisc_alloc_skb(dev, sizeof(*msg) + optlen); 529 skb = ndisc_alloc_skb(dev, sizeof(*msg) + optlen);
515 if (!skb) 530 if (!skb)
@@ -528,8 +543,8 @@ void ndisc_send_na(struct net_device *dev, const struct in6_addr *daddr,
528 543
529 if (inc_opt) 544 if (inc_opt)
530 ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR, 545 ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR,
531 dev->dev_addr); 546 dev->dev_addr,
532 547 NDISC_NEIGHBOUR_ADVERTISEMENT);
533 548
534 ndisc_send_skb(skb, daddr, src_addr); 549 ndisc_send_skb(skb, daddr, src_addr);
535} 550}
@@ -574,7 +589,8 @@ void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
574 if (ipv6_addr_any(saddr)) 589 if (ipv6_addr_any(saddr))
575 inc_opt = false; 590 inc_opt = false;
576 if (inc_opt) 591 if (inc_opt)
577 optlen += ndisc_opt_addr_space(dev); 592 optlen += ndisc_opt_addr_space(dev,
593 NDISC_NEIGHBOUR_SOLICITATION);
578 594
579 skb = ndisc_alloc_skb(dev, sizeof(*msg) + optlen); 595 skb = ndisc_alloc_skb(dev, sizeof(*msg) + optlen);
580 if (!skb) 596 if (!skb)
@@ -590,7 +606,8 @@ void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
590 606
591 if (inc_opt) 607 if (inc_opt)
592 ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR, 608 ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
593 dev->dev_addr); 609 dev->dev_addr,
610 NDISC_NEIGHBOUR_SOLICITATION);
594 611
595 ndisc_send_skb(skb, daddr, saddr); 612 ndisc_send_skb(skb, daddr, saddr);
596} 613}
@@ -626,7 +643,7 @@ void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr,
626 } 643 }
627#endif 644#endif
628 if (send_sllao) 645 if (send_sllao)
629 optlen += ndisc_opt_addr_space(dev); 646 optlen += ndisc_opt_addr_space(dev, NDISC_ROUTER_SOLICITATION);
630 647
631 skb = ndisc_alloc_skb(dev, sizeof(*msg) + optlen); 648 skb = ndisc_alloc_skb(dev, sizeof(*msg) + optlen);
632 if (!skb) 649 if (!skb)
@@ -641,7 +658,8 @@ void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr,
641 658
642 if (send_sllao) 659 if (send_sllao)
643 ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR, 660 ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
644 dev->dev_addr); 661 dev->dev_addr,
662 NDISC_ROUTER_SOLICITATION);
645 663
646 ndisc_send_skb(skb, daddr, saddr); 664 ndisc_send_skb(skb, daddr, saddr);
647} 665}
@@ -702,6 +720,15 @@ static int pndisc_is_router(const void *pkey,
702 return ret; 720 return ret;
703} 721}
704 722
723void ndisc_update(const struct net_device *dev, struct neighbour *neigh,
724 const u8 *lladdr, u8 new, u32 flags, u8 icmp6_type,
725 struct ndisc_options *ndopts)
726{
727 neigh_update(neigh, lladdr, new, flags);
728 /* notify ndisc ops about the neighbour update */
729 ndisc_ops_update(dev, neigh, flags, icmp6_type, ndopts);
730}
731
705static void ndisc_recv_ns(struct sk_buff *skb) 732static void ndisc_recv_ns(struct sk_buff *skb)
706{ 733{
707 struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb); 734 struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
@@ -738,7 +765,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
738 return; 765 return;
739 } 766 }
740 767
741 if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) { 768 if (!ndisc_parse_options(dev, msg->opt, ndoptlen, &ndopts)) {
742 ND_PRINTK(2, warn, "NS: invalid ND options\n"); 769 ND_PRINTK(2, warn, "NS: invalid ND options\n");
743 return; 770 return;
744 } 771 }
@@ -856,9 +883,10 @@ have_ifp:
856 neigh = __neigh_lookup(&nd_tbl, saddr, dev, 883 neigh = __neigh_lookup(&nd_tbl, saddr, dev,
857 !inc || lladdr || !dev->addr_len); 884 !inc || lladdr || !dev->addr_len);
858 if (neigh) 885 if (neigh)
859 neigh_update(neigh, lladdr, NUD_STALE, 886 ndisc_update(dev, neigh, lladdr, NUD_STALE,
860 NEIGH_UPDATE_F_WEAK_OVERRIDE| 887 NEIGH_UPDATE_F_WEAK_OVERRIDE|
861 NEIGH_UPDATE_F_OVERRIDE); 888 NEIGH_UPDATE_F_OVERRIDE,
889 NDISC_NEIGHBOUR_SOLICITATION, &ndopts);
862 if (neigh || !dev->header_ops) { 890 if (neigh || !dev->header_ops) {
863 ndisc_send_na(dev, saddr, &msg->target, !!is_router, 891 ndisc_send_na(dev, saddr, &msg->target, !!is_router,
864 true, (ifp != NULL && inc), inc); 892 true, (ifp != NULL && inc), inc);
@@ -911,7 +939,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
911 idev->cnf.drop_unsolicited_na) 939 idev->cnf.drop_unsolicited_na)
912 return; 940 return;
913 941
914 if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) { 942 if (!ndisc_parse_options(dev, msg->opt, ndoptlen, &ndopts)) {
915 ND_PRINTK(2, warn, "NS: invalid ND option\n"); 943 ND_PRINTK(2, warn, "NS: invalid ND option\n");
916 return; 944 return;
917 } 945 }
@@ -967,12 +995,13 @@ static void ndisc_recv_na(struct sk_buff *skb)
967 goto out; 995 goto out;
968 } 996 }
969 997
970 neigh_update(neigh, lladdr, 998 ndisc_update(dev, neigh, lladdr,
971 msg->icmph.icmp6_solicited ? NUD_REACHABLE : NUD_STALE, 999 msg->icmph.icmp6_solicited ? NUD_REACHABLE : NUD_STALE,
972 NEIGH_UPDATE_F_WEAK_OVERRIDE| 1000 NEIGH_UPDATE_F_WEAK_OVERRIDE|
973 (msg->icmph.icmp6_override ? NEIGH_UPDATE_F_OVERRIDE : 0)| 1001 (msg->icmph.icmp6_override ? NEIGH_UPDATE_F_OVERRIDE : 0)|
974 NEIGH_UPDATE_F_OVERRIDE_ISROUTER| 1002 NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
975 (msg->icmph.icmp6_router ? NEIGH_UPDATE_F_ISROUTER : 0)); 1003 (msg->icmph.icmp6_router ? NEIGH_UPDATE_F_ISROUTER : 0),
1004 NDISC_NEIGHBOUR_ADVERTISEMENT, &ndopts);
976 1005
977 if ((old_flags & ~neigh->flags) & NTF_ROUTER) { 1006 if ((old_flags & ~neigh->flags) & NTF_ROUTER) {
978 /* 1007 /*
@@ -1017,7 +1046,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
1017 goto out; 1046 goto out;
1018 1047
1019 /* Parse ND options */ 1048 /* Parse ND options */
1020 if (!ndisc_parse_options(rs_msg->opt, ndoptlen, &ndopts)) { 1049 if (!ndisc_parse_options(skb->dev, rs_msg->opt, ndoptlen, &ndopts)) {
1021 ND_PRINTK(2, notice, "NS: invalid ND option, ignored\n"); 1050 ND_PRINTK(2, notice, "NS: invalid ND option, ignored\n");
1022 goto out; 1051 goto out;
1023 } 1052 }
@@ -1031,10 +1060,11 @@ static void ndisc_recv_rs(struct sk_buff *skb)
1031 1060
1032 neigh = __neigh_lookup(&nd_tbl, saddr, skb->dev, 1); 1061 neigh = __neigh_lookup(&nd_tbl, saddr, skb->dev, 1);
1033 if (neigh) { 1062 if (neigh) {
1034 neigh_update(neigh, lladdr, NUD_STALE, 1063 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
1035 NEIGH_UPDATE_F_WEAK_OVERRIDE| 1064 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1036 NEIGH_UPDATE_F_OVERRIDE| 1065 NEIGH_UPDATE_F_OVERRIDE|
1037 NEIGH_UPDATE_F_OVERRIDE_ISROUTER); 1066 NEIGH_UPDATE_F_OVERRIDE_ISROUTER,
1067 NDISC_ROUTER_SOLICITATION, &ndopts);
1038 neigh_release(neigh); 1068 neigh_release(neigh);
1039 } 1069 }
1040out: 1070out:
@@ -1135,7 +1165,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1135 return; 1165 return;
1136 } 1166 }
1137 1167
1138 if (!ndisc_parse_options(opt, optlen, &ndopts)) { 1168 if (!ndisc_parse_options(skb->dev, opt, optlen, &ndopts)) {
1139 ND_PRINTK(2, warn, "RA: invalid ND options\n"); 1169 ND_PRINTK(2, warn, "RA: invalid ND options\n");
1140 return; 1170 return;
1141 } 1171 }
@@ -1329,11 +1359,12 @@ skip_linkparms:
1329 goto out; 1359 goto out;
1330 } 1360 }
1331 } 1361 }
1332 neigh_update(neigh, lladdr, NUD_STALE, 1362 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
1333 NEIGH_UPDATE_F_WEAK_OVERRIDE| 1363 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1334 NEIGH_UPDATE_F_OVERRIDE| 1364 NEIGH_UPDATE_F_OVERRIDE|
1335 NEIGH_UPDATE_F_OVERRIDE_ISROUTER| 1365 NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1336 NEIGH_UPDATE_F_ISROUTER); 1366 NEIGH_UPDATE_F_ISROUTER,
1367 NDISC_ROUTER_ADVERTISEMENT, &ndopts);
1337 } 1368 }
1338 1369
1339 if (!ipv6_accept_ra(in6_dev)) { 1370 if (!ipv6_accept_ra(in6_dev)) {
@@ -1421,7 +1452,8 @@ skip_routeinfo:
1421 struct nd_opt_hdr *p; 1452 struct nd_opt_hdr *p;
1422 for (p = ndopts.nd_useropts; 1453 for (p = ndopts.nd_useropts;
1423 p; 1454 p;
1424 p = ndisc_next_useropt(p, ndopts.nd_useropts_end)) { 1455 p = ndisc_next_useropt(skb->dev, p,
1456 ndopts.nd_useropts_end)) {
1425 ndisc_ra_useropt(skb, p); 1457 ndisc_ra_useropt(skb, p);
1426 } 1458 }
1427 } 1459 }
@@ -1459,7 +1491,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1459 return; 1491 return;
1460 } 1492 }
1461 1493
1462 if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) 1494 if (!ndisc_parse_options(skb->dev, msg->opt, ndoptlen, &ndopts))
1463 return; 1495 return;
1464 1496
1465 if (!ndopts.nd_opts_rh) { 1497 if (!ndopts.nd_opts_rh) {
@@ -1504,7 +1536,8 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1504 struct dst_entry *dst; 1536 struct dst_entry *dst;
1505 struct flowi6 fl6; 1537 struct flowi6 fl6;
1506 int rd_len; 1538 int rd_len;
1507 u8 ha_buf[MAX_ADDR_LEN], *ha = NULL; 1539 u8 ha_buf[MAX_ADDR_LEN], *ha = NULL,
1540 ops_data_buf[NDISC_OPS_REDIRECT_DATA_SPACE], *ops_data = NULL;
1508 int oif = l3mdev_fib_oif(dev); 1541 int oif = l3mdev_fib_oif(dev);
1509 bool ret; 1542 bool ret;
1510 1543
@@ -1563,7 +1596,9 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1563 memcpy(ha_buf, neigh->ha, dev->addr_len); 1596 memcpy(ha_buf, neigh->ha, dev->addr_len);
1564 read_unlock_bh(&neigh->lock); 1597 read_unlock_bh(&neigh->lock);
1565 ha = ha_buf; 1598 ha = ha_buf;
1566 optlen += ndisc_opt_addr_space(dev); 1599 optlen += ndisc_redirect_opt_addr_space(dev, neigh,
1600 ops_data_buf,
1601 &ops_data);
1567 } else 1602 } else
1568 read_unlock_bh(&neigh->lock); 1603 read_unlock_bh(&neigh->lock);
1569 1604
@@ -1594,7 +1629,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1594 */ 1629 */
1595 1630
1596 if (ha) 1631 if (ha)
1597 ndisc_fill_addr_option(buff, ND_OPT_TARGET_LL_ADDR, ha); 1632 ndisc_fill_redirect_addr_option(buff, ha, ops_data);
1598 1633
1599 /* 1634 /*
1600 * build redirect option and copy skb over to the new packet. 1635 * build redirect option and copy skb over to the new packet.
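ndisc_opt_addr_space() now takes the ICMPv6 message type so ndisc_ops users (6lowpan) can adjust the layout, but the core rounding that __ndisc_opt_addr_space() applies is unchanged: two octets of type/length plus the link-layer address plus any pad, rounded up to a multiple of 8 octets (the option length field counts 8-octet units). A userspace rendering of that arithmetic, with the macro name shortened:

    #include <stdio.h>

    /* Same rounding as the kernel's NDISC_OPT_SPACE()/__ndisc_opt_addr_space():
     * 2 octets of type/length, the address, any pad, rounded up to 8. */
    #define ND_OPT_SPACE(len) (((len) + 2 + 7) & ~7)

    static int opt_addr_space(int addr_len, int pad)
    {
            return ND_OPT_SPACE(addr_len + pad);
    }

    int main(void)
    {
            printf("%d\n", opt_addr_space(6, 0));   /* Ethernet: 8 octets */
            printf("%d\n", opt_addr_space(20, 2));  /* InfiniBand (pad 2): 24 */
            return 0;
    }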
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 3ee3e444a66b..fed40d1ec29b 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -116,6 +116,9 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
116 else if (!fl6.flowi6_oif) 116 else if (!fl6.flowi6_oif)
117 fl6.flowi6_oif = np->ucast_oif; 117 fl6.flowi6_oif = np->ucast_oif;
118 118
119 ipc6.tclass = np->tclass;
120 fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
121
119 dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr); 122 dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr);
120 if (IS_ERR(dst)) 123 if (IS_ERR(dst))
121 return PTR_ERR(dst); 124 return PTR_ERR(dst);
@@ -140,7 +143,6 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
140 pfh.family = AF_INET6; 143 pfh.family = AF_INET6;
141 144
142 ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); 145 ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
143 ipc6.tclass = np->tclass;
144 ipc6.dontfrag = np->dontfrag; 146 ipc6.dontfrag = np->dontfrag;
145 ipc6.opt = NULL; 147 ipc6.opt = NULL;
146 148
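The ping, raw, udp and icmp hunks in this series all move tclass resolution ahead of the dst lookup and fold it into the flow label via ip6_make_flowinfo(), so the DSCP bits can participate in routing decisions. A standalone rendering of that packing, assuming the kernel's 20-bit IPV6_TCLASS_SHIFT:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    #define IPV6_TCLASS_SHIFT 20    /* as in the kernel's ipv6 headers */

    /* Userspace mirror of ip6_make_flowinfo(): the traffic class sits in
     * bits 20..27 of the flowinfo word, above the 20-bit flow label. */
    static uint32_t make_flowinfo(unsigned int tclass, uint32_t flowlabel)
    {
            return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel;
    }

    int main(void)
    {
            uint32_t fi = make_flowinfo(0xb8, htonl(0x12345)); /* EF DSCP */

            printf("0x%08x\n", ntohl(fi));  /* 0x0b812345 */
            return 0;
    }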
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 896350df6423..590dd1f7746f 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -878,6 +878,11 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
878 if (inet->hdrincl) 878 if (inet->hdrincl)
879 fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH; 879 fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
880 880
881 if (ipc6.tclass < 0)
882 ipc6.tclass = np->tclass;
883
884 fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
885
881 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 886 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
882 if (IS_ERR(dst)) { 887 if (IS_ERR(dst)) {
883 err = PTR_ERR(dst); 888 err = PTR_ERR(dst);
@@ -886,9 +891,6 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
886 if (ipc6.hlimit < 0) 891 if (ipc6.hlimit < 0)
887 ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); 892 ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
888 893
889 if (ipc6.tclass < 0)
890 ipc6.tclass = np->tclass;
891
892 if (ipc6.dontfrag < 0) 894 if (ipc6.dontfrag < 0)
893 ipc6.dontfrag = np->dontfrag; 895 ipc6.dontfrag = np->dontfrag;
894 896
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 520b7884d0c2..49817555449e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1042,8 +1042,8 @@ static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
1042 return pcpu_rt; 1042 return pcpu_rt;
1043} 1043}
1044 1044
1045static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif, 1045struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
1046 struct flowi6 *fl6, int flags) 1046 int oif, struct flowi6 *fl6, int flags)
1047{ 1047{
1048 struct fib6_node *fn, *saved_fn; 1048 struct fib6_node *fn, *saved_fn;
1049 struct rt6_info *rt; 1049 struct rt6_info *rt;
@@ -1139,6 +1139,7 @@ redo_rt6_select:
1139 1139
1140 } 1140 }
1141} 1141}
1142EXPORT_SYMBOL_GPL(ip6_pol_route);
1142 1143
1143static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table, 1144static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
1144 struct flowi6 *fl6, int flags) 1145 struct flowi6 *fl6, int flags)
@@ -2200,7 +2201,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
2200 * first-hop router for the specified ICMP Destination Address. 2201 * first-hop router for the specified ICMP Destination Address.
2201 */ 2202 */
2202 2203
2203 if (!ndisc_parse_options(msg->opt, optlen, &ndopts)) { 2204 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
2204 net_dbg_ratelimited("rt6_redirect: invalid ND options\n"); 2205 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
2205 return; 2206 return;
2206 } 2207 }
@@ -2235,12 +2236,12 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
2235 * We have finally decided to accept it. 2236 * We have finally decided to accept it.
2236 */ 2237 */
2237 2238
2238 neigh_update(neigh, lladdr, NUD_STALE, 2239 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
2239 NEIGH_UPDATE_F_WEAK_OVERRIDE| 2240 NEIGH_UPDATE_F_WEAK_OVERRIDE|
2240 NEIGH_UPDATE_F_OVERRIDE| 2241 NEIGH_UPDATE_F_OVERRIDE|
2241 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER| 2242 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
2242 NEIGH_UPDATE_F_ISROUTER)) 2243 NEIGH_UPDATE_F_ISROUTER)),
2243 ); 2244 NDISC_REDIRECT, &ndopts);
2244 2245
2245 nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL); 2246 nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
2246 if (!nrt) 2247 if (!nrt)
@@ -2585,23 +2586,6 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2585 return rt; 2586 return rt;
2586} 2587}
2587 2588
2588int ip6_route_get_saddr(struct net *net,
2589 struct rt6_info *rt,
2590 const struct in6_addr *daddr,
2591 unsigned int prefs,
2592 struct in6_addr *saddr)
2593{
2594 struct inet6_dev *idev =
2595 rt ? ip6_dst_idev((struct dst_entry *)rt) : NULL;
2596 int err = 0;
2597 if (rt && rt->rt6i_prefsrc.plen)
2598 *saddr = rt->rt6i_prefsrc.addr;
2599 else
2600 err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2601 daddr, prefs, saddr);
2602 return err;
2603}
2604
2605/* remove deleted ip from prefsrc entries */ 2589/* remove deleted ip from prefsrc entries */
2606struct arg_dev_net_ip { 2590struct arg_dev_net_ip {
2607 struct net_device *dev; 2591 struct net_device *dev;
@@ -3306,6 +3290,8 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
3306 3290
3307 err = -EINVAL; 3291 err = -EINVAL;
3308 memset(&fl6, 0, sizeof(fl6)); 3292 memset(&fl6, 0, sizeof(fl6));
3293 rtm = nlmsg_data(nlh);
3294 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
3309 3295
3310 if (tb[RTA_SRC]) { 3296 if (tb[RTA_SRC]) {
3311 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr)) 3297 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 0619ac70836d..917a5cd4b8fc 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -479,47 +479,12 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
479 dev_put(dev); 479 dev_put(dev);
480} 480}
481 481
482/* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH
483 * if sufficient data bytes are available
484 */
485static int ipip6_err_gen_icmpv6_unreach(struct sk_buff *skb)
486{
487 int ihl = ((const struct iphdr *)skb->data)->ihl*4;
488 struct rt6_info *rt;
489 struct sk_buff *skb2;
490
491 if (!pskb_may_pull(skb, ihl + sizeof(struct ipv6hdr) + 8))
492 return 1;
493
494 skb2 = skb_clone(skb, GFP_ATOMIC);
495
496 if (!skb2)
497 return 1;
498
499 skb_dst_drop(skb2);
500 skb_pull(skb2, ihl);
501 skb_reset_network_header(skb2);
502
503 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0);
504
505 if (rt && rt->dst.dev)
506 skb2->dev = rt->dst.dev;
507
508 icmpv6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
509
510 if (rt)
511 ip6_rt_put(rt);
512
513 kfree_skb(skb2);
514
515 return 0;
516}
517
518static int ipip6_err(struct sk_buff *skb, u32 info) 482static int ipip6_err(struct sk_buff *skb, u32 info)
519{ 483{
520 const struct iphdr *iph = (const struct iphdr *)skb->data; 484 const struct iphdr *iph = (const struct iphdr *)skb->data;
521 const int type = icmp_hdr(skb)->type; 485 const int type = icmp_hdr(skb)->type;
522 const int code = icmp_hdr(skb)->code; 486 const int code = icmp_hdr(skb)->code;
487 unsigned int data_len = 0;
523 struct ip_tunnel *t; 488 struct ip_tunnel *t;
524 int err; 489 int err;
525 490
@@ -544,6 +509,7 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
544 case ICMP_TIME_EXCEEDED: 509 case ICMP_TIME_EXCEEDED:
545 if (code != ICMP_EXC_TTL) 510 if (code != ICMP_EXC_TTL)
546 return 0; 511 return 0;
512 data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
547 break; 513 break;
548 case ICMP_REDIRECT: 514 case ICMP_REDIRECT:
549 break; 515 break;
@@ -571,11 +537,11 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
571 goto out; 537 goto out;
572 } 538 }
573 539
574 if (t->parms.iph.daddr == 0) 540 err = 0;
541 if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
575 goto out; 542 goto out;
576 543
577 err = 0; 544 if (t->parms.iph.daddr == 0)
578 if (!ipip6_err_gen_icmpv6_unreach(skb))
579 goto out; 545 goto out;
580 546
581 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) 547 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
@@ -825,9 +791,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
825 u8 protocol = IPPROTO_IPV6; 791 u8 protocol = IPPROTO_IPV6;
826 int t_hlen = tunnel->hlen + sizeof(struct iphdr); 792 int t_hlen = tunnel->hlen + sizeof(struct iphdr);
827 793
828 if (skb->protocol != htons(ETH_P_IPV6))
829 goto tx_error;
830
831 if (tos == 1) 794 if (tos == 1)
832 tos = ipv6_get_dsfield(iph6); 795 tos = ipv6_get_dsfield(iph6);
833 796
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 005dc82c2138..0a71a312d0d8 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1207,6 +1207,11 @@ do_udp_sendmsg:
1207 1207
1208 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 1208 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
1209 1209
1210 if (ipc6.tclass < 0)
1211 ipc6.tclass = np->tclass;
1212
1213 fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
1214
1210 dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p); 1215 dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p);
1211 if (IS_ERR(dst)) { 1216 if (IS_ERR(dst)) {
1212 err = PTR_ERR(dst); 1217 err = PTR_ERR(dst);
@@ -1217,9 +1222,6 @@ do_udp_sendmsg:
1217 if (ipc6.hlimit < 0) 1222 if (ipc6.hlimit < 0)
1218 ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); 1223 ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
1219 1224
1220 if (ipc6.tclass < 0)
1221 ipc6.tclass = np->tclass;
1222
1223 if (msg->msg_flags&MSG_CONFIRM) 1225 if (msg->msg_flags&MSG_CONFIRM)
1224 goto do_confirm; 1226 goto do_confirm;
1225back_from_confirm: 1227back_from_confirm:
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index c074771a10f7..6cc97003e4a9 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -366,12 +366,12 @@ static void __net_exit xfrm6_net_sysctl_exit(struct net *net)
366 kfree(table); 366 kfree(table);
367} 367}
368#else /* CONFIG_SYSCTL */ 368#else /* CONFIG_SYSCTL */
369static int inline xfrm6_net_sysctl_init(struct net *net) 369static inline int xfrm6_net_sysctl_init(struct net *net)
370{ 370{
371 return 0; 371 return 0;
372} 372}
373 373
374static void inline xfrm6_net_sysctl_exit(struct net *net) 374static inline void xfrm6_net_sysctl_exit(struct net *net)
375{ 375{
376} 376}
377#endif 377#endif
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index fc3598a922b0..37d674e6f8a9 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1033,6 +1033,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1033{ 1033{
1034 struct sock *sk = sock->sk; 1034 struct sock *sk = sock->sk;
1035 struct iucv_sock *iucv = iucv_sk(sk); 1035 struct iucv_sock *iucv = iucv_sk(sk);
1036 size_t headroom, linear;
1036 struct sk_buff *skb; 1037 struct sk_buff *skb;
1037 struct iucv_message txmsg = {0}; 1038 struct iucv_message txmsg = {0};
1038 struct cmsghdr *cmsg; 1039 struct cmsghdr *cmsg;
@@ -1110,20 +1111,31 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1110 * this is fine for SOCK_SEQPACKET (unless we want to support 1111 * this is fine for SOCK_SEQPACKET (unless we want to support
1111 * segmented records using the MSG_EOR flag), but 1112 * segmented records using the MSG_EOR flag), but
1112 * for SOCK_STREAM we might want to improve it in future */ 1113 * for SOCK_STREAM we might want to improve it in future */
1113 if (iucv->transport == AF_IUCV_TRANS_HIPER) 1114 headroom = (iucv->transport == AF_IUCV_TRANS_HIPER)
1114 skb = sock_alloc_send_skb(sk, 1115 ? sizeof(struct af_iucv_trans_hdr) + ETH_HLEN : 0;
1115 len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN, 1116 if (headroom + len < PAGE_SIZE) {
1116 noblock, &err); 1117 linear = len;
1117 else 1118 } else {
1118 skb = sock_alloc_send_skb(sk, len, noblock, &err); 1119 /* In a nonlinear "classic" iucv skb,
1120 * reserve space for the iucv_array
1121 */
1122 if (iucv->transport != AF_IUCV_TRANS_HIPER)
1123 headroom += sizeof(struct iucv_array) *
1124 (MAX_SKB_FRAGS + 1);
1125 linear = PAGE_SIZE - headroom;
1126 }
1127 skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
1128 noblock, &err, 0);
1119 if (!skb) 1129 if (!skb)
1120 goto out; 1130 goto out;
1121 if (iucv->transport == AF_IUCV_TRANS_HIPER) 1131 if (headroom)
1122 skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN); 1132 skb_reserve(skb, headroom);
1123 if (memcpy_from_msg(skb_put(skb, len), msg, len)) { 1133 skb_put(skb, linear);
1124 err = -EFAULT; 1134 skb->len = len;
1135 skb->data_len = len - linear;
1136 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1137 if (err)
1125 goto fail; 1138 goto fail;
1126 }
1127 1139
1128 /* wait if outstanding messages for iucv path has reached */ 1140 /* wait if outstanding messages for iucv path has reached */
1129 timeo = sock_sndtimeo(sk, noblock); 1141 timeo = sock_sndtimeo(sk, noblock);
@@ -1148,49 +1160,67 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1148 atomic_dec(&iucv->msg_sent); 1160 atomic_dec(&iucv->msg_sent);
1149 goto fail; 1161 goto fail;
1150 } 1162 }
1151 goto release; 1163 } else { /* Classic VM IUCV transport */
1152 } 1164 skb_queue_tail(&iucv->send_skb_q, skb);
1153 skb_queue_tail(&iucv->send_skb_q, skb); 1165
1154 1166 if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
1155 if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) 1167 skb->len <= 7) {
1156 && skb->len <= 7) { 1168 err = iucv_send_iprm(iucv->path, &txmsg, skb);
1157 err = iucv_send_iprm(iucv->path, &txmsg, skb); 1169
1170 /* on success: there is no message_complete callback */
1171 /* for an IPRMDATA msg; remove skb from send queue */
1172 if (err == 0) {
1173 skb_unlink(skb, &iucv->send_skb_q);
1174 kfree_skb(skb);
1175 }
1158 1176
1159 /* on success: there is no message_complete callback 1177 /* this error should never happen since the */
1160 * for an IPRMDATA msg; remove skb from send queue */ 1178 /* IUCV_IPRMDATA path flag is set... sever path */
1161 if (err == 0) { 1179 if (err == 0x15) {
1162 skb_unlink(skb, &iucv->send_skb_q); 1180 pr_iucv->path_sever(iucv->path, NULL);
1163 kfree_skb(skb); 1181 skb_unlink(skb, &iucv->send_skb_q);
1182 err = -EPIPE;
1183 goto fail;
1184 }
1185 } else if (skb_is_nonlinear(skb)) {
1186 struct iucv_array *iba = (struct iucv_array *)skb->head;
1187 int i;
1188
1189 /* skip iucv_array lying in the headroom */
1190 iba[0].address = (u32)(addr_t)skb->data;
1191 iba[0].length = (u32)skb_headlen(skb);
1192 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1193 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1194
1195 iba[i + 1].address =
1196 (u32)(addr_t)skb_frag_address(frag);
1197 iba[i + 1].length = (u32)skb_frag_size(frag);
1198 }
1199 err = pr_iucv->message_send(iucv->path, &txmsg,
1200 IUCV_IPBUFLST, 0,
1201 (void *)iba, skb->len);
1202 } else { /* non-IPRM Linear skb */
1203 err = pr_iucv->message_send(iucv->path, &txmsg,
1204 0, 0, (void *)skb->data, skb->len);
1164 } 1205 }
1165 1206 if (err) {
1166 /* this error should never happen since the 1207 if (err == 3) {
1167 * IUCV_IPRMDATA path flag is set... sever path */ 1208 user_id[8] = 0;
1168 if (err == 0x15) { 1209 memcpy(user_id, iucv->dst_user_id, 8);
1169 pr_iucv->path_sever(iucv->path, NULL); 1210 appl_id[8] = 0;
1211 memcpy(appl_id, iucv->dst_name, 8);
1212 pr_err(
1213 "Application %s on z/VM guest %s exceeds message limit\n",
1214 appl_id, user_id);
1215 err = -EAGAIN;
1216 } else {
1217 err = -EPIPE;
1218 }
1170 skb_unlink(skb, &iucv->send_skb_q); 1219 skb_unlink(skb, &iucv->send_skb_q);
1171 err = -EPIPE;
1172 goto fail; 1220 goto fail;
1173 } 1221 }
1174 } else
1175 err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
1176 (void *) skb->data, skb->len);
1177 if (err) {
1178 if (err == 3) {
1179 user_id[8] = 0;
1180 memcpy(user_id, iucv->dst_user_id, 8);
1181 appl_id[8] = 0;
1182 memcpy(appl_id, iucv->dst_name, 8);
1183 pr_err("Application %s on z/VM guest %s"
1184 " exceeds message limit\n",
1185 appl_id, user_id);
1186 err = -EAGAIN;
1187 } else
1188 err = -EPIPE;
1189 skb_unlink(skb, &iucv->send_skb_q);
1190 goto fail;
1191 } 1222 }
1192 1223
1193release:
1194 release_sock(sk); 1224 release_sock(sk);
1195 return len; 1225 return len;
1196 1226
@@ -1201,42 +1231,32 @@ out:
1201 return err; 1231 return err;
1202} 1232}
1203 1233
1204/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's 1234static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
1205 *
1206 * Locking: must be called with message_q.lock held
1207 */
1208static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
1209{ 1235{
1210 int dataleft, size, copied = 0; 1236 size_t headroom, linear;
1211 struct sk_buff *nskb; 1237 struct sk_buff *skb;
1212 1238 int err;
1213 dataleft = len;
1214 while (dataleft) {
1215 if (dataleft >= sk->sk_rcvbuf / 4)
1216 size = sk->sk_rcvbuf / 4;
1217 else
1218 size = dataleft;
1219
1220 nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
1221 if (!nskb)
1222 return -ENOMEM;
1223
1224 /* copy target class to control buffer of new skb */
1225 IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
1226
1227 /* copy data fragment */
1228 memcpy(nskb->data, skb->data + copied, size);
1229 copied += size;
1230 dataleft -= size;
1231
1232 skb_reset_transport_header(nskb);
1233 skb_reset_network_header(nskb);
1234 nskb->len = size;
1235 1239
1236 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb); 1240 if (len < PAGE_SIZE) {
1241 headroom = 0;
1242 linear = len;
1243 } else {
1244 headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
1245 linear = PAGE_SIZE - headroom;
1246 }
1247 skb = alloc_skb_with_frags(headroom + linear, len - linear,
1248 0, &err, GFP_ATOMIC | GFP_DMA);
1249 WARN_ONCE(!skb,
1250 "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
1251 len, err);
1252 if (skb) {
1253 if (headroom)
1254 skb_reserve(skb, headroom);
1255 skb_put(skb, linear);
1256 skb->len = len;
1257 skb->data_len = len - linear;
1237 } 1258 }
1238 1259 return skb;
1239 return 0;
1240} 1260}
1241 1261
1242/* iucv_process_message() - Receive a single outstanding IUCV message 1262/* iucv_process_message() - Receive a single outstanding IUCV message
@@ -1263,31 +1283,32 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1263 skb->len = 0; 1283 skb->len = 0;
1264 } 1284 }
1265 } else { 1285 } else {
1266 rc = pr_iucv->message_receive(path, msg, 1286 if (skb_is_nonlinear(skb)) {
1287 struct iucv_array *iba = (struct iucv_array *)skb->head;
1288 int i;
1289
1290 iba[0].address = (u32)(addr_t)skb->data;
1291 iba[0].length = (u32)skb_headlen(skb);
1292 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1293 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1294
1295 iba[i + 1].address =
1296 (u32)(addr_t)skb_frag_address(frag);
1297 iba[i + 1].length = (u32)skb_frag_size(frag);
1298 }
1299 rc = pr_iucv->message_receive(path, msg,
1300 IUCV_IPBUFLST,
1301 (void *)iba, len, NULL);
1302 } else {
1303 rc = pr_iucv->message_receive(path, msg,
1267 msg->flags & IUCV_IPRMDATA, 1304 msg->flags & IUCV_IPRMDATA,
1268 skb->data, len, NULL); 1305 skb->data, len, NULL);
1306 }
1269 if (rc) { 1307 if (rc) {
1270 kfree_skb(skb); 1308 kfree_skb(skb);
1271 return; 1309 return;
1272 } 1310 }
1273 /* we need to fragment iucv messages for SOCK_STREAM only; 1311 WARN_ON_ONCE(skb->len != len);
1274 * for SOCK_SEQPACKET, it is only relevant if we support
1275 * record segmentation using MSG_EOR (see also recvmsg()) */
1276 if (sk->sk_type == SOCK_STREAM &&
1277 skb->truesize >= sk->sk_rcvbuf / 4) {
1278 rc = iucv_fragment_skb(sk, skb, len);
1279 kfree_skb(skb);
1280 skb = NULL;
1281 if (rc) {
1282 pr_iucv->path_sever(path, NULL);
1283 return;
1284 }
1285 skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
1286 } else {
1287 skb_reset_transport_header(skb);
1288 skb_reset_network_header(skb);
1289 skb->len = len;
1290 }
1291 } 1312 }
1292 1313
1293 IUCV_SKB_CB(skb)->offset = 0; 1314 IUCV_SKB_CB(skb)->offset = 0;
@@ -1306,7 +1327,7 @@ static void iucv_process_message_q(struct sock *sk)
1306 struct sock_msg_q *p, *n; 1327 struct sock_msg_q *p, *n;
1307 1328
1308 list_for_each_entry_safe(p, n, &iucv->message_q.list, list) { 1329 list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
1309 skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA); 1330 skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
1310 if (!skb) 1331 if (!skb)
1311 break; 1332 break;
1312 iucv_process_message(sk, skb, p->path, &p->msg); 1333 iucv_process_message(sk, skb, p->path, &p->msg);
@@ -1801,7 +1822,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1801 if (len > sk->sk_rcvbuf) 1822 if (len > sk->sk_rcvbuf)
1802 goto save_message; 1823 goto save_message;
1803 1824
1804 skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA); 1825 skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
1805 if (!skb) 1826 if (!skb)
1806 goto save_message; 1827 goto save_message;
1807 1828
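
The iucv_array pattern above is used on both the send path (iucv_sock_sendmsg) and the receive path (alloc_iucv_recv_skb/iucv_process_message): headroom reserved at allocation time holds MAX_SKB_FRAGS + 1 descriptors, entry 0 covering the skb's linear part and one entry per page fragment, so large messages no longer need one huge linear buffer. A condensed sketch of the descriptor fill (the helper name is illustrative; the patch open-codes this in both places):

	/* Sketch: fill the iucv_array kept in the skb headroom.  Assumes the
	 * skb was allocated with room for MAX_SKB_FRAGS + 1 descriptors, as
	 * iucv_sock_sendmsg() and alloc_iucv_recv_skb() above arrange.
	 */
	static void iucv_fill_iba(struct sk_buff *skb)
	{
		struct iucv_array *iba = (struct iucv_array *)skb->head;
		int i;

		iba[0].address = (u32)(addr_t)skb->data;	/* linear part */
		iba[0].length = (u32)skb_headlen(skb);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			iba[i + 1].address = (u32)(addr_t)skb_frag_address(frag);
			iba[i + 1].length = (u32)skb_frag_size(frag);
		}
	}

The filled list is then handed to message_send()/message_receive() with the IUCV_IPBUFLST flag instead of a flat data pointer.
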
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index e253c26f31ac..57fc5a46ce06 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -67,7 +67,6 @@ static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
67 return net_generic(net, l2tp_eth_net_id); 67 return net_generic(net, l2tp_eth_net_id);
68} 68}
69 69
70static struct lock_class_key l2tp_eth_tx_busylock;
71static int l2tp_eth_dev_init(struct net_device *dev) 70static int l2tp_eth_dev_init(struct net_device *dev)
72{ 71{
73 struct l2tp_eth *priv = netdev_priv(dev); 72 struct l2tp_eth *priv = netdev_priv(dev);
@@ -75,7 +74,8 @@ static int l2tp_eth_dev_init(struct net_device *dev)
75 priv->dev = dev; 74 priv->dev = dev;
76 eth_hw_addr_random(dev); 75 eth_hw_addr_random(dev);
77 eth_broadcast_addr(dev->broadcast); 76 eth_broadcast_addr(dev->broadcast);
78 dev->qdisc_tx_busylock = &l2tp_eth_tx_busylock; 77 netdev_lockdep_set_classes(dev);
78
79 return 0; 79 return 0;
80} 80}
81 81
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 6c54e03fe9c1..ea2ae6664cc8 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -611,6 +611,11 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
611 611
612 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 612 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
613 613
614 if (ipc6.tclass < 0)
615 ipc6.tclass = np->tclass;
616
617 fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
618
614 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 619 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
615 if (IS_ERR(dst)) { 620 if (IS_ERR(dst)) {
616 err = PTR_ERR(dst); 621 err = PTR_ERR(dst);
@@ -620,9 +625,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
620 if (ipc6.hlimit < 0) 625 if (ipc6.hlimit < 0)
621 ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); 626 ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
622 627
623 if (ipc6.tclass < 0)
624 ipc6.tclass = np->tclass;
625
626 if (ipc6.dontfrag < 0) 628 if (ipc6.dontfrag < 0)
627 ipc6.dontfrag = np->dontfrag; 629 ipc6.dontfrag = np->dontfrag;
628 630
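
Moving the tclass default ahead of the route lookup matters because the added line folds it into the flow label that ip6_dst_lookup_flow() sees: ip6_make_flowinfo() packs the traffic class into the upper bits of the flowinfo word, roughly (IPV6_TCLASS_SHIFT is 20 in the ipv6 headers):

	/* Sketch of what the ip6_make_flowinfo() call above computes */
	fl6.flowlabel = htonl(ipc6.tclass << IPV6_TCLASS_SHIFT) | fl6.flowlabel;

so the socket's default traffic class has to be resolved before the lookup for it to influence routing at all.
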
diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
index 6651a78e100c..c4a1c3e84e12 100644
--- a/net/l3mdev/l3mdev.c
+++ b/net/l3mdev/l3mdev.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <net/fib_rules.h>
13#include <net/l3mdev.h> 14#include <net/l3mdev.h>
14 15
15/** 16/**
@@ -107,7 +108,7 @@ EXPORT_SYMBOL_GPL(l3mdev_fib_table_by_index);
107 */ 108 */
108 109
109struct dst_entry *l3mdev_get_rt6_dst(struct net *net, 110struct dst_entry *l3mdev_get_rt6_dst(struct net *net,
110 const struct flowi6 *fl6) 111 struct flowi6 *fl6)
111{ 112{
112 struct dst_entry *dst = NULL; 113 struct dst_entry *dst = NULL;
113 struct net_device *dev; 114 struct net_device *dev;
@@ -160,3 +161,64 @@ int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4)
160 return rc; 161 return rc;
161} 162}
162EXPORT_SYMBOL_GPL(l3mdev_get_saddr); 163EXPORT_SYMBOL_GPL(l3mdev_get_saddr);
164
165int l3mdev_get_saddr6(struct net *net, const struct sock *sk,
166 struct flowi6 *fl6)
167{
168 struct net_device *dev;
169 int rc = 0;
170
171 if (fl6->flowi6_oif) {
172 rcu_read_lock();
173
174 dev = dev_get_by_index_rcu(net, fl6->flowi6_oif);
175 if (dev && netif_is_l3_slave(dev))
176 dev = netdev_master_upper_dev_get_rcu(dev);
177
178 if (dev && netif_is_l3_master(dev) &&
179 dev->l3mdev_ops->l3mdev_get_saddr6)
180 rc = dev->l3mdev_ops->l3mdev_get_saddr6(dev, sk, fl6);
181
182 rcu_read_unlock();
183 }
184
185 return rc;
186}
187EXPORT_SYMBOL_GPL(l3mdev_get_saddr6);
188
189/**
190 * l3mdev_fib_rule_match - Determine if flowi references an
191 * L3 master device
192 * @net: network namespace for device index lookup
193 * @fl: flow struct
194 */
195
196int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
197 struct fib_lookup_arg *arg)
198{
199 struct net_device *dev;
200 int rc = 0;
201
202 rcu_read_lock();
203
204 dev = dev_get_by_index_rcu(net, fl->flowi_oif);
205 if (dev && netif_is_l3_master(dev) &&
206 dev->l3mdev_ops->l3mdev_fib_table) {
207 arg->table = dev->l3mdev_ops->l3mdev_fib_table(dev);
208 rc = 1;
209 goto out;
210 }
211
212 dev = dev_get_by_index_rcu(net, fl->flowi_iif);
213 if (dev && netif_is_l3_master(dev) &&
214 dev->l3mdev_ops->l3mdev_fib_table) {
215 arg->table = dev->l3mdev_ops->l3mdev_fib_table(dev);
216 rc = 1;
217 goto out;
218 }
219
220out:
221 rcu_read_unlock();
222
223 return rc;
224}
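
l3mdev_fib_rule_match() is intended for FIB rule evaluation: when the flow's oif or iif belongs to an L3 master device, the rule resolves to that device's table instead of a fixed table id. A hedged sketch of such a call site (the function and its return-code policy are illustrative, not part of this patch):

	/* Sketch: let a flow owned by an l3mdev pick its own FIB table */
	static int example_l3mdev_rule_action(struct net *net, struct flowi *fl,
					      struct fib_lookup_arg *arg)
	{
		if (l3mdev_fib_rule_match(net, fl, arg))
			return 0;	/* arg->table now holds the device's table id */
		return -EAGAIN;		/* not an l3mdev flow; try the next rule */
	}
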
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 42fa81031dfa..5650c46bf91a 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -194,17 +194,21 @@ static void
194ieee80211_agg_stop_txq(struct sta_info *sta, int tid) 194ieee80211_agg_stop_txq(struct sta_info *sta, int tid)
195{ 195{
196 struct ieee80211_txq *txq = sta->sta.txq[tid]; 196 struct ieee80211_txq *txq = sta->sta.txq[tid];
197 struct ieee80211_sub_if_data *sdata;
198 struct fq *fq;
197 struct txq_info *txqi; 199 struct txq_info *txqi;
198 200
199 if (!txq) 201 if (!txq)
200 return; 202 return;
201 203
202 txqi = to_txq_info(txq); 204 txqi = to_txq_info(txq);
205 sdata = vif_to_sdata(txq->vif);
206 fq = &sdata->local->fq;
203 207
204 /* Lock here to protect against further seqno updates on dequeue */ 208 /* Lock here to protect against further seqno updates on dequeue */
205 spin_lock_bh(&txqi->queue.lock); 209 spin_lock_bh(&fq->lock);
206 set_bit(IEEE80211_TXQ_STOP, &txqi->flags); 210 set_bit(IEEE80211_TXQ_STOP, &txqi->flags);
207 spin_unlock_bh(&txqi->queue.lock); 211 spin_unlock_bh(&fq->lock);
208} 212}
209 213
210static void 214static void
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index b251b2f7f8dd..2906c1004e1a 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/debugfs.h> 11#include <linux/debugfs.h>
12#include <linux/rtnetlink.h> 12#include <linux/rtnetlink.h>
13#include <linux/vmalloc.h>
13#include "ieee80211_i.h" 14#include "ieee80211_i.h"
14#include "driver-ops.h" 15#include "driver-ops.h"
15#include "rate.h" 16#include "rate.h"
@@ -70,6 +71,177 @@ DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
70DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s", 71DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s",
71 local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver"); 72 local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver");
72 73
74struct aqm_info {
75 struct ieee80211_local *local;
76 size_t size;
77 size_t len;
78 unsigned char buf[0];
79};
80
81#define AQM_HDR_LEN 200
82#define AQM_HW_ENTRY_LEN 40
83#define AQM_TXQ_ENTRY_LEN 110
84
85static int aqm_open(struct inode *inode, struct file *file)
86{
87 struct ieee80211_local *local = inode->i_private;
88 struct ieee80211_sub_if_data *sdata;
89 struct sta_info *sta;
90 struct txq_info *txqi;
91 struct fq *fq = &local->fq;
92 struct aqm_info *info = NULL;
93 int len = 0;
94 int i;
95
96 if (!local->ops->wake_tx_queue)
97 return -EOPNOTSUPP;
98
99 len += AQM_HDR_LEN;
100 len += 6 * AQM_HW_ENTRY_LEN;
101
102 rcu_read_lock();
103 list_for_each_entry_rcu(sdata, &local->interfaces, list)
104 len += AQM_TXQ_ENTRY_LEN;
105 list_for_each_entry_rcu(sta, &local->sta_list, list)
106 len += AQM_TXQ_ENTRY_LEN * ARRAY_SIZE(sta->sta.txq);
107 rcu_read_unlock();
108
109 info = vmalloc(len);
110 if (!info)
111 return -ENOMEM;
112
113 spin_lock_bh(&local->fq.lock);
114 rcu_read_lock();
115
116 file->private_data = info;
117 info->local = local;
118 info->size = len;
119 len = 0;
120
121 len += scnprintf(info->buf + len, info->size - len,
122 "* hw\n"
123 "access name value\n"
124 "R fq_flows_cnt %u\n"
125 "R fq_backlog %u\n"
126 "R fq_overlimit %u\n"
127 "R fq_collisions %u\n"
128 "RW fq_limit %u\n"
129 "RW fq_quantum %u\n",
130 fq->flows_cnt,
131 fq->backlog,
132 fq->overlimit,
133 fq->collisions,
134 fq->limit,
135 fq->quantum);
136
137 len += scnprintf(info->buf + len,
138 info->size - len,
139 "* vif\n"
140 "ifname addr ac backlog-bytes backlog-packets flows overlimit collisions tx-bytes tx-packets\n");
141
142 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
143 txqi = to_txq_info(sdata->vif.txq);
144 len += scnprintf(info->buf + len, info->size - len,
145 "%s %pM %u %u %u %u %u %u %u %u\n",
146 sdata->name,
147 sdata->vif.addr,
148 txqi->txq.ac,
149 txqi->tin.backlog_bytes,
150 txqi->tin.backlog_packets,
151 txqi->tin.flows,
152 txqi->tin.overlimit,
153 txqi->tin.collisions,
154 txqi->tin.tx_bytes,
155 txqi->tin.tx_packets);
156 }
157
158 len += scnprintf(info->buf + len,
159 info->size - len,
160 "* sta\n"
161 "ifname addr tid ac backlog-bytes backlog-packets flows overlimit collisions tx-bytes tx-packets\n");
162
163 list_for_each_entry_rcu(sta, &local->sta_list, list) {
164 sdata = sta->sdata;
165 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
166 txqi = to_txq_info(sta->sta.txq[i]);
167 len += scnprintf(info->buf + len, info->size - len,
168 "%s %pM %d %d %u %u %u %u %u %u %u\n",
169 sdata->name,
170 sta->sta.addr,
171 txqi->txq.tid,
172 txqi->txq.ac,
173 txqi->tin.backlog_bytes,
174 txqi->tin.backlog_packets,
175 txqi->tin.flows,
176 txqi->tin.overlimit,
177 txqi->tin.collisions,
178 txqi->tin.tx_bytes,
179 txqi->tin.tx_packets);
180 }
181 }
182
183 info->len = len;
184
185 rcu_read_unlock();
186 spin_unlock_bh(&local->fq.lock);
187
188 return 0;
189}
190
191static int aqm_release(struct inode *inode, struct file *file)
192{
193 vfree(file->private_data);
194 return 0;
195}
196
197static ssize_t aqm_read(struct file *file,
198 char __user *user_buf,
199 size_t count,
200 loff_t *ppos)
201{
202 struct aqm_info *info = file->private_data;
203
204 return simple_read_from_buffer(user_buf, count, ppos,
205 info->buf, info->len);
206}
207
208static ssize_t aqm_write(struct file *file,
209 const char __user *user_buf,
210 size_t count,
211 loff_t *ppos)
212{
213 struct aqm_info *info = file->private_data;
214 struct ieee80211_local *local = info->local;
215 char buf[100];
216 size_t len;
217
218 if (count > sizeof(buf))
219 return -EINVAL;
220
221 if (copy_from_user(buf, user_buf, count))
222 return -EFAULT;
223
224 buf[sizeof(buf) - 1] = '\0';
225 len = strlen(buf);
226 if (len > 0 && buf[len-1] == '\n')
227 buf[len-1] = 0;
228
229 if (sscanf(buf, "fq_limit %u", &local->fq.limit) == 1)
230 return count;
231 else if (sscanf(buf, "fq_quantum %u", &local->fq.quantum) == 1)
232 return count;
233
234 return -EINVAL;
235}
236
237static const struct file_operations aqm_ops = {
238 .write = aqm_write,
239 .read = aqm_read,
240 .open = aqm_open,
241 .release = aqm_release,
242 .llseek = default_llseek,
243};
244
73#ifdef CONFIG_PM 245#ifdef CONFIG_PM
74static ssize_t reset_write(struct file *file, const char __user *user_buf, 246static ssize_t reset_write(struct file *file, const char __user *user_buf,
75 size_t count, loff_t *ppos) 247 size_t count, loff_t *ppos)
@@ -256,6 +428,7 @@ void debugfs_hw_add(struct ieee80211_local *local)
256 DEBUGFS_ADD(hwflags); 428 DEBUGFS_ADD(hwflags);
257 DEBUGFS_ADD(user_power); 429 DEBUGFS_ADD(user_power);
258 DEBUGFS_ADD(power); 430 DEBUGFS_ADD(power);
431 DEBUGFS_ADD_MODE(aqm, 0600);
259 432
260 statsd = debugfs_create_dir("statistics", phyd); 433 statsd = debugfs_create_dir("statistics", phyd);
261 434
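
Because aqm_write() accepts plain "fq_limit %u" and "fq_quantum %u" strings, the two RW knobs in the new file can be tuned with a short write to the per-phy debugfs entry. A minimal userspace sketch (assumes debugfs is mounted at the usual location and phy0 is the target device; needs root):

	#include <stdio.h>

	int main(void)
	{
		/* format must match the sscanf() patterns in aqm_write() */
		FILE *f = fopen("/sys/kernel/debug/ieee80211/phy0/aqm", "w");

		if (!f)
			return 1;
		fprintf(f, "fq_limit 8192\n");
		return fclose(f) ? 1 : 0;
	}
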
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 33dfcbc2bf9c..fd334133ff45 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -328,14 +328,88 @@ STA_OPS(ht_capa);
328static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf, 328static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf,
329 size_t count, loff_t *ppos) 329 size_t count, loff_t *ppos)
330{ 330{
331 char buf[128], *p = buf; 331 char buf[512], *p = buf;
332 struct sta_info *sta = file->private_data; 332 struct sta_info *sta = file->private_data;
333 struct ieee80211_sta_vht_cap *vhtc = &sta->sta.vht_cap; 333 struct ieee80211_sta_vht_cap *vhtc = &sta->sta.vht_cap;
334 334
335 p += scnprintf(p, sizeof(buf) + buf - p, "VHT %ssupported\n", 335 p += scnprintf(p, sizeof(buf) + buf - p, "VHT %ssupported\n",
336 vhtc->vht_supported ? "" : "not "); 336 vhtc->vht_supported ? "" : "not ");
337 if (vhtc->vht_supported) { 337 if (vhtc->vht_supported) {
338 p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.8x\n", vhtc->cap); 338 p += scnprintf(p, sizeof(buf) + buf - p, "cap: %#.8x\n",
339 vhtc->cap);
340#define PFLAG(a, b) \
341 do { \
342 if (vhtc->cap & IEEE80211_VHT_CAP_ ## a) \
343 p += scnprintf(p, sizeof(buf) + buf - p, \
344 "\t\t%s\n", b); \
345 } while (0)
346
347 switch (vhtc->cap & 0x3) {
348 case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895:
349 p += scnprintf(p, sizeof(buf) + buf - p,
350 "\t\tMAX-MPDU-3895\n");
351 break;
352 case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
353 p += scnprintf(p, sizeof(buf) + buf - p,
354 "\t\tMAX-MPDU-7991\n");
355 break;
356 case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
357 p += scnprintf(p, sizeof(buf) + buf - p,
358 "\t\tMAX-MPDU-11454\n");
359 break;
360 default:
361 p += scnprintf(p, sizeof(buf) + buf - p,
362 "\t\tMAX-MPDU-UNKNOWN\n");
363 };
364 switch (vhtc->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
365 case 0:
366 p += scnprintf(p, sizeof(buf) + buf - p,
367 "\t\t80Mhz\n");
368 break;
369 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
370 p += scnprintf(p, sizeof(buf) + buf - p,
371 "\t\t160Mhz\n");
372 break;
373 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
374 p += scnprintf(p, sizeof(buf) + buf - p,
375 "\t\t80+80Mhz\n");
376 break;
377 default:
378 p += scnprintf(p, sizeof(buf) + buf - p,
379 "\t\tUNKNOWN-MHZ: 0x%x\n",
380 (vhtc->cap >> 2) & 0x3);
381 };
382 PFLAG(RXLDPC, "RXLDPC");
383 PFLAG(SHORT_GI_80, "SHORT-GI-80");
384 PFLAG(SHORT_GI_160, "SHORT-GI-160");
385 PFLAG(TXSTBC, "TXSTBC");
386 p += scnprintf(p, sizeof(buf) + buf - p,
387 "\t\tRXSTBC_%d\n", (vhtc->cap >> 8) & 0x7);
388 PFLAG(SU_BEAMFORMER_CAPABLE, "SU-BEAMFORMER-CAPABLE");
389 PFLAG(SU_BEAMFORMEE_CAPABLE, "SU-BEAMFORMEE-CAPABLE");
390 p += scnprintf(p, sizeof(buf) + buf - p,
391 "\t\tBEAMFORMEE-STS: 0x%x\n",
392 (vhtc->cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK) >>
393 IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT);
394 p += scnprintf(p, sizeof(buf) + buf - p,
395 "\t\tSOUNDING-DIMENSIONS: 0x%x\n",
396 (vhtc->cap & IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK)
397 >> IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT);
398 PFLAG(MU_BEAMFORMER_CAPABLE, "MU-BEAMFORMER-CAPABLE");
399 PFLAG(MU_BEAMFORMEE_CAPABLE, "MU-BEAMFORMEE-CAPABLE");
400 PFLAG(VHT_TXOP_PS, "TXOP-PS");
401 PFLAG(HTC_VHT, "HTC-VHT");
402 p += scnprintf(p, sizeof(buf) + buf - p,
403 "\t\tMPDU-LENGTH-EXPONENT: 0x%x\n",
404 (vhtc->cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
405 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
406 PFLAG(VHT_LINK_ADAPTATION_VHT_UNSOL_MFB,
407 "LINK-ADAPTATION-VHT-UNSOL-MFB");
408 p += scnprintf(p, sizeof(buf) + buf - p,
409 "\t\tLINK-ADAPTATION-VHT-MRQ-MFB: 0x%x\n",
410 (vhtc->cap & IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB) >> 26);
411 PFLAG(RX_ANTENNA_PATTERN, "RX-ANTENNA-PATTERN");
412 PFLAG(TX_ANTENNA_PATTERN, "TX-ANTENNA-PATTERN");
339 413
340 p += scnprintf(p, sizeof(buf)+buf-p, "RX MCS: %.4x\n", 414 p += scnprintf(p, sizeof(buf)+buf-p, "RX MCS: %.4x\n",
341 le16_to_cpu(vhtc->vht_mcs.rx_mcs_map)); 415 le16_to_cpu(vhtc->vht_mcs.rx_mcs_map));
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 9438c9406687..54edfb6fc1d1 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -30,6 +30,7 @@
30#include <net/ieee80211_radiotap.h> 30#include <net/ieee80211_radiotap.h>
31#include <net/cfg80211.h> 31#include <net/cfg80211.h>
32#include <net/mac80211.h> 32#include <net/mac80211.h>
33#include <net/fq.h>
33#include "key.h" 34#include "key.h"
34#include "sta_info.h" 35#include "sta_info.h"
35#include "debug.h" 36#include "debug.h"
@@ -805,10 +806,19 @@ enum txq_info_flags {
805 IEEE80211_TXQ_NO_AMSDU, 806 IEEE80211_TXQ_NO_AMSDU,
806}; 807};
807 808
809/**
810 * struct txq_info - per tid queue
811 *
812 * @tin: contains packets split into multiple flows
813 * @def_flow: used as a fallback flow when a packet destined to @tin hashes to
814 * a fq_flow which is already owned by a different tin
815 * @def_cvars: codel vars for @def_flow
816 */
808struct txq_info { 817struct txq_info {
809 struct sk_buff_head queue; 818 struct fq_tin tin;
819 struct fq_flow def_flow;
820 struct codel_vars def_cvars;
810 unsigned long flags; 821 unsigned long flags;
811 unsigned long byte_cnt;
812 822
813 /* keep last! */ 823 /* keep last! */
814 struct ieee80211_txq txq; 824 struct ieee80211_txq txq;
@@ -856,7 +866,7 @@ struct ieee80211_sub_if_data {
856 bool control_port_no_encrypt; 866 bool control_port_no_encrypt;
857 int encrypt_headroom; 867 int encrypt_headroom;
858 868
859 atomic_t txqs_len[IEEE80211_NUM_ACS]; 869 atomic_t num_tx_queued;
860 struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS]; 870 struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
861 struct mac80211_qos_map __rcu *qos_map; 871 struct mac80211_qos_map __rcu *qos_map;
862 872
@@ -1099,6 +1109,11 @@ struct ieee80211_local {
1099 * it first anyway so they become a no-op */ 1109 * it first anyway so they become a no-op */
1100 struct ieee80211_hw hw; 1110 struct ieee80211_hw hw;
1101 1111
1112 struct fq fq;
1113 struct codel_vars *cvars;
1114 struct codel_params cparams;
1115 struct codel_stats cstats;
1116
1102 const struct ieee80211_ops *ops; 1117 const struct ieee80211_ops *ops;
1103 1118
1104 /* 1119 /*
@@ -1931,9 +1946,13 @@ static inline bool ieee80211_can_run_worker(struct ieee80211_local *local)
1931 return true; 1946 return true;
1932} 1947}
1933 1948
1934void ieee80211_init_tx_queue(struct ieee80211_sub_if_data *sdata, 1949int ieee80211_txq_setup_flows(struct ieee80211_local *local);
1935 struct sta_info *sta, 1950void ieee80211_txq_teardown_flows(struct ieee80211_local *local);
1936 struct txq_info *txq, int tid); 1951void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
1952 struct sta_info *sta,
1953 struct txq_info *txq, int tid);
1954void ieee80211_txq_purge(struct ieee80211_local *local,
1955 struct txq_info *txqi);
1937void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, 1956void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1938 u16 transaction, u16 auth_alg, u16 status, 1957 u16 transaction, u16 auth_alg, u16 status,
1939 const u8 *extra, size_t extra_len, const u8 *bssid, 1958 const u8 *extra, size_t extra_len, const u8 *bssid,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index c59af3eb9fa4..b123a9e325b3 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -779,6 +779,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
779 bool going_down) 779 bool going_down)
780{ 780{
781 struct ieee80211_local *local = sdata->local; 781 struct ieee80211_local *local = sdata->local;
782 struct fq *fq = &local->fq;
782 unsigned long flags; 783 unsigned long flags;
783 struct sk_buff *skb, *tmp; 784 struct sk_buff *skb, *tmp;
784 u32 hw_reconf_flags = 0; 785 u32 hw_reconf_flags = 0;
@@ -977,12 +978,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
977 if (sdata->vif.txq) { 978 if (sdata->vif.txq) {
978 struct txq_info *txqi = to_txq_info(sdata->vif.txq); 979 struct txq_info *txqi = to_txq_info(sdata->vif.txq);
979 980
980 spin_lock_bh(&txqi->queue.lock); 981 spin_lock_bh(&fq->lock);
981 ieee80211_purge_tx_queue(&local->hw, &txqi->queue); 982 ieee80211_txq_purge(local, txqi);
982 txqi->byte_cnt = 0; 983 spin_unlock_bh(&fq->lock);
983 spin_unlock_bh(&txqi->queue.lock);
984
985 atomic_set(&sdata->txqs_len[txqi->txq.ac], 0);
986 } 984 }
987 985
988 if (local->open_count == 0) 986 if (local->open_count == 0)
@@ -1198,6 +1196,12 @@ static void ieee80211_if_setup(struct net_device *dev)
1198 dev->destructor = ieee80211_if_free; 1196 dev->destructor = ieee80211_if_free;
1199} 1197}
1200 1198
1199static void ieee80211_if_setup_no_queue(struct net_device *dev)
1200{
1201 ieee80211_if_setup(dev);
1202 dev->priv_flags |= IFF_NO_QUEUE;
1203}
1204
1201static void ieee80211_iface_work(struct work_struct *work) 1205static void ieee80211_iface_work(struct work_struct *work)
1202{ 1206{
1203 struct ieee80211_sub_if_data *sdata = 1207 struct ieee80211_sub_if_data *sdata =
@@ -1707,6 +1711,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1707 struct net_device *ndev = NULL; 1711 struct net_device *ndev = NULL;
1708 struct ieee80211_sub_if_data *sdata = NULL; 1712 struct ieee80211_sub_if_data *sdata = NULL;
1709 struct txq_info *txqi; 1713 struct txq_info *txqi;
1714 void (*if_setup)(struct net_device *dev);
1710 int ret, i; 1715 int ret, i;
1711 int txqs = 1; 1716 int txqs = 1;
1712 1717
@@ -1734,12 +1739,17 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1734 txq_size += sizeof(struct txq_info) + 1739 txq_size += sizeof(struct txq_info) +
1735 local->hw.txq_data_size; 1740 local->hw.txq_data_size;
1736 1741
1742 if (local->ops->wake_tx_queue)
1743 if_setup = ieee80211_if_setup_no_queue;
1744 else
1745 if_setup = ieee80211_if_setup;
1746
1737 if (local->hw.queues >= IEEE80211_NUM_ACS) 1747 if (local->hw.queues >= IEEE80211_NUM_ACS)
1738 txqs = IEEE80211_NUM_ACS; 1748 txqs = IEEE80211_NUM_ACS;
1739 1749
1740 ndev = alloc_netdev_mqs(size + txq_size, 1750 ndev = alloc_netdev_mqs(size + txq_size,
1741 name, name_assign_type, 1751 name, name_assign_type,
1742 ieee80211_if_setup, txqs, 1); 1752 if_setup, txqs, 1);
1743 if (!ndev) 1753 if (!ndev)
1744 return -ENOMEM; 1754 return -ENOMEM;
1745 dev_net_set(ndev, wiphy_net(local->hw.wiphy)); 1755 dev_net_set(ndev, wiphy_net(local->hw.wiphy));
@@ -1780,7 +1790,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1780 1790
1781 if (txq_size) { 1791 if (txq_size) {
1782 txqi = netdev_priv(ndev) + size; 1792 txqi = netdev_priv(ndev) + size;
1783 ieee80211_init_tx_queue(sdata, NULL, txqi, 0); 1793 ieee80211_txq_init(sdata, NULL, txqi, 0);
1784 } 1794 }
1785 1795
1786 sdata->dev = ndev; 1796 sdata->dev = ndev;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 7ee91d6151d1..d00ea9b13f49 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1055,9 +1055,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1055 1055
1056 local->dynamic_ps_forced_timeout = -1; 1056 local->dynamic_ps_forced_timeout = -1;
1057 1057
1058 if (!local->hw.txq_ac_max_pending)
1059 local->hw.txq_ac_max_pending = 64;
1060
1061 result = ieee80211_wep_init(local); 1058 result = ieee80211_wep_init(local);
1062 if (result < 0) 1059 if (result < 0)
1063 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n", 1060 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
@@ -1089,6 +1086,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1089 1086
1090 rtnl_unlock(); 1087 rtnl_unlock();
1091 1088
1089 result = ieee80211_txq_setup_flows(local);
1090 if (result)
1091 goto fail_flows;
1092
1092#ifdef CONFIG_INET 1093#ifdef CONFIG_INET
1093 local->ifa_notifier.notifier_call = ieee80211_ifa_changed; 1094 local->ifa_notifier.notifier_call = ieee80211_ifa_changed;
1094 result = register_inetaddr_notifier(&local->ifa_notifier); 1095 result = register_inetaddr_notifier(&local->ifa_notifier);
@@ -1114,6 +1115,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1114#if defined(CONFIG_INET) || defined(CONFIG_IPV6) 1115#if defined(CONFIG_INET) || defined(CONFIG_IPV6)
1115 fail_ifa: 1116 fail_ifa:
1116#endif 1117#endif
1118 ieee80211_txq_teardown_flows(local);
1119 fail_flows:
1117 rtnl_lock(); 1120 rtnl_lock();
1118 rate_control_deinitialize(local); 1121 rate_control_deinitialize(local);
1119 ieee80211_remove_interfaces(local); 1122 ieee80211_remove_interfaces(local);
@@ -1172,6 +1175,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1172 skb_queue_purge(&local->skb_queue); 1175 skb_queue_purge(&local->skb_queue);
1173 skb_queue_purge(&local->skb_queue_unreliable); 1176 skb_queue_purge(&local->skb_queue_unreliable);
1174 skb_queue_purge(&local->skb_queue_tdls_chsw); 1177 skb_queue_purge(&local->skb_queue_tdls_chsw);
1178 ieee80211_txq_teardown_flows(local);
1175 1179
1176 destroy_workqueue(local->workqueue); 1180 destroy_workqueue(local->workqueue);
1177 wiphy_unregister(local->hw.wiphy); 1181 wiphy_unregister(local->hw.wiphy);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 5e65e838992a..9a1eb70cb120 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1268,7 +1268,7 @@ static void sta_ps_start(struct sta_info *sta)
1268 for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) { 1268 for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
1269 struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]); 1269 struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
1270 1270
1271 if (!skb_queue_len(&txqi->queue)) 1271 if (!txqi->tin.backlog_packets)
1272 set_bit(tid, &sta->txq_buffered_tids); 1272 set_bit(tid, &sta->txq_buffered_tids);
1273 else 1273 else
1274 clear_bit(tid, &sta->txq_buffered_tids); 1274 clear_bit(tid, &sta->txq_buffered_tids);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 5ccfdbd406bd..76b737dcc36f 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -90,6 +90,7 @@ static void __cleanup_single_sta(struct sta_info *sta)
90 struct tid_ampdu_tx *tid_tx; 90 struct tid_ampdu_tx *tid_tx;
91 struct ieee80211_sub_if_data *sdata = sta->sdata; 91 struct ieee80211_sub_if_data *sdata = sta->sdata;
92 struct ieee80211_local *local = sdata->local; 92 struct ieee80211_local *local = sdata->local;
93 struct fq *fq = &local->fq;
93 struct ps_data *ps; 94 struct ps_data *ps;
94 95
95 if (test_sta_flag(sta, WLAN_STA_PS_STA) || 96 if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
@@ -113,11 +114,10 @@ static void __cleanup_single_sta(struct sta_info *sta)
113 if (sta->sta.txq[0]) { 114 if (sta->sta.txq[0]) {
114 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { 115 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
115 struct txq_info *txqi = to_txq_info(sta->sta.txq[i]); 116 struct txq_info *txqi = to_txq_info(sta->sta.txq[i]);
116 int n = skb_queue_len(&txqi->queue);
117 117
118 ieee80211_purge_tx_queue(&local->hw, &txqi->queue); 118 spin_lock_bh(&fq->lock);
119 atomic_sub(n, &sdata->txqs_len[txqi->txq.ac]); 119 ieee80211_txq_purge(local, txqi);
120 txqi->byte_cnt = 0; 120 spin_unlock_bh(&fq->lock);
121 } 121 }
122 } 122 }
123 123
@@ -368,7 +368,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
368 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { 368 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
369 struct txq_info *txq = txq_data + i * size; 369 struct txq_info *txq = txq_data + i * size;
370 370
371 ieee80211_init_tx_queue(sdata, sta, txq, i); 371 ieee80211_txq_init(sdata, sta, txq, i);
372 } 372 }
373 } 373 }
374 374
@@ -1211,7 +1211,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
1211 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { 1211 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
1212 struct txq_info *txqi = to_txq_info(sta->sta.txq[i]); 1212 struct txq_info *txqi = to_txq_info(sta->sta.txq[i]);
1213 1213
1214 if (!skb_queue_len(&txqi->queue)) 1214 if (!txqi->tin.backlog_packets)
1215 continue; 1215 continue;
1216 1216
1217 drv_wake_tx_queue(local, txqi); 1217 drv_wake_tx_queue(local, txqi);
@@ -1648,7 +1648,7 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
1648 for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) { 1648 for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
1649 struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]); 1649 struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
1650 1650
1651 if (!(tids & BIT(tid)) || skb_queue_len(&txqi->queue)) 1651 if (!(tids & BIT(tid)) || txqi->tin.backlog_packets)
1652 continue; 1652 continue;
1653 1653
1654 sta_info_recalc_tim(sta); 1654 sta_info_recalc_tim(sta);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 203044379ce0..44ec605a5682 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -24,7 +24,10 @@
24#include <net/ieee80211_radiotap.h> 24#include <net/ieee80211_radiotap.h>
25#include <net/cfg80211.h> 25#include <net/cfg80211.h>
26#include <net/mac80211.h> 26#include <net/mac80211.h>
27#include <net/codel.h>
28#include <net/codel_impl.h>
27#include <asm/unaligned.h> 29#include <asm/unaligned.h>
30#include <net/fq_impl.h>
28 31
29#include "ieee80211_i.h" 32#include "ieee80211_i.h"
30#include "driver-ops.h" 33#include "driver-ops.h"
@@ -1236,27 +1239,21 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1236 return TX_CONTINUE; 1239 return TX_CONTINUE;
1237} 1240}
1238 1241
1239static void ieee80211_drv_tx(struct ieee80211_local *local, 1242static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
1240 struct ieee80211_vif *vif, 1243 struct ieee80211_vif *vif,
1241 struct ieee80211_sta *pubsta, 1244 struct ieee80211_sta *pubsta,
1242 struct sk_buff *skb) 1245 struct sk_buff *skb)
1243{ 1246{
1244 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1247 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1245 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1246 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1248 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1247 struct ieee80211_tx_control control = {
1248 .sta = pubsta,
1249 };
1250 struct ieee80211_txq *txq = NULL; 1249 struct ieee80211_txq *txq = NULL;
1251 struct txq_info *txqi;
1252 u8 ac;
1253 1250
1254 if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) || 1251 if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
1255 (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE)) 1252 (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
1256 goto tx_normal; 1253 return NULL;
1257 1254
1258 if (!ieee80211_is_data(hdr->frame_control)) 1255 if (!ieee80211_is_data(hdr->frame_control))
1259 goto tx_normal; 1256 return NULL;
1260 1257
1261 if (pubsta) { 1258 if (pubsta) {
1262 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; 1259 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
@@ -1267,51 +1264,230 @@ static void ieee80211_drv_tx(struct ieee80211_local *local,
1267 } 1264 }
1268 1265
1269 if (!txq) 1266 if (!txq)
1270 goto tx_normal; 1267 return NULL;
1271 1268
1272 ac = txq->ac; 1269 return to_txq_info(txq);
1273 txqi = to_txq_info(txq); 1270}
1274 atomic_inc(&sdata->txqs_len[ac]);
1275 if (atomic_read(&sdata->txqs_len[ac]) >= local->hw.txq_ac_max_pending)
1276 netif_stop_subqueue(sdata->dev, ac);
1277 1271
1278 spin_lock_bh(&txqi->queue.lock); 1272static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
1279 txqi->byte_cnt += skb->len; 1273{
1280 __skb_queue_tail(&txqi->queue, skb); 1274 IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time();
1281 spin_unlock_bh(&txqi->queue.lock); 1275}
1282 1276
1283 drv_wake_tx_queue(local, txqi); 1277static void ieee80211_set_skb_vif(struct sk_buff *skb, struct txq_info *txqi)
1278{
1279 IEEE80211_SKB_CB(skb)->control.vif = txqi->txq.vif;
1280}
1284 1281
1285 return; 1282static u32 codel_skb_len_func(const struct sk_buff *skb)
1283{
1284 return skb->len;
1285}
1286
1287static codel_time_t codel_skb_time_func(const struct sk_buff *skb)
1288{
1289 const struct ieee80211_tx_info *info;
1286 1290
1287tx_normal: 1291 info = (const struct ieee80211_tx_info *)skb->cb;
1288 drv_tx(local, &control, skb); 1292 return info->control.enqueue_time;
1293}
1294
1295static struct sk_buff *codel_dequeue_func(struct codel_vars *cvars,
1296 void *ctx)
1297{
1298 struct ieee80211_local *local;
1299 struct txq_info *txqi;
1300 struct fq *fq;
1301 struct fq_flow *flow;
1302
1303 txqi = ctx;
1304 local = vif_to_sdata(txqi->txq.vif)->local;
1305 fq = &local->fq;
1306
1307 if (cvars == &txqi->def_cvars)
1308 flow = &txqi->def_flow;
1309 else
1310 flow = &fq->flows[cvars - local->cvars];
1311
1312 return fq_flow_dequeue(fq, flow);
1313}
1314
1315static void codel_drop_func(struct sk_buff *skb,
1316 void *ctx)
1317{
1318 struct ieee80211_local *local;
1319 struct ieee80211_hw *hw;
1320 struct txq_info *txqi;
1321
1322 txqi = ctx;
1323 local = vif_to_sdata(txqi->txq.vif)->local;
1324 hw = &local->hw;
1325
1326 ieee80211_free_txskb(hw, skb);
1327}
1328
1329static struct sk_buff *fq_tin_dequeue_func(struct fq *fq,
1330 struct fq_tin *tin,
1331 struct fq_flow *flow)
1332{
1333 struct ieee80211_local *local;
1334 struct txq_info *txqi;
1335 struct codel_vars *cvars;
1336 struct codel_params *cparams;
1337 struct codel_stats *cstats;
1338
1339 local = container_of(fq, struct ieee80211_local, fq);
1340 txqi = container_of(tin, struct txq_info, tin);
1341 cparams = &local->cparams;
1342 cstats = &local->cstats;
1343
1344 if (flow == &txqi->def_flow)
1345 cvars = &txqi->def_cvars;
1346 else
1347 cvars = &local->cvars[flow - fq->flows];
1348
1349 return codel_dequeue(txqi,
1350 &flow->backlog,
1351 cparams,
1352 cvars,
1353 cstats,
1354 codel_skb_len_func,
1355 codel_skb_time_func,
1356 codel_drop_func,
1357 codel_dequeue_func);
1358}
1359
1360static void fq_skb_free_func(struct fq *fq,
1361 struct fq_tin *tin,
1362 struct fq_flow *flow,
1363 struct sk_buff *skb)
1364{
1365 struct ieee80211_local *local;
1366
1367 local = container_of(fq, struct ieee80211_local, fq);
1368 ieee80211_free_txskb(&local->hw, skb);
1369}
1370
1371static struct fq_flow *fq_flow_get_default_func(struct fq *fq,
1372 struct fq_tin *tin,
1373 int idx,
1374 struct sk_buff *skb)
1375{
1376 struct txq_info *txqi;
1377
1378 txqi = container_of(tin, struct txq_info, tin);
1379 return &txqi->def_flow;
1380}
1381
1382static void ieee80211_txq_enqueue(struct ieee80211_local *local,
1383 struct txq_info *txqi,
1384 struct sk_buff *skb)
1385{
1386 struct fq *fq = &local->fq;
1387 struct fq_tin *tin = &txqi->tin;
1388
1389 ieee80211_set_skb_enqueue_time(skb);
1390 fq_tin_enqueue(fq, tin, skb,
1391 fq_skb_free_func,
1392 fq_flow_get_default_func);
1393}
1394
1395void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
1396 struct sta_info *sta,
1397 struct txq_info *txqi, int tid)
1398{
1399 fq_tin_init(&txqi->tin);
1400 fq_flow_init(&txqi->def_flow);
1401 codel_vars_init(&txqi->def_cvars);
1402
1403 txqi->txq.vif = &sdata->vif;
1404
1405 if (sta) {
1406 txqi->txq.sta = &sta->sta;
1407 sta->sta.txq[tid] = &txqi->txq;
1408 txqi->txq.tid = tid;
1409 txqi->txq.ac = ieee802_1d_to_ac[tid & 7];
1410 } else {
1411 sdata->vif.txq = &txqi->txq;
1412 txqi->txq.tid = 0;
1413 txqi->txq.ac = IEEE80211_AC_BE;
1414 }
1415}
1416
1417void ieee80211_txq_purge(struct ieee80211_local *local,
1418 struct txq_info *txqi)
1419{
1420 struct fq *fq = &local->fq;
1421 struct fq_tin *tin = &txqi->tin;
1422
1423 fq_tin_reset(fq, tin, fq_skb_free_func);
1424}
1425
1426int ieee80211_txq_setup_flows(struct ieee80211_local *local)
1427{
1428 struct fq *fq = &local->fq;
1429 int ret;
1430 int i;
1431
1432 if (!local->ops->wake_tx_queue)
1433 return 0;
1434
1435 ret = fq_init(fq, 4096);
1436 if (ret)
1437 return ret;
1438
1439 codel_params_init(&local->cparams);
1440 codel_stats_init(&local->cstats);
1441 local->cparams.interval = MS2TIME(100);
1442 local->cparams.target = MS2TIME(20);
1443 local->cparams.ecn = true;
1444
1445 local->cvars = kcalloc(fq->flows_cnt, sizeof(local->cvars[0]),
1446 GFP_KERNEL);
1447 if (!local->cvars) {
1448 fq_reset(fq, fq_skb_free_func);
1449 return -ENOMEM;
1450 }
1451
1452 for (i = 0; i < fq->flows_cnt; i++)
1453 codel_vars_init(&local->cvars[i]);
1454
1455 return 0;
1456}
1457
1458void ieee80211_txq_teardown_flows(struct ieee80211_local *local)
1459{
1460 struct fq *fq = &local->fq;
1461
1462 if (!local->ops->wake_tx_queue)
1463 return;
1464
1465 kfree(local->cvars);
1466 local->cvars = NULL;
1467
1468 fq_reset(fq, fq_skb_free_func);
1289} 1469}
1290 1470
1291struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, 1471struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
1292 struct ieee80211_txq *txq) 1472 struct ieee80211_txq *txq)
1293{ 1473{
1294 struct ieee80211_local *local = hw_to_local(hw); 1474 struct ieee80211_local *local = hw_to_local(hw);
1295 struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
1296 struct txq_info *txqi = container_of(txq, struct txq_info, txq); 1475 struct txq_info *txqi = container_of(txq, struct txq_info, txq);
1297 struct ieee80211_hdr *hdr; 1476 struct ieee80211_hdr *hdr;
1298 struct sk_buff *skb = NULL; 1477 struct sk_buff *skb = NULL;
1299 u8 ac = txq->ac; 1478 struct fq *fq = &local->fq;
1479 struct fq_tin *tin = &txqi->tin;
1300 1480
1301 spin_lock_bh(&txqi->queue.lock); 1481 spin_lock_bh(&fq->lock);
1302 1482
1303 if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags)) 1483 if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags))
1304 goto out; 1484 goto out;
1305 1485
1306 skb = __skb_dequeue(&txqi->queue); 1486 skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
1307 if (!skb) 1487 if (!skb)
1308 goto out; 1488 goto out;
1309 1489
1310 txqi->byte_cnt -= skb->len; 1490 ieee80211_set_skb_vif(skb, txqi);
1311
1312 atomic_dec(&sdata->txqs_len[ac]);
1313 if (__netif_subqueue_stopped(sdata->dev, ac))
1314 ieee80211_propagate_queue_wake(local, sdata->vif.hw_queue[ac]);
1315 1491
1316 hdr = (struct ieee80211_hdr *)skb->data; 1492 hdr = (struct ieee80211_hdr *)skb->data;
1317 if (txq->sta && ieee80211_is_data_qos(hdr->frame_control)) { 1493 if (txq->sta && ieee80211_is_data_qos(hdr->frame_control)) {
@@ -1327,7 +1503,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
1327 } 1503 }
1328 1504
1329out: 1505out:
1330 spin_unlock_bh(&txqi->queue.lock); 1506 spin_unlock_bh(&fq->lock);
1331 1507
1332 if (skb && skb_has_frag_list(skb) && 1508 if (skb && skb_has_frag_list(skb) &&
1333 !ieee80211_hw_check(&local->hw, TX_FRAG_LIST)) 1509 !ieee80211_hw_check(&local->hw, TX_FRAG_LIST))
@@ -1343,7 +1519,10 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
1343 struct sk_buff_head *skbs, 1519 struct sk_buff_head *skbs,
1344 bool txpending) 1520 bool txpending)
1345{ 1521{
1522 struct ieee80211_tx_control control = {};
1523 struct fq *fq = &local->fq;
1346 struct sk_buff *skb, *tmp; 1524 struct sk_buff *skb, *tmp;
1525 struct txq_info *txqi;
1347 unsigned long flags; 1526 unsigned long flags;
1348 1527
1349 skb_queue_walk_safe(skbs, skb, tmp) { 1528 skb_queue_walk_safe(skbs, skb, tmp) {
@@ -1358,6 +1537,21 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
1358 } 1537 }
1359#endif 1538#endif
1360 1539
1540 txqi = ieee80211_get_txq(local, vif, sta, skb);
1541 if (txqi) {
1542 info->control.vif = vif;
1543
1544 __skb_unlink(skb, skbs);
1545
1546 spin_lock_bh(&fq->lock);
1547 ieee80211_txq_enqueue(local, txqi, skb);
1548 spin_unlock_bh(&fq->lock);
1549
1550 drv_wake_tx_queue(local, txqi);
1551
1552 continue;
1553 }
1554
1361 spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 1555 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
1362 if (local->queue_stop_reasons[q] || 1556 if (local->queue_stop_reasons[q] ||
1363 (!txpending && !skb_queue_empty(&local->pending[q]))) { 1557 (!txpending && !skb_queue_empty(&local->pending[q]))) {
@@ -1400,9 +1594,10 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
1400 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 1594 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
1401 1595
1402 info->control.vif = vif; 1596 info->control.vif = vif;
1597 control.sta = sta;
1403 1598
1404 __skb_unlink(skb, skbs); 1599 __skb_unlink(skb, skbs);
1405 ieee80211_drv_tx(local, vif, sta, skb); 1600 drv_tx(local, &control, skb);
1406 } 1601 }
1407 1602
1408 return true; 1603 return true;
@@ -2882,6 +3077,9 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
2882 struct sk_buff *skb) 3077 struct sk_buff *skb)
2883{ 3078{
2884 struct ieee80211_local *local = sdata->local; 3079 struct ieee80211_local *local = sdata->local;
3080 struct fq *fq = &local->fq;
3081 struct fq_tin *tin;
3082 struct fq_flow *flow;
2885 u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 3083 u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
2886 struct ieee80211_txq *txq = sta->sta.txq[tid]; 3084 struct ieee80211_txq *txq = sta->sta.txq[tid];
2887 struct txq_info *txqi; 3085 struct txq_info *txqi;
@@ -2893,6 +3091,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
2893 __be16 len; 3091 __be16 len;
2894 void *data; 3092 void *data;
2895 bool ret = false; 3093 bool ret = false;
3094 unsigned int orig_len;
2896 int n = 1, nfrags; 3095 int n = 1, nfrags;
2897 3096
2898 if (!ieee80211_hw_check(&local->hw, TX_AMSDU)) 3097 if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
@@ -2909,12 +3108,20 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
2909 max_amsdu_len = min_t(int, max_amsdu_len, 3108 max_amsdu_len = min_t(int, max_amsdu_len,
2910 sta->sta.max_rc_amsdu_len); 3109 sta->sta.max_rc_amsdu_len);
2911 3110
2912 spin_lock_bh(&txqi->queue.lock); 3111 spin_lock_bh(&fq->lock);
3112
3113 /* TODO: Ideally aggregation should be done on dequeue to remain
3114 * responsive to environment changes.
3115 */
2913 3116
2914 head = skb_peek_tail(&txqi->queue); 3117 tin = &txqi->tin;
3118 flow = fq_flow_classify(fq, tin, skb, fq_flow_get_default_func);
3119 head = skb_peek_tail(&flow->queue);
2915 if (!head) 3120 if (!head)
2916 goto out; 3121 goto out;
2917 3122
3123 orig_len = head->len;
3124
2918 if (skb->len + head->len > max_amsdu_len) 3125 if (skb->len + head->len > max_amsdu_len)
2919 goto out; 3126 goto out;
2920 3127
@@ -2953,8 +3160,13 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
2953 head->data_len += skb->len; 3160 head->data_len += skb->len;
2954 *frag_tail = skb; 3161 *frag_tail = skb;
2955 3162
3163 flow->backlog += head->len - orig_len;
3164 tin->backlog_bytes += head->len - orig_len;
3165
3166 fq_recalc_backlog(fq, tin, flow);
3167
2956out: 3168out:
2957 spin_unlock_bh(&txqi->queue.lock); 3169 spin_unlock_bh(&fq->lock);
2958 3170
2959 return ret; 3171 return ret;
2960} 3172}
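
At the driver boundary the contract is unchanged: a driver exposing wake_tx_queue is poked per txq and drains it with ieee80211_tx_dequeue(), which now pulls through fq_tin_dequeue()/codel_dequeue() under fq->lock instead of a per-txq skb list. Internally, per-flow codel state is found by pointer arithmetic (flow - fq->flows indexes local->cvars, while each txq's def_flow maps to its embedded def_cvars). A minimal sketch of the driver side (example_hw_queue_frame() is a placeholder for the driver's own TX path):

	/* Sketch: driver callback draining one mac80211 txq */
	static void example_wake_tx_queue(struct ieee80211_hw *hw,
					  struct ieee80211_txq *txq)
	{
		struct sk_buff *skb;

		while ((skb = ieee80211_tx_dequeue(hw, txq)))
			example_hw_queue_frame(hw, txq, skb);	/* hypothetical */
	}
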
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 905003f75c4d..42bf0b6685e8 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -244,6 +244,9 @@ void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
244 struct ieee80211_sub_if_data *sdata; 244 struct ieee80211_sub_if_data *sdata;
245 int n_acs = IEEE80211_NUM_ACS; 245 int n_acs = IEEE80211_NUM_ACS;
246 246
247 if (local->ops->wake_tx_queue)
248 return;
249
247 if (local->hw.queues < IEEE80211_NUM_ACS) 250 if (local->hw.queues < IEEE80211_NUM_ACS)
248 n_acs = 1; 251 n_acs = 1;
249 252
@@ -260,11 +263,6 @@ void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
260 for (ac = 0; ac < n_acs; ac++) { 263 for (ac = 0; ac < n_acs; ac++) {
261 int ac_queue = sdata->vif.hw_queue[ac]; 264 int ac_queue = sdata->vif.hw_queue[ac];
262 265
263 if (local->ops->wake_tx_queue &&
264 (atomic_read(&sdata->txqs_len[ac]) >
265 local->hw.txq_ac_max_pending))
266 continue;
267
268 if (ac_queue == queue || 266 if (ac_queue == queue ||
269 (sdata->vif.cab_queue == queue && 267 (sdata->vif.cab_queue == queue &&
270 local->queue_stop_reasons[ac_queue] == 0 && 268 local->queue_stop_reasons[ac_queue] == 0 &&
@@ -352,6 +350,9 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
352 if (__test_and_set_bit(reason, &local->queue_stop_reasons[queue])) 350 if (__test_and_set_bit(reason, &local->queue_stop_reasons[queue]))
353 return; 351 return;
354 352
353 if (local->ops->wake_tx_queue)
354 return;
355
355 if (local->hw.queues < IEEE80211_NUM_ACS) 356 if (local->hw.queues < IEEE80211_NUM_ACS)
356 n_acs = 1; 357 n_acs = 1;
357 358
@@ -3388,25 +3389,6 @@ u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo)
3388 return buf; 3389 return buf;
3389} 3390}
3390 3391
3391void ieee80211_init_tx_queue(struct ieee80211_sub_if_data *sdata,
3392 struct sta_info *sta,
3393 struct txq_info *txqi, int tid)
3394{
3395 skb_queue_head_init(&txqi->queue);
3396 txqi->txq.vif = &sdata->vif;
3397
3398 if (sta) {
3399 txqi->txq.sta = &sta->sta;
3400 sta->sta.txq[tid] = &txqi->txq;
3401 txqi->txq.tid = tid;
3402 txqi->txq.ac = ieee802_1d_to_ac[tid & 7];
3403 } else {
3404 sdata->vif.txq = &txqi->txq;
3405 txqi->txq.tid = 0;
3406 txqi->txq.ac = IEEE80211_AC_BE;
3407 }
3408}
3409
3410void ieee80211_txq_get_depth(struct ieee80211_txq *txq, 3392void ieee80211_txq_get_depth(struct ieee80211_txq *txq,
3411 unsigned long *frame_cnt, 3393 unsigned long *frame_cnt,
3412 unsigned long *byte_cnt) 3394 unsigned long *byte_cnt)
@@ -3414,9 +3396,9 @@ void ieee80211_txq_get_depth(struct ieee80211_txq *txq,
3414 struct txq_info *txqi = to_txq_info(txq); 3396 struct txq_info *txqi = to_txq_info(txq);
3415 3397
3416 if (frame_cnt) 3398 if (frame_cnt)
3417 *frame_cnt = txqi->queue.qlen; 3399 *frame_cnt = txqi->tin.backlog_packets;
3418 3400
3419 if (byte_cnt) 3401 if (byte_cnt)
3420 *byte_cnt = txqi->byte_cnt; 3402 *byte_cnt = txqi->tin.backlog_bytes;
3421} 3403}
3422EXPORT_SYMBOL(ieee80211_txq_get_depth); 3404EXPORT_SYMBOL(ieee80211_txq_get_depth);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 0b80a7140cc4..e9beaa58573c 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -91,7 +91,7 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
91 if (skb->len <= mtu) 91 if (skb->len <= mtu)
92 return false; 92 return false;
93 93
94 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) 94 if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
95 return false; 95 return false;
96 96
97 return true; 97 return true;
@@ -1009,9 +1009,10 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
1009 unsigned int flags; 1009 unsigned int flags;
1010 1010
1011 if (event == NETDEV_REGISTER) { 1011 if (event == NETDEV_REGISTER) {
1012 /* For now just support ethernet devices */ 1012 /* For now just support Ethernet and IPGRE devices */
1013 if ((dev->type == ARPHRD_ETHER) || 1013 if (dev->type == ARPHRD_ETHER ||
1014 (dev->type == ARPHRD_LOOPBACK)) { 1014 dev->type == ARPHRD_LOOPBACK ||
1015 dev->type == ARPHRD_IPGRE) {
1015 mdev = mpls_add_dev(dev); 1016 mdev = mpls_add_dev(dev);
1016 if (IS_ERR(mdev)) 1017 if (IS_ERR(mdev))
1017 return notifier_from_errno(PTR_ERR(mdev)); 1018 return notifier_from_errno(PTR_ERR(mdev));
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 604df6fae6fc..515131f9e021 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -137,7 +137,7 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
137 cfg.est.ewma_log = info->ewma_log; 137 cfg.est.ewma_log = info->ewma_log;
138 138
139 ret = gen_new_estimator(&est->bstats, NULL, &est->rstats, 139 ret = gen_new_estimator(&est->bstats, NULL, &est->rstats,
140 &est->lock, &cfg.opt); 140 &est->lock, NULL, &cfg.opt);
141 if (ret < 0) 141 if (ret < 0)
142 goto err2; 142 goto err2;
143 143
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index e68ef9ccd703..3cfd6cc60504 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -8,20 +8,6 @@
8#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8) 8#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
9#define NLGRPLONGS(x) (NLGRPSZ(x)/sizeof(unsigned long)) 9#define NLGRPLONGS(x) (NLGRPSZ(x)/sizeof(unsigned long))
10 10
11struct netlink_ring {
12 void **pg_vec;
13 unsigned int head;
14 unsigned int frames_per_block;
15 unsigned int frame_size;
16 unsigned int frame_max;
17
18 unsigned int pg_vec_order;
19 unsigned int pg_vec_pages;
20 unsigned int pg_vec_len;
21
22 atomic_t pending;
23};
24
25struct netlink_sock { 11struct netlink_sock {
26 /* struct sock has to be the first member of netlink_sock */ 12 /* struct sock has to be the first member of netlink_sock */
27 struct sock sk; 13 struct sock sk;
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 9a3eb7a0ebf4..1ecbd7715f6d 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -750,6 +750,14 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
750 750
751 if (likely(vport)) { 751 if (likely(vport)) {
752 u16 mru = OVS_CB(skb)->mru; 752 u16 mru = OVS_CB(skb)->mru;
753 u32 cutlen = OVS_CB(skb)->cutlen;
754
755 if (unlikely(cutlen > 0)) {
756 if (skb->len - cutlen > ETH_HLEN)
757 pskb_trim(skb, skb->len - cutlen);
758 else
759 pskb_trim(skb, ETH_HLEN);
760 }
753 761
754 if (likely(!mru || (skb->len <= mru + ETH_HLEN))) { 762 if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
755 ovs_vport_send(vport, skb); 763 ovs_vport_send(vport, skb);
@@ -775,7 +783,8 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
775 783
776static int output_userspace(struct datapath *dp, struct sk_buff *skb, 784static int output_userspace(struct datapath *dp, struct sk_buff *skb,
777 struct sw_flow_key *key, const struct nlattr *attr, 785 struct sw_flow_key *key, const struct nlattr *attr,
778 const struct nlattr *actions, int actions_len) 786 const struct nlattr *actions, int actions_len,
787 uint32_t cutlen)
779{ 788{
780 struct dp_upcall_info upcall; 789 struct dp_upcall_info upcall;
781 const struct nlattr *a; 790 const struct nlattr *a;
@@ -822,7 +831,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
822 } /* End of switch. */ 831 } /* End of switch. */
823 } 832 }
824 833
825 return ovs_dp_upcall(dp, skb, key, &upcall); 834 return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
826} 835}
827 836
828static int sample(struct datapath *dp, struct sk_buff *skb, 837static int sample(struct datapath *dp, struct sk_buff *skb,
@@ -832,6 +841,7 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
832 const struct nlattr *acts_list = NULL; 841 const struct nlattr *acts_list = NULL;
833 const struct nlattr *a; 842 const struct nlattr *a;
834 int rem; 843 int rem;
844 u32 cutlen = 0;
835 845
836 for (a = nla_data(attr), rem = nla_len(attr); rem > 0; 846 for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
837 a = nla_next(a, &rem)) { 847 a = nla_next(a, &rem)) {
@@ -858,13 +868,24 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
858 return 0; 868 return 0;
859 869
860 /* The only known usage of sample action is having a single user-space 870 /* The only known usage of sample action is having a single user-space
871 * action, or having a truncate action followed by a single user-space
861 * action. Treat this usage as a special case. 872 * action. Treat this usage as a special case.
862 * The output_userspace() should clone the skb to be sent to the 873 * The output_userspace() should clone the skb to be sent to the
863 * user space. This skb will be consumed by its caller. 874 * user space. This skb will be consumed by its caller.
864 */ 875 */
876 if (unlikely(nla_type(a) == OVS_ACTION_ATTR_TRUNC)) {
877 struct ovs_action_trunc *trunc = nla_data(a);
878
879 if (skb->len > trunc->max_len)
880 cutlen = skb->len - trunc->max_len;
881
882 a = nla_next(a, &rem);
883 }
884
865 if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE && 885 if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
866 nla_is_last(a, rem))) 886 nla_is_last(a, rem)))
867 return output_userspace(dp, skb, key, a, actions, actions_len); 887 return output_userspace(dp, skb, key, a, actions,
888 actions_len, cutlen);
868 889
869 skb = skb_clone(skb, GFP_ATOMIC); 890 skb = skb_clone(skb, GFP_ATOMIC);
870 if (!skb) 891 if (!skb)
@@ -1051,6 +1072,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
1051 if (out_skb) 1072 if (out_skb)
1052 do_output(dp, out_skb, prev_port, key); 1073 do_output(dp, out_skb, prev_port, key);
1053 1074
1075 OVS_CB(skb)->cutlen = 0;
1054 prev_port = -1; 1076 prev_port = -1;
1055 } 1077 }
1056 1078
@@ -1059,8 +1081,18 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
1059 prev_port = nla_get_u32(a); 1081 prev_port = nla_get_u32(a);
1060 break; 1082 break;
1061 1083
1084 case OVS_ACTION_ATTR_TRUNC: {
1085 struct ovs_action_trunc *trunc = nla_data(a);
1086
1087 if (skb->len > trunc->max_len)
1088 OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
1089 break;
1090 }
1091
1062 case OVS_ACTION_ATTR_USERSPACE: 1092 case OVS_ACTION_ATTR_USERSPACE:
1063 output_userspace(dp, skb, key, a, attr, len); 1093 output_userspace(dp, skb, key, a, attr,
1094 len, OVS_CB(skb)->cutlen);
1095 OVS_CB(skb)->cutlen = 0;
1064 break; 1096 break;
1065 1097
1066 case OVS_ACTION_ATTR_HASH: 1098 case OVS_ACTION_ATTR_HASH:
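The truncation arithmetic introduced above is compact enough to check in isolation. A runnable user-space sketch, assuming ETH_HLEN == 14 and modelling pskb_trim() as a plain length clamp (compute_cutlen and trimmed_len are illustrative names, not kernel API):

    #include <stdio.h>

    #define ETH_HLEN 14

    /* OVS_ACTION_ATTR_TRUNC: remember how many bytes to cut later. */
    static unsigned int compute_cutlen(unsigned int skb_len, unsigned int max_len)
    {
        return skb_len > max_len ? skb_len - max_len : 0;
    }

    /* do_output(): apply the cut, but never trim below the Ethernet header. */
    static unsigned int trimmed_len(unsigned int skb_len, unsigned int cutlen)
    {
        if (!cutlen)
            return skb_len;
        return (skb_len - cutlen > ETH_HLEN) ? skb_len - cutlen : ETH_HLEN;
    }

    int main(void)
    {
        printf("%u\n", trimmed_len(1500, compute_cutlen(1500, 200))); /* 200 */
        printf("%u\n", trimmed_len(60, compute_cutlen(60, 10)));      /* 14  */
        return 0;
    }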
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index d84312584ee4..b4069a90e375 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -834,6 +834,17 @@ static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
834 return 0; 834 return 0;
835} 835}
836 836
837static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
838{
839 size_t i;
840
841 for (i = 0; i < sizeof(*labels); i++)
842 if (labels->ct_labels[i])
843 return true;
844
845 return false;
846}
847
837/* Lookup connection and confirm if unconfirmed. */ 848/* Lookup connection and confirm if unconfirmed. */
838static int ovs_ct_commit(struct net *net, struct sw_flow_key *key, 849static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
839 const struct ovs_conntrack_info *info, 850 const struct ovs_conntrack_info *info,
@@ -844,24 +855,32 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
844 err = __ovs_ct_lookup(net, key, info, skb); 855 err = __ovs_ct_lookup(net, key, info, skb);
845 if (err) 856 if (err)
846 return err; 857 return err;
847 /* This is a no-op if the connection has already been confirmed. */ 858
859 /* Apply changes before confirming the connection so that the initial
860 * conntrack NEW netlink event carries the values given in the CT
861 * action.
862 */
863 if (info->mark.mask) {
864 err = ovs_ct_set_mark(skb, key, info->mark.value,
865 info->mark.mask);
866 if (err)
867 return err;
868 }
869 if (labels_nonzero(&info->labels.mask)) {
870 err = ovs_ct_set_labels(skb, key, &info->labels.value,
871 &info->labels.mask);
872 if (err)
873 return err;
874 }
875 /* This will take care of sending queued events even if the connection
876 * is already confirmed.
877 */
848 if (nf_conntrack_confirm(skb) != NF_ACCEPT) 878 if (nf_conntrack_confirm(skb) != NF_ACCEPT)
849 return -EINVAL; 879 return -EINVAL;
850 880
851 return 0; 881 return 0;
852} 882}
853 883
854static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
855{
856 size_t i;
857
858 for (i = 0; i < sizeof(*labels); i++)
859 if (labels->ct_labels[i])
860 return true;
861
862 return false;
863}
864
865/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero 884/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
866 * value if 'skb' is freed. 885 * value if 'skb' is freed.
867 */ 886 */
@@ -886,19 +905,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
886 err = ovs_ct_commit(net, key, info, skb); 905 err = ovs_ct_commit(net, key, info, skb);
887 else 906 else
888 err = ovs_ct_lookup(net, key, info, skb); 907 err = ovs_ct_lookup(net, key, info, skb);
889 if (err)
890 goto err;
891 908
892 if (info->mark.mask) {
893 err = ovs_ct_set_mark(skb, key, info->mark.value,
894 info->mark.mask);
895 if (err)
896 goto err;
897 }
898 if (labels_nonzero(&info->labels.mask))
899 err = ovs_ct_set_labels(skb, key, &info->labels.value,
900 &info->labels.mask);
901err:
902 skb_push(skb, nh_ofs); 909 skb_push(skb, nh_ofs);
903 if (err) 910 if (err)
904 kfree_skb(skb); 911 kfree_skb(skb);
@@ -1155,6 +1162,20 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
1155 } 1162 }
1156 } 1163 }
1157 1164
1165#ifdef CONFIG_NF_CONNTRACK_MARK
1166 if (!info->commit && info->mark.mask) {
1167 OVS_NLERR(log,
1168 "Setting conntrack mark requires 'commit' flag.");
1169 return -EINVAL;
1170 }
1171#endif
1172#ifdef CONFIG_NF_CONNTRACK_LABELS
1173 if (!info->commit && labels_nonzero(&info->labels.mask)) {
1174 OVS_NLERR(log,
1175 "Setting conntrack labels requires 'commit' flag.");
1176 return -EINVAL;
1177 }
1178#endif
1158 if (rem > 0) { 1179 if (rem > 0) {
1159 OVS_NLERR(log, "Conntrack attr has %d unknown bytes", rem); 1180 OVS_NLERR(log, "Conntrack attr has %d unknown bytes", rem);
1160 return -EINVAL; 1181 return -EINVAL;
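Condensed, the reordered ovs_ct_commit() above now mutates first and confirms second, so the initial conntrack NEW netlink event already carries the mark and labels from the CT action. A sketch of the resulting flow (error handling collapsed; all names taken from the diff):

    err = __ovs_ct_lookup(net, key, info, skb);      /* look up / attach ct */
    if (!err && info->mark.mask)
            err = ovs_ct_set_mark(skb, key, info->mark.value,
                                  info->mark.mask);  /* before confirm */
    if (!err && labels_nonzero(&info->labels.mask))
            err = ovs_ct_set_labels(skb, key, &info->labels.value,
                                    &info->labels.mask);
    if (!err && nf_conntrack_confirm(skb) != NF_ACCEPT)
            err = -EINVAL;                           /* flushes queued events */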
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 856bd8dba676..524c0fd3078e 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -137,10 +137,12 @@ EXPORT_SYMBOL_GPL(lockdep_ovsl_is_held);
137static struct vport *new_vport(const struct vport_parms *); 137static struct vport *new_vport(const struct vport_parms *);
138static int queue_gso_packets(struct datapath *dp, struct sk_buff *, 138static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
139 const struct sw_flow_key *, 139 const struct sw_flow_key *,
140 const struct dp_upcall_info *); 140 const struct dp_upcall_info *,
141 uint32_t cutlen);
141static int queue_userspace_packet(struct datapath *dp, struct sk_buff *, 142static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
142 const struct sw_flow_key *, 143 const struct sw_flow_key *,
143 const struct dp_upcall_info *); 144 const struct dp_upcall_info *,
145 uint32_t cutlen);
144 146
145/* Must be called with rcu_read_lock. */ 147/* Must be called with rcu_read_lock. */
146static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex) 148static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
@@ -275,7 +277,7 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
275 upcall.cmd = OVS_PACKET_CMD_MISS; 277 upcall.cmd = OVS_PACKET_CMD_MISS;
276 upcall.portid = ovs_vport_find_upcall_portid(p, skb); 278 upcall.portid = ovs_vport_find_upcall_portid(p, skb);
277 upcall.mru = OVS_CB(skb)->mru; 279 upcall.mru = OVS_CB(skb)->mru;
278 error = ovs_dp_upcall(dp, skb, key, &upcall); 280 error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
279 if (unlikely(error)) 281 if (unlikely(error))
280 kfree_skb(skb); 282 kfree_skb(skb);
281 else 283 else
@@ -300,7 +302,8 @@ out:
300 302
301int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb, 303int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
302 const struct sw_flow_key *key, 304 const struct sw_flow_key *key,
303 const struct dp_upcall_info *upcall_info) 305 const struct dp_upcall_info *upcall_info,
306 uint32_t cutlen)
304{ 307{
305 struct dp_stats_percpu *stats; 308 struct dp_stats_percpu *stats;
306 int err; 309 int err;
@@ -311,9 +314,9 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
311 } 314 }
312 315
313 if (!skb_is_gso(skb)) 316 if (!skb_is_gso(skb))
314 err = queue_userspace_packet(dp, skb, key, upcall_info); 317 err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
315 else 318 else
316 err = queue_gso_packets(dp, skb, key, upcall_info); 319 err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
317 if (err) 320 if (err)
318 goto err; 321 goto err;
319 322
@@ -331,7 +334,8 @@ err:
331 334
332static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, 335static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
333 const struct sw_flow_key *key, 336 const struct sw_flow_key *key,
334 const struct dp_upcall_info *upcall_info) 337 const struct dp_upcall_info *upcall_info,
338 uint32_t cutlen)
335{ 339{
336 unsigned short gso_type = skb_shinfo(skb)->gso_type; 340 unsigned short gso_type = skb_shinfo(skb)->gso_type;
337 struct sw_flow_key later_key; 341 struct sw_flow_key later_key;
@@ -360,7 +364,7 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
360 if (gso_type & SKB_GSO_UDP && skb != segs) 364 if (gso_type & SKB_GSO_UDP && skb != segs)
361 key = &later_key; 365 key = &later_key;
362 366
363 err = queue_userspace_packet(dp, skb, key, upcall_info); 367 err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
364 if (err) 368 if (err)
365 break; 369 break;
366 370
@@ -383,7 +387,8 @@ static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
383{ 387{
384 size_t size = NLMSG_ALIGN(sizeof(struct ovs_header)) 388 size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
385 + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */ 389 + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
386 + nla_total_size(ovs_key_attr_size()); /* OVS_PACKET_ATTR_KEY */ 390 + nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */
391 + nla_total_size(sizeof(unsigned int)); /* OVS_PACKET_ATTR_LEN */
387 392
388 /* OVS_PACKET_ATTR_USERDATA */ 393 /* OVS_PACKET_ATTR_USERDATA */
389 if (upcall_info->userdata) 394 if (upcall_info->userdata)
@@ -416,7 +421,8 @@ static void pad_packet(struct datapath *dp, struct sk_buff *skb)
416 421
417static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, 422static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
418 const struct sw_flow_key *key, 423 const struct sw_flow_key *key,
419 const struct dp_upcall_info *upcall_info) 424 const struct dp_upcall_info *upcall_info,
425 uint32_t cutlen)
420{ 426{
421 struct ovs_header *upcall; 427 struct ovs_header *upcall;
422 struct sk_buff *nskb = NULL; 428 struct sk_buff *nskb = NULL;
@@ -461,7 +467,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
461 else 467 else
462 hlen = skb->len; 468 hlen = skb->len;
463 469
464 len = upcall_msg_size(upcall_info, hlen); 470 len = upcall_msg_size(upcall_info, hlen - cutlen);
465 user_skb = genlmsg_new(len, GFP_ATOMIC); 471 user_skb = genlmsg_new(len, GFP_ATOMIC);
466 if (!user_skb) { 472 if (!user_skb) {
467 err = -ENOMEM; 473 err = -ENOMEM;
@@ -509,15 +515,25 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
509 pad_packet(dp, user_skb); 515 pad_packet(dp, user_skb);
510 } 516 }
511 517
518 /* Add OVS_PACKET_ATTR_LEN when packet is truncated */
519 if (cutlen > 0) {
520 if (nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN,
521 skb->len)) {
522 err = -ENOBUFS;
523 goto out;
524 }
525 pad_packet(dp, user_skb);
526 }
527
512 /* Only reserve room for attribute header, packet data is added 528 /* Only reserve room for attribute header, packet data is added
513 * in skb_zerocopy() */ 529 * in skb_zerocopy() */
514 if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) { 530 if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
515 err = -ENOBUFS; 531 err = -ENOBUFS;
516 goto out; 532 goto out;
517 } 533 }
518 nla->nla_len = nla_attr_size(skb->len); 534 nla->nla_len = nla_attr_size(skb->len - cutlen);
519 535
520 err = skb_zerocopy(user_skb, skb, skb->len, hlen); 536 err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen);
521 if (err) 537 if (err)
522 goto out; 538 goto out;
523 539
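The upcall-side accounting above copies skb->len - cutlen bytes of payload while OVS_PACKET_ATTR_LEN reports the original length, so userspace can tell a truncated sample from a genuinely short packet. A runnable user-space sketch of the size math (NLA_ALIGN simplified to 4-byte rounding; names illustrative):

    #include <stdio.h>

    #define NLA_HDRLEN   4
    #define NLA_ALIGN(x) (((x) + 3u) & ~3u)

    /* Room reserved for the (possibly truncated) packet attribute. */
    static unsigned int pkt_attr_size(unsigned int skb_len, unsigned int cutlen)
    {
        return NLA_HDRLEN + NLA_ALIGN(skb_len - cutlen);
    }

    int main(void)
    {
        unsigned int skb_len = 1500, cutlen = 1300;

        printf("copied %u bytes, OVS_PACKET_ATTR_LEN = %u\n",
               skb_len - cutlen, skb_len);   /* 200 copied, 1500 reported */
        printf("attr size = %u\n", pkt_attr_size(skb_len, cutlen));
        return 0;
    }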
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 427e39a045cf..ab85c1cae255 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -100,11 +100,13 @@ struct datapath {
100 * @input_vport: The original vport packet came in on. This value is cached 100 * @input_vport: The original vport packet came in on. This value is cached
101 * when a packet is received by OVS. 101 * when a packet is received by OVS.
 102 * @mru: The maximum received fragment size; 0 if the packet is not 102 * @mru: The maximum received fragment size; 0 if the packet is not
 103 * fragmented. 103 * fragmented.
 104 * @cutlen: The number of bytes from the packet end to be removed.
104 */ 105 */
105struct ovs_skb_cb { 106struct ovs_skb_cb {
106 struct vport *input_vport; 107 struct vport *input_vport;
107 u16 mru; 108 u16 mru;
109 u32 cutlen;
108}; 110};
109#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb) 111#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
110 112
@@ -194,7 +196,8 @@ extern struct genl_family dp_vport_genl_family;
194void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key); 196void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key);
195void ovs_dp_detach_port(struct vport *); 197void ovs_dp_detach_port(struct vport *);
196int ovs_dp_upcall(struct datapath *, struct sk_buff *, 198int ovs_dp_upcall(struct datapath *, struct sk_buff *,
197 const struct sw_flow_key *, const struct dp_upcall_info *); 199 const struct sw_flow_key *, const struct dp_upcall_info *,
200 uint32_t cutlen);
198 201
199const char *ovs_dp_name(const struct datapath *dp); 202const char *ovs_dp_name(const struct datapath *dp);
200struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq, 203struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 0bb650f4f219..c78a6a1476fb 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -2229,6 +2229,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
2229 [OVS_ACTION_ATTR_SAMPLE] = (u32)-1, 2229 [OVS_ACTION_ATTR_SAMPLE] = (u32)-1,
2230 [OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash), 2230 [OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash),
2231 [OVS_ACTION_ATTR_CT] = (u32)-1, 2231 [OVS_ACTION_ATTR_CT] = (u32)-1,
2232 [OVS_ACTION_ATTR_TRUNC] = sizeof(struct ovs_action_trunc),
2232 }; 2233 };
2233 const struct ovs_action_push_vlan *vlan; 2234 const struct ovs_action_push_vlan *vlan;
2234 int type = nla_type(a); 2235 int type = nla_type(a);
@@ -2255,6 +2256,14 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
2255 return -EINVAL; 2256 return -EINVAL;
2256 break; 2257 break;
2257 2258
2259 case OVS_ACTION_ATTR_TRUNC: {
2260 const struct ovs_action_trunc *trunc = nla_data(a);
2261
2262 if (trunc->max_len < ETH_HLEN)
2263 return -EINVAL;
2264 break;
2265 }
2266
2258 case OVS_ACTION_ATTR_HASH: { 2267 case OVS_ACTION_ATTR_HASH: {
2259 const struct ovs_action_hash *act_hash = nla_data(a); 2268 const struct ovs_action_hash *act_hash = nla_data(a);
2260 2269
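The validation above only has to enforce the floor that do_output() relies on: a truncation target below the Ethernet header is meaningless. A hedged sketch, assuming the matching uapi action carries a single 32-bit max_len (check include/uapi/linux/openvswitch.h for the authoritative layout):

    #include <stdbool.h>
    #include <stdint.h>

    #define ETH_HLEN 14

    struct ovs_action_trunc {
        uint32_t max_len;   /* maximum packet length after truncation */
    };

    /* Mirrors __ovs_nla_copy_actions(): reject truncation below ETH_HLEN. */
    static bool trunc_valid(const struct ovs_action_trunc *t)
    {
        return t->max_len >= ETH_HLEN;
    }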
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 2ee48e447b72..434e04c3a189 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -195,7 +195,7 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
195 } 195 }
196 196
197 vport->dev = alloc_netdev(sizeof(struct internal_dev), 197 vport->dev = alloc_netdev(sizeof(struct internal_dev),
198 parms->name, NET_NAME_UNKNOWN, do_setup); 198 parms->name, NET_NAME_USER, do_setup);
199 if (!vport->dev) { 199 if (!vport->dev) {
200 err = -ENOMEM; 200 err = -ENOMEM;
201 goto error_free_vport; 201 goto error_free_vport;
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 31cbc8c5c7db..6b21fd068d87 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -444,6 +444,7 @@ int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
444 444
445 OVS_CB(skb)->input_vport = vport; 445 OVS_CB(skb)->input_vport = vport;
446 OVS_CB(skb)->mru = 0; 446 OVS_CB(skb)->mru = 0;
447 OVS_CB(skb)->cutlen = 0;
447 if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) { 448 if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
448 u32 mark; 449 u32 mark;
449 450
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 9bff6ef16fa7..d1f3b9e977e5 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1979,40 +1979,8 @@ static int __packet_rcv_vnet(const struct sk_buff *skb,
1979{ 1979{
1980 *vnet_hdr = (const struct virtio_net_hdr) { 0 }; 1980 *vnet_hdr = (const struct virtio_net_hdr) { 0 };
1981 1981
1982 if (skb_is_gso(skb)) { 1982 if (virtio_net_hdr_from_skb(skb, vnet_hdr, vio_le()))
1983 struct skb_shared_info *sinfo = skb_shinfo(skb); 1983 BUG();
1984
1985 /* This is a hint as to how much should be linear. */
1986 vnet_hdr->hdr_len =
1987 __cpu_to_virtio16(vio_le(), skb_headlen(skb));
1988 vnet_hdr->gso_size =
1989 __cpu_to_virtio16(vio_le(), sinfo->gso_size);
1990
1991 if (sinfo->gso_type & SKB_GSO_TCPV4)
1992 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1993 else if (sinfo->gso_type & SKB_GSO_TCPV6)
1994 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1995 else if (sinfo->gso_type & SKB_GSO_UDP)
1996 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
1997 else if (sinfo->gso_type & SKB_GSO_FCOE)
1998 return -EINVAL;
1999 else
2000 BUG();
2001
2002 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2003 vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2004 } else
2005 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
2006
2007 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2008 vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
2009 vnet_hdr->csum_start = __cpu_to_virtio16(vio_le(),
2010 skb_checksum_start_offset(skb));
2011 vnet_hdr->csum_offset = __cpu_to_virtio16(vio_le(),
2012 skb->csum_offset);
2013 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2014 vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
2015 } /* else everything is zero */
2016 1984
2017 return 0; 1985 return 0;
2018} 1986}
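The deleted block is the open-coded GSO/checksum translation that virtio_net_hdr_from_skb() now centralizes; callers shrink to a single call, as the remaining hunk shows. A hedged sketch of the caller pattern (helper signature as introduced by this series, to the best of my recollection):

    struct virtio_net_hdr vnet_hdr;

    /* Translate skb GSO/checksum state into a little-endian virtio
     * header; failure here means a GSO type the header cannot express,
     * which af_packet treats as a kernel bug. */
    if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le()))
            BUG();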
diff --git a/net/rds/cong.c b/net/rds/cong.c
index 6641bcf7c185..8398fee7c866 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -235,7 +235,8 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
235 * therefore trigger warnings. 235 * therefore trigger warnings.
236 * Defer the xmit to rds_send_worker() instead. 236 * Defer the xmit to rds_send_worker() instead.
237 */ 237 */
238 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 238 queue_delayed_work(rds_wq,
239 &conn->c_path[0].cp_send_w, 0);
239 } 240 }
240 } 241 }
241 242
diff --git a/net/rds/connection.c b/net/rds/connection.c
index e3b118cae81d..a4b07c899d89 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -95,14 +95,16 @@ static struct rds_connection *rds_conn_lookup(struct net *net,
95 * and receiving over this connection again in the future. It is up to 95 * and receiving over this connection again in the future. It is up to
96 * the transport to have serialized this call with its send and recv. 96 * the transport to have serialized this call with its send and recv.
97 */ 97 */
98static void rds_conn_reset(struct rds_connection *conn) 98static void rds_conn_path_reset(struct rds_conn_path *cp)
99{ 99{
100 struct rds_connection *conn = cp->cp_conn;
101
100 rdsdebug("connection %pI4 to %pI4 reset\n", 102 rdsdebug("connection %pI4 to %pI4 reset\n",
101 &conn->c_laddr, &conn->c_faddr); 103 &conn->c_laddr, &conn->c_faddr);
102 104
103 rds_stats_inc(s_conn_reset); 105 rds_stats_inc(s_conn_reset);
104 rds_send_reset(conn); 106 rds_send_path_reset(cp);
105 conn->c_flags = 0; 107 cp->cp_flags = 0;
106 108
107 /* Do not clear next_rx_seq here, else we cannot distinguish 109 /* Do not clear next_rx_seq here, else we cannot distinguish
108 * retransmitted packets from new packets, and will hand all 110 * retransmitted packets from new packets, and will hand all
@@ -110,6 +112,32 @@ static void rds_conn_reset(struct rds_connection *conn)
110 * reliability guarantees of RDS. */ 112 * reliability guarantees of RDS. */
111} 113}
112 114
115static void __rds_conn_path_init(struct rds_connection *conn,
116 struct rds_conn_path *cp, bool is_outgoing)
117{
118 spin_lock_init(&cp->cp_lock);
119 cp->cp_next_tx_seq = 1;
120 init_waitqueue_head(&cp->cp_waitq);
121 INIT_LIST_HEAD(&cp->cp_send_queue);
122 INIT_LIST_HEAD(&cp->cp_retrans);
123
124 cp->cp_conn = conn;
125 atomic_set(&cp->cp_state, RDS_CONN_DOWN);
126 cp->cp_send_gen = 0;
127 /* cp_outgoing is per-path. So we can only set it here
128 * for the single-path transports.
129 */
130 if (!conn->c_trans->t_mp_capable)
131 cp->cp_outgoing = (is_outgoing ? 1 : 0);
132 cp->cp_reconnect_jiffies = 0;
133 INIT_DELAYED_WORK(&cp->cp_send_w, rds_send_worker);
134 INIT_DELAYED_WORK(&cp->cp_recv_w, rds_recv_worker);
135 INIT_DELAYED_WORK(&cp->cp_conn_w, rds_connect_worker);
136 INIT_WORK(&cp->cp_down_w, rds_shutdown_worker);
137 mutex_init(&cp->cp_cm_lock);
138 cp->cp_flags = 0;
139}
140
113/* 141/*
 114 * There is only ever one 'conn' for a given pair of addresses in the 142 * There is only ever one 'conn' for a given pair of addresses in the
115 * system at a time. They contain messages to be retransmitted and so 143 * system at a time. They contain messages to be retransmitted and so
@@ -153,13 +181,8 @@ static struct rds_connection *__rds_conn_create(struct net *net,
153 INIT_HLIST_NODE(&conn->c_hash_node); 181 INIT_HLIST_NODE(&conn->c_hash_node);
154 conn->c_laddr = laddr; 182 conn->c_laddr = laddr;
155 conn->c_faddr = faddr; 183 conn->c_faddr = faddr;
156 spin_lock_init(&conn->c_lock);
157 conn->c_next_tx_seq = 1;
158 rds_conn_net_set(conn, net);
159 184
160 init_waitqueue_head(&conn->c_waitq); 185 rds_conn_net_set(conn, net);
161 INIT_LIST_HEAD(&conn->c_send_queue);
162 INIT_LIST_HEAD(&conn->c_retrans);
163 186
164 ret = rds_cong_get_maps(conn); 187 ret = rds_cong_get_maps(conn);
165 if (ret) { 188 if (ret) {
@@ -195,17 +218,6 @@ static struct rds_connection *__rds_conn_create(struct net *net,
195 goto out; 218 goto out;
196 } 219 }
197 220
198 atomic_set(&conn->c_state, RDS_CONN_DOWN);
199 conn->c_send_gen = 0;
200 conn->c_outgoing = (is_outgoing ? 1 : 0);
201 conn->c_reconnect_jiffies = 0;
202 INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
203 INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
204 INIT_DELAYED_WORK(&conn->c_conn_w, rds_connect_worker);
205 INIT_WORK(&conn->c_down_w, rds_shutdown_worker);
206 mutex_init(&conn->c_cm_lock);
207 conn->c_flags = 0;
208
209 rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n", 221 rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
210 conn, &laddr, &faddr, 222 conn, &laddr, &faddr,
211 trans->t_name ? trans->t_name : "[unknown]", 223 trans->t_name ? trans->t_name : "[unknown]",
@@ -222,7 +234,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
222 if (parent) { 234 if (parent) {
223 /* Creating passive conn */ 235 /* Creating passive conn */
224 if (parent->c_passive) { 236 if (parent->c_passive) {
225 trans->conn_free(conn->c_transport_data); 237 trans->conn_free(conn->c_path[0].cp_transport_data);
226 kmem_cache_free(rds_conn_slab, conn); 238 kmem_cache_free(rds_conn_slab, conn);
227 conn = parent->c_passive; 239 conn = parent->c_passive;
228 } else { 240 } else {
@@ -236,10 +248,26 @@ static struct rds_connection *__rds_conn_create(struct net *net,
236 248
237 found = rds_conn_lookup(net, head, laddr, faddr, trans); 249 found = rds_conn_lookup(net, head, laddr, faddr, trans);
238 if (found) { 250 if (found) {
239 trans->conn_free(conn->c_transport_data); 251 struct rds_conn_path *cp;
252 int i;
253
254 for (i = 0; i < RDS_MPATH_WORKERS; i++) {
255 cp = &conn->c_path[i];
256 trans->conn_free(cp->cp_transport_data);
257 if (!trans->t_mp_capable)
258 break;
259 }
240 kmem_cache_free(rds_conn_slab, conn); 260 kmem_cache_free(rds_conn_slab, conn);
241 conn = found; 261 conn = found;
242 } else { 262 } else {
263 int i;
264
265 for (i = 0; i < RDS_MPATH_WORKERS; i++) {
266 __rds_conn_path_init(conn, &conn->c_path[i],
267 is_outgoing);
268 conn->c_path[i].cp_index = i;
269 }
270
243 hlist_add_head_rcu(&conn->c_hash_node, head); 271 hlist_add_head_rcu(&conn->c_hash_node, head);
244 rds_cong_add_conn(conn); 272 rds_cong_add_conn(conn);
245 rds_conn_count++; 273 rds_conn_count++;
@@ -267,10 +295,12 @@ struct rds_connection *rds_conn_create_outgoing(struct net *net,
267} 295}
268EXPORT_SYMBOL_GPL(rds_conn_create_outgoing); 296EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
269 297
270void rds_conn_shutdown(struct rds_connection *conn) 298void rds_conn_shutdown(struct rds_conn_path *cp)
271{ 299{
300 struct rds_connection *conn = cp->cp_conn;
301
272 /* shut it down unless it's down already */ 302 /* shut it down unless it's down already */
273 if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) { 303 if (!rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
274 /* 304 /*
275 * Quiesce the connection mgmt handlers before we start tearing 305 * Quiesce the connection mgmt handlers before we start tearing
276 * things down. We don't hold the mutex for the entire 306 * things down. We don't hold the mutex for the entire
@@ -278,35 +308,41 @@ void rds_conn_shutdown(struct rds_connection *conn)
278 * deadlocking with the CM handler. Instead, the CM event 308 * deadlocking with the CM handler. Instead, the CM event
279 * handler is supposed to check for state DISCONNECTING 309 * handler is supposed to check for state DISCONNECTING
280 */ 310 */
281 mutex_lock(&conn->c_cm_lock); 311 mutex_lock(&cp->cp_cm_lock);
282 if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING) 312 if (!rds_conn_path_transition(cp, RDS_CONN_UP,
283 && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) { 313 RDS_CONN_DISCONNECTING) &&
284 rds_conn_error(conn, "shutdown called in state %d\n", 314 !rds_conn_path_transition(cp, RDS_CONN_ERROR,
285 atomic_read(&conn->c_state)); 315 RDS_CONN_DISCONNECTING)) {
286 mutex_unlock(&conn->c_cm_lock); 316 rds_conn_path_error(cp,
317 "shutdown called in state %d\n",
318 atomic_read(&cp->cp_state));
319 mutex_unlock(&cp->cp_cm_lock);
287 return; 320 return;
288 } 321 }
289 mutex_unlock(&conn->c_cm_lock); 322 mutex_unlock(&cp->cp_cm_lock);
290 323
291 wait_event(conn->c_waitq, 324 wait_event(cp->cp_waitq,
292 !test_bit(RDS_IN_XMIT, &conn->c_flags)); 325 !test_bit(RDS_IN_XMIT, &cp->cp_flags));
293 wait_event(conn->c_waitq, 326 wait_event(cp->cp_waitq,
294 !test_bit(RDS_RECV_REFILL, &conn->c_flags)); 327 !test_bit(RDS_RECV_REFILL, &cp->cp_flags));
295 328
296 conn->c_trans->conn_shutdown(conn); 329 if (!conn->c_trans->t_mp_capable)
297 rds_conn_reset(conn); 330 conn->c_trans->conn_shutdown(conn);
331 else
332 conn->c_trans->conn_path_shutdown(cp);
333 rds_conn_path_reset(cp);
298 334
299 if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) { 335 if (!rds_conn_path_transition(cp, RDS_CONN_DISCONNECTING,
336 RDS_CONN_DOWN)) {
 300 /* This can happen - e.g. when we're in the middle of tearing 337 /* This can happen - e.g. when we're in the middle of tearing
 301 * down the connection, and someone unloads the rds module. 338 * down the connection, and someone unloads the rds module.
 302 * Quite reproducible with loopback connections. 339 * Quite reproducible with loopback connections.
303 * Mostly harmless. 340 * Mostly harmless.
304 */ 341 */
305 rds_conn_error(conn, 342 rds_conn_path_error(cp, "%s: failed to transition "
306 "%s: failed to transition to state DOWN, " 343 "to state DOWN, current state "
307 "current state is %d\n", 344 "is %d\n", __func__,
308 __func__, 345 atomic_read(&cp->cp_state));
309 atomic_read(&conn->c_state));
310 return; 346 return;
311 } 347 }
312 } 348 }
@@ -315,18 +351,46 @@ void rds_conn_shutdown(struct rds_connection *conn)
315 * The passive side of an IB loopback connection is never added 351 * The passive side of an IB loopback connection is never added
316 * to the conn hash, so we never trigger a reconnect on this 352 * to the conn hash, so we never trigger a reconnect on this
317 * conn - the reconnect is always triggered by the active peer. */ 353 * conn - the reconnect is always triggered by the active peer. */
318 cancel_delayed_work_sync(&conn->c_conn_w); 354 cancel_delayed_work_sync(&cp->cp_conn_w);
319 rcu_read_lock(); 355 rcu_read_lock();
320 if (!hlist_unhashed(&conn->c_hash_node)) { 356 if (!hlist_unhashed(&conn->c_hash_node)) {
321 rcu_read_unlock(); 357 rcu_read_unlock();
322 if (conn->c_trans->t_type != RDS_TRANS_TCP || 358 if (conn->c_trans->t_type != RDS_TRANS_TCP ||
323 conn->c_outgoing == 1) 359 cp->cp_outgoing == 1)
324 rds_queue_reconnect(conn); 360 rds_queue_reconnect(cp);
325 } else { 361 } else {
326 rcu_read_unlock(); 362 rcu_read_unlock();
327 } 363 }
328} 364}
329 365
366/* destroy a single rds_conn_path. rds_conn_destroy() iterates over
367 * all paths using rds_conn_path_destroy()
368 */
369static void rds_conn_path_destroy(struct rds_conn_path *cp)
370{
371 struct rds_message *rm, *rtmp;
372
373 rds_conn_path_drop(cp);
374 flush_work(&cp->cp_down_w);
375
376 /* make sure lingering queued work won't try to ref the conn */
377 cancel_delayed_work_sync(&cp->cp_send_w);
378 cancel_delayed_work_sync(&cp->cp_recv_w);
379
380 /* tear down queued messages */
381 list_for_each_entry_safe(rm, rtmp,
382 &cp->cp_send_queue,
383 m_conn_item) {
384 list_del_init(&rm->m_conn_item);
385 BUG_ON(!list_empty(&rm->m_sock_item));
386 rds_message_put(rm);
387 }
388 if (cp->cp_xmit_rm)
389 rds_message_put(cp->cp_xmit_rm);
390
391 cp->cp_conn->c_trans->conn_free(cp->cp_transport_data);
392}
393
330/* 394/*
331 * Stop and free a connection. 395 * Stop and free a connection.
332 * 396 *
@@ -336,7 +400,6 @@ void rds_conn_shutdown(struct rds_connection *conn)
336 */ 400 */
337void rds_conn_destroy(struct rds_connection *conn) 401void rds_conn_destroy(struct rds_connection *conn)
338{ 402{
339 struct rds_message *rm, *rtmp;
340 unsigned long flags; 403 unsigned long flags;
341 404
342 rdsdebug("freeing conn %p for %pI4 -> " 405 rdsdebug("freeing conn %p for %pI4 -> "
@@ -350,25 +413,19 @@ void rds_conn_destroy(struct rds_connection *conn)
350 synchronize_rcu(); 413 synchronize_rcu();
351 414
352 /* shut the connection down */ 415 /* shut the connection down */
353 rds_conn_drop(conn); 416 if (!conn->c_trans->t_mp_capable) {
354 flush_work(&conn->c_down_w); 417 rds_conn_path_destroy(&conn->c_path[0]);
355 418 BUG_ON(!list_empty(&conn->c_path[0].cp_retrans));
356 /* make sure lingering queued work won't try to ref the conn */ 419 } else {
357 cancel_delayed_work_sync(&conn->c_send_w); 420 int i;
358 cancel_delayed_work_sync(&conn->c_recv_w); 421 struct rds_conn_path *cp;
359 422
360 /* tear down queued messages */ 423 for (i = 0; i < RDS_MPATH_WORKERS; i++) {
361 list_for_each_entry_safe(rm, rtmp, 424 cp = &conn->c_path[i];
362 &conn->c_send_queue, 425 rds_conn_path_destroy(cp);
363 m_conn_item) { 426 BUG_ON(!list_empty(&cp->cp_retrans));
364 list_del_init(&rm->m_conn_item); 427 }
365 BUG_ON(!list_empty(&rm->m_sock_item));
366 rds_message_put(rm);
367 } 428 }
368 if (conn->c_xmit_rm)
369 rds_message_put(conn->c_xmit_rm);
370
371 conn->c_trans->conn_free(conn->c_transport_data);
372 429
373 /* 430 /*
374 * The congestion maps aren't freed up here. They're 431 * The congestion maps aren't freed up here. They're
@@ -377,7 +434,6 @@ void rds_conn_destroy(struct rds_connection *conn)
377 */ 434 */
378 rds_cong_remove_conn(conn); 435 rds_cong_remove_conn(conn);
379 436
380 BUG_ON(!list_empty(&conn->c_retrans));
381 kmem_cache_free(rds_conn_slab, conn); 437 kmem_cache_free(rds_conn_slab, conn);
382 438
383 spin_lock_irqsave(&rds_conn_lock, flags); 439 spin_lock_irqsave(&rds_conn_lock, flags);
@@ -398,6 +454,7 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
398 unsigned int total = 0; 454 unsigned int total = 0;
399 unsigned long flags; 455 unsigned long flags;
400 size_t i; 456 size_t i;
457 int j;
401 458
402 len /= sizeof(struct rds_info_message); 459 len /= sizeof(struct rds_info_message);
403 460
@@ -406,23 +463,32 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
406 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); 463 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
407 i++, head++) { 464 i++, head++) {
408 hlist_for_each_entry_rcu(conn, head, c_hash_node) { 465 hlist_for_each_entry_rcu(conn, head, c_hash_node) {
409 if (want_send) 466 struct rds_conn_path *cp;
410 list = &conn->c_send_queue; 467
411 else 468 for (j = 0; j < RDS_MPATH_WORKERS; j++) {
412 list = &conn->c_retrans; 469 cp = &conn->c_path[j];
413 470 if (want_send)
414 spin_lock_irqsave(&conn->c_lock, flags); 471 list = &cp->cp_send_queue;
415 472 else
416 /* XXX too lazy to maintain counts.. */ 473 list = &cp->cp_retrans;
417 list_for_each_entry(rm, list, m_conn_item) { 474
418 total++; 475 spin_lock_irqsave(&cp->cp_lock, flags);
419 if (total <= len) 476
420 rds_inc_info_copy(&rm->m_inc, iter, 477 /* XXX too lazy to maintain counts.. */
421 conn->c_laddr, 478 list_for_each_entry(rm, list, m_conn_item) {
422 conn->c_faddr, 0); 479 total++;
480 if (total <= len)
481 rds_inc_info_copy(&rm->m_inc,
482 iter,
483 conn->c_laddr,
484 conn->c_faddr,
485 0);
486 }
487
488 spin_unlock_irqrestore(&cp->cp_lock, flags);
489 if (!conn->c_trans->t_mp_capable)
490 break;
423 } 491 }
424
425 spin_unlock_irqrestore(&conn->c_lock, flags);
426 } 492 }
427 } 493 }
428 rcu_read_unlock(); 494 rcu_read_unlock();
@@ -484,27 +550,72 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
484} 550}
485EXPORT_SYMBOL_GPL(rds_for_each_conn_info); 551EXPORT_SYMBOL_GPL(rds_for_each_conn_info);
486 552
487static int rds_conn_info_visitor(struct rds_connection *conn, 553void rds_walk_conn_path_info(struct socket *sock, unsigned int len,
488 void *buffer) 554 struct rds_info_iterator *iter,
555 struct rds_info_lengths *lens,
556 int (*visitor)(struct rds_conn_path *, void *),
557 size_t item_len)
558{
559 u64 buffer[(item_len + 7) / 8];
560 struct hlist_head *head;
561 struct rds_connection *conn;
562 size_t i;
563 int j;
564
565 rcu_read_lock();
566
567 lens->nr = 0;
568 lens->each = item_len;
569
570 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
571 i++, head++) {
572 hlist_for_each_entry_rcu(conn, head, c_hash_node) {
573 struct rds_conn_path *cp;
574
575 for (j = 0; j < RDS_MPATH_WORKERS; j++) {
576 cp = &conn->c_path[j];
577
578 /* XXX no cp_lock usage.. */
579 if (!visitor(cp, buffer))
580 continue;
581 if (!conn->c_trans->t_mp_capable)
582 break;
583 }
584
585 /* We copy as much as we can fit in the buffer,
586 * but we count all items so that the caller
587 * can resize the buffer.
588 */
589 if (len >= item_len) {
590 rds_info_copy(iter, buffer, item_len);
591 len -= item_len;
592 }
593 lens->nr++;
594 }
595 }
596 rcu_read_unlock();
597}
598
599static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
489{ 600{
490 struct rds_info_connection *cinfo = buffer; 601 struct rds_info_connection *cinfo = buffer;
491 602
492 cinfo->next_tx_seq = conn->c_next_tx_seq; 603 cinfo->next_tx_seq = cp->cp_next_tx_seq;
493 cinfo->next_rx_seq = conn->c_next_rx_seq; 604 cinfo->next_rx_seq = cp->cp_next_rx_seq;
494 cinfo->laddr = conn->c_laddr; 605 cinfo->laddr = cp->cp_conn->c_laddr;
495 cinfo->faddr = conn->c_faddr; 606 cinfo->faddr = cp->cp_conn->c_faddr;
496 strncpy(cinfo->transport, conn->c_trans->t_name, 607 strncpy(cinfo->transport, cp->cp_conn->c_trans->t_name,
497 sizeof(cinfo->transport)); 608 sizeof(cinfo->transport));
498 cinfo->flags = 0; 609 cinfo->flags = 0;
499 610
500 rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &conn->c_flags), 611 rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
501 SENDING); 612 SENDING);
502 /* XXX Future: return the state rather than these funky bits */ 613 /* XXX Future: return the state rather than these funky bits */
503 rds_conn_info_set(cinfo->flags, 614 rds_conn_info_set(cinfo->flags,
504 atomic_read(&conn->c_state) == RDS_CONN_CONNECTING, 615 atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
505 CONNECTING); 616 CONNECTING);
506 rds_conn_info_set(cinfo->flags, 617 rds_conn_info_set(cinfo->flags,
507 atomic_read(&conn->c_state) == RDS_CONN_UP, 618 atomic_read(&cp->cp_state) == RDS_CONN_UP,
508 CONNECTED); 619 CONNECTED);
509 return 1; 620 return 1;
510} 621}
@@ -513,7 +624,7 @@ static void rds_conn_info(struct socket *sock, unsigned int len,
513 struct rds_info_iterator *iter, 624 struct rds_info_iterator *iter,
514 struct rds_info_lengths *lens) 625 struct rds_info_lengths *lens)
515{ 626{
516 rds_for_each_conn_info(sock, len, iter, lens, 627 rds_walk_conn_path_info(sock, len, iter, lens,
517 rds_conn_info_visitor, 628 rds_conn_info_visitor,
518 sizeof(struct rds_info_connection)); 629 sizeof(struct rds_info_connection));
519} 630}
@@ -553,10 +664,16 @@ void rds_conn_exit(void)
553/* 664/*
554 * Force a disconnect 665 * Force a disconnect
555 */ 666 */
667void rds_conn_path_drop(struct rds_conn_path *cp)
668{
669 atomic_set(&cp->cp_state, RDS_CONN_ERROR);
670 queue_work(rds_wq, &cp->cp_down_w);
671}
672EXPORT_SYMBOL_GPL(rds_conn_path_drop);
673
556void rds_conn_drop(struct rds_connection *conn) 674void rds_conn_drop(struct rds_connection *conn)
557{ 675{
558 atomic_set(&conn->c_state, RDS_CONN_ERROR); 676 rds_conn_path_drop(&conn->c_path[0]);
559 queue_work(rds_wq, &conn->c_down_w);
560} 677}
561EXPORT_SYMBOL_GPL(rds_conn_drop); 678EXPORT_SYMBOL_GPL(rds_conn_drop);
562 679
@@ -564,11 +681,17 @@ EXPORT_SYMBOL_GPL(rds_conn_drop);
564 * If the connection is down, trigger a connect. We may have scheduled a 681 * If the connection is down, trigger a connect. We may have scheduled a
565 * delayed reconnect however - in this case we should not interfere. 682 * delayed reconnect however - in this case we should not interfere.
566 */ 683 */
684void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
685{
686 if (rds_conn_path_state(cp) == RDS_CONN_DOWN &&
687 !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
688 queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
689}
690
567void rds_conn_connect_if_down(struct rds_connection *conn) 691void rds_conn_connect_if_down(struct rds_connection *conn)
568{ 692{
569 if (rds_conn_state(conn) == RDS_CONN_DOWN && 693 WARN_ON(conn->c_trans->t_mp_capable);
570 !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags)) 694 rds_conn_path_connect_if_down(&conn->c_path[0]);
571 queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
572} 695}
573EXPORT_SYMBOL_GPL(rds_conn_connect_if_down); 696EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);
574 697
@@ -586,3 +709,15 @@ __rds_conn_error(struct rds_connection *conn, const char *fmt, ...)
586 709
587 rds_conn_drop(conn); 710 rds_conn_drop(conn);
588} 711}
712
713void
714__rds_conn_path_error(struct rds_conn_path *cp, const char *fmt, ...)
715{
716 va_list ap;
717
718 va_start(ap, fmt);
719 vprintk(fmt, ap);
720 va_end(ap);
721
722 rds_conn_path_drop(cp);
723}
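A pattern recurs throughout the refactor above: mp-capable transports iterate all RDS_MPATH_WORKERS paths, while legacy transports touch only c_path[0]. That shape could be factored as below; a hedged sketch, where for_each_conn_path is an illustrative name, not an RDS helper:

    static void for_each_conn_path(struct rds_connection *conn,
                                   void (*fn)(struct rds_conn_path *cp))
    {
        int i;

        for (i = 0; i < RDS_MPATH_WORKERS; i++) {
            fn(&conn->c_path[i]);
            if (!conn->c_trans->t_mp_capable)
                break;      /* single-path transports use c_path[0] only */
        }
    }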
diff --git a/net/rds/ib.c b/net/rds/ib.c
index b5342fddaf98..44946a681a8c 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -40,6 +40,7 @@
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/module.h> 41#include <linux/module.h>
42 42
43#include "rds_single_path.h"
43#include "rds.h" 44#include "rds.h"
44#include "ib.h" 45#include "ib.h"
45#include "ib_mr.h" 46#include "ib_mr.h"
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 7c2a65a6af5c..e48bb1ba3dfc 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -36,6 +36,7 @@
36#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
37#include <linux/ratelimit.h> 37#include <linux/ratelimit.h>
38 38
39#include "rds_single_path.h"
39#include "rds.h" 40#include "rds.h"
40#include "ib.h" 41#include "ib.h"
41 42
@@ -273,7 +274,7 @@ static void rds_ib_tasklet_fn_send(unsigned long data)
273 if (rds_conn_up(conn) && 274 if (rds_conn_up(conn) &&
274 (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) || 275 (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
275 test_bit(0, &conn->c_map_queued))) 276 test_bit(0, &conn->c_map_queued)))
276 rds_send_xmit(ic->conn); 277 rds_send_xmit(&ic->conn->c_path[0]);
277} 278}
278 279
279static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq, 280static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq,
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index f7164ac1ffc1..977f69886c00 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -35,6 +35,7 @@
35#include <linux/rculist.h> 35#include <linux/rculist.h>
36#include <linux/llist.h> 36#include <linux/llist.h>
37 37
38#include "rds_single_path.h"
38#include "ib_mr.h" 39#include "ib_mr.h"
39 40
40struct workqueue_struct *rds_ib_mr_wq; 41struct workqueue_struct *rds_ib_mr_wq;
@@ -618,7 +619,7 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
618 619
619int rds_ib_mr_init(void) 620int rds_ib_mr_init(void)
620{ 621{
621 rds_ib_mr_wq = create_workqueue("rds_mr_flushd"); 622 rds_ib_mr_wq = alloc_workqueue("rds_mr_flushd", WQ_MEM_RECLAIM, 0);
622 if (!rds_ib_mr_wq) 623 if (!rds_ib_mr_wq)
623 return -ENOMEM; 624 return -ENOMEM;
624 return 0; 625 return 0;
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index abc8cc805e8d..4ea8cb17cc7a 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -36,6 +36,7 @@
36#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
37#include <rdma/rdma_cm.h> 37#include <rdma/rdma_cm.h>
38 38
39#include "rds_single_path.h"
39#include "rds.h" 40#include "rds.h"
40#include "ib.h" 41#include "ib.h"
41 42
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index f27d2c82b036..6e4110aa5135 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -36,6 +36,7 @@
36#include <linux/dmapool.h> 36#include <linux/dmapool.h>
37#include <linux/ratelimit.h> 37#include <linux/ratelimit.h>
38 38
39#include "rds_single_path.h"
39#include "rds.h" 40#include "rds.h"
40#include "ib.h" 41#include "ib.h"
41 42
diff --git a/net/rds/loop.c b/net/rds/loop.c
index 814173b466d9..15f83db78f0c 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -34,6 +34,7 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/in.h> 35#include <linux/in.h>
36 36
37#include "rds_single_path.h"
37#include "rds.h" 38#include "rds.h"
38#include "loop.h" 39#include "loop.h"
39 40
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 7220bebcf558..345f09059e9f 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -33,6 +33,7 @@
33#include <linux/module.h> 33#include <linux/module.h>
34#include <rdma/rdma_cm.h> 34#include <rdma/rdma_cm.h>
35 35
36#include "rds_single_path.h"
36#include "rdma_transport.h" 37#include "rdma_transport.h"
37#include "ib.h" 38#include "ib.h"
38 39
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 387df5f32e49..2e35b738176f 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -84,56 +84,69 @@ enum {
84#define RDS_IN_XMIT 2 84#define RDS_IN_XMIT 2
85#define RDS_RECV_REFILL 3 85#define RDS_RECV_REFILL 3
86 86
87/* Max number of multipaths per RDS connection. Must be a power of 2 */
88#define RDS_MPATH_WORKERS 1
89
90/* Per mpath connection state */
91struct rds_conn_path {
92 struct rds_connection *cp_conn;
93 struct rds_message *cp_xmit_rm;
94 unsigned long cp_xmit_sg;
95 unsigned int cp_xmit_hdr_off;
96 unsigned int cp_xmit_data_off;
97 unsigned int cp_xmit_atomic_sent;
98 unsigned int cp_xmit_rdma_sent;
99 unsigned int cp_xmit_data_sent;
100
101 spinlock_t cp_lock; /* protect msg queues */
102 u64 cp_next_tx_seq;
103 struct list_head cp_send_queue;
104 struct list_head cp_retrans;
105
106 u64 cp_next_rx_seq;
107
108 void *cp_transport_data;
109
110 atomic_t cp_state;
111 unsigned long cp_send_gen;
112 unsigned long cp_flags;
113 unsigned long cp_reconnect_jiffies;
114 struct delayed_work cp_send_w;
115 struct delayed_work cp_recv_w;
116 struct delayed_work cp_conn_w;
117 struct work_struct cp_down_w;
118 struct mutex cp_cm_lock; /* protect cp_state & cm */
119 wait_queue_head_t cp_waitq;
120
121 unsigned int cp_unacked_packets;
122 unsigned int cp_unacked_bytes;
123 unsigned int cp_outgoing:1,
124 cp_pad_to_32:31;
125 unsigned int cp_index;
126};
127
128/* One rds_connection per RDS address pair */
87struct rds_connection { 129struct rds_connection {
88 struct hlist_node c_hash_node; 130 struct hlist_node c_hash_node;
89 __be32 c_laddr; 131 __be32 c_laddr;
90 __be32 c_faddr; 132 __be32 c_faddr;
91 unsigned int c_loopback:1, 133 unsigned int c_loopback:1,
92 c_outgoing:1, 134 c_pad_to_32:31;
93 c_pad_to_32:30; 135 int c_npaths;
94 struct rds_connection *c_passive; 136 struct rds_connection *c_passive;
137 struct rds_transport *c_trans;
95 138
96 struct rds_cong_map *c_lcong; 139 struct rds_cong_map *c_lcong;
97 struct rds_cong_map *c_fcong; 140 struct rds_cong_map *c_fcong;
98 141
99 struct rds_message *c_xmit_rm; 142 /* Protocol version */
100 unsigned long c_xmit_sg; 143 unsigned int c_version;
101 unsigned int c_xmit_hdr_off; 144 possible_net_t c_net;
102 unsigned int c_xmit_data_off;
103 unsigned int c_xmit_atomic_sent;
104 unsigned int c_xmit_rdma_sent;
105 unsigned int c_xmit_data_sent;
106
107 spinlock_t c_lock; /* protect msg queues */
108 u64 c_next_tx_seq;
109 struct list_head c_send_queue;
110 struct list_head c_retrans;
111
112 u64 c_next_rx_seq;
113
114 struct rds_transport *c_trans;
115 void *c_transport_data;
116
117 atomic_t c_state;
118 unsigned long c_send_gen;
119 unsigned long c_flags;
120 unsigned long c_reconnect_jiffies;
121 struct delayed_work c_send_w;
122 struct delayed_work c_recv_w;
123 struct delayed_work c_conn_w;
124 struct work_struct c_down_w;
125 struct mutex c_cm_lock; /* protect conn state & cm */
126 wait_queue_head_t c_waitq;
127 145
128 struct list_head c_map_item; 146 struct list_head c_map_item;
129 unsigned long c_map_queued; 147 unsigned long c_map_queued;
130 148
131 unsigned int c_unacked_packets; 149 struct rds_conn_path c_path[RDS_MPATH_WORKERS];
132 unsigned int c_unacked_bytes;
133
134 /* Protocol version */
135 unsigned int c_version;
136 possible_net_t c_net;
137}; 150};
138 151
139static inline 152static inline
@@ -218,6 +231,7 @@ struct rds_incoming {
218 atomic_t i_refcount; 231 atomic_t i_refcount;
219 struct list_head i_item; 232 struct list_head i_item;
220 struct rds_connection *i_conn; 233 struct rds_connection *i_conn;
234 struct rds_conn_path *i_conn_path;
221 struct rds_header i_hdr; 235 struct rds_header i_hdr;
222 unsigned long i_rx_jiffies; 236 unsigned long i_rx_jiffies;
223 __be32 i_saddr; 237 __be32 i_saddr;
@@ -433,7 +447,8 @@ struct rds_transport {
433 char t_name[TRANSNAMSIZ]; 447 char t_name[TRANSNAMSIZ];
434 struct list_head t_item; 448 struct list_head t_item;
435 struct module *t_owner; 449 struct module *t_owner;
436 unsigned int t_prefer_loopback:1; 450 unsigned int t_prefer_loopback:1,
451 t_mp_capable:1;
437 unsigned int t_type; 452 unsigned int t_type;
438 453
439 int (*laddr_check)(struct net *net, __be32 addr); 454 int (*laddr_check)(struct net *net, __be32 addr);
@@ -441,8 +456,11 @@ struct rds_transport {
441 void (*conn_free)(void *data); 456 void (*conn_free)(void *data);
442 int (*conn_connect)(struct rds_connection *conn); 457 int (*conn_connect)(struct rds_connection *conn);
443 void (*conn_shutdown)(struct rds_connection *conn); 458 void (*conn_shutdown)(struct rds_connection *conn);
459 void (*conn_path_shutdown)(struct rds_conn_path *conn);
444 void (*xmit_prepare)(struct rds_connection *conn); 460 void (*xmit_prepare)(struct rds_connection *conn);
461 void (*xmit_path_prepare)(struct rds_conn_path *cp);
445 void (*xmit_complete)(struct rds_connection *conn); 462 void (*xmit_complete)(struct rds_connection *conn);
463 void (*xmit_path_complete)(struct rds_conn_path *cp);
446 int (*xmit)(struct rds_connection *conn, struct rds_message *rm, 464 int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
447 unsigned int hdr_off, unsigned int sg, unsigned int off); 465 unsigned int hdr_off, unsigned int sg, unsigned int off);
448 int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op); 466 int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
@@ -636,10 +654,12 @@ struct rds_connection *rds_conn_create(struct net *net,
636struct rds_connection *rds_conn_create_outgoing(struct net *net, 654struct rds_connection *rds_conn_create_outgoing(struct net *net,
637 __be32 laddr, __be32 faddr, 655 __be32 laddr, __be32 faddr,
638 struct rds_transport *trans, gfp_t gfp); 656 struct rds_transport *trans, gfp_t gfp);
639void rds_conn_shutdown(struct rds_connection *conn); 657void rds_conn_shutdown(struct rds_conn_path *cpath);
640void rds_conn_destroy(struct rds_connection *conn); 658void rds_conn_destroy(struct rds_connection *conn);
641void rds_conn_drop(struct rds_connection *conn); 659void rds_conn_drop(struct rds_connection *conn);
660void rds_conn_path_drop(struct rds_conn_path *cpath);
642void rds_conn_connect_if_down(struct rds_connection *conn); 661void rds_conn_connect_if_down(struct rds_connection *conn);
662void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
643void rds_for_each_conn_info(struct socket *sock, unsigned int len, 663void rds_for_each_conn_info(struct socket *sock, unsigned int len,
644 struct rds_info_iterator *iter, 664 struct rds_info_iterator *iter,
645 struct rds_info_lengths *lens, 665 struct rds_info_lengths *lens,
@@ -650,28 +670,60 @@ void __rds_conn_error(struct rds_connection *conn, const char *, ...);
650#define rds_conn_error(conn, fmt...) \ 670#define rds_conn_error(conn, fmt...) \
651 __rds_conn_error(conn, KERN_WARNING "RDS: " fmt) 671 __rds_conn_error(conn, KERN_WARNING "RDS: " fmt)
652 672
673void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
674#define rds_conn_path_error(cp, fmt...) \
675 __rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)
676
677static inline int
678rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
679{
680 return atomic_cmpxchg(&cp->cp_state, old, new) == old;
681}
682
653static inline int 683static inline int
654rds_conn_transition(struct rds_connection *conn, int old, int new) 684rds_conn_transition(struct rds_connection *conn, int old, int new)
655{ 685{
656 return atomic_cmpxchg(&conn->c_state, old, new) == old; 686 WARN_ON(conn->c_trans->t_mp_capable);
687 return rds_conn_path_transition(&conn->c_path[0], old, new);
688}
689
690static inline int
691rds_conn_path_state(struct rds_conn_path *cp)
692{
693 return atomic_read(&cp->cp_state);
657} 694}
658 695
659static inline int 696static inline int
660rds_conn_state(struct rds_connection *conn) 697rds_conn_state(struct rds_connection *conn)
661{ 698{
662 return atomic_read(&conn->c_state); 699 WARN_ON(conn->c_trans->t_mp_capable);
700 return rds_conn_path_state(&conn->c_path[0]);
701}
702
703static inline int
704rds_conn_path_up(struct rds_conn_path *cp)
705{
706 return atomic_read(&cp->cp_state) == RDS_CONN_UP;
663} 707}
664 708
665static inline int 709static inline int
666rds_conn_up(struct rds_connection *conn) 710rds_conn_up(struct rds_connection *conn)
667{ 711{
668 return atomic_read(&conn->c_state) == RDS_CONN_UP; 712 WARN_ON(conn->c_trans->t_mp_capable);
713 return rds_conn_path_up(&conn->c_path[0]);
714}
715
716static inline int
717rds_conn_path_connecting(struct rds_conn_path *cp)
718{
719 return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
669} 720}
670 721
671static inline int 722static inline int
672rds_conn_connecting(struct rds_connection *conn) 723rds_conn_connecting(struct rds_connection *conn)
673{ 724{
674 return atomic_read(&conn->c_state) == RDS_CONN_CONNECTING; 725 WARN_ON(conn->c_trans->t_mp_capable);
726 return rds_conn_path_connecting(&conn->c_path[0]);
675} 727}
676 728
677/* message.c */ 729/* message.c */
@@ -720,6 +772,8 @@ void rds_page_exit(void);
720/* recv.c */ 772/* recv.c */
721void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn, 773void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
722 __be32 saddr); 774 __be32 saddr);
775void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn,
776 __be32 saddr);
723void rds_inc_put(struct rds_incoming *inc); 777void rds_inc_put(struct rds_incoming *inc);
724void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr, 778void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
725 struct rds_incoming *inc, gfp_t gfp); 779 struct rds_incoming *inc, gfp_t gfp);
@@ -733,16 +787,16 @@ void rds_inc_info_copy(struct rds_incoming *inc,
733 787
734/* send.c */ 788/* send.c */
735int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len); 789int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
736void rds_send_reset(struct rds_connection *conn); 790void rds_send_path_reset(struct rds_conn_path *conn);
737int rds_send_xmit(struct rds_connection *conn); 791int rds_send_xmit(struct rds_conn_path *cp);
738struct sockaddr_in; 792struct sockaddr_in;
739void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest); 793void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
740typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack); 794typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
741void rds_send_drop_acked(struct rds_connection *conn, u64 ack, 795void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
742 is_acked_func is_acked); 796 is_acked_func is_acked);
743int rds_send_pong(struct rds_connection *conn, __be16 dport); 797void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
744struct rds_message *rds_send_get_message(struct rds_connection *, 798 is_acked_func is_acked);
745 struct rm_rdma_op *); 799int rds_send_pong(struct rds_conn_path *cp, __be16 dport);
746 800
747/* rdma.c */ 801/* rdma.c */
748void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force); 802void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
@@ -809,12 +863,12 @@ extern unsigned int rds_sysctl_trace_level;
809int rds_threads_init(void); 863int rds_threads_init(void);
810void rds_threads_exit(void); 864void rds_threads_exit(void);
811extern struct workqueue_struct *rds_wq; 865extern struct workqueue_struct *rds_wq;
812void rds_queue_reconnect(struct rds_connection *conn); 866void rds_queue_reconnect(struct rds_conn_path *cp);
813void rds_connect_worker(struct work_struct *); 867void rds_connect_worker(struct work_struct *);
814void rds_shutdown_worker(struct work_struct *); 868void rds_shutdown_worker(struct work_struct *);
815void rds_send_worker(struct work_struct *); 869void rds_send_worker(struct work_struct *);
816void rds_recv_worker(struct work_struct *); 870void rds_recv_worker(struct work_struct *);
817void rds_connect_path_complete(struct rds_connection *conn, int curr); 871void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
818void rds_connect_complete(struct rds_connection *conn); 872void rds_connect_complete(struct rds_connection *conn);
819 873
820/* transport.c */ 874/* transport.c */
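
Every connection-level helper in the header changes above follows the same delegation pattern: it becomes a thin wrapper over path 0 of the new per-path state, guarded by a WARN_ON for multipath-capable transports. A reduced sketch of that pattern (the types and the RDS_CONN_UP value here are illustrative stand-ins, not the in-tree definitions):

#include <linux/atomic.h>

#define RDS_CONN_UP 3   /* stand-in value for the sketch */

struct rds_conn_path {
        atomic_t cp_state;              /* per-path connection state */
};

struct rds_connection {
        struct rds_conn_path c_path[1]; /* in-tree: one entry per lane */
};

static inline int rds_conn_path_up(struct rds_conn_path *cp)
{
        return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}

/* Legacy entry point: only meaningful when the transport is not
 * mp-capable, hence the WARN_ON in the real header. */
static inline int rds_conn_up(struct rds_connection *conn)
{
        return rds_conn_path_up(&conn->c_path[0]);
}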
diff --git a/net/rds/rds_single_path.h b/net/rds/rds_single_path.h
new file mode 100644
index 000000000000..e1241af7c1ad
--- /dev/null
+++ b/net/rds/rds_single_path.h
@@ -0,0 +1,30 @@
1#ifndef _RDS_RDS_SINGLE_H
2#define _RDS_RDS_SINGLE_H
3
4#define c_xmit_rm c_path[0].cp_xmit_rm
5#define c_xmit_sg c_path[0].cp_xmit_sg
6#define c_xmit_hdr_off c_path[0].cp_xmit_hdr_off
7#define c_xmit_data_off c_path[0].cp_xmit_data_off
8#define c_xmit_atomic_sent c_path[0].cp_xmit_atomic_sent
9#define c_xmit_rdma_sent c_path[0].cp_xmit_rdma_sent
10#define c_xmit_data_sent c_path[0].cp_xmit_data_sent
11#define c_lock c_path[0].cp_lock
12#define c_next_tx_seq c_path[0].cp_next_tx_seq
13#define c_send_queue c_path[0].cp_send_queue
14#define c_retrans c_path[0].cp_retrans
15#define c_next_rx_seq c_path[0].cp_next_rx_seq
16#define c_transport_data c_path[0].cp_transport_data
17#define c_state c_path[0].cp_state
18#define c_send_gen c_path[0].cp_send_gen
19#define c_flags c_path[0].cp_flags
20#define c_reconnect_jiffies c_path[0].cp_reconnect_jiffies
21#define c_send_w c_path[0].cp_send_w
22#define c_recv_w c_path[0].cp_recv_w
23#define c_conn_w c_path[0].cp_conn_w
24#define c_down_w c_path[0].cp_down_w
25#define c_cm_lock c_path[0].cp_cm_lock
26#define c_waitq c_path[0].cp_waitq
27#define c_unacked_packets c_path[0].cp_unacked_packets
28#define c_unacked_bytes c_path[0].cp_unacked_bytes
29
30#endif /* _RDS_RDS_SINGLE_H */
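
These are plain textual macros, so a transport file that includes this shim ahead of rds.h can keep using the old c_* member names unchanged; the preprocessor rewrites every access to path 0. A reduced sketch of the effect (the struct members are stand-ins for the in-tree ones):

#include <linux/list.h>

#define c_send_queue c_path[0].cp_send_queue    /* one of the shim macros */

struct rds_conn_path {
        struct list_head cp_send_queue;
};

struct rds_connection {
        struct rds_conn_path c_path[1];
};

static void queue_one(struct rds_connection *conn, struct list_head *item)
{
        /* Written against the old name; after preprocessing this is
         * list_add_tail(item, &conn->c_path[0].cp_send_queue). */
        list_add_tail(item, &conn->c_send_queue);
}

Including the shim before rds.h works because, after this commit, none of the rewritten c_* names remain as direct members of struct rds_connection.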
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 8413f6c99e13..b58f50571782 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -53,6 +53,20 @@ void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
53} 53}
54EXPORT_SYMBOL_GPL(rds_inc_init); 54EXPORT_SYMBOL_GPL(rds_inc_init);
55 55
56void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
57 __be32 saddr)
58{
59 atomic_set(&inc->i_refcount, 1);
60 INIT_LIST_HEAD(&inc->i_item);
61 inc->i_conn = cp->cp_conn;
62 inc->i_conn_path = cp;
63 inc->i_saddr = saddr;
64 inc->i_rdma_cookie = 0;
65 inc->i_rx_tstamp.tv_sec = 0;
66 inc->i_rx_tstamp.tv_usec = 0;
67}
68EXPORT_SYMBOL_GPL(rds_inc_path_init);
69
56static void rds_inc_addref(struct rds_incoming *inc) 70static void rds_inc_addref(struct rds_incoming *inc)
57{ 71{
58 rdsdebug("addref inc %p ref %d\n", inc, atomic_read(&inc->i_refcount)); 72 rdsdebug("addref inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
@@ -164,13 +178,18 @@ void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
164 struct rds_sock *rs = NULL; 178 struct rds_sock *rs = NULL;
165 struct sock *sk; 179 struct sock *sk;
166 unsigned long flags; 180 unsigned long flags;
181 struct rds_conn_path *cp;
167 182
168 inc->i_conn = conn; 183 inc->i_conn = conn;
169 inc->i_rx_jiffies = jiffies; 184 inc->i_rx_jiffies = jiffies;
185 if (conn->c_trans->t_mp_capable)
186 cp = inc->i_conn_path;
187 else
188 cp = &conn->c_path[0];
170 189
171 rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u " 190 rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
172 "flags 0x%x rx_jiffies %lu\n", conn, 191 "flags 0x%x rx_jiffies %lu\n", conn,
173 (unsigned long long)conn->c_next_rx_seq, 192 (unsigned long long)cp->cp_next_rx_seq,
174 inc, 193 inc,
175 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence), 194 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
176 be32_to_cpu(inc->i_hdr.h_len), 195 be32_to_cpu(inc->i_hdr.h_len),
@@ -199,16 +218,16 @@ void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
199 * XXX we could spend more on the wire to get more robust failure 218 * XXX we could spend more on the wire to get more robust failure
200 * detection, arguably worth it to avoid data corruption. 219 * detection, arguably worth it to avoid data corruption.
201 */ 220 */
202 if (be64_to_cpu(inc->i_hdr.h_sequence) < conn->c_next_rx_seq && 221 if (be64_to_cpu(inc->i_hdr.h_sequence) < cp->cp_next_rx_seq &&
203 (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) { 222 (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
204 rds_stats_inc(s_recv_drop_old_seq); 223 rds_stats_inc(s_recv_drop_old_seq);
205 goto out; 224 goto out;
206 } 225 }
207 conn->c_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1; 226 cp->cp_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;
208 227
209 if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) { 228 if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
210 rds_stats_inc(s_recv_ping); 229 rds_stats_inc(s_recv_ping);
211 rds_send_pong(conn, inc->i_hdr.h_sport); 230 rds_send_pong(cp, inc->i_hdr.h_sport);
212 goto out; 231 goto out;
213 } 232 }
214 233
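
The receive side now resolves a path before touching any sequence state: multipath-capable transports stamp the path on the incoming message via rds_inc_path_init(), and everything else implicitly lives on path 0. That rule, distilled into a hypothetical helper (the name is ours; rds_recv_incoming() open-codes the logic, and the types come from rds.h above):

static struct rds_conn_path *rds_inc_path(struct rds_incoming *inc,
                                          struct rds_connection *conn)
{
        if (conn->c_trans->t_mp_capable)
                return inc->i_conn_path;        /* set by rds_inc_path_init() */
        return &conn->c_path[0];                /* single-path default */
}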
diff --git a/net/rds/send.c b/net/rds/send.c
index b1962f8e30f7..ee43d6b2ea8f 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -62,14 +62,14 @@ static void rds_send_remove_from_sock(struct list_head *messages, int status);
62 * Reset the send state. Callers must ensure that this doesn't race with 62 * Reset the send state. Callers must ensure that this doesn't race with
63 * rds_send_xmit(). 63 * rds_send_xmit().
64 */ 64 */
65void rds_send_reset(struct rds_connection *conn) 65void rds_send_path_reset(struct rds_conn_path *cp)
66{ 66{
67 struct rds_message *rm, *tmp; 67 struct rds_message *rm, *tmp;
68 unsigned long flags; 68 unsigned long flags;
69 69
70 if (conn->c_xmit_rm) { 70 if (cp->cp_xmit_rm) {
71 rm = conn->c_xmit_rm; 71 rm = cp->cp_xmit_rm;
72 conn->c_xmit_rm = NULL; 72 cp->cp_xmit_rm = NULL;
73 /* Tell the user the RDMA op is no longer mapped by the 73 /* Tell the user the RDMA op is no longer mapped by the
74 * transport. This isn't entirely true (it's flushed out 74 * transport. This isn't entirely true (it's flushed out
75 * independently) but as the connection is down, there's 75 * independently) but as the connection is down, there's
@@ -78,37 +78,37 @@ void rds_send_reset(struct rds_connection *conn)
78 rds_message_put(rm); 78 rds_message_put(rm);
79 } 79 }
80 80
81 conn->c_xmit_sg = 0; 81 cp->cp_xmit_sg = 0;
82 conn->c_xmit_hdr_off = 0; 82 cp->cp_xmit_hdr_off = 0;
83 conn->c_xmit_data_off = 0; 83 cp->cp_xmit_data_off = 0;
84 conn->c_xmit_atomic_sent = 0; 84 cp->cp_xmit_atomic_sent = 0;
85 conn->c_xmit_rdma_sent = 0; 85 cp->cp_xmit_rdma_sent = 0;
86 conn->c_xmit_data_sent = 0; 86 cp->cp_xmit_data_sent = 0;
87 87
88 conn->c_map_queued = 0; 88 cp->cp_conn->c_map_queued = 0;
89 89
90 conn->c_unacked_packets = rds_sysctl_max_unacked_packets; 90 cp->cp_unacked_packets = rds_sysctl_max_unacked_packets;
91 conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes; 91 cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes;
92 92
93 /* Mark messages as retransmissions, and move them to the send q */ 93 /* Mark messages as retransmissions, and move them to the send q */
94 spin_lock_irqsave(&conn->c_lock, flags); 94 spin_lock_irqsave(&cp->cp_lock, flags);
95 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) { 95 list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
96 set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags); 96 set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
97 set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags); 97 set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
98 } 98 }
99 list_splice_init(&conn->c_retrans, &conn->c_send_queue); 99 list_splice_init(&cp->cp_retrans, &cp->cp_send_queue);
100 spin_unlock_irqrestore(&conn->c_lock, flags); 100 spin_unlock_irqrestore(&cp->cp_lock, flags);
101} 101}
102EXPORT_SYMBOL_GPL(rds_send_reset); 102EXPORT_SYMBOL_GPL(rds_send_path_reset);
103 103
104static int acquire_in_xmit(struct rds_connection *conn) 104static int acquire_in_xmit(struct rds_conn_path *cp)
105{ 105{
106 return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0; 106 return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0;
107} 107}
108 108
109static void release_in_xmit(struct rds_connection *conn) 109static void release_in_xmit(struct rds_conn_path *cp)
110{ 110{
111 clear_bit(RDS_IN_XMIT, &conn->c_flags); 111 clear_bit(RDS_IN_XMIT, &cp->cp_flags);
112 smp_mb__after_atomic(); 112 smp_mb__after_atomic();
113 /* 113 /*
114 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a 114 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
@@ -116,8 +116,8 @@ static void release_in_xmit(struct rds_connection *conn)
116 * the system-wide hashed waitqueue buckets in the fast path only to 116 * the system-wide hashed waitqueue buckets in the fast path only to
117 * almost never find waiters. 117 * almost never find waiters.
118 */ 118 */
119 if (waitqueue_active(&conn->c_waitq)) 119 if (waitqueue_active(&cp->cp_waitq))
120 wake_up_all(&conn->c_waitq); 120 wake_up_all(&cp->cp_waitq);
121} 121}
122 122
123/* 123/*
@@ -134,8 +134,9 @@ static void release_in_xmit(struct rds_connection *conn)
134 * - small message latency is higher behind queued large messages 134 * - small message latency is higher behind queued large messages
135 * - large message latency isn't starved by intervening small sends 135 * - large message latency isn't starved by intervening small sends
136 */ 136 */
137int rds_send_xmit(struct rds_connection *conn) 137int rds_send_xmit(struct rds_conn_path *cp)
138{ 138{
139 struct rds_connection *conn = cp->cp_conn;
139 struct rds_message *rm; 140 struct rds_message *rm;
140 unsigned long flags; 141 unsigned long flags;
141 unsigned int tmp; 142 unsigned int tmp;
@@ -155,7 +156,7 @@ restart:
155 * avoids blocking the caller and trading per-connection data between 156 * avoids blocking the caller and trading per-connection data between
156 * caches per message. 157 * caches per message.
157 */ 158 */
158 if (!acquire_in_xmit(conn)) { 159 if (!acquire_in_xmit(cp)) {
159 rds_stats_inc(s_send_lock_contention); 160 rds_stats_inc(s_send_lock_contention);
160 ret = -ENOMEM; 161 ret = -ENOMEM;
161 goto out; 162 goto out;
@@ -169,21 +170,25 @@ restart:
169 * The acquire_in_xmit() check above ensures that only one 170 * The acquire_in_xmit() check above ensures that only one
170 * caller can increment c_send_gen at any time. 171 * caller can increment cp_send_gen at any time.
171 */ 172 */
172 conn->c_send_gen++; 173 cp->cp_send_gen++;
173 send_gen = conn->c_send_gen; 174 send_gen = cp->cp_send_gen;
174 175
175 /* 176 /*
176 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT, 177 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
177 * we do the opposite to avoid races. 178 * we do the opposite to avoid races.
178 */ 179 */
179 if (!rds_conn_up(conn)) { 180 if (!rds_conn_path_up(cp)) {
180 release_in_xmit(conn); 181 release_in_xmit(cp);
181 ret = 0; 182 ret = 0;
182 goto out; 183 goto out;
183 } 184 }
184 185
185 if (conn->c_trans->xmit_prepare) 186 if (conn->c_trans->t_mp_capable) {
187 if (conn->c_trans->xmit_path_prepare)
188 conn->c_trans->xmit_path_prepare(cp);
189 } else if (conn->c_trans->xmit_prepare) {
186 conn->c_trans->xmit_prepare(conn); 190 conn->c_trans->xmit_prepare(conn);
191 }
187 192
188 /* 193 /*
189 * spin trying to push headers and data down the connection until 194 * spin trying to push headers and data down the connection until
@@ -191,7 +196,7 @@ restart:
191 */ 196 */
192 while (1) { 197 while (1) {
193 198
194 rm = conn->c_xmit_rm; 199 rm = cp->cp_xmit_rm;
195 200
196 /* 201 /*
197 * If between sending messages, we can send a pending congestion 202 * If between sending messages, we can send a pending congestion
@@ -204,14 +209,16 @@ restart:
204 break; 209 break;
205 } 210 }
206 rm->data.op_active = 1; 211 rm->data.op_active = 1;
212 rm->m_inc.i_conn_path = cp;
213 rm->m_inc.i_conn = cp->cp_conn;
207 214
208 conn->c_xmit_rm = rm; 215 cp->cp_xmit_rm = rm;
209 } 216 }
210 217
211 /* 218 /*
212 * If not already working on one, grab the next message. 219 * If not already working on one, grab the next message.
213 * 220 *
214 * c_xmit_rm holds a ref while we're sending this message down 221 * cp_xmit_rm holds a ref while we're sending this message down
215 * the connection. We can use this ref while holding the 222 * the connection. We can use this ref while holding the
216 * send_sem. rds_send_reset() is serialized with it. 223 * send_sem. rds_send_path_reset() is serialized with it.
217 */ 224 */
@@ -228,10 +235,10 @@ restart:
228 if (batch_count >= send_batch_count) 235 if (batch_count >= send_batch_count)
229 goto over_batch; 236 goto over_batch;
230 237
231 spin_lock_irqsave(&conn->c_lock, flags); 238 spin_lock_irqsave(&cp->cp_lock, flags);
232 239
233 if (!list_empty(&conn->c_send_queue)) { 240 if (!list_empty(&cp->cp_send_queue)) {
234 rm = list_entry(conn->c_send_queue.next, 241 rm = list_entry(cp->cp_send_queue.next,
235 struct rds_message, 242 struct rds_message,
236 m_conn_item); 243 m_conn_item);
237 rds_message_addref(rm); 244 rds_message_addref(rm);
@@ -240,10 +247,11 @@ restart:
240 * Move the message from the send queue to the retransmit 247 * Move the message from the send queue to the retransmit
241 * list right away. 248 * list right away.
242 */ 249 */
243 list_move_tail(&rm->m_conn_item, &conn->c_retrans); 250 list_move_tail(&rm->m_conn_item,
251 &cp->cp_retrans);
244 } 252 }
245 253
246 spin_unlock_irqrestore(&conn->c_lock, flags); 254 spin_unlock_irqrestore(&cp->cp_lock, flags);
247 255
248 if (!rm) 256 if (!rm)
249 break; 257 break;
@@ -257,32 +265,34 @@ restart:
257 */ 265 */
258 if (rm->rdma.op_active && 266 if (rm->rdma.op_active &&
259 test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) { 267 test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
260 spin_lock_irqsave(&conn->c_lock, flags); 268 spin_lock_irqsave(&cp->cp_lock, flags);
261 if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) 269 if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
262 list_move(&rm->m_conn_item, &to_be_dropped); 270 list_move(&rm->m_conn_item, &to_be_dropped);
263 spin_unlock_irqrestore(&conn->c_lock, flags); 271 spin_unlock_irqrestore(&cp->cp_lock, flags);
264 continue; 272 continue;
265 } 273 }
266 274
267 /* Require an ACK every once in a while */ 275 /* Require an ACK every once in a while */
268 len = ntohl(rm->m_inc.i_hdr.h_len); 276 len = ntohl(rm->m_inc.i_hdr.h_len);
269 if (conn->c_unacked_packets == 0 || 277 if (cp->cp_unacked_packets == 0 ||
270 conn->c_unacked_bytes < len) { 278 cp->cp_unacked_bytes < len) {
271 __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags); 279 __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
272 280
273 conn->c_unacked_packets = rds_sysctl_max_unacked_packets; 281 cp->cp_unacked_packets =
274 conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes; 282 rds_sysctl_max_unacked_packets;
283 cp->cp_unacked_bytes =
284 rds_sysctl_max_unacked_bytes;
275 rds_stats_inc(s_send_ack_required); 285 rds_stats_inc(s_send_ack_required);
276 } else { 286 } else {
277 conn->c_unacked_bytes -= len; 287 cp->cp_unacked_bytes -= len;
278 conn->c_unacked_packets--; 288 cp->cp_unacked_packets--;
279 } 289 }
280 290
281 conn->c_xmit_rm = rm; 291 cp->cp_xmit_rm = rm;
282 } 292 }
283 293
284 /* The transport either sends the whole rdma or none of it */ 294 /* The transport either sends the whole rdma or none of it */
285 if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) { 295 if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) {
286 rm->m_final_op = &rm->rdma; 296 rm->m_final_op = &rm->rdma;
287 /* The transport owns the mapped memory for now. 297 /* The transport owns the mapped memory for now.
288 * You can't unmap it while it's on the send queue 298 * You can't unmap it while it's on the send queue
@@ -294,11 +304,11 @@ restart:
294 wake_up_interruptible(&rm->m_flush_wait); 304 wake_up_interruptible(&rm->m_flush_wait);
295 break; 305 break;
296 } 306 }
297 conn->c_xmit_rdma_sent = 1; 307 cp->cp_xmit_rdma_sent = 1;
298 308
299 } 309 }
300 310
301 if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) { 311 if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) {
302 rm->m_final_op = &rm->atomic; 312 rm->m_final_op = &rm->atomic;
303 /* The transport owns the mapped memory for now. 313 /* The transport owns the mapped memory for now.
304 * You can't unmap it while it's on the send queue 314 * You can't unmap it while it's on the send queue
@@ -310,7 +320,7 @@ restart:
310 wake_up_interruptible(&rm->m_flush_wait); 320 wake_up_interruptible(&rm->m_flush_wait);
311 break; 321 break;
312 } 322 }
313 conn->c_xmit_atomic_sent = 1; 323 cp->cp_xmit_atomic_sent = 1;
314 324
315 } 325 }
316 326
@@ -336,41 +346,42 @@ restart:
336 rm->data.op_active = 0; 346 rm->data.op_active = 0;
337 } 347 }
338 348
339 if (rm->data.op_active && !conn->c_xmit_data_sent) { 349 if (rm->data.op_active && !cp->cp_xmit_data_sent) {
340 rm->m_final_op = &rm->data; 350 rm->m_final_op = &rm->data;
351
341 ret = conn->c_trans->xmit(conn, rm, 352 ret = conn->c_trans->xmit(conn, rm,
342 conn->c_xmit_hdr_off, 353 cp->cp_xmit_hdr_off,
343 conn->c_xmit_sg, 354 cp->cp_xmit_sg,
344 conn->c_xmit_data_off); 355 cp->cp_xmit_data_off);
345 if (ret <= 0) 356 if (ret <= 0)
346 break; 357 break;
347 358
348 if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) { 359 if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) {
349 tmp = min_t(int, ret, 360 tmp = min_t(int, ret,
350 sizeof(struct rds_header) - 361 sizeof(struct rds_header) -
351 conn->c_xmit_hdr_off); 362 cp->cp_xmit_hdr_off);
352 conn->c_xmit_hdr_off += tmp; 363 cp->cp_xmit_hdr_off += tmp;
353 ret -= tmp; 364 ret -= tmp;
354 } 365 }
355 366
356 sg = &rm->data.op_sg[conn->c_xmit_sg]; 367 sg = &rm->data.op_sg[cp->cp_xmit_sg];
357 while (ret) { 368 while (ret) {
358 tmp = min_t(int, ret, sg->length - 369 tmp = min_t(int, ret, sg->length -
359 conn->c_xmit_data_off); 370 cp->cp_xmit_data_off);
360 conn->c_xmit_data_off += tmp; 371 cp->cp_xmit_data_off += tmp;
361 ret -= tmp; 372 ret -= tmp;
362 if (conn->c_xmit_data_off == sg->length) { 373 if (cp->cp_xmit_data_off == sg->length) {
363 conn->c_xmit_data_off = 0; 374 cp->cp_xmit_data_off = 0;
364 sg++; 375 sg++;
365 conn->c_xmit_sg++; 376 cp->cp_xmit_sg++;
366 BUG_ON(ret != 0 && 377 BUG_ON(ret != 0 && cp->cp_xmit_sg ==
367 conn->c_xmit_sg == rm->data.op_nents); 378 rm->data.op_nents);
368 } 379 }
369 } 380 }
370 381
371 if (conn->c_xmit_hdr_off == sizeof(struct rds_header) && 382 if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) &&
372 (conn->c_xmit_sg == rm->data.op_nents)) 383 (cp->cp_xmit_sg == rm->data.op_nents))
373 conn->c_xmit_data_sent = 1; 384 cp->cp_xmit_data_sent = 1;
374 } 385 }
375 386
376 /* 387 /*
@@ -378,23 +389,27 @@ restart:
378 * if there is a data op. Thus, if the data is sent (or there was 389 * if there is a data op. Thus, if the data is sent (or there was
379 * none), then we're done with the rm. 390 * none), then we're done with the rm.
380 */ 391 */
381 if (!rm->data.op_active || conn->c_xmit_data_sent) { 392 if (!rm->data.op_active || cp->cp_xmit_data_sent) {
382 conn->c_xmit_rm = NULL; 393 cp->cp_xmit_rm = NULL;
383 conn->c_xmit_sg = 0; 394 cp->cp_xmit_sg = 0;
384 conn->c_xmit_hdr_off = 0; 395 cp->cp_xmit_hdr_off = 0;
385 conn->c_xmit_data_off = 0; 396 cp->cp_xmit_data_off = 0;
386 conn->c_xmit_rdma_sent = 0; 397 cp->cp_xmit_rdma_sent = 0;
387 conn->c_xmit_atomic_sent = 0; 398 cp->cp_xmit_atomic_sent = 0;
388 conn->c_xmit_data_sent = 0; 399 cp->cp_xmit_data_sent = 0;
389 400
390 rds_message_put(rm); 401 rds_message_put(rm);
391 } 402 }
392 } 403 }
393 404
394over_batch: 405over_batch:
395 if (conn->c_trans->xmit_complete) 406 if (conn->c_trans->t_mp_capable) {
407 if (conn->c_trans->xmit_path_complete)
408 conn->c_trans->xmit_path_complete(cp);
409 } else if (conn->c_trans->xmit_complete) {
396 conn->c_trans->xmit_complete(conn); 410 conn->c_trans->xmit_complete(conn);
397 release_in_xmit(conn); 411 }
412 release_in_xmit(cp);
398 413
399 /* Nuke any messages we decided not to retransmit. */ 414 /* Nuke any messages we decided not to retransmit. */
400 if (!list_empty(&to_be_dropped)) { 415 if (!list_empty(&to_be_dropped)) {
@@ -422,12 +437,12 @@ over_batch:
422 if (ret == 0) { 437 if (ret == 0) {
423 smp_mb(); 438 smp_mb();
424 if ((test_bit(0, &conn->c_map_queued) || 439 if ((test_bit(0, &conn->c_map_queued) ||
425 !list_empty(&conn->c_send_queue)) && 440 !list_empty(&cp->cp_send_queue)) &&
426 send_gen == conn->c_send_gen) { 441 send_gen == cp->cp_send_gen) {
427 rds_stats_inc(s_send_lock_queue_raced); 442 rds_stats_inc(s_send_lock_queue_raced);
428 if (batch_count < send_batch_count) 443 if (batch_count < send_batch_count)
429 goto restart; 444 goto restart;
430 queue_delayed_work(rds_wq, &conn->c_send_w, 1); 445 queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
431 } 446 }
432 } 447 }
433out: 448out:
@@ -560,42 +575,6 @@ __rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
560} 575}
561 576
562/* 577/*
563 * This is called from the IB send completion when we detect
564 * an RDMA operation that failed with a remote access error.
565 * So speed is not an issue here.
566 */
567struct rds_message *rds_send_get_message(struct rds_connection *conn,
568 struct rm_rdma_op *op)
569{
570 struct rds_message *rm, *tmp, *found = NULL;
571 unsigned long flags;
572
573 spin_lock_irqsave(&conn->c_lock, flags);
574
575 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
576 if (&rm->rdma == op) {
577 atomic_inc(&rm->m_refcount);
578 found = rm;
579 goto out;
580 }
581 }
582
583 list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
584 if (&rm->rdma == op) {
585 atomic_inc(&rm->m_refcount);
586 found = rm;
587 break;
588 }
589 }
590
591out:
592 spin_unlock_irqrestore(&conn->c_lock, flags);
593
594 return found;
595}
596EXPORT_SYMBOL_GPL(rds_send_get_message);
597
598/*
599 * This removes messages from the socket's list if they're on it. The list 578 * This removes messages from the socket's list if they're on it. The list
600 * argument must be private to the caller, we must be able to modify it 579 * argument must be private to the caller, we must be able to modify it
601 * without locks. The messages must have a reference held for their 580 * without locks. The messages must have a reference held for their
@@ -685,16 +664,16 @@ unlock_and_drop:
685 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked 664 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
686 * checks the RDS_MSG_HAS_ACK_SEQ bit. 665 * checks the RDS_MSG_HAS_ACK_SEQ bit.
687 */ 666 */
688void rds_send_drop_acked(struct rds_connection *conn, u64 ack, 667void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
689 is_acked_func is_acked) 668 is_acked_func is_acked)
690{ 669{
691 struct rds_message *rm, *tmp; 670 struct rds_message *rm, *tmp;
692 unsigned long flags; 671 unsigned long flags;
693 LIST_HEAD(list); 672 LIST_HEAD(list);
694 673
695 spin_lock_irqsave(&conn->c_lock, flags); 674 spin_lock_irqsave(&cp->cp_lock, flags);
696 675
697 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) { 676 list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
698 if (!rds_send_is_acked(rm, ack, is_acked)) 677 if (!rds_send_is_acked(rm, ack, is_acked))
699 break; 678 break;
700 679
@@ -706,17 +685,26 @@ void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
706 if (!list_empty(&list)) 685 if (!list_empty(&list))
707 smp_mb__after_atomic(); 686 smp_mb__after_atomic();
708 687
709 spin_unlock_irqrestore(&conn->c_lock, flags); 688 spin_unlock_irqrestore(&cp->cp_lock, flags);
710 689
711 /* now remove the messages from the sock list as needed */ 690 /* now remove the messages from the sock list as needed */
712 rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS); 691 rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
713} 692}
693EXPORT_SYMBOL_GPL(rds_send_path_drop_acked);
694
695void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
696 is_acked_func is_acked)
697{
698 WARN_ON(conn->c_trans->t_mp_capable);
699 rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked);
700}
714EXPORT_SYMBOL_GPL(rds_send_drop_acked); 701EXPORT_SYMBOL_GPL(rds_send_drop_acked);
715 702
716void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) 703void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
717{ 704{
718 struct rds_message *rm, *tmp; 705 struct rds_message *rm, *tmp;
719 struct rds_connection *conn; 706 struct rds_connection *conn;
707 struct rds_conn_path *cp;
720 unsigned long flags; 708 unsigned long flags;
721 LIST_HEAD(list); 709 LIST_HEAD(list);
722 710
@@ -745,22 +733,26 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
745 list_for_each_entry(rm, &list, m_sock_item) { 733 list_for_each_entry(rm, &list, m_sock_item) {
746 734
747 conn = rm->m_inc.i_conn; 735 conn = rm->m_inc.i_conn;
736 if (conn->c_trans->t_mp_capable)
737 cp = rm->m_inc.i_conn_path;
738 else
739 cp = &conn->c_path[0];
748 740
749 spin_lock_irqsave(&conn->c_lock, flags); 741 spin_lock_irqsave(&cp->cp_lock, flags);
750 /* 742 /*
751 * Maybe someone else beat us to removing rm from the conn. 743 * Maybe someone else beat us to removing rm from the conn.
752 * If we race with their flag update we'll get the lock and 744 * If we race with their flag update we'll get the lock and
753 * then really see that the flag has been cleared. 745 * then really see that the flag has been cleared.
754 */ 746 */
755 if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) { 747 if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
756 spin_unlock_irqrestore(&conn->c_lock, flags); 748 spin_unlock_irqrestore(&cp->cp_lock, flags);
757 spin_lock_irqsave(&rm->m_rs_lock, flags); 749 spin_lock_irqsave(&rm->m_rs_lock, flags);
758 rm->m_rs = NULL; 750 rm->m_rs = NULL;
759 spin_unlock_irqrestore(&rm->m_rs_lock, flags); 751 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
760 continue; 752 continue;
761 } 753 }
762 list_del_init(&rm->m_conn_item); 754 list_del_init(&rm->m_conn_item);
763 spin_unlock_irqrestore(&conn->c_lock, flags); 755 spin_unlock_irqrestore(&cp->cp_lock, flags);
764 756
765 /* 757 /*
766 * Couldn't grab m_rs_lock in top loop (lock ordering), 758 * Couldn't grab m_rs_lock in top loop (lock ordering),
@@ -809,6 +801,7 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
809 * message from the flow with RDS_CANCEL_SENT_TO. 801 * message from the flow with RDS_CANCEL_SENT_TO.
810 */ 802 */
811static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn, 803static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
804 struct rds_conn_path *cp,
812 struct rds_message *rm, __be16 sport, 805 struct rds_message *rm, __be16 sport,
813 __be16 dport, int *queued) 806 __be16 dport, int *queued)
814{ 807{
@@ -852,13 +845,14 @@ static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
852 trying to minimize the time we hold c_lock */ 845 trying to minimize the time we hold c_lock */
853 rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0); 846 rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
854 rm->m_inc.i_conn = conn; 847 rm->m_inc.i_conn = conn;
848 rm->m_inc.i_conn_path = cp;
855 rds_message_addref(rm); 849 rds_message_addref(rm);
856 850
857 spin_lock(&conn->c_lock); 851 spin_lock(&cp->cp_lock);
858 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++); 852 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++);
859 list_add_tail(&rm->m_conn_item, &conn->c_send_queue); 853 list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
860 set_bit(RDS_MSG_ON_CONN, &rm->m_flags); 854 set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
861 spin_unlock(&conn->c_lock); 855 spin_unlock(&cp->cp_lock);
862 856
863 rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n", 857 rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
864 rm, len, rs, rs->rs_snd_bytes, 858 rm, len, rs, rs->rs_snd_bytes,
@@ -990,6 +984,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
990 int queued = 0, allocated_mr = 0; 984 int queued = 0, allocated_mr = 0;
991 int nonblock = msg->msg_flags & MSG_DONTWAIT; 985 int nonblock = msg->msg_flags & MSG_DONTWAIT;
992 long timeo = sock_sndtimeo(sk, nonblock); 986 long timeo = sock_sndtimeo(sk, nonblock);
987 struct rds_conn_path *cpath;
993 988
994 /* Mirror how Linux UDP mirrors BSD error message compatibility */ 989 /* Mirror how Linux UDP mirrors BSD error message compatibility */
995 /* XXX: Perhaps MSG_MORE someday */ 990 /* XXX: Perhaps MSG_MORE someday */
@@ -1088,15 +1083,16 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
1088 goto out; 1083 goto out;
1089 } 1084 }
1090 1085
1091 rds_conn_connect_if_down(conn); 1086 cpath = &conn->c_path[0];
1087
1088 rds_conn_path_connect_if_down(cpath);
1092 1089
1093 ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); 1090 ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
1094 if (ret) { 1091 if (ret) {
1095 rs->rs_seen_congestion = 1; 1092 rs->rs_seen_congestion = 1;
1096 goto out; 1093 goto out;
1097 } 1094 }
1098 1095 while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port,
1099 while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
1100 dport, &queued)) { 1096 dport, &queued)) {
1101 rds_stats_inc(s_send_queue_full); 1097 rds_stats_inc(s_send_queue_full);
1102 1098
@@ -1106,7 +1102,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
1106 } 1102 }
1107 1103
1108 timeo = wait_event_interruptible_timeout(*sk_sleep(sk), 1104 timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
1109 rds_send_queue_rm(rs, conn, rm, 1105 rds_send_queue_rm(rs, conn, cpath, rm,
1110 rs->rs_bound_port, 1106 rs->rs_bound_port,
1111 dport, 1107 dport,
1112 &queued), 1108 &queued),
@@ -1127,9 +1123,9 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
1127 */ 1123 */
1128 rds_stats_inc(s_send_queued); 1124 rds_stats_inc(s_send_queued);
1129 1125
1130 ret = rds_send_xmit(conn); 1126 ret = rds_send_xmit(cpath);
1131 if (ret == -ENOMEM || ret == -EAGAIN) 1127 if (ret == -ENOMEM || ret == -EAGAIN)
1132 queue_delayed_work(rds_wq, &conn->c_send_w, 1); 1128 queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
1133 1129
1134 rds_message_put(rm); 1130 rds_message_put(rm);
1135 return payload_len; 1131 return payload_len;
@@ -1150,7 +1146,7 @@ out:
1150 * Reply to a ping packet. 1146 * Reply to a ping packet.
1151 */ 1147 */
1152int 1148int
1153rds_send_pong(struct rds_connection *conn, __be16 dport) 1149rds_send_pong(struct rds_conn_path *cp, __be16 dport)
1154{ 1150{
1155 struct rds_message *rm; 1151 struct rds_message *rm;
1156 unsigned long flags; 1152 unsigned long flags;
@@ -1162,31 +1158,32 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
1162 goto out; 1158 goto out;
1163 } 1159 }
1164 1160
1165 rm->m_daddr = conn->c_faddr; 1161 rm->m_daddr = cp->cp_conn->c_faddr;
1166 rm->data.op_active = 1; 1162 rm->data.op_active = 1;
1167 1163
1168 rds_conn_connect_if_down(conn); 1164 rds_conn_path_connect_if_down(cp);
1169 1165
1170 ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL); 1166 ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL);
1171 if (ret) 1167 if (ret)
1172 goto out; 1168 goto out;
1173 1169
1174 spin_lock_irqsave(&conn->c_lock, flags); 1170 spin_lock_irqsave(&cp->cp_lock, flags);
1175 list_add_tail(&rm->m_conn_item, &conn->c_send_queue); 1171 list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
1176 set_bit(RDS_MSG_ON_CONN, &rm->m_flags); 1172 set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
1177 rds_message_addref(rm); 1173 rds_message_addref(rm);
1178 rm->m_inc.i_conn = conn; 1174 rm->m_inc.i_conn = cp->cp_conn;
1175 rm->m_inc.i_conn_path = cp;
1179 1176
1180 rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport, 1177 rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
1181 conn->c_next_tx_seq); 1178 cp->cp_next_tx_seq);
1182 conn->c_next_tx_seq++; 1179 cp->cp_next_tx_seq++;
1183 spin_unlock_irqrestore(&conn->c_lock, flags); 1180 spin_unlock_irqrestore(&cp->cp_lock, flags);
1184 1181
1185 rds_stats_inc(s_send_queued); 1182 rds_stats_inc(s_send_queued);
1186 rds_stats_inc(s_send_pong); 1183 rds_stats_inc(s_send_pong);
1187 1184
1188 /* schedule the send work on rds_wq */ 1185 /* schedule the send work on rds_wq */
1189 queue_delayed_work(rds_wq, &conn->c_send_w, 1); 1186 queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
1190 1187
1191 rds_message_put(rm); 1188 rds_message_put(rm);
1192 return 0; 1189 return 0;
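
rds_send_xmit() now dispatches transport callbacks by capability: mp-capable transports get the new per-path hooks, everyone else keeps the whole-connection ones. A hypothetical helper distilling the prepare-side dispatch (the real function open-codes this, and repeats the same shape for xmit_path_complete/xmit_complete; types are from rds.h above):

static void rds_xmit_prepare(struct rds_conn_path *cp)
{
        struct rds_connection *conn = cp->cp_conn;

        if (conn->c_trans->t_mp_capable) {
                if (conn->c_trans->xmit_path_prepare)
                        conn->c_trans->xmit_path_prepare(cp);
        } else if (conn->c_trans->xmit_prepare) {
                conn->c_trans->xmit_prepare(conn);
        }
}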
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 74ee126a6fe6..5217d49ce6d6 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -38,6 +38,7 @@
38#include <net/net_namespace.h> 38#include <net/net_namespace.h>
39#include <net/netns/generic.h> 39#include <net/netns/generic.h>
40 40
41#include "rds_single_path.h"
41#include "rds.h" 42#include "rds.h"
42#include "tcp.h" 43#include "tcp.h"
43 44
@@ -56,8 +57,8 @@ static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
56 void __user *buffer, size_t *lenp, 57 void __user *buffer, size_t *lenp,
57 loff_t *fpos); 58 loff_t *fpos);
58 59
59int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF; 60static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF;
60int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF; 61static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF;
61 62
62static struct ctl_table rds_tcp_sysctl_table[] = { 63static struct ctl_table rds_tcp_sysctl_table[] = {
63#define RDS_TCP_SNDBUF 0 64#define RDS_TCP_SNDBUF 0
@@ -185,7 +186,7 @@ void rds_tcp_reset_callbacks(struct socket *sock,
185 release_sock(osock->sk); 186 release_sock(osock->sk);
186 sock_release(osock); 187 sock_release(osock);
187newsock: 188newsock:
188 rds_send_reset(conn); 189 rds_send_path_reset(&conn->c_path[0]);
189 lock_sock(sock->sk); 190 lock_sock(sock->sk);
190 write_lock_bh(&sock->sk->sk_callback_lock); 191 write_lock_bh(&sock->sk->sk_callback_lock);
191 tc->t_sock = sock; 192 tc->t_sock = sock;
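
tcp.c remains a single-path transport, so where it has to call the renamed path-level API directly, as in the callback-reset case above, it names path 0 explicitly. Sketched as a hypothetical wrapper:

static void rds_tcp_send_reset(struct rds_connection *conn)
{
        rds_send_path_reset(&conn->c_path[0]);  /* TCP only has path 0 here */
}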
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index f6e95d60db54..96c2c4d17909 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -34,6 +34,7 @@
34#include <linux/in.h> 34#include <linux/in.h>
35#include <net/tcp.h> 35#include <net/tcp.h>
36 36
37#include "rds_single_path.h"
37#include "rds.h" 38#include "rds.h"
38#include "tcp.h" 39#include "tcp.h"
39 40
@@ -60,7 +61,8 @@ void rds_tcp_state_change(struct sock *sk)
60 case TCP_SYN_RECV: 61 case TCP_SYN_RECV:
61 break; 62 break;
62 case TCP_ESTABLISHED: 63 case TCP_ESTABLISHED:
63 rds_connect_path_complete(conn, RDS_CONN_CONNECTING); 64 rds_connect_path_complete(&conn->c_path[0],
65 RDS_CONN_CONNECTING);
64 break; 66 break;
65 case TCP_CLOSE_WAIT: 67 case TCP_CLOSE_WAIT:
66 case TCP_CLOSE: 68 case TCP_CLOSE:
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 245542ca4718..f9cc945a77b3 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -35,6 +35,7 @@
35#include <linux/in.h> 35#include <linux/in.h>
36#include <net/tcp.h> 36#include <net/tcp.h>
37 37
38#include "rds_single_path.h"
38#include "rds.h" 39#include "rds.h"
39#include "tcp.h" 40#include "tcp.h"
40 41
@@ -132,17 +133,19 @@ int rds_tcp_accept_one(struct socket *sock)
132 * c_transport_data. 133 * c_transport_data.
133 */ 134 */
134 if (ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr) || 135 if (ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr) ||
135 !conn->c_outgoing) { 136 !conn->c_path[0].cp_outgoing) {
136 goto rst_nsk; 137 goto rst_nsk;
137 } else { 138 } else {
138 rds_tcp_reset_callbacks(new_sock, conn); 139 rds_tcp_reset_callbacks(new_sock, conn);
139 conn->c_outgoing = 0; 140 conn->c_path[0].cp_outgoing = 0;
140 /* rds_connect_path_complete() marks RDS_CONN_UP */ 141 /* rds_connect_path_complete() marks RDS_CONN_UP */
141 rds_connect_path_complete(conn, RDS_CONN_RESETTING); 142 rds_connect_path_complete(&conn->c_path[0],
143 RDS_CONN_RESETTING);
142 } 144 }
143 } else { 145 } else {
144 rds_tcp_set_callbacks(new_sock, conn); 146 rds_tcp_set_callbacks(new_sock, conn);
145 rds_connect_path_complete(conn, RDS_CONN_CONNECTING); 147 rds_connect_path_complete(&conn->c_path[0],
148 RDS_CONN_CONNECTING);
146 } 149 }
147 new_sock = NULL; 150 new_sock = NULL;
148 ret = 0; 151 ret = 0;
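
The dueling-connection tie-break above is unchanged in substance, merely re-rooted at c_path[0].cp_outgoing. As a hypothetical predicate (inverting the goto form in the diff; inet_sock comes from <net/inet_sock.h>):

static bool rds_tcp_keep_accepted(const struct inet_sock *inet, bool outgoing)
{
        /* Mirrors the diff: the accepted socket is reset when
         * saddr < daddr or path 0 never initiated an outgoing connect. */
        return ntohl(inet->inet_saddr) >= ntohl(inet->inet_daddr) && outgoing;
}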
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 6e6a7111a034..4a87d9ef3084 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -34,6 +34,7 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <net/tcp.h> 35#include <net/tcp.h>
36 36
37#include "rds_single_path.h"
37#include "rds.h" 38#include "rds.h"
38#include "tcp.h" 39#include "tcp.h"
39 40
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 618be69c9c3b..710f1aae97ad 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -34,6 +34,7 @@
34#include <linux/in.h> 34#include <linux/in.h>
35#include <net/tcp.h> 35#include <net/tcp.h>
36 36
37#include "rds_single_path.h"
37#include "rds.h" 38#include "rds.h"
38#include "tcp.h" 39#include "tcp.h"
39 40
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 4a323045719b..9fbe95bb14a9 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -71,30 +71,30 @@
71struct workqueue_struct *rds_wq; 71struct workqueue_struct *rds_wq;
72EXPORT_SYMBOL_GPL(rds_wq); 72EXPORT_SYMBOL_GPL(rds_wq);
73 73
74void rds_connect_path_complete(struct rds_connection *conn, int curr) 74void rds_connect_path_complete(struct rds_conn_path *cp, int curr)
75{ 75{
76 if (!rds_conn_transition(conn, curr, RDS_CONN_UP)) { 76 if (!rds_conn_path_transition(cp, curr, RDS_CONN_UP)) {
77 printk(KERN_WARNING "%s: Cannot transition to state UP, " 77 printk(KERN_WARNING "%s: Cannot transition to state UP, "
78 "current state is %d\n", 78 "current state is %d\n",
79 __func__, 79 __func__,
80 atomic_read(&conn->c_state)); 80 atomic_read(&cp->cp_state));
81 rds_conn_drop(conn); 81 rds_conn_path_drop(cp);
82 return; 82 return;
83 } 83 }
84 84
85 rdsdebug("conn %p for %pI4 to %pI4 complete\n", 85 rdsdebug("conn %p for %pI4 to %pI4 complete\n",
86 conn, &conn->c_laddr, &conn->c_faddr); 86 cp->cp_conn, &cp->cp_conn->c_laddr, &cp->cp_conn->c_faddr);
87 87
88 conn->c_reconnect_jiffies = 0; 88 cp->cp_reconnect_jiffies = 0;
89 set_bit(0, &conn->c_map_queued); 89 set_bit(0, &cp->cp_conn->c_map_queued);
90 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 90 queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
91 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 91 queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
92} 92}
93EXPORT_SYMBOL_GPL(rds_connect_path_complete); 93EXPORT_SYMBOL_GPL(rds_connect_path_complete);
94 94
95void rds_connect_complete(struct rds_connection *conn) 95void rds_connect_complete(struct rds_connection *conn)
96{ 96{
97 rds_connect_path_complete(conn, RDS_CONN_CONNECTING); 97 rds_connect_path_complete(&conn->c_path[0], RDS_CONN_CONNECTING);
98} 98}
99EXPORT_SYMBOL_GPL(rds_connect_complete); 99EXPORT_SYMBOL_GPL(rds_connect_complete);
100 100
@@ -116,70 +116,79 @@ EXPORT_SYMBOL_GPL(rds_connect_complete);
116 * We should *always* start with a random backoff; otherwise a broken connection 116 * We should *always* start with a random backoff; otherwise a broken connection
117 * will always take several iterations to be re-established. 117 * will always take several iterations to be re-established.
118 */ 118 */
119void rds_queue_reconnect(struct rds_connection *conn) 119void rds_queue_reconnect(struct rds_conn_path *cp)
120{ 120{
121 unsigned long rand; 121 unsigned long rand;
122 struct rds_connection *conn = cp->cp_conn;
122 123
123 rdsdebug("conn %p for %pI4 to %pI4 reconnect jiffies %lu\n", 124 rdsdebug("conn %p for %pI4 to %pI4 reconnect jiffies %lu\n",
124 conn, &conn->c_laddr, &conn->c_faddr, 125 conn, &conn->c_laddr, &conn->c_faddr,
125 conn->c_reconnect_jiffies); 126 cp->cp_reconnect_jiffies);
126 127
127 set_bit(RDS_RECONNECT_PENDING, &conn->c_flags); 128 set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
128 if (conn->c_reconnect_jiffies == 0) { 129 if (cp->cp_reconnect_jiffies == 0) {
129 conn->c_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies; 130 cp->cp_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies;
130 queue_delayed_work(rds_wq, &conn->c_conn_w, 0); 131 queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
131 return; 132 return;
132 } 133 }
133 134
134 get_random_bytes(&rand, sizeof(rand)); 135 get_random_bytes(&rand, sizeof(rand));
135 rdsdebug("%lu delay %lu ceil conn %p for %pI4 -> %pI4\n", 136 rdsdebug("%lu delay %lu ceil conn %p for %pI4 -> %pI4\n",
136 rand % conn->c_reconnect_jiffies, conn->c_reconnect_jiffies, 137 rand % cp->cp_reconnect_jiffies, cp->cp_reconnect_jiffies,
137 conn, &conn->c_laddr, &conn->c_faddr); 138 conn, &conn->c_laddr, &conn->c_faddr);
138 queue_delayed_work(rds_wq, &conn->c_conn_w, 139 queue_delayed_work(rds_wq, &cp->cp_conn_w,
139 rand % conn->c_reconnect_jiffies); 140 rand % cp->cp_reconnect_jiffies);
140 141
141 conn->c_reconnect_jiffies = min(conn->c_reconnect_jiffies * 2, 142 cp->cp_reconnect_jiffies = min(cp->cp_reconnect_jiffies * 2,
142 rds_sysctl_reconnect_max_jiffies); 143 rds_sysctl_reconnect_max_jiffies);
143} 144}
144 145
145void rds_connect_worker(struct work_struct *work) 146void rds_connect_worker(struct work_struct *work)
146{ 147{
147 struct rds_connection *conn = container_of(work, struct rds_connection, c_conn_w.work); 148 struct rds_conn_path *cp = container_of(work,
149 struct rds_conn_path,
150 cp_conn_w.work);
151 struct rds_connection *conn = cp->cp_conn;
148 int ret; 152 int ret;
149 153
150 clear_bit(RDS_RECONNECT_PENDING, &conn->c_flags); 154 clear_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
151 if (rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) { 155 if (rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
152 ret = conn->c_trans->conn_connect(conn); 156 ret = conn->c_trans->conn_connect(conn);
153 rdsdebug("conn %p for %pI4 to %pI4 dispatched, ret %d\n", 157 rdsdebug("conn %p for %pI4 to %pI4 dispatched, ret %d\n",
154 conn, &conn->c_laddr, &conn->c_faddr, ret); 158 conn, &conn->c_laddr, &conn->c_faddr, ret);
155 159
156 if (ret) { 160 if (ret) {
157 if (rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_DOWN)) 161 if (rds_conn_path_transition(cp,
158 rds_queue_reconnect(conn); 162 RDS_CONN_CONNECTING,
163 RDS_CONN_DOWN))
164 rds_queue_reconnect(cp);
159 else 165 else
160 rds_conn_error(conn, "RDS: connect failed\n"); 166 rds_conn_path_error(cp,
167 "RDS: connect failed\n");
161 } 168 }
162 } 169 }
163} 170}
164 171
165void rds_send_worker(struct work_struct *work) 172void rds_send_worker(struct work_struct *work)
166{ 173{
167 struct rds_connection *conn = container_of(work, struct rds_connection, c_send_w.work); 174 struct rds_conn_path *cp = container_of(work,
175 struct rds_conn_path,
176 cp_send_w.work);
168 int ret; 177 int ret;
169 178
170 if (rds_conn_state(conn) == RDS_CONN_UP) { 179 if (rds_conn_path_state(cp) == RDS_CONN_UP) {
171 clear_bit(RDS_LL_SEND_FULL, &conn->c_flags); 180 clear_bit(RDS_LL_SEND_FULL, &cp->cp_flags);
172 ret = rds_send_xmit(conn); 181 ret = rds_send_xmit(cp);
173 cond_resched(); 182 cond_resched();
174 rdsdebug("conn %p ret %d\n", conn, ret); 183 rdsdebug("conn %p ret %d\n", cp->cp_conn, ret);
175 switch (ret) { 184 switch (ret) {
176 case -EAGAIN: 185 case -EAGAIN:
177 rds_stats_inc(s_send_immediate_retry); 186 rds_stats_inc(s_send_immediate_retry);
178 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 187 queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
179 break; 188 break;
180 case -ENOMEM: 189 case -ENOMEM:
181 rds_stats_inc(s_send_delayed_retry); 190 rds_stats_inc(s_send_delayed_retry);
182 queue_delayed_work(rds_wq, &conn->c_send_w, 2); 191 queue_delayed_work(rds_wq, &cp->cp_send_w, 2);
183 default: 192 default:
184 break; 193 break;
185 } 194 }
@@ -188,20 +197,22 @@ void rds_send_worker(struct work_struct *work)
188 197
189void rds_recv_worker(struct work_struct *work) 198void rds_recv_worker(struct work_struct *work)
190{ 199{
191 struct rds_connection *conn = container_of(work, struct rds_connection, c_recv_w.work); 200 struct rds_conn_path *cp = container_of(work,
201 struct rds_conn_path,
202 cp_recv_w.work);
192 int ret; 203 int ret;
193 204
194 if (rds_conn_state(conn) == RDS_CONN_UP) { 205 if (rds_conn_path_state(cp) == RDS_CONN_UP) {
195 ret = conn->c_trans->recv(conn); 206 ret = cp->cp_conn->c_trans->recv(cp->cp_conn);
196 rdsdebug("conn %p ret %d\n", conn, ret); 207 rdsdebug("conn %p ret %d\n", cp->cp_conn, ret);
197 switch (ret) { 208 switch (ret) {
198 case -EAGAIN: 209 case -EAGAIN:
199 rds_stats_inc(s_recv_immediate_retry); 210 rds_stats_inc(s_recv_immediate_retry);
200 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 211 queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
201 break; 212 break;
202 case -ENOMEM: 213 case -ENOMEM:
203 rds_stats_inc(s_recv_delayed_retry); 214 rds_stats_inc(s_recv_delayed_retry);
204 queue_delayed_work(rds_wq, &conn->c_recv_w, 2); 215 queue_delayed_work(rds_wq, &cp->cp_recv_w, 2);
205 default: 216 default:
206 break; 217 break;
207 } 218 }
@@ -210,9 +221,11 @@ void rds_recv_worker(struct work_struct *work)
210 221
211void rds_shutdown_worker(struct work_struct *work) 222void rds_shutdown_worker(struct work_struct *work)
212{ 223{
213 struct rds_connection *conn = container_of(work, struct rds_connection, c_down_w); 224 struct rds_conn_path *cp = container_of(work,
225 struct rds_conn_path,
226 cp_down_w);
214 227
215 rds_conn_shutdown(conn); 228 rds_conn_shutdown(cp);
216} 229}
217 230
218void rds_threads_exit(void) 231void rds_threads_exit(void)
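
All four workers now embed their work_struct in the path rather than the connection, so container_of() recovers a struct rds_conn_path directly; the connection, when needed, is one hop away through cp->cp_conn. The shape every converted worker shares (sketch, assuming the rds.h declarations above):

static void send_worker_sketch(struct work_struct *work)
{
        struct rds_conn_path *cp = container_of(work, struct rds_conn_path,
                                                cp_send_w.work);

        if (rds_conn_path_state(cp) == RDS_CONN_UP)
                rds_send_xmit(cp);      /* per-path, not per-connection */
}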
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile
index e05a06ef2254..6522e50fb750 100644
--- a/net/rxrpc/Makefile
+++ b/net/rxrpc/Makefile
@@ -4,25 +4,27 @@
4 4
5af-rxrpc-y := \ 5af-rxrpc-y := \
6 af_rxrpc.o \ 6 af_rxrpc.o \
7 ar-accept.o \ 7 call_accept.o \
8 ar-ack.o \ 8 call_event.o \
9 ar-call.o \ 9 call_object.o \
10 ar-connection.o \ 10 conn_client.o \
11 ar-connevent.o \ 11 conn_event.o \
12 ar-error.o \ 12 conn_object.o \
13 ar-input.o \ 13 input.o \
14 ar-key.o \
15 ar-local.o \
16 ar-output.o \
17 ar-peer.o \
18 ar-recvmsg.o \
19 ar-security.o \
20 ar-skbuff.o \
21 ar-transport.o \
22 insecure.o \ 14 insecure.o \
23 misc.o 15 key.o \
16 local_event.o \
17 local_object.o \
18 misc.o \
19 output.o \
20 peer_event.o \
21 peer_object.o \
22 recvmsg.o \
23 security.o \
24 skbuff.o \
25 utils.o
24 26
25af-rxrpc-$(CONFIG_PROC_FS) += ar-proc.o 27af-rxrpc-$(CONFIG_PROC_FS) += proc.o
26af-rxrpc-$(CONFIG_RXKAD) += rxkad.o 28af-rxrpc-$(CONFIG_RXKAD) += rxkad.o
27af-rxrpc-$(CONFIG_SYSCTL) += sysctl.o 29af-rxrpc-$(CONFIG_SYSCTL) += sysctl.o
28 30
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index e45e94ca030f..5d3e795a7c48 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -9,6 +9,8 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/module.h> 14#include <linux/module.h>
13#include <linux/kernel.h> 15#include <linux/kernel.h>
14#include <linux/net.h> 16#include <linux/net.h>
@@ -31,8 +33,6 @@ unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
31module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); 33module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
32MODULE_PARM_DESC(debug, "RxRPC debugging mask"); 34MODULE_PARM_DESC(debug, "RxRPC debugging mask");
33 35
34static int sysctl_rxrpc_max_qlen __read_mostly = 10;
35
36static struct proto rxrpc_proto; 36static struct proto rxrpc_proto;
37static const struct proto_ops rxrpc_rpc_ops; 37static const struct proto_ops rxrpc_rpc_ops;
38 38
@@ -97,11 +97,13 @@ static int rxrpc_validate_address(struct rxrpc_sock *rx,
97 srx->transport_len > len) 97 srx->transport_len > len)
98 return -EINVAL; 98 return -EINVAL;
99 99
100 if (srx->transport.family != rx->proto) 100 if (srx->transport.family != rx->family)
101 return -EAFNOSUPPORT; 101 return -EAFNOSUPPORT;
102 102
103 switch (srx->transport.family) { 103 switch (srx->transport.family) {
104 case AF_INET: 104 case AF_INET:
105 if (srx->transport_len < sizeof(struct sockaddr_in))
106 return -EINVAL;
105 _debug("INET: %x @ %pI4", 107 _debug("INET: %x @ %pI4",
106 ntohs(srx->transport.sin.sin_port), 108 ntohs(srx->transport.sin.sin_port),
107 &srx->transport.sin.sin_addr); 109 &srx->transport.sin.sin_addr);
@@ -137,33 +139,33 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
137 139
138 lock_sock(&rx->sk); 140 lock_sock(&rx->sk);
139 141
140 if (rx->sk.sk_state != RXRPC_UNCONNECTED) { 142 if (rx->sk.sk_state != RXRPC_UNBOUND) {
141 ret = -EINVAL; 143 ret = -EINVAL;
142 goto error_unlock; 144 goto error_unlock;
143 } 145 }
144 146
145 memcpy(&rx->srx, srx, sizeof(rx->srx)); 147 memcpy(&rx->srx, srx, sizeof(rx->srx));
146 148
147 /* Find or create a local transport endpoint to use */
148 local = rxrpc_lookup_local(&rx->srx); 149 local = rxrpc_lookup_local(&rx->srx);
149 if (IS_ERR(local)) { 150 if (IS_ERR(local)) {
150 ret = PTR_ERR(local); 151 ret = PTR_ERR(local);
151 goto error_unlock; 152 goto error_unlock;
152 } 153 }
153 154
154 rx->local = local; 155 if (rx->srx.srx_service) {
155 if (srx->srx_service) {
156 write_lock_bh(&local->services_lock); 156 write_lock_bh(&local->services_lock);
157 list_for_each_entry(prx, &local->services, listen_link) { 157 list_for_each_entry(prx, &local->services, listen_link) {
158 if (prx->srx.srx_service == srx->srx_service) 158 if (prx->srx.srx_service == rx->srx.srx_service)
159 goto service_in_use; 159 goto service_in_use;
160 } 160 }
161 161
162 rx->local = local;
162 list_add_tail(&rx->listen_link, &local->services); 163 list_add_tail(&rx->listen_link, &local->services);
163 write_unlock_bh(&local->services_lock); 164 write_unlock_bh(&local->services_lock);
164 165
165 rx->sk.sk_state = RXRPC_SERVER_BOUND; 166 rx->sk.sk_state = RXRPC_SERVER_BOUND;
166 } else { 167 } else {
168 rx->local = local;
167 rx->sk.sk_state = RXRPC_CLIENT_BOUND; 169 rx->sk.sk_state = RXRPC_CLIENT_BOUND;
168 } 170 }
169 171
@@ -172,8 +174,9 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
172 return 0; 174 return 0;
173 175
174service_in_use: 176service_in_use:
175 ret = -EADDRINUSE;
176 write_unlock_bh(&local->services_lock); 177 write_unlock_bh(&local->services_lock);
178 rxrpc_put_local(local);
179 ret = -EADDRINUSE;
177error_unlock: 180error_unlock:
178 release_sock(&rx->sk); 181 release_sock(&rx->sk);
179error: 182error:
@@ -188,6 +191,7 @@ static int rxrpc_listen(struct socket *sock, int backlog)
188{ 191{
189 struct sock *sk = sock->sk; 192 struct sock *sk = sock->sk;
190 struct rxrpc_sock *rx = rxrpc_sk(sk); 193 struct rxrpc_sock *rx = rxrpc_sk(sk);
194 unsigned int max;
191 int ret; 195 int ret;
192 196
193 _enter("%p,%d", rx, backlog); 197 _enter("%p,%d", rx, backlog);
@@ -195,20 +199,24 @@ static int rxrpc_listen(struct socket *sock, int backlog)
195 lock_sock(&rx->sk); 199 lock_sock(&rx->sk);
196 200
197 switch (rx->sk.sk_state) { 201 switch (rx->sk.sk_state) {
198 case RXRPC_UNCONNECTED: 202 case RXRPC_UNBOUND:
199 ret = -EADDRNOTAVAIL; 203 ret = -EADDRNOTAVAIL;
200 break; 204 break;
201 case RXRPC_CLIENT_BOUND:
202 case RXRPC_CLIENT_CONNECTED:
203 default:
204 ret = -EBUSY;
205 break;
206 case RXRPC_SERVER_BOUND: 205 case RXRPC_SERVER_BOUND:
207 ASSERT(rx->local != NULL); 206 ASSERT(rx->local != NULL);
207 max = READ_ONCE(rxrpc_max_backlog);
208 ret = -EINVAL;
209 if (backlog == INT_MAX)
210 backlog = max;
211 else if (backlog < 0 || backlog > max)
212 break;
208 sk->sk_max_ack_backlog = backlog; 213 sk->sk_max_ack_backlog = backlog;
209 rx->sk.sk_state = RXRPC_SERVER_LISTENING; 214 rx->sk.sk_state = RXRPC_SERVER_LISTENING;
210 ret = 0; 215 ret = 0;
211 break; 216 break;
217 default:
218 ret = -EBUSY;
219 break;
212 } 220 }
213 221
214 release_sock(&rx->sk); 222 release_sock(&rx->sk);
@@ -216,45 +224,10 @@ static int rxrpc_listen(struct socket *sock, int backlog)
216 return ret; 224 return ret;
217} 225}
218 226
219/*
220 * find a transport by address
221 */
222static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock,
223 struct sockaddr *addr,
224 int addr_len, int flags,
225 gfp_t gfp)
226{
227 struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
228 struct rxrpc_transport *trans;
229 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
230 struct rxrpc_peer *peer;
231
232 _enter("%p,%p,%d,%d", rx, addr, addr_len, flags);
233
234 ASSERT(rx->local != NULL);
235 ASSERT(rx->sk.sk_state > RXRPC_UNCONNECTED);
236
237 if (rx->srx.transport_type != srx->transport_type)
238 return ERR_PTR(-ESOCKTNOSUPPORT);
239 if (rx->srx.transport.family != srx->transport.family)
240 return ERR_PTR(-EAFNOSUPPORT);
241
242 /* find a remote transport endpoint from the local one */
243 peer = rxrpc_get_peer(srx, gfp);
244 if (IS_ERR(peer))
245 return ERR_CAST(peer);
246
247 /* find a transport */
248 trans = rxrpc_get_transport(rx->local, peer, gfp);
249 rxrpc_put_peer(peer);
250 _leave(" = %p", trans);
251 return trans;
252}
253
254/** 227/**
255 * rxrpc_kernel_begin_call - Allow a kernel service to begin a call 228 * rxrpc_kernel_begin_call - Allow a kernel service to begin a call
256 * @sock: The socket on which to make the call 229 * @sock: The socket on which to make the call
257 * @srx: The address of the peer to contact (defaults to socket setting) 230 * @srx: The address of the peer to contact
258 * @key: The security context to use (defaults to socket setting) 231 * @key: The security context to use (defaults to socket setting)
259 * @user_call_ID: The ID to use 232 * @user_call_ID: The ID to use
260 * 233 *
@@ -271,51 +244,32 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
271 unsigned long user_call_ID, 244 unsigned long user_call_ID,
272 gfp_t gfp) 245 gfp_t gfp)
273{ 246{
274 struct rxrpc_conn_bundle *bundle; 247 struct rxrpc_conn_parameters cp;
275 struct rxrpc_transport *trans;
276 struct rxrpc_call *call; 248 struct rxrpc_call *call;
277 struct rxrpc_sock *rx = rxrpc_sk(sock->sk); 249 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
250 int ret;
278 251
279 _enter(",,%x,%lx", key_serial(key), user_call_ID); 252 _enter(",,%x,%lx", key_serial(key), user_call_ID);
280 253
281 lock_sock(&rx->sk); 254 ret = rxrpc_validate_address(rx, srx, sizeof(*srx));
255 if (ret < 0)
256 return ERR_PTR(ret);
282 257
283 if (srx) { 258 lock_sock(&rx->sk);
284 trans = rxrpc_name_to_transport(sock, (struct sockaddr *) srx,
285 sizeof(*srx), 0, gfp);
286 if (IS_ERR(trans)) {
287 call = ERR_CAST(trans);
288 trans = NULL;
289 goto out_notrans;
290 }
291 } else {
292 trans = rx->trans;
293 if (!trans) {
294 call = ERR_PTR(-ENOTCONN);
295 goto out_notrans;
296 }
297 atomic_inc(&trans->usage);
298 }
299 259
300 if (!srx)
301 srx = &rx->srx;
302 if (!key) 260 if (!key)
303 key = rx->key; 261 key = rx->key;
304 if (key && !key->payload.data[0]) 262 if (key && !key->payload.data[0])
305 key = NULL; /* a no-security key */ 263 key = NULL; /* a no-security key */
306 264
307 bundle = rxrpc_get_bundle(rx, trans, key, srx->srx_service, gfp); 265 memset(&cp, 0, sizeof(cp));
308 if (IS_ERR(bundle)) { 266 cp.local = rx->local;
309 call = ERR_CAST(bundle); 267 cp.key = key;
310 goto out; 268 cp.security_level = 0;
311 } 269 cp.exclusive = false;
270 cp.service_id = srx->srx_service;
271 call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, gfp);
312 272
313 call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID, true,
314 gfp);
315 rxrpc_put_bundle(trans, bundle);
316out:
317 rxrpc_put_transport(trans);
318out_notrans:
319 release_sock(&rx->sk); 273 release_sock(&rx->sk);
320 _leave(" = %p", call); 274 _leave(" = %p", call);
321 return call; 275 return call;
@@ -367,11 +321,8 @@ EXPORT_SYMBOL(rxrpc_kernel_intercept_rx_messages);
367static int rxrpc_connect(struct socket *sock, struct sockaddr *addr, 321static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
368 int addr_len, int flags) 322 int addr_len, int flags)
369{ 323{
370 struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr; 324 struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)addr;
371 struct sock *sk = sock->sk; 325 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
372 struct rxrpc_transport *trans;
373 struct rxrpc_local *local;
374 struct rxrpc_sock *rx = rxrpc_sk(sk);
375 int ret; 326 int ret;
376 327
377 _enter("%p,%p,%d,%d", rx, addr, addr_len, flags); 328 _enter("%p,%p,%d,%d", rx, addr, addr_len, flags);
@@ -384,45 +335,28 @@ static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
384 335
385 lock_sock(&rx->sk); 336 lock_sock(&rx->sk);
386 337
338 ret = -EISCONN;
339 if (test_bit(RXRPC_SOCK_CONNECTED, &rx->flags))
340 goto error;
341
387 switch (rx->sk.sk_state) { 342 switch (rx->sk.sk_state) {
388 case RXRPC_UNCONNECTED: 343 case RXRPC_UNBOUND:
389 /* find a local transport endpoint if we don't have one already */ 344 rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
390 ASSERTCMP(rx->local, ==, NULL); 345 case RXRPC_CLIENT_UNBOUND:
391 rx->srx.srx_family = AF_RXRPC;
392 rx->srx.srx_service = 0;
393 rx->srx.transport_type = srx->transport_type;
394 rx->srx.transport_len = sizeof(sa_family_t);
395 rx->srx.transport.family = srx->transport.family;
396 local = rxrpc_lookup_local(&rx->srx);
397 if (IS_ERR(local)) {
398 release_sock(&rx->sk);
399 return PTR_ERR(local);
400 }
401 rx->local = local;
402 rx->sk.sk_state = RXRPC_CLIENT_BOUND;
403 case RXRPC_CLIENT_BOUND: 346 case RXRPC_CLIENT_BOUND:
404 break; 347 break;
405 case RXRPC_CLIENT_CONNECTED:
406 release_sock(&rx->sk);
407 return -EISCONN;
408 default: 348 default:
409 release_sock(&rx->sk); 349 ret = -EBUSY;
410	return -EBUSY; /* server sockets can't connect either */ 350 goto error;
411 }
412
413 trans = rxrpc_name_to_transport(sock, addr, addr_len, flags,
414 GFP_KERNEL);
415 if (IS_ERR(trans)) {
416 release_sock(&rx->sk);
417 _leave(" = %ld", PTR_ERR(trans));
418 return PTR_ERR(trans);
419 } 351 }
420 352
421 rx->trans = trans; 353 rx->connect_srx = *srx;
422 rx->sk.sk_state = RXRPC_CLIENT_CONNECTED; 354 set_bit(RXRPC_SOCK_CONNECTED, &rx->flags);
355 ret = 0;
423 356
357error:
424 release_sock(&rx->sk); 358 release_sock(&rx->sk);
425 return 0; 359 return ret;
426} 360}
427 361
428/* 362/*
@@ -436,7 +370,7 @@ static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
436 */ 370 */
437static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len) 371static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
438{ 372{
439 struct rxrpc_transport *trans; 373 struct rxrpc_local *local;
440 struct rxrpc_sock *rx = rxrpc_sk(sock->sk); 374 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
441 int ret; 375 int ret;
442 376
@@ -453,48 +387,38 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
453 } 387 }
454 } 388 }
455 389
456 trans = NULL;
457 lock_sock(&rx->sk); 390 lock_sock(&rx->sk);
458 391
459 if (m->msg_name) {
460 ret = -EISCONN;
461 trans = rxrpc_name_to_transport(sock, m->msg_name,
462 m->msg_namelen, 0, GFP_KERNEL);
463 if (IS_ERR(trans)) {
464 ret = PTR_ERR(trans);
465 trans = NULL;
466 goto out;
467 }
468 } else {
469 trans = rx->trans;
470 if (trans)
471 atomic_inc(&trans->usage);
472 }
473
474 switch (rx->sk.sk_state) { 392 switch (rx->sk.sk_state) {
475 case RXRPC_SERVER_LISTENING: 393 case RXRPC_UNBOUND:
476 if (!m->msg_name) { 394 local = rxrpc_lookup_local(&rx->srx);
477 ret = rxrpc_server_sendmsg(rx, m, len); 395 if (IS_ERR(local)) {
478 break; 396 ret = PTR_ERR(local);
397 goto error_unlock;
479 } 398 }
480 case RXRPC_SERVER_BOUND: 399
400 rx->local = local;
401 rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
402 /* Fall through */
403
404 case RXRPC_CLIENT_UNBOUND:
481 case RXRPC_CLIENT_BOUND: 405 case RXRPC_CLIENT_BOUND:
482 if (!m->msg_name) { 406 if (!m->msg_name &&
483 ret = -ENOTCONN; 407 test_bit(RXRPC_SOCK_CONNECTED, &rx->flags)) {
484 break; 408 m->msg_name = &rx->connect_srx;
409 m->msg_namelen = sizeof(rx->connect_srx);
485 } 410 }
486 case RXRPC_CLIENT_CONNECTED: 411 case RXRPC_SERVER_BOUND:
487 ret = rxrpc_client_sendmsg(rx, trans, m, len); 412 case RXRPC_SERVER_LISTENING:
413 ret = rxrpc_do_sendmsg(rx, m, len);
488 break; 414 break;
489 default: 415 default:
490 ret = -ENOTCONN; 416 ret = -EINVAL;
491 break; 417 break;
492 } 418 }
493 419
494out: 420error_unlock:
495 release_sock(&rx->sk); 421 release_sock(&rx->sk);
496 if (trans)
497 rxrpc_put_transport(trans);
498 _leave(" = %d", ret); 422 _leave(" = %d", ret);
499 return ret; 423 return ret;
500} 424}
@@ -521,9 +445,9 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
521 if (optlen != 0) 445 if (optlen != 0)
522 goto error; 446 goto error;
523 ret = -EISCONN; 447 ret = -EISCONN;
524 if (rx->sk.sk_state != RXRPC_UNCONNECTED) 448 if (rx->sk.sk_state != RXRPC_UNBOUND)
525 goto error; 449 goto error;
526 set_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags); 450 rx->exclusive = true;
527 goto success; 451 goto success;
528 452
529 case RXRPC_SECURITY_KEY: 453 case RXRPC_SECURITY_KEY:
@@ -531,7 +455,7 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
531 if (rx->key) 455 if (rx->key)
532 goto error; 456 goto error;
533 ret = -EISCONN; 457 ret = -EISCONN;
534 if (rx->sk.sk_state != RXRPC_UNCONNECTED) 458 if (rx->sk.sk_state != RXRPC_UNBOUND)
535 goto error; 459 goto error;
536 ret = rxrpc_request_key(rx, optval, optlen); 460 ret = rxrpc_request_key(rx, optval, optlen);
537 goto error; 461 goto error;
@@ -541,7 +465,7 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
541 if (rx->key) 465 if (rx->key)
542 goto error; 466 goto error;
543 ret = -EISCONN; 467 ret = -EISCONN;
544 if (rx->sk.sk_state != RXRPC_UNCONNECTED) 468 if (rx->sk.sk_state != RXRPC_UNBOUND)
545 goto error; 469 goto error;
546 ret = rxrpc_server_keyring(rx, optval, optlen); 470 ret = rxrpc_server_keyring(rx, optval, optlen);
547 goto error; 471 goto error;
@@ -551,7 +475,7 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
551 if (optlen != sizeof(unsigned int)) 475 if (optlen != sizeof(unsigned int))
552 goto error; 476 goto error;
553 ret = -EISCONN; 477 ret = -EISCONN;
554 if (rx->sk.sk_state != RXRPC_UNCONNECTED) 478 if (rx->sk.sk_state != RXRPC_UNBOUND)
555 goto error; 479 goto error;
556 ret = get_user(min_sec_level, 480 ret = get_user(min_sec_level,
557 (unsigned int __user *) optval); 481 (unsigned int __user *) optval);
@@ -630,13 +554,13 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
630 return -ENOMEM; 554 return -ENOMEM;
631 555
632 sock_init_data(sock, sk); 556 sock_init_data(sock, sk);
633 sk->sk_state = RXRPC_UNCONNECTED; 557 sk->sk_state = RXRPC_UNBOUND;
634 sk->sk_write_space = rxrpc_write_space; 558 sk->sk_write_space = rxrpc_write_space;
635 sk->sk_max_ack_backlog = sysctl_rxrpc_max_qlen; 559 sk->sk_max_ack_backlog = 0;
636 sk->sk_destruct = rxrpc_sock_destructor; 560 sk->sk_destruct = rxrpc_sock_destructor;
637 561
638 rx = rxrpc_sk(sk); 562 rx = rxrpc_sk(sk);
639 rx->proto = protocol; 563 rx->family = protocol;
640 rx->calls = RB_ROOT; 564 rx->calls = RB_ROOT;
641 565
642 INIT_LIST_HEAD(&rx->listen_link); 566 INIT_LIST_HEAD(&rx->listen_link);
@@ -698,24 +622,8 @@ static int rxrpc_release_sock(struct sock *sk)
698 flush_workqueue(rxrpc_workqueue); 622 flush_workqueue(rxrpc_workqueue);
699 rxrpc_purge_queue(&sk->sk_receive_queue); 623 rxrpc_purge_queue(&sk->sk_receive_queue);
700 624
701 if (rx->conn) { 625 rxrpc_put_local(rx->local);
702 rxrpc_put_connection(rx->conn); 626 rx->local = NULL;
703 rx->conn = NULL;
704 }
705
706 if (rx->bundle) {
707 rxrpc_put_bundle(rx->trans, rx->bundle);
708 rx->bundle = NULL;
709 }
710 if (rx->trans) {
711 rxrpc_put_transport(rx->trans);
712 rx->trans = NULL;
713 }
714 if (rx->local) {
715 rxrpc_put_local(rx->local);
716 rx->local = NULL;
717 }
718
719 key_put(rx->key); 627 key_put(rx->key);
720 rx->key = NULL; 628 rx->key = NULL;
721 key_put(rx->securities); 629 key_put(rx->securities);
@@ -796,49 +704,49 @@ static int __init af_rxrpc_init(void)
796 "rxrpc_call_jar", sizeof(struct rxrpc_call), 0, 704 "rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
797 SLAB_HWCACHE_ALIGN, NULL); 705 SLAB_HWCACHE_ALIGN, NULL);
798 if (!rxrpc_call_jar) { 706 if (!rxrpc_call_jar) {
799 printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n"); 707 pr_notice("Failed to allocate call jar\n");
800 goto error_call_jar; 708 goto error_call_jar;
801 } 709 }
802 710
803 rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1); 711 rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1);
804 if (!rxrpc_workqueue) { 712 if (!rxrpc_workqueue) {
805 printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n"); 713 pr_notice("Failed to allocate work queue\n");
806 goto error_work_queue; 714 goto error_work_queue;
807 } 715 }
808 716
809 ret = rxrpc_init_security(); 717 ret = rxrpc_init_security();
810 if (ret < 0) { 718 if (ret < 0) {
811 printk(KERN_CRIT "RxRPC: Cannot initialise security\n"); 719 pr_crit("Cannot initialise security\n");
812 goto error_security; 720 goto error_security;
813 } 721 }
814 722
815 ret = proto_register(&rxrpc_proto, 1); 723 ret = proto_register(&rxrpc_proto, 1);
816 if (ret < 0) { 724 if (ret < 0) {
817 printk(KERN_CRIT "RxRPC: Cannot register protocol\n"); 725 pr_crit("Cannot register protocol\n");
818 goto error_proto; 726 goto error_proto;
819 } 727 }
820 728
821 ret = sock_register(&rxrpc_family_ops); 729 ret = sock_register(&rxrpc_family_ops);
822 if (ret < 0) { 730 if (ret < 0) {
823 printk(KERN_CRIT "RxRPC: Cannot register socket family\n"); 731 pr_crit("Cannot register socket family\n");
824 goto error_sock; 732 goto error_sock;
825 } 733 }
826 734
827 ret = register_key_type(&key_type_rxrpc); 735 ret = register_key_type(&key_type_rxrpc);
828 if (ret < 0) { 736 if (ret < 0) {
829 printk(KERN_CRIT "RxRPC: Cannot register client key type\n"); 737 pr_crit("Cannot register client key type\n");
830 goto error_key_type; 738 goto error_key_type;
831 } 739 }
832 740
833 ret = register_key_type(&key_type_rxrpc_s); 741 ret = register_key_type(&key_type_rxrpc_s);
834 if (ret < 0) { 742 if (ret < 0) {
835 printk(KERN_CRIT "RxRPC: Cannot register server key type\n"); 743 pr_crit("Cannot register server key type\n");
836 goto error_key_type_s; 744 goto error_key_type_s;
837 } 745 }
838 746
839 ret = rxrpc_sysctl_init(); 747 ret = rxrpc_sysctl_init();
840 if (ret < 0) { 748 if (ret < 0) {
841 printk(KERN_CRIT "RxRPC: Cannot register sysctls\n"); 749 pr_crit("Cannot register sysctls\n");
842 goto error_sysctls; 750 goto error_sysctls;
843 } 751 }
844 752
@@ -880,14 +788,29 @@ static void __exit af_rxrpc_exit(void)
880 proto_unregister(&rxrpc_proto); 788 proto_unregister(&rxrpc_proto);
881 rxrpc_destroy_all_calls(); 789 rxrpc_destroy_all_calls();
882 rxrpc_destroy_all_connections(); 790 rxrpc_destroy_all_connections();
883 rxrpc_destroy_all_transports();
884 rxrpc_destroy_all_peers();
885 rxrpc_destroy_all_locals();
886 791
887 ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0); 792 ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0);
888 793
794 /* We need to flush the scheduled work twice because the local endpoint
795 * records involve a work item in their destruction as they can only be
796 * destroyed from process context. However, a connection may have a
797 * work item outstanding - and this will pin the local endpoint record
798 * until the connection goes away.
799 *
800 * Peers don't pin locals and calls pin sockets - which prevents the
801 * module from being unloaded - so we should only need two flushes.
802 */
889 _debug("flush scheduled work"); 803 _debug("flush scheduled work");
890 flush_workqueue(rxrpc_workqueue); 804 flush_workqueue(rxrpc_workqueue);
805 _debug("flush scheduled work 2");
806 flush_workqueue(rxrpc_workqueue);
807 _debug("synchronise RCU");
808 rcu_barrier();
809 _debug("destroy locals");
810 ASSERT(idr_is_empty(&rxrpc_client_conn_ids));
811 idr_destroy(&rxrpc_client_conn_ids);
812 rxrpc_destroy_all_locals();
813
891 remove_proc_entry("rxrpc_conns", init_net.proc_net); 814 remove_proc_entry("rxrpc_conns", init_net.proc_net);
892 remove_proc_entry("rxrpc_calls", init_net.proc_net); 815 remove_proc_entry("rxrpc_calls", init_net.proc_net);
893 destroy_workqueue(rxrpc_workqueue); 816 destroy_workqueue(rxrpc_workqueue);
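
Taken together, the connect() and sendmsg() changes above defer all transport setup: connect() merely records the peer in connect_srx and sets RXRPC_SOCK_CONNECTED, and a later sendmsg() with no msg_name substitutes that stored address. A hedged userspace sketch of the resulting calling pattern follows; it assumes a header exposing struct sockaddr_rxrpc, SOL_RXRPC and RXRPC_USER_CALL_ID (definitions userspace has historically copied from the rxrpc documentation), and the service ID and address are made up:

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
#include <linux/rxrpc.h>	/* assumed to carry the AF_RXRPC ABI */

int send_on_connected_socket(int fd, const void *buf, size_t len)
{
	struct sockaddr_rxrpc srx;
	unsigned long call_id = 1;
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	char control[CMSG_SPACE(sizeof(call_id))];
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&srx, 0, sizeof(srx));
	srx.srx_family		= AF_RXRPC;
	srx.srx_service		= 52;		/* hypothetical service */
	srx.transport_type	= SOCK_DGRAM;
	srx.transport_len	= sizeof(srx.transport.sin);
	srx.transport.sin.sin_family	= AF_INET;
	srx.transport.sin.sin_port	= htons(7000);
	inet_pton(AF_INET, "192.0.2.1", &srx.transport.sin.sin_addr);

	/* Only records connect_srx; no transport is set up yet */
	if (connect(fd, (struct sockaddr *)&srx, sizeof(srx)) < 0)
		return -1;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = control;
	msg.msg_controllen = sizeof(control);
	/* msg_name deliberately left NULL: the kernel falls back to
	 * the address stored by connect() */

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len = CMSG_LEN(sizeof(call_id));
	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));
	msg.msg_controllen = CMSG_SPACE(sizeof(call_id));

	return sendmsg(fd, &msg, 0);
}
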
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
deleted file mode 100644
index 97f4fae74bca..000000000000
--- a/net/rxrpc/ar-connection.c
+++ /dev/null
@@ -1,927 +0,0 @@
1/* RxRPC virtual connection handler
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/net.h>
15#include <linux/skbuff.h>
16#include <linux/crypto.h>
17#include <net/sock.h>
18#include <net/af_rxrpc.h>
19#include "ar-internal.h"
20
21/*
22 * Time till a connection expires after last use (in seconds).
23 */
24unsigned int rxrpc_connection_expiry = 10 * 60;
25
26static void rxrpc_connection_reaper(struct work_struct *work);
27
28LIST_HEAD(rxrpc_connections);
29DEFINE_RWLOCK(rxrpc_connection_lock);
30static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
31
32/*
33 * allocate a new client connection bundle
34 */
35static struct rxrpc_conn_bundle *rxrpc_alloc_bundle(gfp_t gfp)
36{
37 struct rxrpc_conn_bundle *bundle;
38
39 _enter("");
40
41 bundle = kzalloc(sizeof(struct rxrpc_conn_bundle), gfp);
42 if (bundle) {
43 INIT_LIST_HEAD(&bundle->unused_conns);
44 INIT_LIST_HEAD(&bundle->avail_conns);
45 INIT_LIST_HEAD(&bundle->busy_conns);
46 init_waitqueue_head(&bundle->chanwait);
47 atomic_set(&bundle->usage, 1);
48 }
49
50 _leave(" = %p", bundle);
51 return bundle;
52}
53
54/*
55 * compare bundle parameters with what we're looking for
56 * - return -ve, 0 or +ve
57 */
58static inline
59int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle,
60 struct key *key, u16 service_id)
61{
62 return (bundle->service_id - service_id) ?:
63 ((unsigned long)bundle->key - (unsigned long)key);
64}
65
66/*
67 * get bundle of client connections that a client socket can make use of
68 */
69struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx,
70 struct rxrpc_transport *trans,
71 struct key *key,
72 u16 service_id,
73 gfp_t gfp)
74{
75 struct rxrpc_conn_bundle *bundle, *candidate;
76 struct rb_node *p, *parent, **pp;
77
78 _enter("%p{%x},%x,%hx,",
79 rx, key_serial(key), trans->debug_id, service_id);
80
81 if (rx->trans == trans && rx->bundle) {
82 atomic_inc(&rx->bundle->usage);
83 return rx->bundle;
84 }
85
86	/* search the extant bundles first for one that matches the specified
87	 * key and service ID */
88 spin_lock(&trans->client_lock);
89
90 p = trans->bundles.rb_node;
91 while (p) {
92 bundle = rb_entry(p, struct rxrpc_conn_bundle, node);
93
94 if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
95 p = p->rb_left;
96 else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
97 p = p->rb_right;
98 else
99 goto found_extant_bundle;
100 }
101
102 spin_unlock(&trans->client_lock);
103
104 /* not yet present - create a candidate for a new record and then
105 * redo the search */
106 candidate = rxrpc_alloc_bundle(gfp);
107 if (!candidate) {
108 _leave(" = -ENOMEM");
109 return ERR_PTR(-ENOMEM);
110 }
111
112 candidate->key = key_get(key);
113 candidate->service_id = service_id;
114
115 spin_lock(&trans->client_lock);
116
117 pp = &trans->bundles.rb_node;
118 parent = NULL;
119 while (*pp) {
120 parent = *pp;
121 bundle = rb_entry(parent, struct rxrpc_conn_bundle, node);
122
123 if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
124 pp = &(*pp)->rb_left;
125 else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
126 pp = &(*pp)->rb_right;
127 else
128 goto found_extant_second;
129 }
130
131 /* second search also failed; add the new bundle */
132 bundle = candidate;
133 candidate = NULL;
134
135 rb_link_node(&bundle->node, parent, pp);
136 rb_insert_color(&bundle->node, &trans->bundles);
137 spin_unlock(&trans->client_lock);
138 _net("BUNDLE new on trans %d", trans->debug_id);
139 if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
140 atomic_inc(&bundle->usage);
141 rx->bundle = bundle;
142 }
143 _leave(" = %p [new]", bundle);
144 return bundle;
145
146 /* we found the bundle in the list immediately */
147found_extant_bundle:
148 atomic_inc(&bundle->usage);
149 spin_unlock(&trans->client_lock);
150 _net("BUNDLE old on trans %d", trans->debug_id);
151 if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
152 atomic_inc(&bundle->usage);
153 rx->bundle = bundle;
154 }
155 _leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage));
156 return bundle;
157
158 /* we found the bundle on the second time through the list */
159found_extant_second:
160 atomic_inc(&bundle->usage);
161 spin_unlock(&trans->client_lock);
162 kfree(candidate);
163 _net("BUNDLE old2 on trans %d", trans->debug_id);
164 if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
165 atomic_inc(&bundle->usage);
166 rx->bundle = bundle;
167 }
168 _leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage));
169 return bundle;
170}
171
172/*
173 * release a bundle
174 */
175void rxrpc_put_bundle(struct rxrpc_transport *trans,
176 struct rxrpc_conn_bundle *bundle)
177{
178	_enter("%p,%p{%d}", trans, bundle, atomic_read(&bundle->usage));
179
180 if (atomic_dec_and_lock(&bundle->usage, &trans->client_lock)) {
181 _debug("Destroy bundle");
182 rb_erase(&bundle->node, &trans->bundles);
183 spin_unlock(&trans->client_lock);
184 ASSERT(list_empty(&bundle->unused_conns));
185 ASSERT(list_empty(&bundle->avail_conns));
186 ASSERT(list_empty(&bundle->busy_conns));
187 ASSERTCMP(bundle->num_conns, ==, 0);
188 key_put(bundle->key);
189 kfree(bundle);
190 }
191
192 _leave("");
193}
194
195/*
196 * allocate a new connection
197 */
198static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
199{
200 struct rxrpc_connection *conn;
201
202 _enter("");
203
204 conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
205 if (conn) {
206 INIT_WORK(&conn->processor, &rxrpc_process_connection);
207 INIT_LIST_HEAD(&conn->bundle_link);
208 conn->calls = RB_ROOT;
209 skb_queue_head_init(&conn->rx_queue);
210 conn->security = &rxrpc_no_security;
211 rwlock_init(&conn->lock);
212 spin_lock_init(&conn->state_lock);
213 atomic_set(&conn->usage, 1);
214 conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
215 conn->avail_calls = RXRPC_MAXCALLS;
216 conn->size_align = 4;
217 conn->header_size = sizeof(struct rxrpc_wire_header);
218 }
219
220 _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
221 return conn;
222}
223
224/*
225 * assign a connection ID to a connection and add it to the transport's
226 * connection lookup tree
227 * - called with transport client lock held
228 */
229static void rxrpc_assign_connection_id(struct rxrpc_connection *conn)
230{
231 struct rxrpc_connection *xconn;
232 struct rb_node *parent, **p;
233 __be32 epoch;
234 u32 cid;
235
236 _enter("");
237
238 epoch = conn->epoch;
239
240 write_lock_bh(&conn->trans->conn_lock);
241
242 conn->trans->conn_idcounter += RXRPC_CID_INC;
243 if (conn->trans->conn_idcounter < RXRPC_CID_INC)
244 conn->trans->conn_idcounter = RXRPC_CID_INC;
245 cid = conn->trans->conn_idcounter;
246
247attempt_insertion:
248 parent = NULL;
249 p = &conn->trans->client_conns.rb_node;
250
251 while (*p) {
252 parent = *p;
253 xconn = rb_entry(parent, struct rxrpc_connection, node);
254
255 if (epoch < xconn->epoch)
256 p = &(*p)->rb_left;
257 else if (epoch > xconn->epoch)
258 p = &(*p)->rb_right;
259 else if (cid < xconn->cid)
260 p = &(*p)->rb_left;
261 else if (cid > xconn->cid)
262 p = &(*p)->rb_right;
263 else
264 goto id_exists;
265 }
266
267 /* we've found a suitable hole - arrange for this connection to occupy
268 * it */
269 rb_link_node(&conn->node, parent, p);
270 rb_insert_color(&conn->node, &conn->trans->client_conns);
271
272 conn->cid = cid;
273 write_unlock_bh(&conn->trans->conn_lock);
274 _leave(" [CID %x]", cid);
275 return;
276
277 /* we found a connection with the proposed ID - walk the tree from that
278 * point looking for the next unused ID */
279id_exists:
280 for (;;) {
281 cid += RXRPC_CID_INC;
282 if (cid < RXRPC_CID_INC) {
283 cid = RXRPC_CID_INC;
284 conn->trans->conn_idcounter = cid;
285 goto attempt_insertion;
286 }
287
288 parent = rb_next(parent);
289 if (!parent)
290 goto attempt_insertion;
291
292 xconn = rb_entry(parent, struct rxrpc_connection, node);
293 if (epoch < xconn->epoch ||
294 cid < xconn->cid)
295 goto attempt_insertion;
296 }
297}
298
299/*
300 * add a call to a connection's call-by-ID tree
301 */
302static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn,
303 struct rxrpc_call *call)
304{
305 struct rxrpc_call *xcall;
306 struct rb_node *parent, **p;
307 __be32 call_id;
308
309 write_lock_bh(&conn->lock);
310
311 call_id = call->call_id;
312 p = &conn->calls.rb_node;
313 parent = NULL;
314 while (*p) {
315 parent = *p;
316 xcall = rb_entry(parent, struct rxrpc_call, conn_node);
317
318 if (call_id < xcall->call_id)
319 p = &(*p)->rb_left;
320 else if (call_id > xcall->call_id)
321 p = &(*p)->rb_right;
322 else
323 BUG();
324 }
325
326 rb_link_node(&call->conn_node, parent, p);
327 rb_insert_color(&call->conn_node, &conn->calls);
328
329 write_unlock_bh(&conn->lock);
330}
331
332/*
333 * connect a call on an exclusive connection
334 */
335static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
336 struct rxrpc_transport *trans,
337 u16 service_id,
338 struct rxrpc_call *call,
339 gfp_t gfp)
340{
341 struct rxrpc_connection *conn;
342 int chan, ret;
343
344 _enter("");
345
346 conn = rx->conn;
347 if (!conn) {
348 /* not yet present - create a candidate for a new connection
349 * and then redo the check */
350 conn = rxrpc_alloc_connection(gfp);
351 if (!conn) {
352 _leave(" = -ENOMEM");
353 return -ENOMEM;
354 }
355
356 conn->trans = trans;
357 conn->bundle = NULL;
358 conn->service_id = service_id;
359 conn->epoch = rxrpc_epoch;
360 conn->in_clientflag = 0;
361 conn->out_clientflag = RXRPC_CLIENT_INITIATED;
362 conn->cid = 0;
363 conn->state = RXRPC_CONN_CLIENT;
364 conn->avail_calls = RXRPC_MAXCALLS - 1;
365 conn->security_level = rx->min_sec_level;
366 conn->key = key_get(rx->key);
367
368 ret = rxrpc_init_client_conn_security(conn);
369 if (ret < 0) {
370 key_put(conn->key);
371 kfree(conn);
372 _leave(" = %d [key]", ret);
373 return ret;
374 }
375
376 write_lock_bh(&rxrpc_connection_lock);
377 list_add_tail(&conn->link, &rxrpc_connections);
378 write_unlock_bh(&rxrpc_connection_lock);
379
380 spin_lock(&trans->client_lock);
381 atomic_inc(&trans->usage);
382
383 _net("CONNECT EXCL new %d on TRANS %d",
384 conn->debug_id, conn->trans->debug_id);
385
386 rxrpc_assign_connection_id(conn);
387 rx->conn = conn;
388 } else {
389 spin_lock(&trans->client_lock);
390 }
391
392 /* we've got a connection with a free channel and we can now attach the
393 * call to it
394 * - we're holding the transport's client lock
395 * - we're holding a reference on the connection
396 */
397 for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
398 if (!conn->channels[chan])
399 goto found_channel;
400 goto no_free_channels;
401
402found_channel:
403 atomic_inc(&conn->usage);
404 conn->channels[chan] = call;
405 call->conn = conn;
406 call->channel = chan;
407 call->cid = conn->cid | chan;
408 call->call_id = ++conn->call_counter;
409
410 _net("CONNECT client on conn %d chan %d as call %x",
411 conn->debug_id, chan, call->call_id);
412
413 spin_unlock(&trans->client_lock);
414
415 rxrpc_add_call_ID_to_conn(conn, call);
416 _leave(" = 0");
417 return 0;
418
419no_free_channels:
420 spin_unlock(&trans->client_lock);
421 _leave(" = -ENOSR");
422 return -ENOSR;
423}
424
425/*
426 * find a connection for a call
427 * - called in process context with IRQs enabled
428 */
429int rxrpc_connect_call(struct rxrpc_sock *rx,
430 struct rxrpc_transport *trans,
431 struct rxrpc_conn_bundle *bundle,
432 struct rxrpc_call *call,
433 gfp_t gfp)
434{
435 struct rxrpc_connection *conn, *candidate;
436 int chan, ret;
437
438 DECLARE_WAITQUEUE(myself, current);
439
440 _enter("%p,%lx,", rx, call->user_call_ID);
441
442 if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags))
443 return rxrpc_connect_exclusive(rx, trans, bundle->service_id,
444 call, gfp);
445
446 spin_lock(&trans->client_lock);
447 for (;;) {
448 /* see if the bundle has a call slot available */
449 if (!list_empty(&bundle->avail_conns)) {
450 _debug("avail");
451 conn = list_entry(bundle->avail_conns.next,
452 struct rxrpc_connection,
453 bundle_link);
454 if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
455 list_del_init(&conn->bundle_link);
456 bundle->num_conns--;
457 continue;
458 }
459 if (--conn->avail_calls == 0)
460 list_move(&conn->bundle_link,
461 &bundle->busy_conns);
462 ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
463 ASSERT(conn->channels[0] == NULL ||
464 conn->channels[1] == NULL ||
465 conn->channels[2] == NULL ||
466 conn->channels[3] == NULL);
467 atomic_inc(&conn->usage);
468 break;
469 }
470
471 if (!list_empty(&bundle->unused_conns)) {
472 _debug("unused");
473 conn = list_entry(bundle->unused_conns.next,
474 struct rxrpc_connection,
475 bundle_link);
476 if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
477 list_del_init(&conn->bundle_link);
478 bundle->num_conns--;
479 continue;
480 }
481 ASSERTCMP(conn->avail_calls, ==, RXRPC_MAXCALLS);
482 conn->avail_calls = RXRPC_MAXCALLS - 1;
483 ASSERT(conn->channels[0] == NULL &&
484 conn->channels[1] == NULL &&
485 conn->channels[2] == NULL &&
486 conn->channels[3] == NULL);
487 atomic_inc(&conn->usage);
488 list_move(&conn->bundle_link, &bundle->avail_conns);
489 break;
490 }
491
492 /* need to allocate a new connection */
493 _debug("get new conn [%d]", bundle->num_conns);
494
495 spin_unlock(&trans->client_lock);
496
497 if (signal_pending(current))
498 goto interrupted;
499
500 if (bundle->num_conns >= 20) {
501 _debug("too many conns");
502
503 if (!gfpflags_allow_blocking(gfp)) {
504 _leave(" = -EAGAIN");
505 return -EAGAIN;
506 }
507
508 add_wait_queue(&bundle->chanwait, &myself);
509 for (;;) {
510 set_current_state(TASK_INTERRUPTIBLE);
511 if (bundle->num_conns < 20 ||
512 !list_empty(&bundle->unused_conns) ||
513 !list_empty(&bundle->avail_conns))
514 break;
515 if (signal_pending(current))
516 goto interrupted_dequeue;
517 schedule();
518 }
519 remove_wait_queue(&bundle->chanwait, &myself);
520 __set_current_state(TASK_RUNNING);
521 spin_lock(&trans->client_lock);
522 continue;
523 }
524
525 /* not yet present - create a candidate for a new connection and then
526 * redo the check */
527 candidate = rxrpc_alloc_connection(gfp);
528 if (!candidate) {
529 _leave(" = -ENOMEM");
530 return -ENOMEM;
531 }
532
533 candidate->trans = trans;
534 candidate->bundle = bundle;
535 candidate->service_id = bundle->service_id;
536 candidate->epoch = rxrpc_epoch;
537 candidate->in_clientflag = 0;
538 candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
539 candidate->cid = 0;
540 candidate->state = RXRPC_CONN_CLIENT;
541 candidate->avail_calls = RXRPC_MAXCALLS;
542 candidate->security_level = rx->min_sec_level;
543 candidate->key = key_get(bundle->key);
544
545 ret = rxrpc_init_client_conn_security(candidate);
546 if (ret < 0) {
547 key_put(candidate->key);
548 kfree(candidate);
549 _leave(" = %d [key]", ret);
550 return ret;
551 }
552
553 write_lock_bh(&rxrpc_connection_lock);
554 list_add_tail(&candidate->link, &rxrpc_connections);
555 write_unlock_bh(&rxrpc_connection_lock);
556
557 spin_lock(&trans->client_lock);
558
559 list_add(&candidate->bundle_link, &bundle->unused_conns);
560 bundle->num_conns++;
561 atomic_inc(&bundle->usage);
562 atomic_inc(&trans->usage);
563
564 _net("CONNECT new %d on TRANS %d",
565 candidate->debug_id, candidate->trans->debug_id);
566
567 rxrpc_assign_connection_id(candidate);
568 candidate->security->prime_packet_security(candidate);
569
570 /* leave the candidate lurking in zombie mode attached to the
571 * bundle until we're ready for it */
572 rxrpc_put_connection(candidate);
573 candidate = NULL;
574 }
575
576 /* we've got a connection with a free channel and we can now attach the
577 * call to it
578 * - we're holding the transport's client lock
579 * - we're holding a reference on the connection
580 * - we're holding a reference on the bundle
581 */
582 for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
583 if (!conn->channels[chan])
584 goto found_channel;
585 ASSERT(conn->channels[0] == NULL ||
586 conn->channels[1] == NULL ||
587 conn->channels[2] == NULL ||
588 conn->channels[3] == NULL);
589 BUG();
590
591found_channel:
592 conn->channels[chan] = call;
593 call->conn = conn;
594 call->channel = chan;
595 call->cid = conn->cid | chan;
596 call->call_id = ++conn->call_counter;
597
598 _net("CONNECT client on conn %d chan %d as call %x",
599 conn->debug_id, chan, call->call_id);
600
601 ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
602 spin_unlock(&trans->client_lock);
603
604 rxrpc_add_call_ID_to_conn(conn, call);
605
606 _leave(" = 0");
607 return 0;
608
609interrupted_dequeue:
610 remove_wait_queue(&bundle->chanwait, &myself);
611 __set_current_state(TASK_RUNNING);
612interrupted:
613 _leave(" = -ERESTARTSYS");
614 return -ERESTARTSYS;
615}
616
617/*
618 * get a record of an incoming connection
619 */
620struct rxrpc_connection *
621rxrpc_incoming_connection(struct rxrpc_transport *trans,
622 struct rxrpc_host_header *hdr)
623{
624 struct rxrpc_connection *conn, *candidate = NULL;
625 struct rb_node *p, **pp;
626 const char *new = "old";
627 __be32 epoch;
628 u32 cid;
629
630 _enter("");
631
632 ASSERT(hdr->flags & RXRPC_CLIENT_INITIATED);
633
634 epoch = hdr->epoch;
635 cid = hdr->cid & RXRPC_CIDMASK;
636
637 /* search the connection list first */
638 read_lock_bh(&trans->conn_lock);
639
640 p = trans->server_conns.rb_node;
641 while (p) {
642 conn = rb_entry(p, struct rxrpc_connection, node);
643
644 _debug("maybe %x", conn->cid);
645
646 if (epoch < conn->epoch)
647 p = p->rb_left;
648 else if (epoch > conn->epoch)
649 p = p->rb_right;
650 else if (cid < conn->cid)
651 p = p->rb_left;
652 else if (cid > conn->cid)
653 p = p->rb_right;
654 else
655 goto found_extant_connection;
656 }
657 read_unlock_bh(&trans->conn_lock);
658
659 /* not yet present - create a candidate for a new record and then
660 * redo the search */
661 candidate = rxrpc_alloc_connection(GFP_NOIO);
662 if (!candidate) {
663 _leave(" = -ENOMEM");
664 return ERR_PTR(-ENOMEM);
665 }
666
667 candidate->trans = trans;
668 candidate->epoch = hdr->epoch;
669 candidate->cid = hdr->cid & RXRPC_CIDMASK;
670 candidate->service_id = hdr->serviceId;
671 candidate->security_ix = hdr->securityIndex;
672 candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
673 candidate->out_clientflag = 0;
674 candidate->state = RXRPC_CONN_SERVER;
675 if (candidate->service_id)
676 candidate->state = RXRPC_CONN_SERVER_UNSECURED;
677
678 write_lock_bh(&trans->conn_lock);
679
680 pp = &trans->server_conns.rb_node;
681 p = NULL;
682 while (*pp) {
683 p = *pp;
684 conn = rb_entry(p, struct rxrpc_connection, node);
685
686 if (epoch < conn->epoch)
687 pp = &(*pp)->rb_left;
688 else if (epoch > conn->epoch)
689 pp = &(*pp)->rb_right;
690 else if (cid < conn->cid)
691 pp = &(*pp)->rb_left;
692 else if (cid > conn->cid)
693 pp = &(*pp)->rb_right;
694 else
695 goto found_extant_second;
696 }
697
698 /* we can now add the new candidate to the list */
699 conn = candidate;
700 candidate = NULL;
701 rb_link_node(&conn->node, p, pp);
702 rb_insert_color(&conn->node, &trans->server_conns);
703 atomic_inc(&conn->trans->usage);
704
705 write_unlock_bh(&trans->conn_lock);
706
707 write_lock_bh(&rxrpc_connection_lock);
708 list_add_tail(&conn->link, &rxrpc_connections);
709 write_unlock_bh(&rxrpc_connection_lock);
710
711 new = "new";
712
713success:
714 _net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->cid);
715
716 _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
717 return conn;
718
719 /* we found the connection in the list immediately */
720found_extant_connection:
721 if (hdr->securityIndex != conn->security_ix) {
722 read_unlock_bh(&trans->conn_lock);
723 goto security_mismatch;
724 }
725 atomic_inc(&conn->usage);
726 read_unlock_bh(&trans->conn_lock);
727 goto success;
728
729 /* we found the connection on the second time through the list */
730found_extant_second:
731 if (hdr->securityIndex != conn->security_ix) {
732 write_unlock_bh(&trans->conn_lock);
733 goto security_mismatch;
734 }
735 atomic_inc(&conn->usage);
736 write_unlock_bh(&trans->conn_lock);
737 kfree(candidate);
738 goto success;
739
740security_mismatch:
741 kfree(candidate);
742 _leave(" = -EKEYREJECTED");
743 return ERR_PTR(-EKEYREJECTED);
744}
745
746/*
747 * find a connection based on transport and RxRPC connection ID for an incoming
748 * packet
749 */
750struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans,
751 struct rxrpc_host_header *hdr)
752{
753 struct rxrpc_connection *conn;
754 struct rb_node *p;
755 u32 epoch, cid;
756
757 _enter(",{%x,%x}", hdr->cid, hdr->flags);
758
759 read_lock_bh(&trans->conn_lock);
760
761 cid = hdr->cid & RXRPC_CIDMASK;
762 epoch = hdr->epoch;
763
764 if (hdr->flags & RXRPC_CLIENT_INITIATED)
765 p = trans->server_conns.rb_node;
766 else
767 p = trans->client_conns.rb_node;
768
769 while (p) {
770 conn = rb_entry(p, struct rxrpc_connection, node);
771
772 _debug("maybe %x", conn->cid);
773
774 if (epoch < conn->epoch)
775 p = p->rb_left;
776 else if (epoch > conn->epoch)
777 p = p->rb_right;
778 else if (cid < conn->cid)
779 p = p->rb_left;
780 else if (cid > conn->cid)
781 p = p->rb_right;
782 else
783 goto found;
784 }
785
786 read_unlock_bh(&trans->conn_lock);
787 _leave(" = NULL");
788 return NULL;
789
790found:
791 atomic_inc(&conn->usage);
792 read_unlock_bh(&trans->conn_lock);
793 _leave(" = %p", conn);
794 return conn;
795}
796
797/*
798 * release a virtual connection
799 */
800void rxrpc_put_connection(struct rxrpc_connection *conn)
801{
802 _enter("%p{u=%d,d=%d}",
803 conn, atomic_read(&conn->usage), conn->debug_id);
804
805 ASSERTCMP(atomic_read(&conn->usage), >, 0);
806
807 conn->put_time = ktime_get_seconds();
808 if (atomic_dec_and_test(&conn->usage)) {
809 _debug("zombie");
810 rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
811 }
812
813 _leave("");
814}
815
816/*
817 * destroy a virtual connection
818 */
819static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
820{
821 _enter("%p{%d}", conn, atomic_read(&conn->usage));
822
823 ASSERTCMP(atomic_read(&conn->usage), ==, 0);
824
825 _net("DESTROY CONN %d", conn->debug_id);
826
827 if (conn->bundle)
828 rxrpc_put_bundle(conn->trans, conn->bundle);
829
830 ASSERT(RB_EMPTY_ROOT(&conn->calls));
831 rxrpc_purge_queue(&conn->rx_queue);
832
833 conn->security->clear(conn);
834 key_put(conn->key);
835 key_put(conn->server_key);
836
837 rxrpc_put_transport(conn->trans);
838 kfree(conn);
839 _leave("");
840}
841
842/*
843 * reap dead connections
844 */
845static void rxrpc_connection_reaper(struct work_struct *work)
846{
847 struct rxrpc_connection *conn, *_p;
848 unsigned long now, earliest, reap_time;
849
850 LIST_HEAD(graveyard);
851
852 _enter("");
853
854 now = ktime_get_seconds();
855 earliest = ULONG_MAX;
856
857 write_lock_bh(&rxrpc_connection_lock);
858 list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
859 _debug("reap CONN %d { u=%d,t=%ld }",
860 conn->debug_id, atomic_read(&conn->usage),
861 (long) now - (long) conn->put_time);
862
863 if (likely(atomic_read(&conn->usage) > 0))
864 continue;
865
866 spin_lock(&conn->trans->client_lock);
867 write_lock(&conn->trans->conn_lock);
868 reap_time = conn->put_time + rxrpc_connection_expiry;
869
870 if (atomic_read(&conn->usage) > 0) {
871 ;
872 } else if (reap_time <= now) {
873 list_move_tail(&conn->link, &graveyard);
874 if (conn->out_clientflag)
875 rb_erase(&conn->node,
876 &conn->trans->client_conns);
877 else
878 rb_erase(&conn->node,
879 &conn->trans->server_conns);
880 if (conn->bundle) {
881 list_del_init(&conn->bundle_link);
882 conn->bundle->num_conns--;
883 }
884
885 } else if (reap_time < earliest) {
886 earliest = reap_time;
887 }
888
889 write_unlock(&conn->trans->conn_lock);
890 spin_unlock(&conn->trans->client_lock);
891 }
892 write_unlock_bh(&rxrpc_connection_lock);
893
894 if (earliest != ULONG_MAX) {
895 _debug("reschedule reaper %ld", (long) earliest - now);
896 ASSERTCMP(earliest, >, now);
897 rxrpc_queue_delayed_work(&rxrpc_connection_reap,
898 (earliest - now) * HZ);
899 }
900
901 /* then destroy all those pulled out */
902 while (!list_empty(&graveyard)) {
903 conn = list_entry(graveyard.next, struct rxrpc_connection,
904 link);
905 list_del_init(&conn->link);
906
907 ASSERTCMP(atomic_read(&conn->usage), ==, 0);
908 rxrpc_destroy_connection(conn);
909 }
910
911 _leave("");
912}
913
914/*
915 * preemptively destroy all the connection records rather than waiting for them
916 * to time out
917 */
918void __exit rxrpc_destroy_all_connections(void)
919{
920 _enter("");
921
922 rxrpc_connection_expiry = 0;
923 cancel_delayed_work(&rxrpc_connection_reap);
924 rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
925
926 _leave("");
927}
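
The file removed above leant on one idiom in several places (rxrpc_get_bundle(), rxrpc_incoming_connection()): search the rbtree under a lock, drop the lock to allocate a candidate, then search again before inserting, since a competitor may have added a matching node in the window. A condensed, self-contained sketch of that discipline, with illustrative names (struct item and lookup_or_create are not kernel symbols):

#include <linux/atomic.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct item {
	struct rb_node	node;
	atomic_t	usage;
	u32		key;
};

static struct item *lookup_or_create(struct rb_root *root, spinlock_t *lock,
				     u32 key, gfp_t gfp)
{
	struct item *it, *candidate;
	struct rb_node *parent, **pp;

	spin_lock(lock);
	pp = &root->rb_node;
	while (*pp) {			/* first pass: fast path */
		it = rb_entry(*pp, struct item, node);
		if (key < it->key)
			pp = &(*pp)->rb_left;
		else if (key > it->key)
			pp = &(*pp)->rb_right;
		else
			goto found;
	}
	spin_unlock(lock);

	candidate = kzalloc(sizeof(*candidate), gfp);	/* may sleep */
	if (!candidate)
		return NULL;
	candidate->key = key;
	atomic_set(&candidate->usage, 1);

	spin_lock(lock);
	parent = NULL;
	pp = &root->rb_node;
	while (*pp) {			/* second pass: check for a race */
		parent = *pp;
		it = rb_entry(parent, struct item, node);
		if (key < it->key)
			pp = &(*pp)->rb_left;
		else if (key > it->key)
			pp = &(*pp)->rb_right;
		else
			goto found_second;
	}
	rb_link_node(&candidate->node, parent, pp);
	rb_insert_color(&candidate->node, root);
	spin_unlock(lock);
	return candidate;

found_second:				/* lost the race; discard candidate */
	atomic_inc(&it->usage);
	spin_unlock(lock);
	kfree(candidate);
	return it;

found:
	atomic_inc(&it->usage);
	spin_unlock(lock);
	return it;
}
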
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c
deleted file mode 100644
index 3e82d6f0313c..000000000000
--- a/net/rxrpc/ar-error.c
+++ /dev/null
@@ -1,230 +0,0 @@
1/* Error message handling (ICMP)
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/net.h>
14#include <linux/skbuff.h>
15#include <linux/errqueue.h>
16#include <linux/udp.h>
17#include <linux/in.h>
18#include <linux/in6.h>
19#include <linux/icmp.h>
20#include <net/sock.h>
21#include <net/af_rxrpc.h>
22#include <net/ip.h>
23#include "ar-internal.h"
24
25/*
26 * handle an error received on the local endpoint
27 */
28void rxrpc_UDP_error_report(struct sock *sk)
29{
30 struct sock_exterr_skb *serr;
31 struct rxrpc_transport *trans;
32 struct rxrpc_local *local = sk->sk_user_data;
33 struct rxrpc_peer *peer;
34 struct sk_buff *skb;
35 __be32 addr;
36 __be16 port;
37
38 _enter("%p{%d}", sk, local->debug_id);
39
40 skb = sock_dequeue_err_skb(sk);
41 if (!skb) {
42 _leave("UDP socket errqueue empty");
43 return;
44 }
45 serr = SKB_EXT_ERR(skb);
46 if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
47 _leave("UDP empty message");
48 kfree_skb(skb);
49 return;
50 }
51
52 rxrpc_new_skb(skb);
53
54 addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset);
55 port = serr->port;
56
57 _net("Rx UDP Error from %pI4:%hu", &addr, ntohs(port));
58 _debug("Msg l:%d d:%d", skb->len, skb->data_len);
59
60 peer = rxrpc_find_peer(local, addr, port);
61 if (IS_ERR(peer)) {
62 rxrpc_free_skb(skb);
63 _leave(" [no peer]");
64 return;
65 }
66
67 trans = rxrpc_find_transport(local, peer);
68 if (!trans) {
69 rxrpc_put_peer(peer);
70 rxrpc_free_skb(skb);
71 _leave(" [no trans]");
72 return;
73 }
74
75 if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
76 serr->ee.ee_type == ICMP_DEST_UNREACH &&
77 serr->ee.ee_code == ICMP_FRAG_NEEDED
78 ) {
79 u32 mtu = serr->ee.ee_info;
80
81 _net("Rx Received ICMP Fragmentation Needed (%d)", mtu);
82
83 /* wind down the local interface MTU */
84 if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
85 peer->if_mtu = mtu;
86 _net("I/F MTU %u", mtu);
87 }
88
89 if (mtu == 0) {
90 /* they didn't give us a size, estimate one */
91 mtu = peer->if_mtu;
92 if (mtu > 1500) {
93 mtu >>= 1;
94 if (mtu < 1500)
95 mtu = 1500;
96 } else {
97 mtu -= 100;
98 if (mtu < peer->hdrsize)
99 mtu = peer->hdrsize + 4;
100 }
101 }
102
103 if (mtu < peer->mtu) {
104 spin_lock_bh(&peer->lock);
105 peer->mtu = mtu;
106 peer->maxdata = peer->mtu - peer->hdrsize;
107 spin_unlock_bh(&peer->lock);
108 _net("Net MTU %u (maxdata %u)",
109 peer->mtu, peer->maxdata);
110 }
111 }
112
113 rxrpc_put_peer(peer);
114
115 /* pass the transport ref to error_handler to release */
116 skb_queue_tail(&trans->error_queue, skb);
117 rxrpc_queue_work(&trans->error_handler);
118 _leave("");
119}
120
121/*
122 * deal with UDP error messages
123 */
124void rxrpc_UDP_error_handler(struct work_struct *work)
125{
126 struct sock_extended_err *ee;
127 struct sock_exterr_skb *serr;
128 struct rxrpc_transport *trans =
129 container_of(work, struct rxrpc_transport, error_handler);
130 struct sk_buff *skb;
131 int err;
132
133 _enter("");
134
135 skb = skb_dequeue(&trans->error_queue);
136 if (!skb)
137 return;
138
139 serr = SKB_EXT_ERR(skb);
140 ee = &serr->ee;
141
142 _net("Rx Error o=%d t=%d c=%d e=%d",
143 ee->ee_origin, ee->ee_type, ee->ee_code, ee->ee_errno);
144
145 err = ee->ee_errno;
146
147 switch (ee->ee_origin) {
148 case SO_EE_ORIGIN_ICMP:
149 switch (ee->ee_type) {
150 case ICMP_DEST_UNREACH:
151 switch (ee->ee_code) {
152 case ICMP_NET_UNREACH:
153 _net("Rx Received ICMP Network Unreachable");
154 break;
155 case ICMP_HOST_UNREACH:
156 _net("Rx Received ICMP Host Unreachable");
157 break;
158 case ICMP_PORT_UNREACH:
159 _net("Rx Received ICMP Port Unreachable");
160 break;
161 case ICMP_NET_UNKNOWN:
162 _net("Rx Received ICMP Unknown Network");
163 break;
164 case ICMP_HOST_UNKNOWN:
165 _net("Rx Received ICMP Unknown Host");
166 break;
167 default:
168 _net("Rx Received ICMP DestUnreach code=%u",
169 ee->ee_code);
170 break;
171 }
172 break;
173
174 case ICMP_TIME_EXCEEDED:
175 _net("Rx Received ICMP TTL Exceeded");
176 break;
177
178 default:
179 _proto("Rx Received ICMP error { type=%u code=%u }",
180 ee->ee_type, ee->ee_code);
181 break;
182 }
183 break;
184
185 case SO_EE_ORIGIN_LOCAL:
186 _proto("Rx Received local error { error=%d }",
187 ee->ee_errno);
188 break;
189
190 case SO_EE_ORIGIN_NONE:
191 case SO_EE_ORIGIN_ICMP6:
192 default:
193 _proto("Rx Received error report { orig=%u }",
194 ee->ee_origin);
195 break;
196 }
197
198 /* terminate all the affected calls if there's an unrecoverable
199 * error */
200 if (err) {
201 struct rxrpc_call *call, *_n;
202
203 _debug("ISSUE ERROR %d", err);
204
205 spin_lock_bh(&trans->peer->lock);
206 trans->peer->net_error = err;
207
208 list_for_each_entry_safe(call, _n, &trans->peer->error_targets,
209 error_link) {
210 write_lock(&call->state_lock);
211 if (call->state != RXRPC_CALL_COMPLETE &&
212 call->state < RXRPC_CALL_NETWORK_ERROR) {
213 call->state = RXRPC_CALL_NETWORK_ERROR;
214 set_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events);
215 rxrpc_queue_call(call);
216 }
217 write_unlock(&call->state_lock);
218 list_del_init(&call->error_link);
219 }
220
221 spin_unlock_bh(&trans->peer->lock);
222 }
223
224 if (!skb_queue_empty(&trans->error_queue))
225 rxrpc_queue_work(&trans->error_handler);
226
227 rxrpc_free_skb(skb);
228 rxrpc_put_transport(trans);
229 _leave("");
230}
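
The ICMP_FRAG_NEEDED branch in the deleted rxrpc_UDP_error_report() packs its MTU fallback into one block. Restated as a standalone helper (the function name is illustrative; the logic and constants are exactly those of the deleted code): when the router reports no MTU, halve a large interface MTU but never below the classic Ethernet 1500, and back a small one off by 100 bytes with a floor just above the protocol headers.

static unsigned int rxrpc_estimate_mtu(unsigned int reported,
				       unsigned int if_mtu,
				       unsigned int hdrsize)
{
	unsigned int mtu = reported;

	if (mtu == 0) {
		/* they didn't give us a size, estimate one */
		mtu = if_mtu;
		if (mtu > 1500) {
			mtu >>= 1;
			if (mtu < 1500)
				mtu = 1500;
		} else {
			mtu -= 100;
			if (mtu < hdrsize)
				mtu = hdrsize + 4;
		}
	}
	return mtu;
}
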
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index f0b807a163fa..702db72196fb 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -9,7 +9,9 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#include <linux/atomic.h>
12#include <net/sock.h> 13#include <net/sock.h>
14#include <net/af_rxrpc.h>
13#include <rxrpc/packet.h> 15#include <rxrpc/packet.h>
14 16
15#if 0 17#if 0
@@ -35,13 +37,15 @@ struct rxrpc_crypt {
35#define rxrpc_queue_call(CALL) rxrpc_queue_work(&(CALL)->processor) 37#define rxrpc_queue_call(CALL) rxrpc_queue_work(&(CALL)->processor)
36#define rxrpc_queue_conn(CONN) rxrpc_queue_work(&(CONN)->processor) 38#define rxrpc_queue_conn(CONN) rxrpc_queue_work(&(CONN)->processor)
37 39
40struct rxrpc_connection;
41
38/* 42/*
39 * sk_state for RxRPC sockets 43 * sk_state for RxRPC sockets
40 */ 44 */
41enum { 45enum {
42 RXRPC_UNCONNECTED = 0, 46 RXRPC_UNBOUND = 0,
47 RXRPC_CLIENT_UNBOUND, /* Unbound socket used as client */
43 RXRPC_CLIENT_BOUND, /* client local address bound */ 48 RXRPC_CLIENT_BOUND, /* client local address bound */
44 RXRPC_CLIENT_CONNECTED, /* client is connected */
45 RXRPC_SERVER_BOUND, /* server local address bound */ 49 RXRPC_SERVER_BOUND, /* server local address bound */
46 RXRPC_SERVER_LISTENING, /* server listening for connections */ 50 RXRPC_SERVER_LISTENING, /* server listening for connections */
47 RXRPC_CLOSE, /* socket is being closed */ 51 RXRPC_CLOSE, /* socket is being closed */
@@ -55,9 +59,6 @@ struct rxrpc_sock {
55 struct sock sk; 59 struct sock sk;
56 rxrpc_interceptor_t interceptor; /* kernel service Rx interceptor function */ 60 rxrpc_interceptor_t interceptor; /* kernel service Rx interceptor function */
57 struct rxrpc_local *local; /* local endpoint */ 61 struct rxrpc_local *local; /* local endpoint */
58 struct rxrpc_transport *trans; /* transport handler */
59 struct rxrpc_conn_bundle *bundle; /* virtual connection bundle */
60 struct rxrpc_connection *conn; /* exclusive virtual connection */
61 struct list_head listen_link; /* link in the local endpoint's listen list */ 62 struct list_head listen_link; /* link in the local endpoint's listen list */
62 struct list_head secureq; /* calls awaiting connection security clearance */ 63 struct list_head secureq; /* calls awaiting connection security clearance */
63 struct list_head acceptq; /* calls awaiting acceptance */ 64 struct list_head acceptq; /* calls awaiting acceptance */
@@ -65,12 +66,14 @@ struct rxrpc_sock {
65 struct key *securities; /* list of server security descriptors */ 66 struct key *securities; /* list of server security descriptors */
66 struct rb_root calls; /* outstanding calls on this socket */ 67 struct rb_root calls; /* outstanding calls on this socket */
67 unsigned long flags; 68 unsigned long flags;
68#define RXRPC_SOCK_EXCLUSIVE_CONN 1 /* exclusive connection for a client socket */ 69#define RXRPC_SOCK_CONNECTED 0 /* connect_srx is set */
69 rwlock_t call_lock; /* lock for calls */ 70 rwlock_t call_lock; /* lock for calls */
70 u32 min_sec_level; /* minimum security level */ 71 u32 min_sec_level; /* minimum security level */
71#define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT 72#define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT
73 bool exclusive; /* Exclusive connection for a client socket */
74 sa_family_t family; /* Protocol family created with */
72 struct sockaddr_rxrpc srx; /* local address */ 75 struct sockaddr_rxrpc srx; /* local address */
73 sa_family_t proto; /* protocol created with */ 76 struct sockaddr_rxrpc connect_srx; /* Default client address from connect() */
74}; 77};
75 78
76#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk) 79#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)
@@ -168,46 +171,52 @@ struct rxrpc_security {
168}; 171};
169 172
170/* 173/*
171 * RxRPC local transport endpoint definition 174 * RxRPC local transport endpoint description
172 * - matched by local port, address and protocol type 175 * - owned by a single AF_RXRPC socket
176 * - pointed to by transport socket struct sk_user_data
173 */ 177 */
174struct rxrpc_local { 178struct rxrpc_local {
179 struct rcu_head rcu;
180 atomic_t usage;
181 struct list_head link;
175 struct socket *socket; /* my UDP socket */ 182 struct socket *socket; /* my UDP socket */
176 struct work_struct destroyer; /* endpoint destroyer */ 183 struct work_struct processor;
177 struct work_struct acceptor; /* incoming call processor */
178 struct work_struct rejecter; /* packet reject writer */
179 struct work_struct event_processor; /* endpoint event processor */
180 struct list_head services; /* services listening on this endpoint */ 184 struct list_head services; /* services listening on this endpoint */
181 struct list_head link; /* link in endpoint list */
182 struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */ 185 struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */
183 struct sk_buff_head accept_queue; /* incoming calls awaiting acceptance */ 186 struct sk_buff_head accept_queue; /* incoming calls awaiting acceptance */
184 struct sk_buff_head reject_queue; /* packets awaiting rejection */ 187 struct sk_buff_head reject_queue; /* packets awaiting rejection */
185 struct sk_buff_head event_queue; /* endpoint event packets awaiting processing */ 188 struct sk_buff_head event_queue; /* endpoint event packets awaiting processing */
189 struct rb_root client_conns; /* Client connections by socket params */
190 spinlock_t client_conns_lock; /* Lock for client_conns */
186 spinlock_t lock; /* access lock */ 191 spinlock_t lock; /* access lock */
187 rwlock_t services_lock; /* lock for services list */ 192 rwlock_t services_lock; /* lock for services list */
188 atomic_t usage;
189 int debug_id; /* debug ID for printks */ 193 int debug_id; /* debug ID for printks */
190 volatile char error_rcvd; /* T if received ICMP error outstanding */ 194 bool dead;
191 struct sockaddr_rxrpc srx; /* local address */ 195 struct sockaddr_rxrpc srx; /* local address */
192}; 196};
193 197
194/* 198/*
195 * RxRPC remote transport endpoint definition 199 * RxRPC remote transport endpoint definition
196 * - matched by remote port, address and protocol type 200 * - matched by local endpoint, remote port, address and protocol type
197 * - holds the connection ID counter for connections between the two endpoints
198 */ 201 */
199struct rxrpc_peer { 202struct rxrpc_peer {
200 struct work_struct destroyer; /* peer destroyer */ 203 struct rcu_head rcu; /* This must be first */
201 struct list_head link; /* link in master peer list */
202 struct list_head error_targets; /* targets for net error distribution */
203 spinlock_t lock; /* access lock */
204 atomic_t usage; 204 atomic_t usage;
205 unsigned long hash_key;
206 struct hlist_node hash_link;
207 struct rxrpc_local *local;
208 struct hlist_head error_targets; /* targets for net error distribution */
209 struct work_struct error_distributor;
210 struct rb_root service_conns; /* Service connections */
211 rwlock_t conn_lock;
212 spinlock_t lock; /* access lock */
205 unsigned int if_mtu; /* interface MTU for this peer */ 213 unsigned int if_mtu; /* interface MTU for this peer */
206 unsigned int mtu; /* network MTU for this peer */ 214 unsigned int mtu; /* network MTU for this peer */
207 unsigned int maxdata; /* data size (MTU - hdrsize) */ 215 unsigned int maxdata; /* data size (MTU - hdrsize) */
208 unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */ 216 unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */
209 int debug_id; /* debug ID for printks */ 217 int debug_id; /* debug ID for printks */
210 int net_error; /* network error distributed */ 218 int error_report; /* Net (+0) or local (+1000000) to distribute */
219#define RXRPC_LOCAL_ERROR_OFFSET 1000000
211 struct sockaddr_rxrpc srx; /* remote address */ 220 struct sockaddr_rxrpc srx; /* remote address */
212 221
213 /* calculated RTT cache */ 222 /* calculated RTT cache */
@@ -219,68 +228,63 @@ struct rxrpc_peer {
219}; 228};
220 229
221/* 230/*
222 * RxRPC point-to-point transport / connection manager definition 231 * Keys for matching a connection.
223 * - handles a bundle of connections between two endpoints 232 */
224 * - matched by { local, peer } 233struct rxrpc_conn_proto {
225 */ 234 unsigned long hash_key;
226struct rxrpc_transport { 235 struct rxrpc_local *local; /* Representation of local endpoint */
227 struct rxrpc_local *local; /* local transport endpoint */ 236 u32 epoch; /* epoch of this connection */
228 struct rxrpc_peer *peer; /* remote transport endpoint */ 237 u32 cid; /* connection ID */
229 struct work_struct error_handler; /* network error distributor */ 238 u8 in_clientflag; /* RXRPC_CLIENT_INITIATED if we are server */
230 struct rb_root bundles; /* client connection bundles on this transport */ 239 u8 addr_size; /* Size of the address */
231 struct rb_root client_conns; /* client connections on this transport */ 240 sa_family_t family; /* Transport protocol */
232 struct rb_root server_conns; /* server connections on this transport */ 241 __be16 port; /* Peer UDP/UDP6 port */
233 struct list_head link; /* link in master session list */ 242 union { /* Peer address */
234 struct sk_buff_head error_queue; /* error packets awaiting processing */ 243 struct in_addr ipv4_addr;
235 unsigned long put_time; /* time at which to reap */ 244 struct in6_addr ipv6_addr;
236 spinlock_t client_lock; /* client connection allocation lock */ 245 u32 raw_addr[0];
237 rwlock_t conn_lock; /* lock for active/dead connections */ 246 };
238 atomic_t usage;
239 int debug_id; /* debug ID for printks */
240 unsigned int conn_idcounter; /* connection ID counter (client) */
241}; 247};
242 248
243/* 249struct rxrpc_conn_parameters {
244 * RxRPC client connection bundle 250 struct rxrpc_local *local; /* Representation of local endpoint */
245 * - matched by { transport, service_id, key } 251 struct rxrpc_peer *peer; /* Remote endpoint */
246 */ 252 struct key *key; /* Security details */
247struct rxrpc_conn_bundle { 253 bool exclusive; /* T if conn is exclusive */
248 struct rb_node node; /* node in transport's lookup tree */ 254 u16 service_id; /* Service ID for this connection */
249 struct list_head unused_conns; /* unused connections in this bundle */ 255 u32 security_level; /* Security level selected */
250 struct list_head avail_conns; /* available connections in this bundle */
251 struct list_head busy_conns; /* busy connections in this bundle */
 	struct key *key;		/* security for this bundle */
 	wait_queue_head_t chanwait;	/* wait for channel to become available */
 	atomic_t usage;
 	int debug_id;			/* debug ID for printks */
 	unsigned short num_conns;	/* number of connections in this bundle */
 	u16 service_id;			/* Service ID for this bundle */
 	u8 security_ix;			/* security type */
 };
 
 /*
  * RxRPC connection definition
- * - matched by { transport, service_id, conn_id, direction, key }
+ * - matched by { local, peer, epoch, conn_id, direction }
  * - each connection can only handle four simultaneous calls
  */
 struct rxrpc_connection {
-	struct rxrpc_transport *trans;	/* transport session */
-	struct rxrpc_conn_bundle *bundle; /* connection bundle (client) */
+	struct rxrpc_conn_proto proto;
+	struct rxrpc_conn_parameters params;
+
+	spinlock_t channel_lock;
+	struct rxrpc_call *channels[RXRPC_MAXCALLS]; /* active calls */
+	wait_queue_head_t channel_wq;	/* queue to wait for channel to become available */
+
 	struct work_struct processor;	/* connection event processor */
-	struct rb_node node;		/* node in transport's lookup tree */
+	union {
+		struct rb_node client_node;	/* Node in local->client_conns */
+		struct rb_node service_node;	/* Node in peer->service_conns */
+	};
 	struct list_head link;		/* link in master connection list */
-	struct list_head bundle_link;	/* link in bundle */
 	struct rb_root calls;		/* calls on this connection */
 	struct sk_buff_head rx_queue;	/* received conn-level packets */
-	struct rxrpc_call *channels[RXRPC_MAXCALLS]; /* channels (active calls) */
 	const struct rxrpc_security *security; /* applied security module */
-	struct key *key;		/* security for this connection (client) */
 	struct key *server_key;		/* security for this service */
 	struct crypto_skcipher *cipher;	/* encryption handle */
 	struct rxrpc_crypt csum_iv;	/* packet checksum base */
+	unsigned long flags;
+#define RXRPC_CONN_HAS_IDR	0	/* - Has a client conn ID assigned */
 	unsigned long events;
 #define RXRPC_CONN_CHALLENGE	0	/* send challenge packet */
-	unsigned long put_time;		/* time at which to reap */
+	unsigned long put_time;		/* Time at which last put */
 	rwlock_t lock;			/* access lock */
 	spinlock_t state_lock;		/* state-change lock */
 	atomic_t usage;
@@ -301,17 +305,12 @@ struct rxrpc_connection {
 	unsigned int call_counter;	/* call ID counter */
 	atomic_t serial;		/* packet serial number counter */
 	atomic_t hi_serial;		/* highest serial number received */
-	u8 avail_calls;			/* number of calls available */
+	atomic_t avail_chans;		/* number of channels available */
 	u8 size_align;			/* data size alignment (for security) */
 	u8 header_size;			/* rxrpc + security header size */
 	u8 security_size;		/* security header size */
-	u32 security_level;		/* security level negotiated */
 	u32 security_nonce;		/* response re-use preventer */
-	u32 epoch;			/* epoch of this connection */
-	u32 cid;			/* connection ID */
-	u16 service_id;			/* service ID for this connection */
 	u8 security_ix;			/* security type */
-	u8 in_clientflag;		/* RXRPC_CLIENT_INITIATED if we are server */
 	u8 out_clientflag;		/* RXRPC_CLIENT_INITIATED if we are client */
 };
 
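The reworked rxrpc_connection pairs the four-entry channels[] array with channel_lock, channel_wq and the avail_chans counter so a client call can sleep until one of the four channels frees up. A minimal userspace sketch of that claim/release pattern, with a pthread mutex and condition variable standing in for the kernel spinlock and wait queue (all names here are illustrative, not the kernel API):

    #include <pthread.h>
    #include <stdatomic.h>

    #define MAXCALLS 4   /* each connection can handle four simultaneous calls */

    struct conn {
            pthread_mutex_t channel_lock;
            pthread_cond_t  channel_wq;      /* analog of wait_queue_head_t channel_wq */
            void           *channels[MAXCALLS];
            atomic_int      avail_chans;     /* analog of atomic_t avail_chans */
    };

    /* Block until one of the four channels is free, then claim it. */
    static int claim_channel(struct conn *c, void *call)
    {
            pthread_mutex_lock(&c->channel_lock);
            while (atomic_load(&c->avail_chans) == 0)
                    pthread_cond_wait(&c->channel_wq, &c->channel_lock);
            atomic_fetch_sub(&c->avail_chans, 1);
            for (int i = 0; i < MAXCALLS; i++) {
                    if (!c->channels[i]) {
                            c->channels[i] = call;
                            pthread_mutex_unlock(&c->channel_lock);
                            return i;   /* the channel index forms part of the cid */
                    }
            }
            pthread_mutex_unlock(&c->channel_lock);
            return -1;                  /* unreachable: avail_chans said a slot was free */
    }

    static void release_channel(struct conn *c, int i)
    {
            pthread_mutex_lock(&c->channel_lock);
            c->channels[i] = NULL;
            atomic_fetch_add(&c->avail_chans, 1);
            pthread_cond_signal(&c->channel_wq);   /* wake one waiter on channel_wq */
            pthread_mutex_unlock(&c->channel_lock);
    }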
@@ -357,6 +356,8 @@ enum rxrpc_call_event {
  * The states that a call can be in.
  */
 enum rxrpc_call_state {
+	RXRPC_CALL_UNINITIALISED,
+	RXRPC_CALL_CLIENT_AWAIT_CONN,	/* - client waiting for connection to become available */
 	RXRPC_CALL_CLIENT_SEND_REQUEST,	/* - client sending request phase */
 	RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* - client awaiting reply */
 	RXRPC_CALL_CLIENT_RECV_REPLY,	/* - client receiving reply phase */
@@ -390,7 +391,7 @@ struct rxrpc_call {
 	struct work_struct destroyer;	/* call destroyer */
 	struct work_struct processor;	/* packet processor and ACK generator */
 	struct list_head link;		/* link in master call list */
-	struct list_head error_link;	/* link in error distribution list */
+	struct hlist_node error_link;	/* link in error distribution list */
 	struct list_head accept_link;	/* calls awaiting acceptance */
 	struct rb_node sock_node;	/* node in socket call tree */
 	struct rb_node conn_node;	/* node in connection call tree */
@@ -408,7 +409,8 @@ struct rxrpc_call {
 	atomic_t sequence;		/* Tx data packet sequence counter */
 	u32 local_abort;		/* local abort code */
 	u32 remote_abort;		/* remote abort code */
-	int error;			/* local error incurred */
+	int error_report;		/* Network error (ICMP/local transport) */
+	int error;			/* Local error incurred */
 	enum rxrpc_call_state state : 8; /* current state of call */
 	int debug_id;			/* debug ID for printks */
 	u8 channel;			/* connection channel occupied by this call */
@@ -444,7 +446,7 @@ struct rxrpc_call {
 	unsigned long hash_key;		/* Full hash key */
 	u8 in_clientflag;		/* Copy of conn->in_clientflag for hashing */
 	struct rxrpc_local *local;	/* Local endpoint. Used for hashing. */
-	sa_family_t proto;		/* Frame protocol */
+	sa_family_t family;		/* Frame protocol */
 	u32 call_id;			/* call ID on connection */
 	u32 cid;			/* connection ID plus channel index */
 	u32 epoch;			/* epoch of this connection */
@@ -478,21 +480,21 @@ extern atomic_t rxrpc_debug_id;
 extern struct workqueue_struct *rxrpc_workqueue;
 
 /*
- * ar-accept.c
+ * call_accept.c
  */
-void rxrpc_accept_incoming_calls(struct work_struct *);
+void rxrpc_accept_incoming_calls(struct rxrpc_local *);
 struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long);
 int rxrpc_reject_call(struct rxrpc_sock *);
 
 /*
- * ar-ack.c
+ * call_event.c
  */
 void __rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
 void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
 void rxrpc_process_call(struct work_struct *);
 
 /*
- * ar-call.c
+ * call_object.c
  */
 extern unsigned int rxrpc_max_call_lifetime;
 extern unsigned int rxrpc_dead_call_expiry;
@@ -502,70 +504,82 @@ extern rwlock_t rxrpc_call_lock;
 
 struct rxrpc_call *rxrpc_find_call_hash(struct rxrpc_host_header *,
 					void *, sa_family_t, const void *);
-struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *,
-					 struct rxrpc_transport *,
-					 struct rxrpc_conn_bundle *,
-					 unsigned long, int, gfp_t);
+struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
+struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
+					 struct rxrpc_conn_parameters *,
+					 struct sockaddr_rxrpc *,
+					 unsigned long, gfp_t);
 struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
 				       struct rxrpc_connection *,
-				       struct rxrpc_host_header *);
-struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *, unsigned long);
+				       struct sk_buff *);
 void rxrpc_release_call(struct rxrpc_call *);
 void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
 void __rxrpc_put_call(struct rxrpc_call *);
 void __exit rxrpc_destroy_all_calls(void);
 
 /*
- * ar-connection.c
+ * conn_client.c
  */
-extern unsigned int rxrpc_connection_expiry;
-extern struct list_head rxrpc_connections;
-extern rwlock_t rxrpc_connection_lock;
+extern struct idr rxrpc_client_conn_ids;
 
-struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *,
-					   struct rxrpc_transport *,
-					   struct key *, u16, gfp_t);
-void rxrpc_put_bundle(struct rxrpc_transport *, struct rxrpc_conn_bundle *);
-int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *,
-		       struct rxrpc_conn_bundle *, struct rxrpc_call *, gfp_t);
-void rxrpc_put_connection(struct rxrpc_connection *);
-void __exit rxrpc_destroy_all_connections(void);
-struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
-					       struct rxrpc_host_header *);
-extern struct rxrpc_connection *
-rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_host_header *);
+int rxrpc_get_client_connection_id(struct rxrpc_connection *, gfp_t);
+void rxrpc_put_client_connection_id(struct rxrpc_connection *);
 
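Client connection IDs now come out of a global IDR (rxrpc_client_conn_ids) rather than a per-transport counter. A hedged, kernel-flavoured sketch of how such an allocation typically looks with the IDR API (the locking, bounds and the cid shift are illustrative, not the actual conn_client.c code):

    /* Sketch only: allocate a unique client connection ID from the IDR. */
    static DEFINE_SPINLOCK(conn_id_lock);      /* hypothetical lock */

    static int alloc_conn_id(struct rxrpc_connection *conn, gfp_t gfp)
    {
            int id;

            idr_preload(gfp);          /* preallocate so the locked section can't sleep */
            spin_lock(&conn_id_lock);
            /* cyclic allocation avoids rapid reuse of recently freed IDs */
            id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
                                  1, 0x40000000, GFP_NOWAIT);
            spin_unlock(&conn_id_lock);
            idr_preload_end();
            if (id < 0)
                    return id;
            conn->proto.cid = id << RXRPC_CIDSHIFT; /* low bits carry the channel */
            return 0;
    }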
 /*
- * ar-connevent.c
+ * conn_event.c
  */
 void rxrpc_process_connection(struct work_struct *);
 void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *);
-void rxrpc_reject_packets(struct work_struct *);
+void rxrpc_reject_packets(struct rxrpc_local *);
 
 /*
- * ar-error.c
+ * conn_object.c
  */
-void rxrpc_UDP_error_report(struct sock *);
-void rxrpc_UDP_error_handler(struct work_struct *);
+extern unsigned int rxrpc_connection_expiry;
+extern struct list_head rxrpc_connections;
+extern rwlock_t rxrpc_connection_lock;
+
+int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *,
+		       struct sockaddr_rxrpc *, gfp_t);
+struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_local *,
+					       struct rxrpc_peer *,
+					       struct sk_buff *);
+void rxrpc_disconnect_call(struct rxrpc_call *);
+void rxrpc_put_connection(struct rxrpc_connection *);
+void __exit rxrpc_destroy_all_connections(void);
+struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_local *,
+						   struct rxrpc_peer *,
+						   struct sk_buff *);
+
+static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
+{
+	return conn->out_clientflag;
+}
+
+static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
+{
+	return conn->proto.in_clientflag;
+}
+
+static inline void rxrpc_get_connection(struct rxrpc_connection *conn)
+{
+	atomic_inc(&conn->usage);
+}
 
 /*
- * ar-input.c
+ * input.c
  */
 void rxrpc_data_ready(struct sock *);
 int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
 void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
 
 /*
- * ar-local.c
+ * insecure.c
  */
-extern rwlock_t rxrpc_local_lock;
-
-struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *);
-void rxrpc_put_local(struct rxrpc_local *);
-void __exit rxrpc_destroy_all_locals(void);
+extern const struct rxrpc_security rxrpc_no_security;
 
 /*
- * ar-key.c
+ * key.c
  */
 extern struct key_type key_type_rxrpc;
 extern struct key_type key_type_rxrpc_s;
@@ -576,80 +590,103 @@ int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
 			      u32);
 
 /*
- * ar-output.c
+ * local_event.c
  */
-extern unsigned int rxrpc_resend_timeout;
-
-int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
-int rxrpc_client_sendmsg(struct rxrpc_sock *, struct rxrpc_transport *,
-			 struct msghdr *, size_t);
-int rxrpc_server_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
+extern void rxrpc_process_local_events(struct rxrpc_local *);
 
 /*
- * ar-peer.c
+ * local_object.c
  */
-struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *, gfp_t);
-void rxrpc_put_peer(struct rxrpc_peer *);
-struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *, __be32, __be16);
-void __exit rxrpc_destroy_all_peers(void);
+struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *);
+void __rxrpc_put_local(struct rxrpc_local *);
+void __exit rxrpc_destroy_all_locals(void);
 
-/*
- * ar-proc.c
- */
-extern const char *const rxrpc_call_states[];
-extern const struct file_operations rxrpc_call_seq_fops;
-extern const struct file_operations rxrpc_connection_seq_fops;
+static inline void rxrpc_get_local(struct rxrpc_local *local)
+{
+	atomic_inc(&local->usage);
+}
+
+static inline
+struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
+{
+	return atomic_inc_not_zero(&local->usage) ? local : NULL;
+}
+
+static inline void rxrpc_put_local(struct rxrpc_local *local)
+{
+	if (local && atomic_dec_and_test(&local->usage))
+		__rxrpc_put_local(local);
+}
 
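rxrpc_get_local_maybe() is the interesting helper here: atomic_inc_not_zero() refuses to take a reference once the count has already hit zero, so a concurrent lookup can never resurrect an object that is on its way into __rxrpc_put_local(). A small runnable userspace sketch of the same idiom, with C11 atomics standing in for the kernel's atomic_t (names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct object {
            atomic_int usage;
    };

    /* Take a reference only if the object is still live (usage > 0). */
    static bool get_maybe(struct object *obj)
    {
            int old = atomic_load(&obj->usage);

            while (old > 0) {
                    /* compare-exchange loop == kernel atomic_inc_not_zero() */
                    if (atomic_compare_exchange_weak(&obj->usage, &old, old + 1))
                            return true;
            }
            return false;   /* count already hit zero: object is being freed */
    }

    static void put(struct object *obj)
    {
            if (atomic_fetch_sub(&obj->usage, 1) == 1)
                    free(obj);      /* we dropped the last reference */
    }

    int main(void)
    {
            struct object *obj = malloc(sizeof(*obj));

            atomic_init(&obj->usage, 1);
            if (get_maybe(obj))
                    put(obj);       /* paired with the successful get */
            put(obj);               /* drop the initial reference; frees obj */
            return 0;
    }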
 /*
- * ar-recvmsg.c
+ * misc.c
  */
-void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
-int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
+extern unsigned int rxrpc_max_backlog __read_mostly;
+extern unsigned int rxrpc_requested_ack_delay;
+extern unsigned int rxrpc_soft_ack_delay;
+extern unsigned int rxrpc_idle_ack_delay;
+extern unsigned int rxrpc_rx_window_size;
+extern unsigned int rxrpc_rx_mtu;
+extern unsigned int rxrpc_rx_jumbo_max;
+
+extern const char *const rxrpc_pkts[];
+extern const s8 rxrpc_ack_priority[];
+
+extern const char *rxrpc_acks(u8 reason);
 
 /*
- * ar-security.c
+ * output.c
  */
-int __init rxrpc_init_security(void);
-void rxrpc_exit_security(void);
-int rxrpc_init_client_conn_security(struct rxrpc_connection *);
-int rxrpc_init_server_conn_security(struct rxrpc_connection *);
+extern unsigned int rxrpc_resend_timeout;
+
+int rxrpc_send_data_packet(struct rxrpc_connection *, struct sk_buff *);
+int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
 
 /*
- * ar-skbuff.c
+ * peer_event.c
  */
-void rxrpc_packet_destructor(struct sk_buff *);
+void rxrpc_error_report(struct sock *);
+void rxrpc_peer_error_distributor(struct work_struct *);
 
 /*
- * ar-transport.c
+ * peer_object.c
  */
-extern unsigned int rxrpc_transport_expiry;
+struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
+					 const struct sockaddr_rxrpc *);
+struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
+				     struct sockaddr_rxrpc *, gfp_t);
+struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
+
+static inline void rxrpc_get_peer(struct rxrpc_peer *peer)
+{
+	atomic_inc(&peer->usage);
+}
+
+static inline
+struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
+{
+	return atomic_inc_not_zero(&peer->usage) ? peer : NULL;
+}
 
-struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *,
-					    struct rxrpc_peer *, gfp_t);
-void rxrpc_put_transport(struct rxrpc_transport *);
-void __exit rxrpc_destroy_all_transports(void);
-struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
-					     struct rxrpc_peer *);
+extern void __rxrpc_put_peer(struct rxrpc_peer *peer);
+static inline void rxrpc_put_peer(struct rxrpc_peer *peer)
+{
+	if (peer && atomic_dec_and_test(&peer->usage))
+		__rxrpc_put_peer(peer);
+}
 
 /*
- * insecure.c
+ * proc.c
  */
-extern const struct rxrpc_security rxrpc_no_security;
+extern const char *const rxrpc_call_states[];
+extern const struct file_operations rxrpc_call_seq_fops;
+extern const struct file_operations rxrpc_connection_seq_fops;
 
 /*
- * misc.c
+ * recvmsg.c
  */
-extern unsigned int rxrpc_requested_ack_delay;
-extern unsigned int rxrpc_soft_ack_delay;
-extern unsigned int rxrpc_idle_ack_delay;
-extern unsigned int rxrpc_rx_window_size;
-extern unsigned int rxrpc_rx_mtu;
-extern unsigned int rxrpc_rx_jumbo_max;
-
-extern const char *const rxrpc_pkts[];
-extern const s8 rxrpc_ack_priority[];
-
-extern const char *rxrpc_acks(u8 reason);
+void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
+int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
 
 /*
  * rxkad.c
@@ -659,6 +696,19 @@ extern const struct rxrpc_security rxkad;
 #endif
 
 /*
+ * security.c
+ */
+int __init rxrpc_init_security(void);
+void rxrpc_exit_security(void);
+int rxrpc_init_client_conn_security(struct rxrpc_connection *);
+int rxrpc_init_server_conn_security(struct rxrpc_connection *);
+
+/*
+ * skbuff.c
+ */
+void rxrpc_packet_destructor(struct sk_buff *);
+
+/*
  * sysctl.c
  */
 #ifdef CONFIG_SYSCTL
@@ -670,6 +720,12 @@ static inline void rxrpc_sysctl_exit(void) {}
 #endif
 
 /*
+ * utils.c
+ */
+void rxrpc_get_addr_from_skb(struct rxrpc_local *, const struct sk_buff *,
+			     struct sockaddr_rxrpc *);
+
+/*
  * debug tracing
  */
 extern unsigned int rxrpc_debug;
@@ -744,21 +800,18 @@ do { \
 #define ASSERT(X)						\
 do {								\
 	if (unlikely(!(X))) {					\
-		printk(KERN_ERR "\n");				\
-		printk(KERN_ERR "RxRPC: Assertion failed\n");	\
+		pr_err("Assertion failed\n");			\
 		BUG();						\
 	}							\
 } while (0)
 
 #define ASSERTCMP(X, OP, Y)						\
 do {									\
-	if (unlikely(!((X) OP (Y)))) {					\
-		printk(KERN_ERR "\n");					\
-		printk(KERN_ERR "RxRPC: Assertion failed\n");		\
-		printk(KERN_ERR "%lu " #OP " %lu is false\n",		\
-		       (unsigned long)(X), (unsigned long)(Y));		\
-		printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n",	\
-		       (unsigned long)(X), (unsigned long)(Y));		\
+	unsigned long _x = (unsigned long)(X);				\
+	unsigned long _y = (unsigned long)(Y);				\
+	if (unlikely(!(_x OP _y))) {					\
+		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
+		       _x, _x, #OP, _y, _y);				\
 		BUG();							\
 	}								\
 } while (0)
@@ -766,21 +819,18 @@ do { \
 #define ASSERTIF(C, X)						\
 do {								\
 	if (unlikely((C) && !(X))) {				\
-		printk(KERN_ERR "\n");				\
-		printk(KERN_ERR "RxRPC: Assertion failed\n");	\
+		pr_err("Assertion failed\n");			\
 		BUG();						\
 	}							\
 } while (0)
 
 #define ASSERTIFCMP(C, X, OP, Y)					\
 do {									\
-	if (unlikely((C) && !((X) OP (Y)))) {				\
-		printk(KERN_ERR "\n");					\
-		printk(KERN_ERR "RxRPC: Assertion failed\n");		\
-		printk(KERN_ERR "%lu " #OP " %lu is false\n",		\
-		       (unsigned long)(X), (unsigned long)(Y));		\
-		printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n",	\
-		       (unsigned long)(X), (unsigned long)(Y));		\
+	unsigned long _x = (unsigned long)(X);				\
+	unsigned long _y = (unsigned long)(Y);				\
+	if (unlikely((C) && !(_x OP _y))) {				\
+		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
+		       _x, _x, #OP, _y, _y);				\
 		BUG();							\
 	}								\
 } while (0)
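Beyond the switch to pr_err(), the rewritten ASSERTCMP/ASSERTIFCMP evaluate X and Y exactly once into local temporaries; the old form expanded each argument up to three times, so an argument with a side effect (or an expensive one, like atomic_read()) misbehaved on failure. A runnable userspace illustration of the hazard, with the assert macros simplified from the ones above:

    #include <stdio.h>

    #define BAD_ASSERTCMP(X, OP, Y)						\
    do {									\
            if (!((X) OP (Y)))						\
                    /* (X) and (Y) expand AGAIN here: re-evaluated */	\
                    printf("failed: %lu vs %lu\n",				\
                           (unsigned long)(X), (unsigned long)(Y));	\
    } while (0)

    #define GOOD_ASSERTCMP(X, OP, Y)					\
    do {									\
            unsigned long _x = (unsigned long)(X); /* evaluated once */	\
            unsigned long _y = (unsigned long)(Y);				\
            if (!(_x OP _y))						\
                    printf("failed: %lu vs %lu\n", _x, _y);			\
    } while (0)

    static int counter;
    static int next(void) { return ++counter; }   /* side effect */

    int main(void)
    {
            counter = 0;
            BAD_ASSERTCMP(next(), >, 100);  /* prints 2 vs 100: next() ran twice */
            counter = 0;
            GOOD_ASSERTCMP(next(), >, 100); /* prints 1 vs 100: next() ran once */
            return 0;
    }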
@@ -844,15 +894,6 @@ static inline void rxrpc_purge_queue(struct sk_buff_head *list)
 		rxrpc_free_skb(skb);
 }
 
-static inline void __rxrpc_get_local(struct rxrpc_local *local, const char *f)
-{
-	CHECK_SLAB_OKAY(&local->usage);
-	if (atomic_inc_return(&local->usage) == 1)
-		printk("resurrected (%s)\n", f);
-}
-
-#define rxrpc_get_local(LOCAL) __rxrpc_get_local((LOCAL), __func__)
-
 #define rxrpc_get_call(CALL) \
 do { \
 	CHECK_SLAB_OKAY(&(CALL)->usage); \
diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
deleted file mode 100644
index 4e1e6db0050b..000000000000
--- a/net/rxrpc/ar-local.c
+++ /dev/null
@@ -1,415 +0,0 @@
1/* AF_RXRPC local endpoint management
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/net.h>
14#include <linux/skbuff.h>
15#include <linux/slab.h>
16#include <linux/udp.h>
17#include <linux/ip.h>
18#include <net/sock.h>
19#include <net/af_rxrpc.h>
20#include <generated/utsrelease.h>
21#include "ar-internal.h"
22
23static const char rxrpc_version_string[65] = "linux-" UTS_RELEASE " AF_RXRPC";
24
25static LIST_HEAD(rxrpc_locals);
26DEFINE_RWLOCK(rxrpc_local_lock);
27static DECLARE_RWSEM(rxrpc_local_sem);
28static DECLARE_WAIT_QUEUE_HEAD(rxrpc_local_wq);
29
30static void rxrpc_destroy_local(struct work_struct *work);
31static void rxrpc_process_local_events(struct work_struct *work);
32
33/*
34 * allocate a new local
35 */
36static
37struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
38{
39 struct rxrpc_local *local;
40
41 local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
42 if (local) {
43 INIT_WORK(&local->destroyer, &rxrpc_destroy_local);
44 INIT_WORK(&local->acceptor, &rxrpc_accept_incoming_calls);
45 INIT_WORK(&local->rejecter, &rxrpc_reject_packets);
46 INIT_WORK(&local->event_processor, &rxrpc_process_local_events);
47 INIT_LIST_HEAD(&local->services);
48 INIT_LIST_HEAD(&local->link);
49 init_rwsem(&local->defrag_sem);
50 skb_queue_head_init(&local->accept_queue);
51 skb_queue_head_init(&local->reject_queue);
52 skb_queue_head_init(&local->event_queue);
53 spin_lock_init(&local->lock);
54 rwlock_init(&local->services_lock);
55 atomic_set(&local->usage, 1);
56 local->debug_id = atomic_inc_return(&rxrpc_debug_id);
57 memcpy(&local->srx, srx, sizeof(*srx));
58 }
59
60 _leave(" = %p", local);
61 return local;
62}
63
64/*
65 * create the local socket
66 * - must be called with rxrpc_local_sem writelocked
67 */
68static int rxrpc_create_local(struct rxrpc_local *local)
69{
70 struct sock *sock;
71 int ret, opt;
72
73 _enter("%p{%d}", local, local->srx.transport_type);
74
75 /* create a socket to represent the local endpoint */
76 ret = sock_create_kern(&init_net, PF_INET, local->srx.transport_type,
77 IPPROTO_UDP, &local->socket);
78 if (ret < 0) {
79 _leave(" = %d [socket]", ret);
80 return ret;
81 }
82
83 /* if a local address was supplied then bind it */
84 if (local->srx.transport_len > sizeof(sa_family_t)) {
85 _debug("bind");
86 ret = kernel_bind(local->socket,
87 (struct sockaddr *) &local->srx.transport,
88 local->srx.transport_len);
89 if (ret < 0) {
90 _debug("bind failed");
91 goto error;
92 }
93 }
94
95 /* we want to receive ICMP errors */
96 opt = 1;
97 ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
98 (char *) &opt, sizeof(opt));
99 if (ret < 0) {
100 _debug("setsockopt failed");
101 goto error;
102 }
103
104 /* we want to set the don't fragment bit */
105 opt = IP_PMTUDISC_DO;
106 ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
107 (char *) &opt, sizeof(opt));
108 if (ret < 0) {
109 _debug("setsockopt failed");
110 goto error;
111 }
112
113 write_lock_bh(&rxrpc_local_lock);
114 list_add(&local->link, &rxrpc_locals);
115 write_unlock_bh(&rxrpc_local_lock);
116
117 /* set the socket up */
118 sock = local->socket->sk;
119 sock->sk_user_data = local;
120 sock->sk_data_ready = rxrpc_data_ready;
121 sock->sk_error_report = rxrpc_UDP_error_report;
122 _leave(" = 0");
123 return 0;
124
125error:
126 kernel_sock_shutdown(local->socket, SHUT_RDWR);
127 local->socket->sk->sk_user_data = NULL;
128 sock_release(local->socket);
129 local->socket = NULL;
130
131 _leave(" = %d", ret);
132 return ret;
133}
134
135/*
136 * create a new local endpoint using the specified UDP address
137 */
138struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *srx)
139{
140 struct rxrpc_local *local;
141 int ret;
142
143 _enter("{%d,%u,%pI4+%hu}",
144 srx->transport_type,
145 srx->transport.family,
146 &srx->transport.sin.sin_addr,
147 ntohs(srx->transport.sin.sin_port));
148
149 down_write(&rxrpc_local_sem);
150
151	/* see if we have a suitable local endpoint already */
152 read_lock_bh(&rxrpc_local_lock);
153
154 list_for_each_entry(local, &rxrpc_locals, link) {
155 _debug("CMP {%d,%u,%pI4+%hu}",
156 local->srx.transport_type,
157 local->srx.transport.family,
158 &local->srx.transport.sin.sin_addr,
159 ntohs(local->srx.transport.sin.sin_port));
160
161 if (local->srx.transport_type != srx->transport_type ||
162 local->srx.transport.family != srx->transport.family)
163 continue;
164
165 switch (srx->transport.family) {
166 case AF_INET:
167 if (local->srx.transport.sin.sin_port !=
168 srx->transport.sin.sin_port)
169 continue;
170 if (memcmp(&local->srx.transport.sin.sin_addr,
171 &srx->transport.sin.sin_addr,
172 sizeof(struct in_addr)) != 0)
173 continue;
174 goto found_local;
175
176 default:
177 BUG();
178 }
179 }
180
181 read_unlock_bh(&rxrpc_local_lock);
182
183 /* we didn't find one, so we need to create one */
184 local = rxrpc_alloc_local(srx);
185 if (!local) {
186 up_write(&rxrpc_local_sem);
187 return ERR_PTR(-ENOMEM);
188 }
189
190 ret = rxrpc_create_local(local);
191 if (ret < 0) {
192 up_write(&rxrpc_local_sem);
193 kfree(local);
194 _leave(" = %d", ret);
195 return ERR_PTR(ret);
196 }
197
198 up_write(&rxrpc_local_sem);
199
200 _net("LOCAL new %d {%d,%u,%pI4+%hu}",
201 local->debug_id,
202 local->srx.transport_type,
203 local->srx.transport.family,
204 &local->srx.transport.sin.sin_addr,
205 ntohs(local->srx.transport.sin.sin_port));
206
207 _leave(" = %p [new]", local);
208 return local;
209
210found_local:
211 rxrpc_get_local(local);
212 read_unlock_bh(&rxrpc_local_lock);
213 up_write(&rxrpc_local_sem);
214
215 _net("LOCAL old %d {%d,%u,%pI4+%hu}",
216 local->debug_id,
217 local->srx.transport_type,
218 local->srx.transport.family,
219 &local->srx.transport.sin.sin_addr,
220 ntohs(local->srx.transport.sin.sin_port));
221
222 _leave(" = %p [reuse]", local);
223 return local;
224}
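rxrpc_lookup_local() serialises all endpoint creation behind rxrpc_local_sem: the list scan runs under the read lock, but the whole lookup-or-create sequence holds the semaphore for writing, so two callers that both miss can never both create the same endpoint. A compact userspace sketch of the same shape (a single mutex standing in for the semaphore; the structures are illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct endpoint { struct endpoint *next; char addr[32]; int usage; };

    static struct endpoint *endpoints;
    static pthread_mutex_t create_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Find an endpoint or create it; creation is fully serialised, so two
     * callers that both miss cannot both allocate the same address. */
    static struct endpoint *lookup_endpoint(const char *addr)
    {
            struct endpoint *ep;

            pthread_mutex_lock(&create_lock);
            for (ep = endpoints; ep; ep = ep->next) {
                    if (strcmp(ep->addr, addr) == 0) {
                            ep->usage++;            /* reuse: take a reference */
                            goto out;
                    }
            }
            ep = calloc(1, sizeof(*ep));
            if (ep) {
                    snprintf(ep->addr, sizeof(ep->addr), "%s", addr);
                    ep->usage = 1;
                    ep->next = endpoints;           /* publish under the lock */
                    endpoints = ep;
            }
    out:
            pthread_mutex_unlock(&create_lock);
            return ep;
    }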
225
226/*
227 * release a local endpoint
228 */
229void rxrpc_put_local(struct rxrpc_local *local)
230{
231 _enter("%p{u=%d}", local, atomic_read(&local->usage));
232
233 ASSERTCMP(atomic_read(&local->usage), >, 0);
234
235 /* to prevent a race, the decrement and the dequeue must be effectively
236 * atomic */
237 write_lock_bh(&rxrpc_local_lock);
238 if (unlikely(atomic_dec_and_test(&local->usage))) {
239 _debug("destroy local");
240 rxrpc_queue_work(&local->destroyer);
241 }
242 write_unlock_bh(&rxrpc_local_lock);
243 _leave("");
244}
245
246/*
247 * destroy a local endpoint
248 */
249static void rxrpc_destroy_local(struct work_struct *work)
250{
251 struct rxrpc_local *local =
252 container_of(work, struct rxrpc_local, destroyer);
253
254 _enter("%p{%d}", local, atomic_read(&local->usage));
255
256 down_write(&rxrpc_local_sem);
257
258 write_lock_bh(&rxrpc_local_lock);
259 if (atomic_read(&local->usage) > 0) {
260 write_unlock_bh(&rxrpc_local_lock);
261 up_read(&rxrpc_local_sem);
262 _leave(" [resurrected]");
263 return;
264 }
265
266 list_del(&local->link);
267 local->socket->sk->sk_user_data = NULL;
268 write_unlock_bh(&rxrpc_local_lock);
269
270 downgrade_write(&rxrpc_local_sem);
271
272 ASSERT(list_empty(&local->services));
273 ASSERT(!work_pending(&local->acceptor));
274 ASSERT(!work_pending(&local->rejecter));
275 ASSERT(!work_pending(&local->event_processor));
276
277 /* finish cleaning up the local descriptor */
278 rxrpc_purge_queue(&local->accept_queue);
279 rxrpc_purge_queue(&local->reject_queue);
280 rxrpc_purge_queue(&local->event_queue);
281 kernel_sock_shutdown(local->socket, SHUT_RDWR);
282 sock_release(local->socket);
283
284 up_read(&rxrpc_local_sem);
285
286 _net("DESTROY LOCAL %d", local->debug_id);
287 kfree(local);
288
289 if (list_empty(&rxrpc_locals))
290 wake_up_all(&rxrpc_local_wq);
291
292 _leave("");
293}
294
295/*
296 * preemptively destroy all local endpoints rather than waiting for
297 * them to be destroyed
298 */
299void __exit rxrpc_destroy_all_locals(void)
300{
301 DECLARE_WAITQUEUE(myself,current);
302
303 _enter("");
304
305 /* we simply have to wait for them to go away */
306 if (!list_empty(&rxrpc_locals)) {
307 set_current_state(TASK_UNINTERRUPTIBLE);
308 add_wait_queue(&rxrpc_local_wq, &myself);
309
310 while (!list_empty(&rxrpc_locals)) {
311 schedule();
312 set_current_state(TASK_UNINTERRUPTIBLE);
313 }
314
315 remove_wait_queue(&rxrpc_local_wq, &myself);
316 set_current_state(TASK_RUNNING);
317 }
318
319 _leave("");
320}
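The module-exit path above simply parks on a wait queue until the destroyers have emptied rxrpc_locals, with rxrpc_destroy_local() doing wake_up_all() once the list goes empty. A runnable userspace sketch of that drain-and-wait handshake (condition variable in place of the kernel wait queue; names illustrative):

    #include <pthread.h>

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  list_empty_cv = PTHREAD_COND_INITIALIZER;
    static int object_count;    /* stand-in for !list_empty(&rxrpc_locals) */

    /* Destructor side: the last object out signals the waiter. */
    static void object_destroyed(void)
    {
            pthread_mutex_lock(&list_lock);
            if (--object_count == 0)
                    pthread_cond_broadcast(&list_empty_cv); /* wake_up_all() */
            pthread_mutex_unlock(&list_lock);
    }

    /* Exit side: block until every object has been reaped. */
    static void destroy_all(void)
    {
            pthread_mutex_lock(&list_lock);
            while (object_count > 0)
                    pthread_cond_wait(&list_empty_cv, &list_lock);
            pthread_mutex_unlock(&list_lock);
    }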
321
322/*
323 * Reply to a version request
324 */
325static void rxrpc_send_version_request(struct rxrpc_local *local,
326 struct rxrpc_host_header *hdr,
327 struct sk_buff *skb)
328{
329 struct rxrpc_wire_header whdr;
330 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
331 struct sockaddr_in sin;
332 struct msghdr msg;
333 struct kvec iov[2];
334 size_t len;
335 int ret;
336
337 _enter("");
338
339 sin.sin_family = AF_INET;
340 sin.sin_port = udp_hdr(skb)->source;
341 sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
342
343 msg.msg_name = &sin;
344 msg.msg_namelen = sizeof(sin);
345 msg.msg_control = NULL;
346 msg.msg_controllen = 0;
347 msg.msg_flags = 0;
348
349 whdr.epoch = htonl(sp->hdr.epoch);
350 whdr.cid = htonl(sp->hdr.cid);
351 whdr.callNumber = htonl(sp->hdr.callNumber);
352 whdr.seq = 0;
353 whdr.serial = 0;
354 whdr.type = RXRPC_PACKET_TYPE_VERSION;
355 whdr.flags = RXRPC_LAST_PACKET | (~hdr->flags & RXRPC_CLIENT_INITIATED);
356 whdr.userStatus = 0;
357 whdr.securityIndex = 0;
358 whdr._rsvd = 0;
359 whdr.serviceId = htons(sp->hdr.serviceId);
360
361 iov[0].iov_base = &whdr;
362 iov[0].iov_len = sizeof(whdr);
363 iov[1].iov_base = (char *)rxrpc_version_string;
364 iov[1].iov_len = sizeof(rxrpc_version_string);
365
366 len = iov[0].iov_len + iov[1].iov_len;
367
368 _proto("Tx VERSION (reply)");
369
370 ret = kernel_sendmsg(local->socket, &msg, iov, 2, len);
371 if (ret < 0)
372 _debug("sendmsg failed: %d", ret);
373
374 _leave("");
375}
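The VERSION reply above sends the wire header and the version string as one datagram without copying them into a single buffer: two kvec entries, one kernel_sendmsg(). The same gather-write technique in runnable userspace C (the header layout is a simplified stand-in, not the real struct rxrpc_wire_header):

    #include <stdint.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <netinet/in.h>

    /* Simplified stand-in for the rxrpc wire header; fields illustrative. */
    struct wire_header {
            uint32_t epoch, cid, call_number, seq, serial;
            uint8_t  type, flags, user_status, security_index;
            uint16_t _rsvd, service_id;
    } __attribute__((packed));

    static const char version_string[] = "hypothetical version reply";

    /* Header + payload go out as a single datagram via two iovec entries. */
    static ssize_t send_version_reply(int fd, const struct sockaddr_in *dst,
                                      const struct wire_header *whdr)
    {
            struct iovec iov[2] = {
                    { .iov_base = (void *)whdr,
                      .iov_len  = sizeof(*whdr) },
                    { .iov_base = (void *)version_string,
                      .iov_len  = sizeof(version_string) },
            };
            struct msghdr msg = {
                    .msg_name    = (void *)dst,
                    .msg_namelen = sizeof(*dst),
                    .msg_iov     = iov,
                    .msg_iovlen  = 2,
            };

            return sendmsg(fd, &msg, 0);
    }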
376
377/*
378 * Process event packets targeted at a local endpoint.
379 */
380static void rxrpc_process_local_events(struct work_struct *work)
381{
382 struct rxrpc_local *local = container_of(work, struct rxrpc_local, event_processor);
383 struct sk_buff *skb;
384 char v;
385
386 _enter("");
387
388 atomic_inc(&local->usage);
389
390 while ((skb = skb_dequeue(&local->event_queue))) {
391 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
392
393 _debug("{%d},{%u}", local->debug_id, sp->hdr.type);
394
395 switch (sp->hdr.type) {
396 case RXRPC_PACKET_TYPE_VERSION:
397 if (skb_copy_bits(skb, 0, &v, 1) < 0)
398 return;
399 _proto("Rx VERSION { %02x }", v);
400 if (v == 0)
401 rxrpc_send_version_request(local, &sp->hdr, skb);
402 break;
403
404 default:
405 /* Just ignore anything we don't understand */
406 break;
407 }
408
409 rxrpc_put_local(local);
410 rxrpc_free_skb(skb);
411 }
412
413 rxrpc_put_local(local);
414 _leave("");
415}
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
deleted file mode 100644
index dc089b1976aa..000000000000
--- a/net/rxrpc/ar-peer.c
+++ /dev/null
@@ -1,303 +0,0 @@
1/* RxRPC remote transport endpoint management
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/net.h>
14#include <linux/skbuff.h>
15#include <linux/udp.h>
16#include <linux/in.h>
17#include <linux/in6.h>
18#include <linux/icmp.h>
19#include <linux/slab.h>
20#include <net/sock.h>
21#include <net/af_rxrpc.h>
22#include <net/ip.h>
23#include <net/route.h>
24#include "ar-internal.h"
25
26static LIST_HEAD(rxrpc_peers);
27static DEFINE_RWLOCK(rxrpc_peer_lock);
28static DECLARE_WAIT_QUEUE_HEAD(rxrpc_peer_wq);
29
30static void rxrpc_destroy_peer(struct work_struct *work);
31
32/*
33 * assess the MTU size for the network interface through which this peer is
34 * reached
35 */
36static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
37{
38 struct rtable *rt;
39 struct flowi4 fl4;
40
41 peer->if_mtu = 1500;
42
43 rt = ip_route_output_ports(&init_net, &fl4, NULL,
44 peer->srx.transport.sin.sin_addr.s_addr, 0,
45 htons(7000), htons(7001),
46 IPPROTO_UDP, 0, 0);
47 if (IS_ERR(rt)) {
48 _leave(" [route err %ld]", PTR_ERR(rt));
49 return;
50 }
51
52 peer->if_mtu = dst_mtu(&rt->dst);
53 dst_release(&rt->dst);
54
55 _leave(" [if_mtu %u]", peer->if_mtu);
56}
57
58/*
59 * allocate a new peer
60 */
61static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
62 gfp_t gfp)
63{
64 struct rxrpc_peer *peer;
65
66 _enter("");
67
68 peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
69 if (peer) {
70 INIT_WORK(&peer->destroyer, &rxrpc_destroy_peer);
71 INIT_LIST_HEAD(&peer->link);
72 INIT_LIST_HEAD(&peer->error_targets);
73 spin_lock_init(&peer->lock);
74 atomic_set(&peer->usage, 1);
75 peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
76 memcpy(&peer->srx, srx, sizeof(*srx));
77
78 rxrpc_assess_MTU_size(peer);
79 peer->mtu = peer->if_mtu;
80
81 if (srx->transport.family == AF_INET) {
82 peer->hdrsize = sizeof(struct iphdr);
83 switch (srx->transport_type) {
84 case SOCK_DGRAM:
85 peer->hdrsize += sizeof(struct udphdr);
86 break;
87 default:
88 BUG();
89 break;
90 }
91 } else {
92 BUG();
93 }
94
95 peer->hdrsize += sizeof(struct rxrpc_wire_header);
96 peer->maxdata = peer->mtu - peer->hdrsize;
97 }
98
99 _leave(" = %p", peer);
100 return peer;
101}
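peer->maxdata is simply the interface MTU minus the accumulated header overhead: for IPv4 UDP that is 20 bytes of IP header, 8 of UDP, plus the rxrpc wire header. A worked check in C (the 28-byte wire-header size here is an assumption for illustration; the real code uses sizeof(struct rxrpc_wire_header)):

    #include <stdio.h>

    int main(void)
    {
            unsigned int if_mtu  = 1500; /* default assumed in rxrpc_assess_MTU_size() */
            unsigned int iphdr   = 20;   /* sizeof(struct iphdr), no options */
            unsigned int udphdr  = 8;    /* sizeof(struct udphdr) */
            unsigned int rxhdr   = 28;   /* sizeof(struct rxrpc_wire_header), assumed */
            unsigned int hdrsize = iphdr + udphdr + rxhdr;

            /* maxdata = usable payload per packet after all headers */
            printf("hdrsize=%u maxdata=%u\n", hdrsize, if_mtu - hdrsize);
            /* prints: hdrsize=56 maxdata=1444 */
            return 0;
    }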
102
103/*
104 * obtain a remote transport endpoint for the specified address
105 */
106struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *srx, gfp_t gfp)
107{
108 struct rxrpc_peer *peer, *candidate;
109 const char *new = "old";
110 int usage;
111
112 _enter("{%d,%d,%pI4+%hu}",
113 srx->transport_type,
114 srx->transport_len,
115 &srx->transport.sin.sin_addr,
116 ntohs(srx->transport.sin.sin_port));
117
118 /* search the peer list first */
119 read_lock_bh(&rxrpc_peer_lock);
120 list_for_each_entry(peer, &rxrpc_peers, link) {
121 _debug("check PEER %d { u=%d t=%d l=%d }",
122 peer->debug_id,
123 atomic_read(&peer->usage),
124 peer->srx.transport_type,
125 peer->srx.transport_len);
126
127 if (atomic_read(&peer->usage) > 0 &&
128 peer->srx.transport_type == srx->transport_type &&
129 peer->srx.transport_len == srx->transport_len &&
130 memcmp(&peer->srx.transport,
131 &srx->transport,
132 srx->transport_len) == 0)
133 goto found_extant_peer;
134 }
135 read_unlock_bh(&rxrpc_peer_lock);
136
137 /* not yet present - create a candidate for a new record and then
138 * redo the search */
139 candidate = rxrpc_alloc_peer(srx, gfp);
140 if (!candidate) {
141 _leave(" = -ENOMEM");
142 return ERR_PTR(-ENOMEM);
143 }
144
145 write_lock_bh(&rxrpc_peer_lock);
146
147 list_for_each_entry(peer, &rxrpc_peers, link) {
148 if (atomic_read(&peer->usage) > 0 &&
149 peer->srx.transport_type == srx->transport_type &&
150 peer->srx.transport_len == srx->transport_len &&
151 memcmp(&peer->srx.transport,
152 &srx->transport,
153 srx->transport_len) == 0)
154 goto found_extant_second;
155 }
156
157 /* we can now add the new candidate to the list */
158 peer = candidate;
159 candidate = NULL;
160 usage = atomic_read(&peer->usage);
161
162 list_add_tail(&peer->link, &rxrpc_peers);
163 write_unlock_bh(&rxrpc_peer_lock);
164 new = "new";
165
166success:
167 _net("PEER %s %d {%d,%u,%pI4+%hu}",
168 new,
169 peer->debug_id,
170 peer->srx.transport_type,
171 peer->srx.transport.family,
172 &peer->srx.transport.sin.sin_addr,
173 ntohs(peer->srx.transport.sin.sin_port));
174
175 _leave(" = %p {u=%d}", peer, usage);
176 return peer;
177
178 /* we found the peer in the list immediately */
179found_extant_peer:
180 usage = atomic_inc_return(&peer->usage);
181 read_unlock_bh(&rxrpc_peer_lock);
182 goto success;
183
184 /* we found the peer on the second time through the list */
185found_extant_second:
186 usage = atomic_inc_return(&peer->usage);
187 write_unlock_bh(&rxrpc_peer_lock);
188 kfree(candidate);
189 goto success;
190}
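rxrpc_get_peer() is the classic optimistic lookup-then-insert: search under the read lock; only on a miss allocate a candidate outside any lock, then re-search under the write lock before inserting, discarding the candidate if another thread raced in first. A runnable userspace sketch of the shape (pthread rwlock; structures illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    struct peer { struct peer *next; long key; };

    static struct peer *peers;
    static pthread_rwlock_t peer_lock = PTHREAD_RWLOCK_INITIALIZER;

    static struct peer *find_locked(long key)
    {
            for (struct peer *p = peers; p; p = p->next)
                    if (p->key == key)
                            return p;
            return NULL;
    }

    static struct peer *get_peer(long key)
    {
            struct peer *p, *candidate;

            /* fast path: most lookups hit under the shared lock */
            pthread_rwlock_rdlock(&peer_lock);
            p = find_locked(key);
            pthread_rwlock_unlock(&peer_lock);
            if (p)
                    return p;

            /* allocate outside the lock, since allocation may be slow */
            candidate = calloc(1, sizeof(*candidate));
            if (!candidate)
                    return NULL;
            candidate->key = key;

            /* re-check under the exclusive lock: someone may have raced in */
            pthread_rwlock_wrlock(&peer_lock);
            p = find_locked(key);
            if (!p) {
                    candidate->next = peers;
                    peers = candidate;
                    p = candidate;
                    candidate = NULL;
            }
            pthread_rwlock_unlock(&peer_lock);

            free(candidate);    /* no-op if we won the race */
            return p;
    }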
191
192/*
193 * find the peer associated with a packet
194 */
195struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *local,
196 __be32 addr, __be16 port)
197{
198 struct rxrpc_peer *peer;
199
200 _enter("");
201
202 /* search the peer list */
203 read_lock_bh(&rxrpc_peer_lock);
204
205 if (local->srx.transport.family == AF_INET &&
206 local->srx.transport_type == SOCK_DGRAM
207 ) {
208 list_for_each_entry(peer, &rxrpc_peers, link) {
209 if (atomic_read(&peer->usage) > 0 &&
210 peer->srx.transport_type == SOCK_DGRAM &&
211 peer->srx.transport.family == AF_INET &&
212 peer->srx.transport.sin.sin_port == port &&
213 peer->srx.transport.sin.sin_addr.s_addr == addr)
214 goto found_UDP_peer;
215 }
216
217 goto new_UDP_peer;
218 }
219
220 read_unlock_bh(&rxrpc_peer_lock);
221 _leave(" = -EAFNOSUPPORT");
222 return ERR_PTR(-EAFNOSUPPORT);
223
224found_UDP_peer:
225 _net("Rx UDP DGRAM from peer %d", peer->debug_id);
226 atomic_inc(&peer->usage);
227 read_unlock_bh(&rxrpc_peer_lock);
228 _leave(" = %p", peer);
229 return peer;
230
231new_UDP_peer:
232 _net("Rx UDP DGRAM from NEW peer");
233 read_unlock_bh(&rxrpc_peer_lock);
234 _leave(" = -EBUSY [new]");
235 return ERR_PTR(-EBUSY);
236}
237
238/*
239 * release a remote transport endpoint
240 */
241void rxrpc_put_peer(struct rxrpc_peer *peer)
242{
243 _enter("%p{u=%d}", peer, atomic_read(&peer->usage));
244
245 ASSERTCMP(atomic_read(&peer->usage), >, 0);
246
247 if (likely(!atomic_dec_and_test(&peer->usage))) {
248 _leave(" [in use]");
249 return;
250 }
251
252 rxrpc_queue_work(&peer->destroyer);
253 _leave("");
254}
255
256/*
257 * destroy a remote transport endpoint
258 */
259static void rxrpc_destroy_peer(struct work_struct *work)
260{
261 struct rxrpc_peer *peer =
262 container_of(work, struct rxrpc_peer, destroyer);
263
264 _enter("%p{%d}", peer, atomic_read(&peer->usage));
265
266 write_lock_bh(&rxrpc_peer_lock);
267 list_del(&peer->link);
268 write_unlock_bh(&rxrpc_peer_lock);
269
270 _net("DESTROY PEER %d", peer->debug_id);
271 kfree(peer);
272
273 if (list_empty(&rxrpc_peers))
274 wake_up_all(&rxrpc_peer_wq);
275 _leave("");
276}
277
278/*
279 * preemptively destroy all the peer records from a transport endpoint rather
280 * than waiting for them to time out
281 */
282void __exit rxrpc_destroy_all_peers(void)
283{
284 DECLARE_WAITQUEUE(myself,current);
285
286 _enter("");
287
288 /* we simply have to wait for them to go away */
289 if (!list_empty(&rxrpc_peers)) {
290 set_current_state(TASK_UNINTERRUPTIBLE);
291 add_wait_queue(&rxrpc_peer_wq, &myself);
292
293 while (!list_empty(&rxrpc_peers)) {
294 schedule();
295 set_current_state(TASK_UNINTERRUPTIBLE);
296 }
297
298 remove_wait_queue(&rxrpc_peer_wq, &myself);
299 set_current_state(TASK_RUNNING);
300 }
301
302 _leave("");
303}
diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
deleted file mode 100644
index 66a1a5676446..000000000000
--- a/net/rxrpc/ar-transport.c
+++ /dev/null
@@ -1,284 +0,0 @@
1/* RxRPC point-to-point transport session management
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/net.h>
14#include <linux/skbuff.h>
15#include <linux/slab.h>
16#include <net/sock.h>
17#include <net/af_rxrpc.h>
18#include "ar-internal.h"
19
20/*
21 * Time after last use at which transport record is cleaned up.
22 */
23unsigned int rxrpc_transport_expiry = 3600 * 24;
24
25static void rxrpc_transport_reaper(struct work_struct *work);
26
27static LIST_HEAD(rxrpc_transports);
28static DEFINE_RWLOCK(rxrpc_transport_lock);
29static DECLARE_DELAYED_WORK(rxrpc_transport_reap, rxrpc_transport_reaper);
30
31/*
32 * allocate a new transport session manager
33 */
34static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
35 struct rxrpc_peer *peer,
36 gfp_t gfp)
37{
38 struct rxrpc_transport *trans;
39
40 _enter("");
41
42 trans = kzalloc(sizeof(struct rxrpc_transport), gfp);
43 if (trans) {
44 trans->local = local;
45 trans->peer = peer;
46 INIT_LIST_HEAD(&trans->link);
47 trans->bundles = RB_ROOT;
48 trans->client_conns = RB_ROOT;
49 trans->server_conns = RB_ROOT;
50 skb_queue_head_init(&trans->error_queue);
51 spin_lock_init(&trans->client_lock);
52 rwlock_init(&trans->conn_lock);
53 atomic_set(&trans->usage, 1);
54 trans->conn_idcounter = peer->srx.srx_service << 16;
55 trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
56
57 if (peer->srx.transport.family == AF_INET) {
58 switch (peer->srx.transport_type) {
59 case SOCK_DGRAM:
60 INIT_WORK(&trans->error_handler,
61 rxrpc_UDP_error_handler);
62 break;
63 default:
64 BUG();
65 break;
66 }
67 } else {
68 BUG();
69 }
70 }
71
72 _leave(" = %p", trans);
73 return trans;
74}
75
76/*
77 * obtain a transport session for the nominated endpoints
78 */
79struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *local,
80 struct rxrpc_peer *peer,
81 gfp_t gfp)
82{
83 struct rxrpc_transport *trans, *candidate;
84 const char *new = "old";
85 int usage;
86
87 _enter("{%pI4+%hu},{%pI4+%hu},",
88 &local->srx.transport.sin.sin_addr,
89 ntohs(local->srx.transport.sin.sin_port),
90 &peer->srx.transport.sin.sin_addr,
91 ntohs(peer->srx.transport.sin.sin_port));
92
93 /* search the transport list first */
94 read_lock_bh(&rxrpc_transport_lock);
95 list_for_each_entry(trans, &rxrpc_transports, link) {
96 if (trans->local == local && trans->peer == peer)
97 goto found_extant_transport;
98 }
99 read_unlock_bh(&rxrpc_transport_lock);
100
101 /* not yet present - create a candidate for a new record and then
102 * redo the search */
103 candidate = rxrpc_alloc_transport(local, peer, gfp);
104 if (!candidate) {
105 _leave(" = -ENOMEM");
106 return ERR_PTR(-ENOMEM);
107 }
108
109 write_lock_bh(&rxrpc_transport_lock);
110
111 list_for_each_entry(trans, &rxrpc_transports, link) {
112 if (trans->local == local && trans->peer == peer)
113 goto found_extant_second;
114 }
115
116 /* we can now add the new candidate to the list */
117 trans = candidate;
118 candidate = NULL;
119 usage = atomic_read(&trans->usage);
120
121 rxrpc_get_local(trans->local);
122 atomic_inc(&trans->peer->usage);
123 list_add_tail(&trans->link, &rxrpc_transports);
124 write_unlock_bh(&rxrpc_transport_lock);
125 new = "new";
126
127success:
128 _net("TRANSPORT %s %d local %d -> peer %d",
129 new,
130 trans->debug_id,
131 trans->local->debug_id,
132 trans->peer->debug_id);
133
134 _leave(" = %p {u=%d}", trans, usage);
135 return trans;
136
137 /* we found the transport in the list immediately */
138found_extant_transport:
139 usage = atomic_inc_return(&trans->usage);
140 read_unlock_bh(&rxrpc_transport_lock);
141 goto success;
142
143 /* we found the transport on the second time through the list */
144found_extant_second:
145 usage = atomic_inc_return(&trans->usage);
146 write_unlock_bh(&rxrpc_transport_lock);
147 kfree(candidate);
148 goto success;
149}
150
151/*
152 * find the transport connecting two endpoints
153 */
154struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *local,
155 struct rxrpc_peer *peer)
156{
157 struct rxrpc_transport *trans;
158
159 _enter("{%pI4+%hu},{%pI4+%hu},",
160 &local->srx.transport.sin.sin_addr,
161 ntohs(local->srx.transport.sin.sin_port),
162 &peer->srx.transport.sin.sin_addr,
163 ntohs(peer->srx.transport.sin.sin_port));
164
165 /* search the transport list */
166 read_lock_bh(&rxrpc_transport_lock);
167
168 list_for_each_entry(trans, &rxrpc_transports, link) {
169 if (trans->local == local && trans->peer == peer)
170 goto found_extant_transport;
171 }
172
173 read_unlock_bh(&rxrpc_transport_lock);
174 _leave(" = NULL");
175 return NULL;
176
177found_extant_transport:
178 atomic_inc(&trans->usage);
179 read_unlock_bh(&rxrpc_transport_lock);
180 _leave(" = %p", trans);
181 return trans;
182}
183
184/*
185 * release a transport session
186 */
187void rxrpc_put_transport(struct rxrpc_transport *trans)
188{
189 _enter("%p{u=%d}", trans, atomic_read(&trans->usage));
190
191 ASSERTCMP(atomic_read(&trans->usage), >, 0);
192
193 trans->put_time = ktime_get_seconds();
194 if (unlikely(atomic_dec_and_test(&trans->usage))) {
195 _debug("zombie");
196 /* let the reaper determine the timeout to avoid a race with
197 * overextending the timeout if the reaper is running at the
198 * same time */
199 rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);
200 }
201 _leave("");
202}
203
204/*
205 * clean up a transport session
206 */
207static void rxrpc_cleanup_transport(struct rxrpc_transport *trans)
208{
209 _net("DESTROY TRANS %d", trans->debug_id);
210
211 rxrpc_purge_queue(&trans->error_queue);
212
213 rxrpc_put_local(trans->local);
214 rxrpc_put_peer(trans->peer);
215 kfree(trans);
216}
217
218/*
219 * reap dead transports that have passed their expiry date
220 */
221static void rxrpc_transport_reaper(struct work_struct *work)
222{
223 struct rxrpc_transport *trans, *_p;
224 unsigned long now, earliest, reap_time;
225
226 LIST_HEAD(graveyard);
227
228 _enter("");
229
230 now = ktime_get_seconds();
231 earliest = ULONG_MAX;
232
233 /* extract all the transports that have been dead too long */
234 write_lock_bh(&rxrpc_transport_lock);
235 list_for_each_entry_safe(trans, _p, &rxrpc_transports, link) {
236 _debug("reap TRANS %d { u=%d t=%ld }",
237 trans->debug_id, atomic_read(&trans->usage),
238 (long) now - (long) trans->put_time);
239
240 if (likely(atomic_read(&trans->usage) > 0))
241 continue;
242
243 reap_time = trans->put_time + rxrpc_transport_expiry;
244 if (reap_time <= now)
245 list_move_tail(&trans->link, &graveyard);
246 else if (reap_time < earliest)
247 earliest = reap_time;
248 }
249 write_unlock_bh(&rxrpc_transport_lock);
250
251 if (earliest != ULONG_MAX) {
252 _debug("reschedule reaper %ld", (long) earliest - now);
253 ASSERTCMP(earliest, >, now);
254 rxrpc_queue_delayed_work(&rxrpc_transport_reap,
255 (earliest - now) * HZ);
256 }
257
258 /* then destroy all those pulled out */
259 while (!list_empty(&graveyard)) {
260 trans = list_entry(graveyard.next, struct rxrpc_transport,
261 link);
262 list_del_init(&trans->link);
263
264 ASSERTCMP(atomic_read(&trans->usage), ==, 0);
265 rxrpc_cleanup_transport(trans);
266 }
267
268 _leave("");
269}
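The reaper only ever sleeps until the earliest pending expiry: one sweep moves everything whose put_time plus the expiry interval has passed onto a local graveyard list, and reschedules itself for the soonest survivor. A small runnable sketch of that earliest-deadline computation (plain C; the object list is illustrative):

    #include <stdio.h>
    #include <limits.h>

    #define EXPIRY 10   /* stand-in for rxrpc_transport_expiry (seconds) */

    int main(void)
    {
            unsigned long now = 100;
            unsigned long put_times[] = { 85, 95, 99 };  /* unused objects */
            unsigned long earliest = ULONG_MAX;

            for (int i = 0; i < 3; i++) {
                    unsigned long reap_time = put_times[i] + EXPIRY;

                    if (reap_time <= now)
                            printf("reap object %d now\n", i);  /* 85+10 <= 100 */
                    else if (reap_time < earliest)
                            earliest = reap_time;
            }
            if (earliest != ULONG_MAX)
                    printf("resleep for %lu seconds\n", earliest - now); /* 5 */
            return 0;
    }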
270
271/*
272 * preemptively destroy all the transport session records rather than waiting
273 * for them to time out
274 */
275void __exit rxrpc_destroy_all_transports(void)
276{
277 _enter("");
278
279 rxrpc_transport_expiry = 0;
280 cancel_delayed_work(&rxrpc_transport_reap);
281 rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);
282
283 _leave("");
284}
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/call_accept.c
index e7a7f05f13e2..202e053a3c6d 100644
--- a/net/rxrpc/ar-accept.c
+++ b/net/rxrpc/call_accept.c
@@ -9,6 +9,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/net.h>
 #include <linux/skbuff.h>
@@ -72,7 +74,6 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
 				      struct sockaddr_rxrpc *srx)
 {
 	struct rxrpc_connection *conn;
-	struct rxrpc_transport *trans;
 	struct rxrpc_skb_priv *sp, *nsp;
 	struct rxrpc_peer *peer;
 	struct rxrpc_call *call;
@@ -93,30 +94,22 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
 	rxrpc_new_skb(notification);
 	notification->mark = RXRPC_SKB_MARK_NEW_CALL;
 
-	peer = rxrpc_get_peer(srx, GFP_NOIO);
-	if (IS_ERR(peer)) {
+	peer = rxrpc_lookup_peer(local, srx, GFP_NOIO);
+	if (!peer) {
 		_debug("no peer");
 		ret = -EBUSY;
 		goto error;
 	}
 
-	trans = rxrpc_get_transport(local, peer, GFP_NOIO);
+	conn = rxrpc_incoming_connection(local, peer, skb);
 	rxrpc_put_peer(peer);
-	if (IS_ERR(trans)) {
-		_debug("no trans");
-		ret = -EBUSY;
-		goto error;
-	}
-
-	conn = rxrpc_incoming_connection(trans, &sp->hdr);
-	rxrpc_put_transport(trans);
 	if (IS_ERR(conn)) {
 		_debug("no conn");
 		ret = PTR_ERR(conn);
 		goto error;
 	}
 
-	call = rxrpc_incoming_call(rx, conn, &sp->hdr);
+	call = rxrpc_incoming_call(rx, conn, skb);
 	rxrpc_put_connection(conn);
 	if (IS_ERR(call)) {
 		_debug("no call");
@@ -139,7 +132,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
 		_debug("await conn sec");
 		list_add_tail(&call->accept_link, &rx->secureq);
 		call->conn->state = RXRPC_CONN_SERVER_CHALLENGING;
-		atomic_inc(&call->conn->usage);
+		rxrpc_get_connection(call->conn);
 		set_bit(RXRPC_CONN_CHALLENGE, &call->conn->events);
 		rxrpc_queue_conn(call->conn);
 	} else {
@@ -200,10 +193,8 @@ error_nofree:
  * accept incoming calls that need peer, transport and/or connection setting up
  * - the packets we get are all incoming client DATA packets that have seq == 1
  */
-void rxrpc_accept_incoming_calls(struct work_struct *work)
+void rxrpc_accept_incoming_calls(struct rxrpc_local *local)
 {
-	struct rxrpc_local *local =
-		container_of(work, struct rxrpc_local, acceptor);
 	struct rxrpc_skb_priv *sp;
 	struct sockaddr_rxrpc srx;
 	struct rxrpc_sock *rx;
@@ -213,21 +204,8 @@ void rxrpc_accept_incoming_calls(struct work_struct *work)
 
 	_enter("%d", local->debug_id);
 
-	read_lock_bh(&rxrpc_local_lock);
-	if (atomic_read(&local->usage) > 0)
-		rxrpc_get_local(local);
-	else
-		local = NULL;
-	read_unlock_bh(&rxrpc_local_lock);
-	if (!local) {
-		_leave(" [local dead]");
-		return;
-	}
-
-process_next_packet:
 	skb = skb_dequeue(&local->accept_queue);
 	if (!skb) {
-		rxrpc_put_local(local);
 		_leave("\n");
 		return;
 	}
@@ -290,7 +268,7 @@ found_service:
 	case -ECONNRESET: /* old calls are ignored */
 	case -ECONNABORTED: /* aborted calls are reaborted or ignored */
 	case 0:
-		goto process_next_packet;
+		return;
 	case -ECONNREFUSED:
 		goto invalid_service;
 	case -EBUSY:
@@ -306,18 +284,18 @@ backlog_full:
 busy:
 	rxrpc_busy(local, &srx, &whdr);
 	rxrpc_free_skb(skb);
-	goto process_next_packet;
+	return;
 
 invalid_service:
 	skb->priority = RX_INVALID_OPERATION;
 	rxrpc_reject_packet(local, skb);
-	goto process_next_packet;
+	return;
 
 	/* can't change connection security type mid-flow */
security_mismatch:
 	skb->priority = RX_PROTOCOL_ERROR;
 	rxrpc_reject_packet(local, skb);
-	goto process_next_packet;
+	return;
 }
 
 /*
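With the work_struct plumbing gone, rxrpc_accept_incoming_calls() handles exactly one packet per invocation (every goto process_next_packet above became a plain return), and the caller is expected to loop. A hedged sketch of the kind of dispatch this implies; the actual driver of this function in this series is the local-endpoint event processor, which is not shown here:

    /* illustrative only: drain the accept queue one packet at a time */
    while (!skb_queue_empty(&local->accept_queue))
            rxrpc_accept_incoming_calls(local);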
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/call_event.c
index 374478e006e7..0ba84295f913 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/call_event.c
@@ -9,6 +9,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/circ_buf.h>
 #include <linux/net.h>
@@ -185,7 +187,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
 
 			_proto("Tx DATA %%%u { #%d }",
 			       sp->hdr.serial, sp->hdr.seq);
-			if (rxrpc_send_packet(call->conn->trans, txb) < 0) {
+			if (rxrpc_send_data_packet(call->conn, txb) < 0) {
 				stop = true;
 				sp->resend_at = jiffies + 3;
 			} else {
@@ -543,7 +545,7 @@ static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
 
 	mtu = min(ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU));
 
-	peer = call->conn->trans->peer;
+	peer = call->conn->params.peer;
 	if (mtu < peer->maxdata) {
 		spin_lock_bh(&peer->lock);
 		peer->maxdata = mtu;
@@ -834,13 +836,13 @@ void rxrpc_process_call(struct work_struct *work)
 
 	/* there's a good chance we're going to have to send a message, so set
 	 * one up in advance */
-	msg.msg_name	= &call->conn->trans->peer->srx.transport;
-	msg.msg_namelen	= call->conn->trans->peer->srx.transport_len;
+	msg.msg_name	= &call->conn->params.peer->srx.transport;
+	msg.msg_namelen	= call->conn->params.peer->srx.transport_len;
 	msg.msg_control	= NULL;
 	msg.msg_controllen = 0;
 	msg.msg_flags	= 0;
 
-	whdr.epoch	= htonl(call->conn->epoch);
+	whdr.epoch	= htonl(call->conn->proto.epoch);
 	whdr.cid	= htonl(call->cid);
 	whdr.callNumber	= htonl(call->call_id);
 	whdr.seq	= 0;
@@ -862,17 +864,24 @@ void rxrpc_process_call(struct work_struct *work)
 	}
 
 	if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) {
+		enum rxrpc_skb_mark mark;
 		int error;
 
 		clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
 		clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
 		clear_bit(RXRPC_CALL_EV_ABORT, &call->events);
 
-		error = call->conn->trans->peer->net_error;
-		_debug("post net error %d", error);
+		error = call->error_report;
+		if (error < RXRPC_LOCAL_ERROR_OFFSET) {
+			mark = RXRPC_SKB_MARK_NET_ERROR;
+			_debug("post net error %d", error);
+		} else {
+			mark = RXRPC_SKB_MARK_LOCAL_ERROR;
+			error -= RXRPC_LOCAL_ERROR_OFFSET;
+			_debug("post net local error %d", error);
+		}
 
-		if (rxrpc_post_message(call, RXRPC_SKB_MARK_NET_ERROR,
-				       error, true) < 0)
+		if (rxrpc_post_message(call, mark, error, true) < 0)
 			goto no_mem;
 		clear_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events);
 		goto kill_ACKs;
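call->error_report multiplexes two error sources into one int: plain network errno values sit below RXRPC_LOCAL_ERROR_OFFSET, while local transport errors are stored biased by that offset, so the consumer above recovers both the origin and the original errno. A runnable sketch of the encode/decode (the 0x10000 offset value is an assumption for illustration, not the kernel's definition):

    #include <stdio.h>
    #include <errno.h>

    #define LOCAL_ERROR_OFFSET 0x10000   /* assumed value, for illustration */

    static int encode_local_error(int err) { return err + LOCAL_ERROR_OFFSET; }

    static void post_error(int error_report)
    {
            if (error_report < LOCAL_ERROR_OFFSET)
                    printf("net error %d\n", error_report);
            else
                    printf("local error %d\n", error_report - LOCAL_ERROR_OFFSET);
    }

    int main(void)
    {
            post_error(ECONNREFUSED);               /* "net error 111" on Linux */
            post_error(encode_local_error(ENOMEM)); /* "local error 12" */
            return 0;
    }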
@@ -1142,8 +1151,8 @@ send_ACK_with_skew:
1142 ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) - 1151 ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
1143 ntohl(ack.serial)); 1152 ntohl(ack.serial));
1144send_ACK: 1153send_ACK:
1145 mtu = call->conn->trans->peer->if_mtu; 1154 mtu = call->conn->params.peer->if_mtu;
1146 mtu -= call->conn->trans->peer->hdrsize; 1155 mtu -= call->conn->params.peer->hdrsize;
1147 ackinfo.maxMTU = htonl(mtu); 1156 ackinfo.maxMTU = htonl(mtu);
1148 ackinfo.rwind = htonl(rxrpc_rx_window_size); 1157 ackinfo.rwind = htonl(rxrpc_rx_window_size);
1149 1158
@@ -1197,7 +1206,7 @@ send_message_2:
1197 len += iov[1].iov_len; 1206 len += iov[1].iov_len;
1198 } 1207 }
1199 1208
1200 ret = kernel_sendmsg(call->conn->trans->local->socket, 1209 ret = kernel_sendmsg(call->conn->params.local->socket,
1201 &msg, iov, ioc, len); 1210 &msg, iov, ioc, len);
1202 if (ret < 0) { 1211 if (ret < 0) {
1203 _debug("sendmsg failed: %d", ret); 1212 _debug("sendmsg failed: %d", ret);
@@ -1255,7 +1264,7 @@ maybe_reschedule:
1255 if (call->state >= RXRPC_CALL_COMPLETE && 1264 if (call->state >= RXRPC_CALL_COMPLETE &&
1256 !list_empty(&call->accept_link)) { 1265 !list_empty(&call->accept_link)) {
1257 _debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }", 1266 _debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }",
1258 call, call->events, call->flags, call->conn->cid); 1267 call, call->events, call->flags, call->conn->proto.cid);
1259 1268
1260 read_lock_bh(&call->state_lock); 1269 read_lock_bh(&call->state_lock);
1261 if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) && 1270 if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
@@ -1273,7 +1282,7 @@ error:
1273 * this means there's a race between clearing the flag and setting the 1282 * this means there's a race between clearing the flag and setting the
1274 * work pending bit and the work item being processed again */ 1283 * work pending bit and the work item being processed again */
1275 if (call->events && !work_pending(&call->processor)) { 1284 if (call->events && !work_pending(&call->processor)) {
1276 _debug("jumpstart %x", call->conn->cid); 1285 _debug("jumpstart %x", call->conn->proto.cid);
1277 rxrpc_queue_call(call); 1286 rxrpc_queue_call(call);
1278 } 1287 }
1279 1288
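The hunk above replaces the old per-peer net_error with a classification of call->error_report: values below RXRPC_LOCAL_ERROR_OFFSET are posted as network errors, anything at or above it has the offset removed and is posted as a local error. Below is a minimal userspace sketch of that split; the offset value, enum names and errno examples are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

#define LOCAL_ERROR_OFFSET 1000000	/* illustrative stand-in value */

enum skb_mark { MARK_NET_ERROR, MARK_LOCAL_ERROR };

static enum skb_mark classify_error(int *error)
{
	if (*error < LOCAL_ERROR_OFFSET)
		return MARK_NET_ERROR;		/* errno reported by the network */
	*error -= LOCAL_ERROR_OFFSET;		/* recover the local errno */
	return MARK_LOCAL_ERROR;
}

int main(void)
{
	int err1 = 111;				/* e.g. ECONNREFUSED via ICMP */
	int err2 = LOCAL_ERROR_OFFSET + 90;	/* e.g. EMSGSIZE raised locally */
	enum skb_mark m1 = classify_error(&err1);
	enum skb_mark m2 = classify_error(&err2);

	printf("mark=%d err=%d\n", m1, err1);	/* mark=0 err=111 */
	printf("mark=%d err=%d\n", m2, err2);	/* mark=1 err=90 */
	return 0;
}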
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/call_object.c
index 571a41fd5a32..ad933daae13b 100644
--- a/net/rxrpc/ar-call.c
+++ b/net/rxrpc/call_object.c
@@ -9,6 +9,8 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/slab.h> 14#include <linux/slab.h>
13#include <linux/module.h> 15#include <linux/module.h>
14#include <linux/circ_buf.h> 16#include <linux/circ_buf.h>
@@ -29,6 +31,8 @@ unsigned int rxrpc_max_call_lifetime = 60 * HZ;
29unsigned int rxrpc_dead_call_expiry = 2 * HZ; 31unsigned int rxrpc_dead_call_expiry = 2 * HZ;
30 32
31const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = { 33const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
34 [RXRPC_CALL_UNINITIALISED] = "Uninit",
35 [RXRPC_CALL_CLIENT_AWAIT_CONN] = "ClWtConn",
32 [RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq", 36 [RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq",
33 [RXRPC_CALL_CLIENT_AWAIT_REPLY] = "ClAwtRpl", 37 [RXRPC_CALL_CLIENT_AWAIT_REPLY] = "ClAwtRpl",
34 [RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl", 38 [RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl",
@@ -69,7 +73,7 @@ static unsigned long rxrpc_call_hashfunc(
69 u32 call_id, 73 u32 call_id,
70 u32 epoch, 74 u32 epoch,
71 u16 service_id, 75 u16 service_id,
72 sa_family_t proto, 76 sa_family_t family,
73 void *localptr, 77 void *localptr,
74 unsigned int addr_size, 78 unsigned int addr_size,
75 const u8 *peer_addr) 79 const u8 *peer_addr)
@@ -90,7 +94,7 @@ static unsigned long rxrpc_call_hashfunc(
90 key += (cid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT; 94 key += (cid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
91 key += cid & RXRPC_CHANNELMASK; 95 key += cid & RXRPC_CHANNELMASK;
92 key += in_clientflag; 96 key += in_clientflag;
93 key += proto; 97 key += family;
94 /* Step through the peer address in 16-bit portions for speed */ 98 /* Step through the peer address in 16-bit portions for speed */
95 for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++) 99 for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++)
96 key += *p; 100 key += *p;
@@ -107,7 +111,7 @@ static void rxrpc_call_hash_add(struct rxrpc_call *call)
107 unsigned int addr_size = 0; 111 unsigned int addr_size = 0;
108 112
109 _enter(""); 113 _enter("");
110 switch (call->proto) { 114 switch (call->family) {
111 case AF_INET: 115 case AF_INET:
112 addr_size = sizeof(call->peer_ip.ipv4_addr); 116 addr_size = sizeof(call->peer_ip.ipv4_addr);
113 break; 117 break;
@@ -119,8 +123,8 @@ static void rxrpc_call_hash_add(struct rxrpc_call *call)
119 } 123 }
120 key = rxrpc_call_hashfunc(call->in_clientflag, call->cid, 124 key = rxrpc_call_hashfunc(call->in_clientflag, call->cid,
121 call->call_id, call->epoch, 125 call->call_id, call->epoch,
122 call->service_id, call->proto, 126 call->service_id, call->family,
123 call->conn->trans->local, addr_size, 127 call->conn->params.local, addr_size,
124 call->peer_ip.ipv6_addr); 128 call->peer_ip.ipv6_addr);
125 /* Store the full key in the call */ 129 /* Store the full key in the call */
126 call->hash_key = key; 130 call->hash_key = key;
@@ -149,7 +153,7 @@ static void rxrpc_call_hash_del(struct rxrpc_call *call)
149struct rxrpc_call *rxrpc_find_call_hash( 153struct rxrpc_call *rxrpc_find_call_hash(
150 struct rxrpc_host_header *hdr, 154 struct rxrpc_host_header *hdr,
151 void *localptr, 155 void *localptr,
152 sa_family_t proto, 156 sa_family_t family,
153 const void *peer_addr) 157 const void *peer_addr)
154{ 158{
155 unsigned long key; 159 unsigned long key;
@@ -159,7 +163,7 @@ struct rxrpc_call *rxrpc_find_call_hash(
159 u8 in_clientflag = hdr->flags & RXRPC_CLIENT_INITIATED; 163 u8 in_clientflag = hdr->flags & RXRPC_CLIENT_INITIATED;
160 164
161 _enter(""); 165 _enter("");
162 switch (proto) { 166 switch (family) {
163 case AF_INET: 167 case AF_INET:
164 addr_size = sizeof(call->peer_ip.ipv4_addr); 168 addr_size = sizeof(call->peer_ip.ipv4_addr);
165 break; 169 break;
@@ -172,7 +176,7 @@ struct rxrpc_call *rxrpc_find_call_hash(
172 176
173 key = rxrpc_call_hashfunc(in_clientflag, hdr->cid, hdr->callNumber, 177 key = rxrpc_call_hashfunc(in_clientflag, hdr->cid, hdr->callNumber,
174 hdr->epoch, hdr->serviceId, 178 hdr->epoch, hdr->serviceId,
175 proto, localptr, addr_size, 179 family, localptr, addr_size,
176 peer_addr); 180 peer_addr);
177 hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) { 181 hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) {
178 if (call->hash_key == key && 182 if (call->hash_key == key &&
@@ -180,7 +184,7 @@ struct rxrpc_call *rxrpc_find_call_hash(
180 call->cid == hdr->cid && 184 call->cid == hdr->cid &&
181 call->in_clientflag == in_clientflag && 185 call->in_clientflag == in_clientflag &&
182 call->service_id == hdr->serviceId && 186 call->service_id == hdr->serviceId &&
183 call->proto == proto && 187 call->family == family &&
184 call->local == localptr && 188 call->local == localptr &&
185 memcmp(call->peer_ip.ipv6_addr, peer_addr, 189 memcmp(call->peer_ip.ipv6_addr, peer_addr,
186 addr_size) == 0 && 190 addr_size) == 0 &&
@@ -194,6 +198,43 @@ struct rxrpc_call *rxrpc_find_call_hash(
194} 198}
195 199
196/* 200/*
201 * find an extant server call
202 * - called in process context with IRQs enabled
203 */
204struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
205 unsigned long user_call_ID)
206{
207 struct rxrpc_call *call;
208 struct rb_node *p;
209
210 _enter("%p,%lx", rx, user_call_ID);
211
212 read_lock(&rx->call_lock);
213
214 p = rx->calls.rb_node;
215 while (p) {
216 call = rb_entry(p, struct rxrpc_call, sock_node);
217
218 if (user_call_ID < call->user_call_ID)
219 p = p->rb_left;
220 else if (user_call_ID > call->user_call_ID)
221 p = p->rb_right;
222 else
223 goto found_extant_call;
224 }
225
226 read_unlock(&rx->call_lock);
227 _leave(" = NULL");
228 return NULL;
229
230found_extant_call:
231 rxrpc_get_call(call);
232 read_unlock(&rx->call_lock);
233 _leave(" = %p [%d]", call, atomic_read(&call->usage));
234 return call;
235}
236
237/*
197 * allocate a new call 238 * allocate a new call
198 */ 239 */
199static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) 240static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
@@ -222,6 +263,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
222 (unsigned long) call); 263 (unsigned long) call);
223 INIT_WORK(&call->destroyer, &rxrpc_destroy_call); 264 INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
224 INIT_WORK(&call->processor, &rxrpc_process_call); 265 INIT_WORK(&call->processor, &rxrpc_process_call);
266 INIT_LIST_HEAD(&call->link);
225 INIT_LIST_HEAD(&call->accept_link); 267 INIT_LIST_HEAD(&call->accept_link);
226 skb_queue_head_init(&call->rx_queue); 268 skb_queue_head_init(&call->rx_queue);
227 skb_queue_head_init(&call->rx_oos_queue); 269 skb_queue_head_init(&call->rx_oos_queue);
@@ -230,7 +272,6 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
230 rwlock_init(&call->state_lock); 272 rwlock_init(&call->state_lock);
231 atomic_set(&call->usage, 1); 273 atomic_set(&call->usage, 1);
232 call->debug_id = atomic_inc_return(&rxrpc_debug_id); 274 call->debug_id = atomic_inc_return(&rxrpc_debug_id);
233 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
234 275
235 memset(&call->sock_node, 0xed, sizeof(call->sock_node)); 276 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
236 277
@@ -243,117 +284,104 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
243} 284}
244 285
245/* 286/*
246 * allocate a new client call and attempt to get a connection slot for it 287 * Allocate a new client call.
247 */ 288 */
248static struct rxrpc_call *rxrpc_alloc_client_call( 289static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
249 struct rxrpc_sock *rx, 290 struct sockaddr_rxrpc *srx,
250 struct rxrpc_transport *trans, 291 gfp_t gfp)
251 struct rxrpc_conn_bundle *bundle,
252 gfp_t gfp)
253{ 292{
254 struct rxrpc_call *call; 293 struct rxrpc_call *call;
255 int ret;
256 294
257 _enter(""); 295 _enter("");
258 296
259 ASSERT(rx != NULL); 297 ASSERT(rx->local != NULL);
260 ASSERT(trans != NULL);
261 ASSERT(bundle != NULL);
262 298
263 call = rxrpc_alloc_call(gfp); 299 call = rxrpc_alloc_call(gfp);
264 if (!call) 300 if (!call)
265 return ERR_PTR(-ENOMEM); 301 return ERR_PTR(-ENOMEM);
302 call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
266 303
267 sock_hold(&rx->sk); 304 sock_hold(&rx->sk);
268 call->socket = rx; 305 call->socket = rx;
269 call->rx_data_post = 1; 306 call->rx_data_post = 1;
270 307
271 ret = rxrpc_connect_call(rx, trans, bundle, call, gfp);
272 if (ret < 0) {
273 kmem_cache_free(rxrpc_call_jar, call);
274 return ERR_PTR(ret);
275 }
276
277 /* Record copies of information for hashtable lookup */ 308 /* Record copies of information for hashtable lookup */
278 call->proto = rx->proto; 309 call->family = rx->family;
279 call->local = trans->local; 310 call->local = rx->local;
280 switch (call->proto) { 311 switch (call->family) {
281 case AF_INET: 312 case AF_INET:
282 call->peer_ip.ipv4_addr = 313 call->peer_ip.ipv4_addr = srx->transport.sin.sin_addr.s_addr;
283 trans->peer->srx.transport.sin.sin_addr.s_addr;
284 break; 314 break;
285 case AF_INET6: 315 case AF_INET6:
286 memcpy(call->peer_ip.ipv6_addr, 316 memcpy(call->peer_ip.ipv6_addr,
287 trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8, 317 srx->transport.sin6.sin6_addr.in6_u.u6_addr8,
288 sizeof(call->peer_ip.ipv6_addr)); 318 sizeof(call->peer_ip.ipv6_addr));
289 break; 319 break;
290 } 320 }
291 call->epoch = call->conn->epoch; 321
292 call->service_id = call->conn->service_id; 322 call->service_id = srx->srx_service;
293 call->in_clientflag = call->conn->in_clientflag; 323 call->in_clientflag = 0;
324
325 _leave(" = %p", call);
326 return call;
327}
328
329/*
330 * Begin client call.
331 */
332static int rxrpc_begin_client_call(struct rxrpc_call *call,
333 struct rxrpc_conn_parameters *cp,
334 struct sockaddr_rxrpc *srx,
335 gfp_t gfp)
336{
337 int ret;
338
339 /* Set up or get a connection record and set the protocol parameters,
340 * including channel number and call ID.
341 */
342 ret = rxrpc_connect_call(call, cp, srx, gfp);
343 if (ret < 0)
344 return ret;
345
346 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
347
294 /* Add the new call to the hashtable */ 348 /* Add the new call to the hashtable */
295 rxrpc_call_hash_add(call); 349 rxrpc_call_hash_add(call);
296 350
297 spin_lock(&call->conn->trans->peer->lock); 351 spin_lock(&call->conn->params.peer->lock);
298 list_add(&call->error_link, &call->conn->trans->peer->error_targets); 352 hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
299 spin_unlock(&call->conn->trans->peer->lock); 353 spin_unlock(&call->conn->params.peer->lock);
300 354
301 call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime; 355 call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
302 add_timer(&call->lifetimer); 356 add_timer(&call->lifetimer);
303 357 return 0;
304 _leave(" = %p", call);
305 return call;
306} 358}
307 359
308/* 360/*
309 * set up a call for the given data 361 * set up a call for the given data
310 * - called in process context with IRQs enabled 362 * - called in process context with IRQs enabled
311 */ 363 */
312struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx, 364struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
313 struct rxrpc_transport *trans, 365 struct rxrpc_conn_parameters *cp,
314 struct rxrpc_conn_bundle *bundle, 366 struct sockaddr_rxrpc *srx,
315 unsigned long user_call_ID, 367 unsigned long user_call_ID,
316 int create,
317 gfp_t gfp) 368 gfp_t gfp)
318{ 369{
319 struct rxrpc_call *call, *candidate; 370 struct rxrpc_call *call, *xcall;
320 struct rb_node *p, *parent, **pp; 371 struct rb_node *parent, **pp;
321 372 int ret;
322 _enter("%p,%d,%d,%lx,%d",
323 rx, trans ? trans->debug_id : -1, bundle ? bundle->debug_id : -1,
324 user_call_ID, create);
325
326 /* search the extant calls first for one that matches the specified
327 * user ID */
328 read_lock(&rx->call_lock);
329
330 p = rx->calls.rb_node;
331 while (p) {
332 call = rb_entry(p, struct rxrpc_call, sock_node);
333
334 if (user_call_ID < call->user_call_ID)
335 p = p->rb_left;
336 else if (user_call_ID > call->user_call_ID)
337 p = p->rb_right;
338 else
339 goto found_extant_call;
340 }
341
342 read_unlock(&rx->call_lock);
343 373
344 if (!create || !trans) 374 _enter("%p,%lx", rx, user_call_ID);
345 return ERR_PTR(-EBADSLT);
346 375
347 /* not yet present - create a candidate for a new record and then 376 call = rxrpc_alloc_client_call(rx, srx, gfp);
348 * redo the search */ 377 if (IS_ERR(call)) {
349 candidate = rxrpc_alloc_client_call(rx, trans, bundle, gfp); 378 _leave(" = %ld", PTR_ERR(call));
350 if (IS_ERR(candidate)) { 379 return call;
351 _leave(" = %ld", PTR_ERR(candidate));
352 return candidate;
353 } 380 }
354 381
355 candidate->user_call_ID = user_call_ID; 382 /* Publish the call, even though it is incompletely set up as yet */
356 __set_bit(RXRPC_CALL_HAS_USERID, &candidate->flags); 383 call->user_call_ID = user_call_ID;
384 __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
357 385
358 write_lock(&rx->call_lock); 386 write_lock(&rx->call_lock);
359 387
@@ -361,19 +389,16 @@ struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
361 parent = NULL; 389 parent = NULL;
362 while (*pp) { 390 while (*pp) {
363 parent = *pp; 391 parent = *pp;
364 call = rb_entry(parent, struct rxrpc_call, sock_node); 392 xcall = rb_entry(parent, struct rxrpc_call, sock_node);
365 393
366 if (user_call_ID < call->user_call_ID) 394 if (user_call_ID < xcall->user_call_ID)
367 pp = &(*pp)->rb_left; 395 pp = &(*pp)->rb_left;
368 else if (user_call_ID > call->user_call_ID) 396 else if (user_call_ID > xcall->user_call_ID)
369 pp = &(*pp)->rb_right; 397 pp = &(*pp)->rb_right;
370 else 398 else
371 goto found_extant_second; 399 goto found_user_ID_now_present;
372 } 400 }
373 401
374 /* second search also failed; add the new call */
375 call = candidate;
376 candidate = NULL;
377 rxrpc_get_call(call); 402 rxrpc_get_call(call);
378 403
379 rb_link_node(&call->sock_node, parent, pp); 404 rb_link_node(&call->sock_node, parent, pp);
@@ -384,25 +409,39 @@ struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
384 list_add_tail(&call->link, &rxrpc_calls); 409 list_add_tail(&call->link, &rxrpc_calls);
385 write_unlock_bh(&rxrpc_call_lock); 410 write_unlock_bh(&rxrpc_call_lock);
386 411
412 ret = rxrpc_begin_client_call(call, cp, srx, gfp);
413 if (ret < 0)
414 goto error;
415
387 _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id); 416 _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);
388 417
389 _leave(" = %p [new]", call); 418 _leave(" = %p [new]", call);
390 return call; 419 return call;
391 420
392 /* we found the call in the list immediately */ 421error:
393found_extant_call: 422 write_lock(&rx->call_lock);
394 rxrpc_get_call(call); 423 rb_erase(&call->sock_node, &rx->calls);
395 read_unlock(&rx->call_lock); 424 write_unlock(&rx->call_lock);
396 _leave(" = %p [extant %d]", call, atomic_read(&call->usage)); 425 rxrpc_put_call(call);
397 return call;
398 426
399 /* we found the call on the second time through the list */ 427 write_lock_bh(&rxrpc_call_lock);
400found_extant_second: 428 list_del(&call->link);
401 rxrpc_get_call(call); 429 write_unlock_bh(&rxrpc_call_lock);
430
431 rxrpc_put_call(call);
432 _leave(" = %d", ret);
433 return ERR_PTR(ret);
434
435 /* We unexpectedly found the user ID in the list after taking
436 * the call_lock. This shouldn't happen unless the user races
437 * with itself and tries to add the same user ID twice at the
438 * same time in different threads.
439 */
440found_user_ID_now_present:
402 write_unlock(&rx->call_lock); 441 write_unlock(&rx->call_lock);
403 rxrpc_put_call(candidate); 442 rxrpc_put_call(call);
404 _leave(" = %p [second %d]", call, atomic_read(&call->usage)); 443 _leave(" = -EEXIST [%p]", call);
405 return call; 444 return ERR_PTR(-EEXIST);
406} 445}
407 446
408/* 447/*
@@ -411,8 +450,9 @@ found_extant_second:
411 */ 450 */
412struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, 451struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
413 struct rxrpc_connection *conn, 452 struct rxrpc_connection *conn,
414 struct rxrpc_host_header *hdr) 453 struct sk_buff *skb)
415{ 454{
455 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
416 struct rxrpc_call *call, *candidate; 456 struct rxrpc_call *call, *candidate;
417 struct rb_node **p, *parent; 457 struct rb_node **p, *parent;
418 u32 call_id; 458 u32 call_id;
@@ -425,13 +465,13 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
425 if (!candidate) 465 if (!candidate)
426 return ERR_PTR(-EBUSY); 466 return ERR_PTR(-EBUSY);
427 467
428 candidate->socket = rx; 468 candidate->socket = rx;
429 candidate->conn = conn; 469 candidate->conn = conn;
430 candidate->cid = hdr->cid; 470 candidate->cid = sp->hdr.cid;
431 candidate->call_id = hdr->callNumber; 471 candidate->call_id = sp->hdr.callNumber;
432 candidate->channel = hdr->cid & RXRPC_CHANNELMASK; 472 candidate->channel = sp->hdr.cid & RXRPC_CHANNELMASK;
433 candidate->rx_data_post = 0; 473 candidate->rx_data_post = 0;
434 candidate->state = RXRPC_CALL_SERVER_ACCEPTING; 474 candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
435 if (conn->security_ix > 0) 475 if (conn->security_ix > 0)
436 candidate->state = RXRPC_CALL_SERVER_SECURING; 476 candidate->state = RXRPC_CALL_SERVER_SECURING;
437 477
@@ -440,7 +480,7 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
440 /* set the channel for this call */ 480 /* set the channel for this call */
441 call = conn->channels[candidate->channel]; 481 call = conn->channels[candidate->channel];
442 _debug("channel[%u] is %p", candidate->channel, call); 482 _debug("channel[%u] is %p", candidate->channel, call);
443 if (call && call->call_id == hdr->callNumber) { 483 if (call && call->call_id == sp->hdr.callNumber) {
444 /* already set; must've been a duplicate packet */ 484 /* already set; must've been a duplicate packet */
445 _debug("extant call [%d]", call->state); 485 _debug("extant call [%d]", call->state);
446 ASSERTCMP(call->conn, ==, conn); 486 ASSERTCMP(call->conn, ==, conn);
@@ -478,7 +518,7 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
478 518
479 /* check the call number isn't a duplicate */ 519 /* check the call number isn't a duplicate */
480 _debug("check dup"); 520 _debug("check dup");
481 call_id = hdr->callNumber; 521 call_id = sp->hdr.callNumber;
482 p = &conn->calls.rb_node; 522 p = &conn->calls.rb_node;
483 parent = NULL; 523 parent = NULL;
484 while (*p) { 524 while (*p) {
@@ -504,36 +544,36 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
504 rb_insert_color(&call->conn_node, &conn->calls); 544 rb_insert_color(&call->conn_node, &conn->calls);
505 conn->channels[call->channel] = call; 545 conn->channels[call->channel] = call;
506 sock_hold(&rx->sk); 546 sock_hold(&rx->sk);
507 atomic_inc(&conn->usage); 547 rxrpc_get_connection(conn);
508 write_unlock_bh(&conn->lock); 548 write_unlock_bh(&conn->lock);
509 549
510 spin_lock(&conn->trans->peer->lock); 550 spin_lock(&conn->params.peer->lock);
511 list_add(&call->error_link, &conn->trans->peer->error_targets); 551 hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
512 spin_unlock(&conn->trans->peer->lock); 552 spin_unlock(&conn->params.peer->lock);
513 553
514 write_lock_bh(&rxrpc_call_lock); 554 write_lock_bh(&rxrpc_call_lock);
515 list_add_tail(&call->link, &rxrpc_calls); 555 list_add_tail(&call->link, &rxrpc_calls);
516 write_unlock_bh(&rxrpc_call_lock); 556 write_unlock_bh(&rxrpc_call_lock);
517 557
518 /* Record copies of information for hashtable lookup */ 558 /* Record copies of information for hashtable lookup */
519 call->proto = rx->proto; 559 call->family = rx->family;
520 call->local = conn->trans->local; 560 call->local = conn->params.local;
521 switch (call->proto) { 561 switch (call->family) {
522 case AF_INET: 562 case AF_INET:
523 call->peer_ip.ipv4_addr = 563 call->peer_ip.ipv4_addr =
524 conn->trans->peer->srx.transport.sin.sin_addr.s_addr; 564 conn->params.peer->srx.transport.sin.sin_addr.s_addr;
525 break; 565 break;
526 case AF_INET6: 566 case AF_INET6:
527 memcpy(call->peer_ip.ipv6_addr, 567 memcpy(call->peer_ip.ipv6_addr,
528 conn->trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8, 568 conn->params.peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
529 sizeof(call->peer_ip.ipv6_addr)); 569 sizeof(call->peer_ip.ipv6_addr));
530 break; 570 break;
531 default: 571 default:
532 break; 572 break;
533 } 573 }
534 call->epoch = conn->epoch; 574 call->epoch = conn->proto.epoch;
535 call->service_id = conn->service_id; 575 call->service_id = conn->params.service_id;
536 call->in_clientflag = conn->in_clientflag; 576 call->in_clientflag = conn->proto.in_clientflag;
537 /* Add the new call to the hashtable */ 577 /* Add the new call to the hashtable */
538 rxrpc_call_hash_add(call); 578 rxrpc_call_hash_add(call);
539 579
@@ -564,46 +604,6 @@ old_call:
564} 604}
565 605
566/* 606/*
567 * find an extant server call
568 * - called in process context with IRQs enabled
569 */
570struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *rx,
571 unsigned long user_call_ID)
572{
573 struct rxrpc_call *call;
574 struct rb_node *p;
575
576 _enter("%p,%lx", rx, user_call_ID);
577
578 /* search the extant calls for one that matches the specified user
579 * ID */
580 read_lock(&rx->call_lock);
581
582 p = rx->calls.rb_node;
583 while (p) {
584 call = rb_entry(p, struct rxrpc_call, sock_node);
585
586 if (user_call_ID < call->user_call_ID)
587 p = p->rb_left;
588 else if (user_call_ID > call->user_call_ID)
589 p = p->rb_right;
590 else
591 goto found_extant_call;
592 }
593
594 read_unlock(&rx->call_lock);
595 _leave(" = NULL");
596 return NULL;
597
598 /* we found the call in the list immediately */
599found_extant_call:
600 rxrpc_get_call(call);
601 read_unlock(&rx->call_lock);
602 _leave(" = %p [%d]", call, atomic_read(&call->usage));
603 return call;
604}
605
606/*
607 * detach a call from a socket and set up for release 607 * detach a call from a socket and set up for release
608 */ 608 */
609void rxrpc_release_call(struct rxrpc_call *call) 609void rxrpc_release_call(struct rxrpc_call *call)
@@ -641,41 +641,13 @@ void rxrpc_release_call(struct rxrpc_call *call)
641 write_unlock_bh(&rx->call_lock); 641 write_unlock_bh(&rx->call_lock);
642 642
643 /* free up the channel for reuse */ 643 /* free up the channel for reuse */
644 spin_lock(&conn->trans->client_lock); 644 spin_lock(&conn->channel_lock);
645 write_lock_bh(&conn->lock); 645 write_lock_bh(&conn->lock);
646 write_lock(&call->state_lock); 646 write_lock(&call->state_lock);
647 647
648 if (conn->channels[call->channel] == call) 648 rxrpc_disconnect_call(call);
649 conn->channels[call->channel] = NULL;
650
651 if (conn->out_clientflag && conn->bundle) {
652 conn->avail_calls++;
653 switch (conn->avail_calls) {
654 case 1:
655 list_move_tail(&conn->bundle_link,
656 &conn->bundle->avail_conns);
657 case 2 ... RXRPC_MAXCALLS - 1:
658 ASSERT(conn->channels[0] == NULL ||
659 conn->channels[1] == NULL ||
660 conn->channels[2] == NULL ||
661 conn->channels[3] == NULL);
662 break;
663 case RXRPC_MAXCALLS:
664 list_move_tail(&conn->bundle_link,
665 &conn->bundle->unused_conns);
666 ASSERT(conn->channels[0] == NULL &&
667 conn->channels[1] == NULL &&
668 conn->channels[2] == NULL &&
669 conn->channels[3] == NULL);
670 break;
671 default:
672 printk(KERN_ERR "RxRPC: conn->avail_calls=%d\n",
673 conn->avail_calls);
674 BUG();
675 }
676 }
677 649
678 spin_unlock(&conn->trans->client_lock); 650 spin_unlock(&conn->channel_lock);
679 651
680 if (call->state < RXRPC_CALL_COMPLETE && 652 if (call->state < RXRPC_CALL_COMPLETE &&
681 call->state != RXRPC_CALL_CLIENT_FINAL_ACK) { 653 call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
@@ -844,9 +816,9 @@ static void rxrpc_cleanup_call(struct rxrpc_call *call)
844 } 816 }
845 817
846 if (call->conn) { 818 if (call->conn) {
847 spin_lock(&call->conn->trans->peer->lock); 819 spin_lock(&call->conn->params.peer->lock);
848 list_del(&call->error_link); 820 hlist_del_init(&call->error_link);
849 spin_unlock(&call->conn->trans->peer->lock); 821 spin_unlock(&call->conn->params.peer->lock);
850 822
851 write_lock_bh(&call->conn->lock); 823 write_lock_bh(&call->conn->lock);
852 rb_erase(&call->conn_node, &call->conn->calls); 824 rb_erase(&call->conn_node, &call->conn->calls);
@@ -935,16 +907,15 @@ void __exit rxrpc_destroy_all_calls(void)
935 if (call->state != RXRPC_CALL_DEAD) 907 if (call->state != RXRPC_CALL_DEAD)
936 break; 908 break;
937 default: 909 default:
938 printk(KERN_ERR "RXRPC:" 910 pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
939 " Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
940 call, atomic_read(&call->usage), 911 call, atomic_read(&call->usage),
941 atomic_read(&call->ackr_not_idle), 912 atomic_read(&call->ackr_not_idle),
942 rxrpc_call_states[call->state], 913 rxrpc_call_states[call->state],
943 call->flags, call->events); 914 call->flags, call->events);
944 if (!skb_queue_empty(&call->rx_queue)) 915 if (!skb_queue_empty(&call->rx_queue))
945 printk(KERN_ERR"RXRPC: Rx queue occupied\n"); 916 pr_err("Rx queue occupied\n");
946 if (!skb_queue_empty(&call->rx_oos_queue)) 917 if (!skb_queue_empty(&call->rx_oos_queue))
947 printk(KERN_ERR"RXRPC: OOS queue occupied\n"); 918 pr_err("OOS queue occupied\n");
948 break; 919 break;
949 } 920 }
950 921
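Two shapes recur in the call_object.c changes: the rb-tree descent keyed on user_call_ID (now factored into rxrpc_find_call_by_user_ID()), and the new-call path that publishes the call in the socket's tree before the connection is set up, returning -EEXIST if the second, write-locked search turns up the same user ID. A compilable sketch of both walks follows, with a plain unbalanced binary tree and no locking standing in for the kernel's rb-tree and call_lock.

#include <stdio.h>

struct call {
	unsigned long user_call_ID;
	struct call *left, *right;
};

static struct call *find_call(struct call *root, unsigned long id)
{
	while (root) {
		if (id < root->user_call_ID)
			root = root->left;
		else if (id > root->user_call_ID)
			root = root->right;
		else
			return root;	/* extant call */
	}
	return NULL;
}

static int insert_call(struct call **pp, struct call *new)
{
	while (*pp) {
		if (new->user_call_ID < (*pp)->user_call_ID)
			pp = &(*pp)->left;
		else if (new->user_call_ID > (*pp)->user_call_ID)
			pp = &(*pp)->right;
		else
			return -1;	/* user ID already present: -EEXIST */
	}
	*pp = new;	/* link the new node where the search fell off */
	return 0;
}

int main(void)
{
	struct call a = { .user_call_ID = 1 }, b = { .user_call_ID = 1 };
	struct call *root = NULL;

	printf("first insert: %d\n", insert_call(&root, &a));	/* 0 */
	printf("duplicate:    %d\n", insert_call(&root, &b));	/* -1 */
	printf("lookup:       %s\n", find_call(root, 1) ? "hit" : "miss");
	return 0;
}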
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
new file mode 100644
index 000000000000..82488d6adb83
--- /dev/null
+++ b/net/rxrpc/conn_client.c
@@ -0,0 +1,94 @@
1/* Client connection-specific management code.
2 *
3 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/slab.h>
15#include <linux/idr.h>
16#include <linux/timer.h>
17#include "ar-internal.h"
18
19/*
20 * We use machine-unique IDs for our client connections.
21 */
22DEFINE_IDR(rxrpc_client_conn_ids);
23static DEFINE_SPINLOCK(rxrpc_conn_id_lock);
24
25/*
26 * Get a connection ID and epoch for a client connection from the global pool.
27 * The connection struct pointer is then recorded in the idr radix tree. The
28 * epoch is changed if this wraps.
29 *
30 * TODO: The IDR tree gets very expensive on memory if the connection IDs are
31 * widely scattered throughout the number space, so we shall need to retire
32 * connections that have, say, an ID more than four times the maximum number of
33 * client conns away from the current allocation point to try and keep the IDs
34 * concentrated. We will also need to retire connections from an old epoch.
35 */
36int rxrpc_get_client_connection_id(struct rxrpc_connection *conn, gfp_t gfp)
37{
38 u32 epoch;
39 int id;
40
41 _enter("");
42
43 idr_preload(gfp);
44 spin_lock(&rxrpc_conn_id_lock);
45
46 epoch = rxrpc_epoch;
47
48 /* We could use idr_alloc_cyclic() here, but we really need to know
49 * when the thing wraps so that we can advance the epoch.
50 */
51 if (rxrpc_client_conn_ids.cur == 0)
52 rxrpc_client_conn_ids.cur = 1;
53 id = idr_alloc(&rxrpc_client_conn_ids, conn,
54 rxrpc_client_conn_ids.cur, 0x40000000, GFP_NOWAIT);
55 if (id < 0) {
56 if (id != -ENOSPC)
57 goto error;
58 id = idr_alloc(&rxrpc_client_conn_ids, conn,
59 1, 0x40000000, GFP_NOWAIT);
60 if (id < 0)
61 goto error;
62 epoch++;
63 rxrpc_epoch = epoch;
64 }
65 rxrpc_client_conn_ids.cur = id + 1;
66
67 spin_unlock(&rxrpc_conn_id_lock);
68 idr_preload_end();
69
70 conn->proto.epoch = epoch;
71 conn->proto.cid = id << RXRPC_CIDSHIFT;
72 set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
73 _leave(" [CID %x:%x]", epoch, conn->proto.cid);
74 return 0;
75
76error:
77 spin_unlock(&rxrpc_conn_id_lock);
78 idr_preload_end();
79 _leave(" = %d", id);
80 return id;
81}
82
83/*
84 * Release a connection ID for a client connection from the global pool.
85 */
86void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
87{
88 if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
89 spin_lock(&rxrpc_conn_id_lock);
90 idr_remove(&rxrpc_client_conn_ids,
91 conn->proto.cid >> RXRPC_CIDSHIFT);
92 spin_unlock(&rxrpc_conn_id_lock);
93 }
94}
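rxrpc_get_client_connection_id() avoids idr_alloc_cyclic() precisely so it can see the wrap: when the first idr_alloc() fails with -ENOSPC, allocation restarts at 1 and the epoch is advanced, so each (epoch, cid) pair stays unique. The sketch below reproduces that two-pass shape with a bare counter in place of the IDR; the starting epoch is arbitrary and the shift into the CID field (RXRPC_CIDSHIFT) is omitted.

#include <stdio.h>

#define ID_LIMIT 0x40000000		/* mirrors the 0x40000000 ceiling above */

static unsigned int cur = 1;
static unsigned int epoch = 100;	/* arbitrary starting epoch */

static unsigned int alloc_conn_id(void)
{
	if (cur < ID_LIMIT)
		return cur++;		/* first pass: allocate upwards */
	cur = 1;			/* wrapped: restart the ID space... */
	epoch++;			/* ...and advance the epoch so old
					 * (epoch, cid) pairs stay unique */
	return cur++;
}

int main(void)
{
	cur = ID_LIMIT - 1;		/* force a wrap for the demo */
	for (int i = 0; i < 2; i++) {
		unsigned int id = alloc_conn_id();
		printf("id=%x epoch=%u\n", id, epoch);
	}
	return 0;
}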
diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/conn_event.c
index 5f9563968a5b..bf6971555eac 100644
--- a/net/rxrpc/ar-connevent.c
+++ b/net/rxrpc/conn_event.c
@@ -9,6 +9,8 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/module.h> 14#include <linux/module.h>
13#include <linux/net.h> 15#include <linux/net.h>
14#include <linux/skbuff.h> 16#include <linux/skbuff.h>
@@ -86,14 +88,14 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
86 88
87 rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code); 89 rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code);
88 90
89 msg.msg_name = &conn->trans->peer->srx.transport; 91 msg.msg_name = &conn->params.peer->srx.transport;
90 msg.msg_namelen = conn->trans->peer->srx.transport_len; 92 msg.msg_namelen = conn->params.peer->srx.transport_len;
91 msg.msg_control = NULL; 93 msg.msg_control = NULL;
92 msg.msg_controllen = 0; 94 msg.msg_controllen = 0;
93 msg.msg_flags = 0; 95 msg.msg_flags = 0;
94 96
95 whdr.epoch = htonl(conn->epoch); 97 whdr.epoch = htonl(conn->proto.epoch);
96 whdr.cid = htonl(conn->cid); 98 whdr.cid = htonl(conn->proto.cid);
97 whdr.callNumber = 0; 99 whdr.callNumber = 0;
98 whdr.seq = 0; 100 whdr.seq = 0;
99 whdr.type = RXRPC_PACKET_TYPE_ABORT; 101 whdr.type = RXRPC_PACKET_TYPE_ABORT;
@@ -101,7 +103,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
101 whdr.userStatus = 0; 103 whdr.userStatus = 0;
102 whdr.securityIndex = conn->security_ix; 104 whdr.securityIndex = conn->security_ix;
103 whdr._rsvd = 0; 105 whdr._rsvd = 0;
104 whdr.serviceId = htons(conn->service_id); 106 whdr.serviceId = htons(conn->params.service_id);
105 107
106 word = htonl(conn->local_abort); 108 word = htonl(conn->local_abort);
107 109
@@ -116,7 +118,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
116 whdr.serial = htonl(serial); 118 whdr.serial = htonl(serial);
117 _proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort); 119 _proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort);
118 120
119 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); 121 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
120 if (ret < 0) { 122 if (ret < 0) {
121 _debug("sendmsg failed: %d", ret); 123 _debug("sendmsg failed: %d", ret);
122 return -EAGAIN; 124 return -EAGAIN;
@@ -218,7 +220,7 @@ static void rxrpc_secure_connection(struct rxrpc_connection *conn)
218 220
219 ASSERT(conn->security_ix != 0); 221 ASSERT(conn->security_ix != 0);
220 222
221 if (!conn->key) { 223 if (!conn->params.key) {
222 _debug("set up security"); 224 _debug("set up security");
223 ret = rxrpc_init_server_conn_security(conn); 225 ret = rxrpc_init_server_conn_security(conn);
224 switch (ret) { 226 switch (ret) {
@@ -261,7 +263,7 @@ void rxrpc_process_connection(struct work_struct *work)
261 263
262 _enter("{%d}", conn->debug_id); 264 _enter("{%d}", conn->debug_id);
263 265
264 atomic_inc(&conn->usage); 266 rxrpc_get_connection(conn);
265 267
266 if (test_and_clear_bit(RXRPC_CONN_CHALLENGE, &conn->events)) { 268 if (test_and_clear_bit(RXRPC_CONN_CHALLENGE, &conn->events)) {
267 rxrpc_secure_connection(conn); 269 rxrpc_secure_connection(conn);
@@ -312,19 +314,14 @@ void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
312{ 314{
313 CHECK_SLAB_OKAY(&local->usage); 315 CHECK_SLAB_OKAY(&local->usage);
314 316
315 if (!atomic_inc_not_zero(&local->usage)) {
316 printk("resurrected on reject\n");
317 BUG();
318 }
319
320 skb_queue_tail(&local->reject_queue, skb); 317 skb_queue_tail(&local->reject_queue, skb);
321 rxrpc_queue_work(&local->rejecter); 318 rxrpc_queue_work(&local->processor);
322} 319}
323 320
324/* 321/*
325 * reject packets through the local endpoint 322 * reject packets through the local endpoint
326 */ 323 */
327void rxrpc_reject_packets(struct work_struct *work) 324void rxrpc_reject_packets(struct rxrpc_local *local)
328{ 325{
329 union { 326 union {
330 struct sockaddr sa; 327 struct sockaddr sa;
@@ -332,16 +329,12 @@ void rxrpc_reject_packets(struct work_struct *work)
332 } sa; 329 } sa;
333 struct rxrpc_skb_priv *sp; 330 struct rxrpc_skb_priv *sp;
334 struct rxrpc_wire_header whdr; 331 struct rxrpc_wire_header whdr;
335 struct rxrpc_local *local;
336 struct sk_buff *skb; 332 struct sk_buff *skb;
337 struct msghdr msg; 333 struct msghdr msg;
338 struct kvec iov[2]; 334 struct kvec iov[2];
339 size_t size; 335 size_t size;
340 __be32 code; 336 __be32 code;
341 337
342 local = container_of(work, struct rxrpc_local, rejecter);
343 rxrpc_get_local(local);
344
345 _enter("%d", local->debug_id); 338 _enter("%d", local->debug_id);
346 339
347 iov[0].iov_base = &whdr; 340 iov[0].iov_base = &whdr;
@@ -393,9 +386,7 @@ void rxrpc_reject_packets(struct work_struct *work)
393 } 386 }
394 387
395 rxrpc_free_skb(skb); 388 rxrpc_free_skb(skb);
396 rxrpc_put_local(local);
397 } 389 }
398 390
399 rxrpc_put_local(local);
400 _leave(""); 391 _leave("");
401} 392}
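The conn_event.c abort path gathers a wire header and a single network-order abort code into one datagram using kernel_sendmsg() with a two-element kvec. The userspace analogue below shows the same gather-write with sendmsg(); the header layout is abbreviated and the port is invented for the demo — only the iovec shape mirrors the kernel code.

#include <stdint.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

struct wire_header {	/* abbreviated stand-in for rxrpc_wire_header */
	uint32_t epoch, cid, callNumber, serial;
	uint8_t type;
};

static ssize_t send_abort(int fd, struct sockaddr_in *peer,
			  struct wire_header *whdr, uint32_t abort_code)
{
	uint32_t word = htonl(abort_code);	/* payload: the abort code */
	struct iovec iov[2] = {
		{ .iov_base = whdr, .iov_len = sizeof(*whdr) },
		{ .iov_base = &word, .iov_len = sizeof(word) },
	};
	struct msghdr msg = {
		.msg_name = peer,
		.msg_namelen = sizeof(*peer),
		.msg_iov = iov,
		.msg_iovlen = 2,
	};
	return sendmsg(fd, &msg, 0);	/* header + code in one datagram */
}

int main(void)
{
	struct sockaddr_in peer = {
		.sin_family = AF_INET,
		.sin_port = htons(7001),	/* invented port, demo only */
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	struct wire_header whdr = { .serial = htonl(1), .type = 4 /* ABORT */ };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	return send_abort(fd, &peer, &whdr, 1) < 0;
}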
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
new file mode 100644
index 000000000000..4bfad7cf96cb
--- /dev/null
+++ b/net/rxrpc/conn_object.c
@@ -0,0 +1,686 @@
1/* RxRPC virtual connection handler
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/module.h>
15#include <linux/slab.h>
16#include <linux/net.h>
17#include <linux/skbuff.h>
18#include <linux/crypto.h>
19#include <net/sock.h>
20#include <net/af_rxrpc.h>
21#include "ar-internal.h"
22
23/*
24 * Time till a connection expires after last use (in seconds).
25 */
26unsigned int rxrpc_connection_expiry = 10 * 60;
27
28static void rxrpc_connection_reaper(struct work_struct *work);
29
30LIST_HEAD(rxrpc_connections);
31DEFINE_RWLOCK(rxrpc_connection_lock);
32static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
33
34/*
35 * allocate a new connection
36 */
37static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
38{
39 struct rxrpc_connection *conn;
40
41 _enter("");
42
43 conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
44 if (conn) {
45 spin_lock_init(&conn->channel_lock);
46 init_waitqueue_head(&conn->channel_wq);
47 INIT_WORK(&conn->processor, &rxrpc_process_connection);
48 INIT_LIST_HEAD(&conn->link);
49 conn->calls = RB_ROOT;
50 skb_queue_head_init(&conn->rx_queue);
51 conn->security = &rxrpc_no_security;
52 rwlock_init(&conn->lock);
53 spin_lock_init(&conn->state_lock);
54 atomic_set(&conn->usage, 1);
55 conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
56 atomic_set(&conn->avail_chans, RXRPC_MAXCALLS);
57 conn->size_align = 4;
58 conn->header_size = sizeof(struct rxrpc_wire_header);
59 }
60
61 _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
62 return conn;
63}
64
65/*
66 * add a call to a connection's call-by-ID tree
67 */
68static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn,
69 struct rxrpc_call *call)
70{
71 struct rxrpc_call *xcall;
72 struct rb_node *parent, **p;
73 __be32 call_id;
74
75 write_lock_bh(&conn->lock);
76
77 call_id = call->call_id;
78 p = &conn->calls.rb_node;
79 parent = NULL;
80 while (*p) {
81 parent = *p;
82 xcall = rb_entry(parent, struct rxrpc_call, conn_node);
83
84 if (call_id < xcall->call_id)
85 p = &(*p)->rb_left;
86 else if (call_id > xcall->call_id)
87 p = &(*p)->rb_right;
88 else
89 BUG();
90 }
91
92 rb_link_node(&call->conn_node, parent, p);
93 rb_insert_color(&call->conn_node, &conn->calls);
94
95 write_unlock_bh(&conn->lock);
96}
97
98/*
99 * Allocate a client connection. The caller must take care to clear any
100 * padding bytes in *cp.
101 */
102static struct rxrpc_connection *
103rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
104{
105 struct rxrpc_connection *conn;
106 int ret;
107
108 _enter("");
109
110 conn = rxrpc_alloc_connection(gfp);
111 if (!conn) {
112 _leave(" = -ENOMEM");
113 return ERR_PTR(-ENOMEM);
114 }
115
116 conn->params = *cp;
117 conn->proto.local = cp->local;
118 conn->proto.epoch = rxrpc_epoch;
119 conn->proto.cid = 0;
120 conn->proto.in_clientflag = 0;
121 conn->proto.family = cp->peer->srx.transport.family;
122 conn->out_clientflag = RXRPC_CLIENT_INITIATED;
123 conn->state = RXRPC_CONN_CLIENT;
124
125 switch (conn->proto.family) {
126 case AF_INET:
127 conn->proto.addr_size = sizeof(conn->proto.ipv4_addr);
128 conn->proto.ipv4_addr = cp->peer->srx.transport.sin.sin_addr;
129 conn->proto.port = cp->peer->srx.transport.sin.sin_port;
130 break;
131 }
132
133 ret = rxrpc_get_client_connection_id(conn, gfp);
134 if (ret < 0)
135 goto error_0;
136
137 ret = rxrpc_init_client_conn_security(conn);
138 if (ret < 0)
139 goto error_1;
140
141 conn->security->prime_packet_security(conn);
142
143 write_lock(&rxrpc_connection_lock);
144 list_add_tail(&conn->link, &rxrpc_connections);
145 write_unlock(&rxrpc_connection_lock);
146
147 /* We steal the caller's peer ref. */
148 cp->peer = NULL;
149 rxrpc_get_local(conn->params.local);
150 key_get(conn->params.key);
151
152 _leave(" = %p", conn);
153 return conn;
154
155error_1:
156 rxrpc_put_client_connection_id(conn);
157error_0:
158 kfree(conn);
159 _leave(" = %d", ret);
160 return ERR_PTR(ret);
161}
162
163/*
164 * find a connection for a call
165 * - called in process context with IRQs enabled
166 */
167int rxrpc_connect_call(struct rxrpc_call *call,
168 struct rxrpc_conn_parameters *cp,
169 struct sockaddr_rxrpc *srx,
170 gfp_t gfp)
171{
172 struct rxrpc_connection *conn, *candidate = NULL;
173 struct rxrpc_local *local = cp->local;
174 struct rb_node *p, **pp, *parent;
175 long diff;
176 int chan;
177
178 DECLARE_WAITQUEUE(myself, current);
179
180 _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
181
182 cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
183 if (!cp->peer)
184 return -ENOMEM;
185
186 if (!cp->exclusive) {
187 /* Search for an existing client connection unless this is going
188 * to be a connection that's used exclusively for a single call.
189 */
190 _debug("search 1");
191 spin_lock(&local->client_conns_lock);
192 p = local->client_conns.rb_node;
193 while (p) {
194 conn = rb_entry(p, struct rxrpc_connection, client_node);
195
196#define cmp(X) ((long)conn->params.X - (long)cp->X)
197 diff = (cmp(peer) ?:
198 cmp(key) ?:
199 cmp(security_level));
200 if (diff < 0)
201 p = p->rb_left;
202 else if (diff > 0)
203 p = p->rb_right;
204 else
205 goto found_extant_conn;
206 }
207 spin_unlock(&local->client_conns_lock);
208 }
209
210 /* We didn't find a connection or we want an exclusive one. */
211 _debug("get new conn");
212 candidate = rxrpc_alloc_client_connection(cp, gfp);
213 if (!candidate) {
214 _leave(" = -ENOMEM");
215 return -ENOMEM;
216 }
217
218 if (cp->exclusive) {
219 /* Assign the call on an exclusive connection to channel 0 and
220 * don't add the connection to the endpoint's shareable conn
221 * lookup tree.
222 */
223 _debug("exclusive chan 0");
224 conn = candidate;
225 atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
226 spin_lock(&conn->channel_lock);
227 chan = 0;
228 goto found_channel;
229 }
230
231 /* We need to redo the search before attempting to add a new connection
232 * lest we race with someone else adding a conflicting instance.
233 */
234 _debug("search 2");
235 spin_lock(&local->client_conns_lock);
236
237 pp = &local->client_conns.rb_node;
238 parent = NULL;
239 while (*pp) {
240 parent = *pp;
241 conn = rb_entry(parent, struct rxrpc_connection, client_node);
242
243 diff = (cmp(peer) ?:
244 cmp(key) ?:
245 cmp(security_level));
246 if (diff < 0)
247 pp = &(*pp)->rb_left;
248 else if (diff > 0)
249 pp = &(*pp)->rb_right;
250 else
251 goto found_extant_conn;
252 }
253
254 /* The second search also failed; simply add the new connection with
255 * the new call in channel 0. Note that we need to take the channel
256 * lock before dropping the client conn lock.
257 */
258 _debug("new conn");
259 conn = candidate;
260 candidate = NULL;
261
262 rb_link_node(&conn->client_node, parent, pp);
263 rb_insert_color(&conn->client_node, &local->client_conns);
264
265 atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
266 spin_lock(&conn->channel_lock);
267 spin_unlock(&local->client_conns_lock);
268 chan = 0;
269
270found_channel:
271 _debug("found chan");
272 call->conn = conn;
273 call->channel = chan;
274 call->epoch = conn->proto.epoch;
275 call->cid = conn->proto.cid | chan;
276 call->call_id = ++conn->call_counter;
277 rcu_assign_pointer(conn->channels[chan], call);
278
279 _net("CONNECT call %d on conn %d", call->debug_id, conn->debug_id);
280
281 rxrpc_add_call_ID_to_conn(conn, call);
282 spin_unlock(&conn->channel_lock);
283 rxrpc_put_peer(cp->peer);
284 cp->peer = NULL;
285 _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
286 return 0;
287
288 /* We found a suitable connection already in existence. Discard any
289 * candidate we may have allocated, and try to get a channel on this
290 * one.
291 */
292found_extant_conn:
293 _debug("found conn");
294 rxrpc_get_connection(conn);
295 spin_unlock(&local->client_conns_lock);
296
297 rxrpc_put_connection(candidate);
298
299 if (!atomic_add_unless(&conn->avail_chans, -1, 0)) {
300 if (!gfpflags_allow_blocking(gfp)) {
301 rxrpc_put_connection(conn);
302 _leave(" = -EAGAIN");
303 return -EAGAIN;
304 }
305
306 add_wait_queue(&conn->channel_wq, &myself);
307 for (;;) {
308 set_current_state(TASK_INTERRUPTIBLE);
309 if (atomic_add_unless(&conn->avail_chans, -1, 0))
310 break;
311 if (signal_pending(current))
312 goto interrupted;
313 schedule();
314 }
315 remove_wait_queue(&conn->channel_wq, &myself);
316 __set_current_state(TASK_RUNNING);
317 }
318
319 /* The connection allegedly now has a free channel and we can now
320 * attach the call to it.
321 */
322 spin_lock(&conn->channel_lock);
323
324 for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
325 if (!conn->channels[chan])
326 goto found_channel;
327 BUG();
328
329interrupted:
330 remove_wait_queue(&conn->channel_wq, &myself);
331 __set_current_state(TASK_RUNNING);
332 rxrpc_put_connection(conn);
333 rxrpc_put_peer(cp->peer);
334 cp->peer = NULL;
335 _leave(" = -ERESTARTSYS");
336 return -ERESTARTSYS;
337}
338
339/*
340 * get a record of an incoming connection
341 */
342struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_local *local,
343 struct rxrpc_peer *peer,
344 struct sk_buff *skb)
345{
346 struct rxrpc_connection *conn, *candidate = NULL;
347 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
348 struct rb_node *p, **pp;
349 const char *new = "old";
350 __be32 epoch;
351 u32 cid;
352
353 _enter("");
354
355 ASSERT(sp->hdr.flags & RXRPC_CLIENT_INITIATED);
356
357 epoch = sp->hdr.epoch;
358 cid = sp->hdr.cid & RXRPC_CIDMASK;
359
360 /* search the connection list first */
361 read_lock_bh(&peer->conn_lock);
362
363 p = peer->service_conns.rb_node;
364 while (p) {
365 conn = rb_entry(p, struct rxrpc_connection, service_node);
366
367 _debug("maybe %x", conn->proto.cid);
368
369 if (epoch < conn->proto.epoch)
370 p = p->rb_left;
371 else if (epoch > conn->proto.epoch)
372 p = p->rb_right;
373 else if (cid < conn->proto.cid)
374 p = p->rb_left;
375 else if (cid > conn->proto.cid)
376 p = p->rb_right;
377 else
378 goto found_extant_connection;
379 }
380 read_unlock_bh(&peer->conn_lock);
381
382 /* not yet present - create a candidate for a new record and then
383 * redo the search */
384 candidate = rxrpc_alloc_connection(GFP_NOIO);
385 if (!candidate) {
386 _leave(" = -ENOMEM");
387 return ERR_PTR(-ENOMEM);
388 }
389
390 candidate->proto.local = local;
391 candidate->proto.epoch = sp->hdr.epoch;
392 candidate->proto.cid = sp->hdr.cid & RXRPC_CIDMASK;
393 candidate->proto.in_clientflag = RXRPC_CLIENT_INITIATED;
394 candidate->params.local = local;
395 candidate->params.peer = peer;
396 candidate->params.service_id = sp->hdr.serviceId;
397 candidate->security_ix = sp->hdr.securityIndex;
398 candidate->out_clientflag = 0;
399 candidate->state = RXRPC_CONN_SERVER;
400 if (candidate->params.service_id)
401 candidate->state = RXRPC_CONN_SERVER_UNSECURED;
402
403 write_lock_bh(&peer->conn_lock);
404
405 pp = &peer->service_conns.rb_node;
406 p = NULL;
407 while (*pp) {
408 p = *pp;
409 conn = rb_entry(p, struct rxrpc_connection, service_node);
410
411 if (epoch < conn->proto.epoch)
412 pp = &(*pp)->rb_left;
413 else if (epoch > conn->proto.epoch)
414 pp = &(*pp)->rb_right;
415 else if (cid < conn->proto.cid)
416 pp = &(*pp)->rb_left;
417 else if (cid > conn->proto.cid)
418 pp = &(*pp)->rb_right;
419 else
420 goto found_extant_second;
421 }
422
423 /* we can now add the new candidate to the list */
424 conn = candidate;
425 candidate = NULL;
426 rb_link_node(&conn->service_node, p, pp);
427 rb_insert_color(&conn->service_node, &peer->service_conns);
428 rxrpc_get_peer(peer);
429 rxrpc_get_local(local);
430
431 write_unlock_bh(&peer->conn_lock);
432
433 write_lock(&rxrpc_connection_lock);
434 list_add_tail(&conn->link, &rxrpc_connections);
435 write_unlock(&rxrpc_connection_lock);
436
437 new = "new";
438
439success:
440 _net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->proto.cid);
441
442 _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
443 return conn;
444
445 /* we found the connection in the list immediately */
446found_extant_connection:
447 if (sp->hdr.securityIndex != conn->security_ix) {
448 read_unlock_bh(&peer->conn_lock);
449 goto security_mismatch;
450 }
451 rxrpc_get_connection(conn);
452 read_unlock_bh(&peer->conn_lock);
453 goto success;
454
455 /* we found the connection on the second time through the list */
456found_extant_second:
457 if (sp->hdr.securityIndex != conn->security_ix) {
458 write_unlock_bh(&peer->conn_lock);
459 goto security_mismatch;
460 }
461 rxrpc_get_connection(conn);
462 write_unlock_bh(&peer->conn_lock);
463 kfree(candidate);
464 goto success;
465
466security_mismatch:
467 kfree(candidate);
468 _leave(" = -EKEYREJECTED");
469 return ERR_PTR(-EKEYREJECTED);
470}
471
472/*
473 * find a connection based on transport and RxRPC connection ID for an incoming
474 * packet
475 */
476struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_local *local,
477 struct rxrpc_peer *peer,
478 struct sk_buff *skb)
479{
480 struct rxrpc_connection *conn;
481 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
482 struct rb_node *p;
483 u32 epoch, cid;
484
485 _enter(",{%x,%x}", sp->hdr.cid, sp->hdr.flags);
486
487 read_lock_bh(&peer->conn_lock);
488
489 cid = sp->hdr.cid & RXRPC_CIDMASK;
490 epoch = sp->hdr.epoch;
491
492 if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
493 p = peer->service_conns.rb_node;
494 while (p) {
495 conn = rb_entry(p, struct rxrpc_connection, service_node);
496
497 _debug("maybe %x", conn->proto.cid);
498
499 if (epoch < conn->proto.epoch)
500 p = p->rb_left;
501 else if (epoch > conn->proto.epoch)
502 p = p->rb_right;
503 else if (cid < conn->proto.cid)
504 p = p->rb_left;
505 else if (cid > conn->proto.cid)
506 p = p->rb_right;
507 else
508 goto found;
509 }
510 } else {
511 conn = idr_find(&rxrpc_client_conn_ids, cid >> RXRPC_CIDSHIFT);
512 if (conn && conn->proto.epoch == epoch)
513 goto found;
514 }
515
516 read_unlock_bh(&peer->conn_lock);
517 _leave(" = NULL");
518 return NULL;
519
520found:
521 rxrpc_get_connection(conn);
522 read_unlock_bh(&peer->conn_lock);
523 _leave(" = %p", conn);
524 return conn;
525}
526
527/*
528 * Disconnect a call and clear any channel it occupies when that call
529 * terminates.
530 */
531void rxrpc_disconnect_call(struct rxrpc_call *call)
532{
533 struct rxrpc_connection *conn = call->conn;
534 unsigned chan = call->channel;
535
536 _enter("%d,%d", conn->debug_id, call->channel);
537
538 if (conn->channels[chan] == call) {
539 rcu_assign_pointer(conn->channels[chan], NULL);
540 atomic_inc(&conn->avail_chans);
541 wake_up(&conn->channel_wq);
542 }
543}
544
545/*
546 * release a virtual connection
547 */
548void rxrpc_put_connection(struct rxrpc_connection *conn)
549{
550 if (!conn)
551 return;
552
553 _enter("%p{u=%d,d=%d}",
554 conn, atomic_read(&conn->usage), conn->debug_id);
555
556 ASSERTCMP(atomic_read(&conn->usage), >, 0);
557
558 conn->put_time = ktime_get_seconds();
559 if (atomic_dec_and_test(&conn->usage)) {
560 _debug("zombie");
561 rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
562 }
563
564 _leave("");
565}
566
567/*
568 * destroy a virtual connection
569 */
570static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
571{
572 _enter("%p{%d}", conn, atomic_read(&conn->usage));
573
574 ASSERTCMP(atomic_read(&conn->usage), ==, 0);
575
576 _net("DESTROY CONN %d", conn->debug_id);
577
578 ASSERT(RB_EMPTY_ROOT(&conn->calls));
579 rxrpc_purge_queue(&conn->rx_queue);
580
581 conn->security->clear(conn);
582 key_put(conn->params.key);
583 key_put(conn->server_key);
584 rxrpc_put_peer(conn->params.peer);
585 rxrpc_put_local(conn->params.local);
586
587 kfree(conn);
588 _leave("");
589}
590
591/*
592 * reap dead connections
593 */
594static void rxrpc_connection_reaper(struct work_struct *work)
595{
596 struct rxrpc_connection *conn, *_p;
597 struct rxrpc_peer *peer;
598 unsigned long now, earliest, reap_time;
599
600 LIST_HEAD(graveyard);
601
602 _enter("");
603
604 now = ktime_get_seconds();
605 earliest = ULONG_MAX;
606
607 write_lock(&rxrpc_connection_lock);
608 list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
609 _debug("reap CONN %d { u=%d,t=%ld }",
610 conn->debug_id, atomic_read(&conn->usage),
611 (long) now - (long) conn->put_time);
612
613 if (likely(atomic_read(&conn->usage) > 0))
614 continue;
615
616 if (rxrpc_conn_is_client(conn)) {
617 struct rxrpc_local *local = conn->params.local;
618 spin_lock(&local->client_conns_lock);
619 reap_time = conn->put_time + rxrpc_connection_expiry;
620
621 if (atomic_read(&conn->usage) > 0) {
622 ;
623 } else if (reap_time <= now) {
624 list_move_tail(&conn->link, &graveyard);
625 rxrpc_put_client_connection_id(conn);
626 rb_erase(&conn->client_node,
627 &local->client_conns);
628 } else if (reap_time < earliest) {
629 earliest = reap_time;
630 }
631
632 spin_unlock(&local->client_conns_lock);
633 } else {
634 peer = conn->params.peer;
635 write_lock_bh(&peer->conn_lock);
636 reap_time = conn->put_time + rxrpc_connection_expiry;
637
638 if (atomic_read(&conn->usage) > 0) {
639 ;
640 } else if (reap_time <= now) {
641 list_move_tail(&conn->link, &graveyard);
642 rb_erase(&conn->service_node,
643 &peer->service_conns);
644 } else if (reap_time < earliest) {
645 earliest = reap_time;
646 }
647
648 write_unlock_bh(&peer->conn_lock);
649 }
650 }
651 write_unlock(&rxrpc_connection_lock);
652
653 if (earliest != ULONG_MAX) {
654 _debug("reschedule reaper %ld", (long) earliest - now);
655 ASSERTCMP(earliest, >, now);
656 rxrpc_queue_delayed_work(&rxrpc_connection_reap,
657 (earliest - now) * HZ);
658 }
659
660 /* then destroy all those pulled out */
661 while (!list_empty(&graveyard)) {
662 conn = list_entry(graveyard.next, struct rxrpc_connection,
663 link);
664 list_del_init(&conn->link);
665
666 ASSERTCMP(atomic_read(&conn->usage), ==, 0);
667 rxrpc_destroy_connection(conn);
668 }
669
670 _leave("");
671}
672
673/*
674 * preemptively destroy all the connection records rather than waiting for them
675 * to time out
676 */
677void __exit rxrpc_destroy_all_connections(void)
678{
679 _enter("");
680
681 rxrpc_connection_expiry = 0;
682 cancel_delayed_work(&rxrpc_connection_reap);
683 rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
684
685 _leave("");
686}
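The connection reaper keys off put_time: once a connection's usage count reaches zero it becomes reapable rxrpc_connection_expiry seconds later, and a single scan both moves overdue entries to a graveyard and records the earliest future deadline so the delayed work can be re-queued for (earliest - now) * HZ. A self-contained sketch of that arithmetic, with an array and a flag standing in for the kernel's lists, locks and refcounts:

#include <stdio.h>
#include <limits.h>

#define EXPIRY 600		/* rxrpc_connection_expiry, in seconds */

struct conn { int usage; long put_time; int reaped; };

static long reap(struct conn *conns, int n, long now)
{
	long earliest = LONG_MAX;

	for (int i = 0; i < n; i++) {
		long reap_time = conns[i].put_time + EXPIRY;

		if (conns[i].usage > 0 || conns[i].reaped)
			continue;		/* busy, or already reaped */
		if (reap_time <= now)
			conns[i].reaped = 1;	/* move to the graveyard */
		else if (reap_time < earliest)
			earliest = reap_time;	/* next deadline to wait for */
	}
	return earliest;	/* LONG_MAX: nothing left to reap */
}

int main(void)
{
	struct conn conns[] = {
		{ .usage = 1, .put_time = 0 },		/* in use: skipped */
		{ .usage = 0, .put_time = 100 },	/* overdue at t=1000 */
		{ .usage = 0, .put_time = 900 },	/* due at t=1500 */
	};
	long next = reap(conns, 3, 1000);

	printf("reaped[1]=%d, reschedule in %lds\n",
	       conns[1].reaped, next - 1000);	/* reaped[1]=1, 500s */
	return 0;
}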
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/input.c
index 6ff97412a0bb..f4bd57b77b93 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/input.c
@@ -9,6 +9,8 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/module.h> 14#include <linux/module.h>
13#include <linux/net.h> 15#include <linux/net.h>
14#include <linux/skbuff.h> 16#include <linux/skbuff.h>
@@ -358,7 +360,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
358 case RXRPC_PACKET_TYPE_BUSY: 360 case RXRPC_PACKET_TYPE_BUSY:
359 _proto("Rx BUSY %%%u", sp->hdr.serial); 361 _proto("Rx BUSY %%%u", sp->hdr.serial);
360 362
361 if (call->conn->out_clientflag) 363 if (rxrpc_conn_is_service(call->conn))
362 goto protocol_error; 364 goto protocol_error;
363 365
364 write_lock_bh(&call->state_lock); 366 write_lock_bh(&call->state_lock);
@@ -531,7 +533,7 @@ static void rxrpc_post_packet_to_call(struct rxrpc_call *call,
531 case RXRPC_CALL_COMPLETE: 533 case RXRPC_CALL_COMPLETE:
532 case RXRPC_CALL_CLIENT_FINAL_ACK: 534 case RXRPC_CALL_CLIENT_FINAL_ACK:
533 /* complete server call */ 535 /* complete server call */
534 if (call->conn->in_clientflag) 536 if (rxrpc_conn_is_service(call->conn))
535 goto dead_call; 537 goto dead_call;
536 /* resend last packet of a completed call */ 538 /* resend last packet of a completed call */
537 _debug("final ack again"); 539 _debug("final ack again");
@@ -558,7 +560,7 @@ static void rxrpc_post_packet_to_call(struct rxrpc_call *call,
558dead_call: 560dead_call:
559 if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) { 561 if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
560 skb->priority = RX_CALL_DEAD; 562 skb->priority = RX_CALL_DEAD;
561 rxrpc_reject_packet(call->conn->trans->local, skb); 563 rxrpc_reject_packet(call->conn->params.local, skb);
562 goto unlock; 564 goto unlock;
563 } 565 }
564free_unlock: 566free_unlock:
@@ -578,7 +580,7 @@ static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
578{ 580{
579 _enter("%p,%p", conn, skb); 581 _enter("%p,%p", conn, skb);
580 582
581 atomic_inc(&conn->usage); 583 rxrpc_get_connection(conn);
582 skb_queue_tail(&conn->rx_queue, skb); 584 skb_queue_tail(&conn->rx_queue, skb);
583 rxrpc_queue_conn(conn); 585 rxrpc_queue_conn(conn);
584} 586}
@@ -592,9 +594,8 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
592{ 594{
593 _enter("%p,%p", local, skb); 595 _enter("%p,%p", local, skb);
594 596
595 atomic_inc(&local->usage);
596 skb_queue_tail(&local->event_queue, skb); 597 skb_queue_tail(&local->event_queue, skb);
597 rxrpc_queue_work(&local->event_processor); 598 rxrpc_queue_work(&local->processor);
598} 599}
599 600
600/* 601/*
@@ -627,29 +628,27 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
627} 628}
628 629
629static struct rxrpc_connection *rxrpc_conn_from_local(struct rxrpc_local *local, 630static struct rxrpc_connection *rxrpc_conn_from_local(struct rxrpc_local *local,
630 struct sk_buff *skb, 631 struct sk_buff *skb)
631 struct rxrpc_skb_priv *sp)
632{ 632{
633 struct rxrpc_peer *peer; 633 struct rxrpc_peer *peer;
634 struct rxrpc_transport *trans;
635 struct rxrpc_connection *conn; 634 struct rxrpc_connection *conn;
635 struct sockaddr_rxrpc srx;
636 636
637 peer = rxrpc_find_peer(local, ip_hdr(skb)->saddr, 637 rxrpc_get_addr_from_skb(local, skb, &srx);
638 udp_hdr(skb)->source); 638 rcu_read_lock();
639 if (IS_ERR(peer)) 639 peer = rxrpc_lookup_peer_rcu(local, &srx);
640 goto cant_find_conn; 640 if (!peer)
641 641 goto cant_find_peer;
642 trans = rxrpc_find_transport(local, peer);
643 rxrpc_put_peer(peer);
644 if (!trans)
645 goto cant_find_conn;
646 642
647 conn = rxrpc_find_connection(trans, &sp->hdr); 643 conn = rxrpc_find_connection(local, peer, skb);
648 rxrpc_put_transport(trans); 644 rcu_read_unlock();
649 if (!conn) 645 if (!conn)
650 goto cant_find_conn; 646 goto cant_find_conn;
651 647
652 return conn; 648 return conn;
649
650cant_find_peer:
651 rcu_read_unlock();
653cant_find_conn: 652cant_find_conn:
654 return NULL; 653 return NULL;
655} 654}
@@ -657,11 +656,15 @@ cant_find_conn:
657/* 656/*
658 * handle data received on the local endpoint 657 * handle data received on the local endpoint
659 * - may be called in interrupt context 658 * - may be called in interrupt context
659 *
660 * The socket is locked by the caller and this prevents the socket from being
661 * shut down and the local endpoint from going away, thus sk_user_data will not
662 * be cleared until this function returns.
660 */ 663 */
661void rxrpc_data_ready(struct sock *sk) 664void rxrpc_data_ready(struct sock *sk)
662{ 665{
663 struct rxrpc_skb_priv *sp; 666 struct rxrpc_skb_priv *sp;
664 struct rxrpc_local *local; 667 struct rxrpc_local *local = sk->sk_user_data;
665 struct sk_buff *skb; 668 struct sk_buff *skb;
666 int ret; 669 int ret;
667 670
@@ -669,21 +672,8 @@ void rxrpc_data_ready(struct sock *sk)
669 672
670 ASSERT(!irqs_disabled()); 673 ASSERT(!irqs_disabled());
671 674
672 read_lock_bh(&rxrpc_local_lock);
673 local = sk->sk_user_data;
674 if (local && atomic_read(&local->usage) > 0)
675 rxrpc_get_local(local);
676 else
677 local = NULL;
678 read_unlock_bh(&rxrpc_local_lock);
679 if (!local) {
680 _leave(" [local dead]");
681 return;
682 }
683
684 skb = skb_recv_datagram(sk, 0, 1, &ret); 675 skb = skb_recv_datagram(sk, 0, 1, &ret);
685 if (!skb) { 676 if (!skb) {
686 rxrpc_put_local(local);
687 if (ret == -EAGAIN) 677 if (ret == -EAGAIN)
688 return; 678 return;
689 _debug("UDP socket error %d", ret); 679 _debug("UDP socket error %d", ret);
@@ -697,7 +687,6 @@ void rxrpc_data_ready(struct sock *sk)
697 /* we'll probably need to checksum it (didn't call sock_recvmsg) */ 687 /* we'll probably need to checksum it (didn't call sock_recvmsg) */
698 if (skb_checksum_complete(skb)) { 688 if (skb_checksum_complete(skb)) {
699 rxrpc_free_skb(skb); 689 rxrpc_free_skb(skb);
700 rxrpc_put_local(local);
701 __UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0); 690 __UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
702 _leave(" [CSUM failed]"); 691 _leave(" [CSUM failed]");
703 return; 692 return;
@@ -732,7 +721,7 @@ void rxrpc_data_ready(struct sock *sk)
732 rxrpc_post_packet_to_local(local, skb); 721 rxrpc_post_packet_to_local(local, skb);
733 goto out; 722 goto out;
734 } 723 }
735 724
736 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA && 725 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
737 (sp->hdr.callNumber == 0 || sp->hdr.seq == 0)) 726 (sp->hdr.callNumber == 0 || sp->hdr.seq == 0))
738 goto bad_message; 727 goto bad_message;
@@ -743,7 +732,7 @@ void rxrpc_data_ready(struct sock *sk)
743 * old-fashioned way doesn't really hurt */ 732 * old-fashioned way doesn't really hurt */
744 struct rxrpc_connection *conn; 733 struct rxrpc_connection *conn;
745 734
746 conn = rxrpc_conn_from_local(local, skb, sp); 735 conn = rxrpc_conn_from_local(local, skb);
747 if (!conn) 736 if (!conn)
748 goto cant_route_call; 737 goto cant_route_call;
749 738
@@ -762,7 +751,6 @@ void rxrpc_data_ready(struct sock *sk)
762 } 751 }
763 752
764out: 753out:
765 rxrpc_put_local(local);
766 return; 754 return;
767 755
768cant_route_call: 756cant_route_call:
@@ -772,8 +760,7 @@ cant_route_call:
772 if (sp->hdr.seq == 1) { 760 if (sp->hdr.seq == 1) {
773 _debug("first packet"); 761 _debug("first packet");
774 skb_queue_tail(&local->accept_queue, skb); 762 skb_queue_tail(&local->accept_queue, skb);
775 rxrpc_queue_work(&local->acceptor); 763 rxrpc_queue_work(&local->processor);
776 rxrpc_put_local(local);
777 _leave(" [incoming]"); 764 _leave(" [incoming]");
778 return; 765 return;
779 } 766 }
@@ -786,13 +773,11 @@ cant_route_call:
786 _debug("reject type %d",sp->hdr.type); 773 _debug("reject type %d",sp->hdr.type);
787 rxrpc_reject_packet(local, skb); 774 rxrpc_reject_packet(local, skb);
788 } 775 }
789 rxrpc_put_local(local);
790 _leave(" [no call]"); 776 _leave(" [no call]");
791 return; 777 return;
792 778
793bad_message: 779bad_message:
794 skb->priority = RX_PROTOCOL_ERROR; 780 skb->priority = RX_PROTOCOL_ERROR;
795 rxrpc_reject_packet(local, skb); 781 rxrpc_reject_packet(local, skb);
796 rxrpc_put_local(local);
797 _leave(" [badmsg]"); 782 _leave(" [badmsg]");
798} 783}
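
The largest simplification in input.c is in rxrpc_data_ready(): because the caller now holds the socket lock, sk_user_data is pinned for the duration of the callback and the old read_lock/get/put dance on the local endpoint disappears, along with the rxrpc_put_local() calls on every exit path. A hedged sketch of the resulting callback shape, with my_* names standing in for the real types:

    /* Illustrative data-ready callback that, like the patched
     * rxrpc_data_ready(), relies on the caller's socket lock to keep
     * sk_user_data stable instead of taking its own reference. */
    #include <net/sock.h>
    #include <linux/skbuff.h>

    struct my_endpoint {
            int debug_id;
    };

    static void my_process_skb(struct my_endpoint *ep, struct sk_buff *skb)
    {
            kfree_skb(skb);         /* placeholder for real processing */
    }

    static void my_data_ready(struct sock *sk)
    {
            struct my_endpoint *ep = sk->sk_user_data;
            struct sk_buff *skb;
            int err;

            /* No get/put on ep: teardown clears sk_user_data only after
             * this callback can no longer be invoked. */
            skb = skb_recv_datagram(sk, 0, 1, &err);
            if (!skb)
                    return;
            my_process_skb(ep, skb);
    }
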
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/key.c
index 1021b4c0bdd2..18c737a61d80 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/key.c
@@ -12,6 +12,8 @@
12 * "afs@CAMBRIDGE.REDHAT.COM> 12 * "afs@CAMBRIDGE.REDHAT.COM>
13 */ 13 */
14 14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
15#include <crypto/skcipher.h> 17#include <crypto/skcipher.h>
16#include <linux/module.h> 18#include <linux/module.h>
17#include <linux/net.h> 19#include <linux/net.h>
@@ -800,7 +802,7 @@ static void rxrpc_free_token_list(struct rxrpc_key_token *token)
800 rxrpc_rxk5_free(token->k5); 802 rxrpc_rxk5_free(token->k5);
801 break; 803 break;
802 default: 804 default:
803 printk(KERN_ERR "Unknown token type %x on rxrpc key\n", 805 pr_err("Unknown token type %x on rxrpc key\n",
804 token->security_index); 806 token->security_index);
805 BUG(); 807 BUG();
806 } 808 }
@@ -985,7 +987,7 @@ int rxrpc_get_server_data_key(struct rxrpc_connection *conn,
985 if (ret < 0) 987 if (ret < 0)
986 goto error; 988 goto error;
987 989
988 conn->key = key; 990 conn->params.key = key;
989 _leave(" = 0 [%d]", key_serial(key)); 991 _leave(" = 0 [%d]", key_serial(key));
990 return 0; 992 return 0;
991 993
diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c
new file mode 100644
index 000000000000..31a3f86ef2f6
--- /dev/null
+++ b/net/rxrpc/local_event.c
@@ -0,0 +1,116 @@
1/* AF_RXRPC local endpoint management
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/module.h>
15#include <linux/net.h>
16#include <linux/skbuff.h>
17#include <linux/slab.h>
18#include <linux/udp.h>
19#include <linux/ip.h>
20#include <net/sock.h>
21#include <net/af_rxrpc.h>
22#include <generated/utsrelease.h>
23#include "ar-internal.h"
24
25static const char rxrpc_version_string[65] = "linux-" UTS_RELEASE " AF_RXRPC";
26
27/*
28 * Reply to a version request
29 */
30static void rxrpc_send_version_request(struct rxrpc_local *local,
31 struct rxrpc_host_header *hdr,
32 struct sk_buff *skb)
33{
34 struct rxrpc_wire_header whdr;
35 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
36 struct sockaddr_in sin;
37 struct msghdr msg;
38 struct kvec iov[2];
39 size_t len;
40 int ret;
41
42 _enter("");
43
44 sin.sin_family = AF_INET;
45 sin.sin_port = udp_hdr(skb)->source;
46 sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
47
48 msg.msg_name = &sin;
49 msg.msg_namelen = sizeof(sin);
50 msg.msg_control = NULL;
51 msg.msg_controllen = 0;
52 msg.msg_flags = 0;
53
54 whdr.epoch = htonl(sp->hdr.epoch);
55 whdr.cid = htonl(sp->hdr.cid);
56 whdr.callNumber = htonl(sp->hdr.callNumber);
57 whdr.seq = 0;
58 whdr.serial = 0;
59 whdr.type = RXRPC_PACKET_TYPE_VERSION;
60 whdr.flags = RXRPC_LAST_PACKET | (~hdr->flags & RXRPC_CLIENT_INITIATED);
61 whdr.userStatus = 0;
62 whdr.securityIndex = 0;
63 whdr._rsvd = 0;
64 whdr.serviceId = htons(sp->hdr.serviceId);
65
66 iov[0].iov_base = &whdr;
67 iov[0].iov_len = sizeof(whdr);
68 iov[1].iov_base = (char *)rxrpc_version_string;
69 iov[1].iov_len = sizeof(rxrpc_version_string);
70
71 len = iov[0].iov_len + iov[1].iov_len;
72
73 _proto("Tx VERSION (reply)");
74
75 ret = kernel_sendmsg(local->socket, &msg, iov, 2, len);
76 if (ret < 0)
77 _debug("sendmsg failed: %d", ret);
78
79 _leave("");
80}
81
82/*
 83 * Process event packets targeted at a local endpoint.
84 */
85void rxrpc_process_local_events(struct rxrpc_local *local)
86{
87 struct sk_buff *skb;
88 char v;
89
90 _enter("");
91
92 skb = skb_dequeue(&local->event_queue);
93 if (skb) {
94 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
95
96 _debug("{%d},{%u}", local->debug_id, sp->hdr.type);
97
98 switch (sp->hdr.type) {
99 case RXRPC_PACKET_TYPE_VERSION:
100 if (skb_copy_bits(skb, 0, &v, 1) < 0)
101 return;
102 _proto("Rx VERSION { %02x }", v);
103 if (v == 0)
104 rxrpc_send_version_request(local, &sp->hdr, skb);
105 break;
106
107 default:
108 /* Just ignore anything we don't understand */
109 break;
110 }
111
112 rxrpc_free_skb(skb);
113 }
114
115 _leave("");
116}
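
rxrpc_send_version_request() in the new file demonstrates the usual kernel transmit pattern: name the destination in a msghdr, gather header and payload through a kvec array, and hand both to kernel_sendmsg(). A reduced sketch of the same two-part send, assuming a plain IPv4 kernel UDP socket (my_send_two_part and its parameters are illustrative):

    #include <linux/net.h>
    #include <linux/uio.h>
    #include <linux/in.h>
    #include <net/sock.h>

    /* Send a two-part datagram (header + payload) from kernel space.
     * All names are illustrative, not from this patch. */
    static int my_send_two_part(struct socket *my_sock,
                                struct sockaddr_in *dest,
                                void *hdr, size_t hdr_len,
                                void *payload, size_t payload_len)
    {
            struct msghdr msg = {
                    .msg_name = dest,
                    .msg_namelen = sizeof(*dest),
            };
            struct kvec iov[2] = {
                    { .iov_base = hdr,     .iov_len = hdr_len },
                    { .iov_base = payload, .iov_len = payload_len },
            };

            /* kernel_sendmsg() reads from kernel addresses via the kvec,
             * unlike sock_sendmsg() on a user iovec. */
            return kernel_sendmsg(my_sock, &msg, iov, 2,
                                  hdr_len + payload_len);
    }

That is also why the function above can point iov[1] straight at the static rxrpc_version_string.
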
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
new file mode 100644
index 000000000000..3ab7764f7cd8
--- /dev/null
+++ b/net/rxrpc/local_object.c
@@ -0,0 +1,387 @@
1/* Local endpoint object management
2 *
3 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/module.h>
15#include <linux/net.h>
16#include <linux/skbuff.h>
17#include <linux/slab.h>
18#include <linux/udp.h>
19#include <linux/ip.h>
20#include <linux/hashtable.h>
21#include <net/sock.h>
22#include <net/af_rxrpc.h>
23#include "ar-internal.h"
24
25static void rxrpc_local_processor(struct work_struct *);
26static void rxrpc_local_rcu(struct rcu_head *);
27
28static DEFINE_MUTEX(rxrpc_local_mutex);
29static LIST_HEAD(rxrpc_local_endpoints);
30
31/*
32 * Compare a local to an address. Return -ve, 0 or +ve to indicate less than,
33 * same or greater than.
34 *
35 * We explicitly don't compare the RxRPC service ID as we want to reject
36 * conflicting uses by differing services. Further, we don't want to share
37 * addresses with different options (IPv6), so we don't compare those bits
38 * either.
39 */
40static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
41 const struct sockaddr_rxrpc *srx)
42{
43 long diff;
44
45 diff = ((local->srx.transport_type - srx->transport_type) ?:
46 (local->srx.transport_len - srx->transport_len) ?:
47 (local->srx.transport.family - srx->transport.family));
48 if (diff != 0)
49 return diff;
50
51 switch (srx->transport.family) {
52 case AF_INET:
53 /* If the choice of UDP port is left up to the transport, then
54 * the endpoint record doesn't match.
55 */
56 return ((u16 __force)local->srx.transport.sin.sin_port -
57 (u16 __force)srx->transport.sin.sin_port) ?:
58 memcmp(&local->srx.transport.sin.sin_addr,
59 &srx->transport.sin.sin_addr,
60 sizeof(struct in_addr));
61 default:
62 BUG();
63 }
64}
65
66/*
67 * Allocate a new local endpoint.
68 */
69static struct rxrpc_local *rxrpc_alloc_local(const struct sockaddr_rxrpc *srx)
70{
71 struct rxrpc_local *local;
72
73 local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
74 if (local) {
75 atomic_set(&local->usage, 1);
76 INIT_LIST_HEAD(&local->link);
77 INIT_WORK(&local->processor, rxrpc_local_processor);
78 INIT_LIST_HEAD(&local->services);
79 init_rwsem(&local->defrag_sem);
80 skb_queue_head_init(&local->accept_queue);
81 skb_queue_head_init(&local->reject_queue);
82 skb_queue_head_init(&local->event_queue);
83 local->client_conns = RB_ROOT;
84 spin_lock_init(&local->client_conns_lock);
85 spin_lock_init(&local->lock);
86 rwlock_init(&local->services_lock);
87 local->debug_id = atomic_inc_return(&rxrpc_debug_id);
88 memcpy(&local->srx, srx, sizeof(*srx));
89 }
90
91 _leave(" = %p", local);
92 return local;
93}
94
95/*
96 * create the local socket
97 * - must be called with rxrpc_local_mutex locked
98 */
99static int rxrpc_open_socket(struct rxrpc_local *local)
100{
101 struct sock *sock;
102 int ret, opt;
103
104 _enter("%p{%d}", local, local->srx.transport_type);
105
106 /* create a socket to represent the local endpoint */
107 ret = sock_create_kern(&init_net, PF_INET, local->srx.transport_type,
108 IPPROTO_UDP, &local->socket);
109 if (ret < 0) {
110 _leave(" = %d [socket]", ret);
111 return ret;
112 }
113
114 /* if a local address was supplied then bind it */
115 if (local->srx.transport_len > sizeof(sa_family_t)) {
116 _debug("bind");
117 ret = kernel_bind(local->socket,
118 (struct sockaddr *)&local->srx.transport,
119 local->srx.transport_len);
120 if (ret < 0) {
121 _debug("bind failed %d", ret);
122 goto error;
123 }
124 }
125
126 /* we want to receive ICMP errors */
127 opt = 1;
128 ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
129 (char *) &opt, sizeof(opt));
130 if (ret < 0) {
131 _debug("setsockopt failed");
132 goto error;
133 }
134
135 /* we want to set the don't fragment bit */
136 opt = IP_PMTUDISC_DO;
137 ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
138 (char *) &opt, sizeof(opt));
139 if (ret < 0) {
140 _debug("setsockopt failed");
141 goto error;
142 }
143
144 /* set the socket up */
145 sock = local->socket->sk;
146 sock->sk_user_data = local;
147 sock->sk_data_ready = rxrpc_data_ready;
148 sock->sk_error_report = rxrpc_error_report;
149 _leave(" = 0");
150 return 0;
151
152error:
153 kernel_sock_shutdown(local->socket, SHUT_RDWR);
154 local->socket->sk->sk_user_data = NULL;
155 sock_release(local->socket);
156 local->socket = NULL;
157
158 _leave(" = %d", ret);
159 return ret;
160}
161
162/*
163 * Look up or create a new local endpoint using the specified local address.
164 */
165struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx)
166{
167 struct rxrpc_local *local;
168 struct list_head *cursor;
169 const char *age;
170 long diff;
171 int ret;
172
173 if (srx->transport.family == AF_INET) {
174 _enter("{%d,%u,%pI4+%hu}",
175 srx->transport_type,
176 srx->transport.family,
177 &srx->transport.sin.sin_addr,
178 ntohs(srx->transport.sin.sin_port));
179 } else {
180 _enter("{%d,%u}",
181 srx->transport_type,
182 srx->transport.family);
183 return ERR_PTR(-EAFNOSUPPORT);
184 }
185
186 mutex_lock(&rxrpc_local_mutex);
187
188 for (cursor = rxrpc_local_endpoints.next;
189 cursor != &rxrpc_local_endpoints;
190 cursor = cursor->next) {
191 local = list_entry(cursor, struct rxrpc_local, link);
192
193 diff = rxrpc_local_cmp_key(local, srx);
194 if (diff < 0)
195 continue;
196 if (diff > 0)
197 break;
198
199 /* Services aren't allowed to share transport sockets, so
200 * reject that here. It is possible that the object is dying -
201 * but it may also still have the local transport address that
202 * we want bound.
203 */
204 if (srx->srx_service) {
205 local = NULL;
206 goto addr_in_use;
207 }
208
209 /* Found a match. We replace a dying object. Attempting to
210 * bind the transport socket may still fail if we're attempting
211 * to use a local address that the dying object is still using.
212 */
213 if (!rxrpc_get_local_maybe(local)) {
214 cursor = cursor->next;
215 list_del_init(&local->link);
216 break;
217 }
218
219 age = "old";
220 goto found;
221 }
222
223 local = rxrpc_alloc_local(srx);
224 if (!local)
225 goto nomem;
226
227 ret = rxrpc_open_socket(local);
228 if (ret < 0)
229 goto sock_error;
230
231 list_add_tail(&local->link, cursor);
232 age = "new";
233
234found:
235 mutex_unlock(&rxrpc_local_mutex);
236
237 _net("LOCAL %s %d {%d,%u,%pI4+%hu}",
238 age,
239 local->debug_id,
240 local->srx.transport_type,
241 local->srx.transport.family,
242 &local->srx.transport.sin.sin_addr,
243 ntohs(local->srx.transport.sin.sin_port));
244
245 _leave(" = %p", local);
246 return local;
247
248nomem:
249 ret = -ENOMEM;
250sock_error:
251 mutex_unlock(&rxrpc_local_mutex);
252 kfree(local);
253 _leave(" = %d", ret);
254 return ERR_PTR(ret);
255
256addr_in_use:
257 mutex_unlock(&rxrpc_local_mutex);
258 _leave(" = -EADDRINUSE");
259 return ERR_PTR(-EADDRINUSE);
260}
261
262/*
263 * A local endpoint reached its end of life.
264 */
265void __rxrpc_put_local(struct rxrpc_local *local)
266{
267 _enter("%d", local->debug_id);
268 rxrpc_queue_work(&local->processor);
269}
270
271/*
272 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
273 * of.
274 *
275 * Closing the socket cannot be done from bottom half context or RCU callback
276 * context because it might sleep.
277 */
278static void rxrpc_local_destroyer(struct rxrpc_local *local)
279{
280 struct socket *socket = local->socket;
281
282 _enter("%d", local->debug_id);
283
284 /* We can get a race between an incoming call packet queueing the
285 * processor again and the work processor starting the destruction
286 * process which will shut down the UDP socket.
287 */
288 if (local->dead) {
289 _leave(" [already dead]");
290 return;
291 }
292 local->dead = true;
293
294 mutex_lock(&rxrpc_local_mutex);
295 list_del_init(&local->link);
296 mutex_unlock(&rxrpc_local_mutex);
297
298 ASSERT(RB_EMPTY_ROOT(&local->client_conns));
299 ASSERT(list_empty(&local->services));
300
301 if (socket) {
302 local->socket = NULL;
303 kernel_sock_shutdown(socket, SHUT_RDWR);
304 socket->sk->sk_user_data = NULL;
305 sock_release(socket);
306 }
307
308 /* At this point, there should be no more packets coming in to the
309 * local endpoint.
310 */
311 rxrpc_purge_queue(&local->accept_queue);
312 rxrpc_purge_queue(&local->reject_queue);
313 rxrpc_purge_queue(&local->event_queue);
314
315 _debug("rcu local %d", local->debug_id);
316 call_rcu(&local->rcu, rxrpc_local_rcu);
317}
318
319/*
320 * Process events on an endpoint
321 */
322static void rxrpc_local_processor(struct work_struct *work)
323{
324 struct rxrpc_local *local =
325 container_of(work, struct rxrpc_local, processor);
326 bool again;
327
328 _enter("%d", local->debug_id);
329
330 do {
331 again = false;
332 if (atomic_read(&local->usage) == 0)
333 return rxrpc_local_destroyer(local);
334
335 if (!skb_queue_empty(&local->accept_queue)) {
336 rxrpc_accept_incoming_calls(local);
337 again = true;
338 }
339
340 if (!skb_queue_empty(&local->reject_queue)) {
341 rxrpc_reject_packets(local);
342 again = true;
343 }
344
345 if (!skb_queue_empty(&local->event_queue)) {
346 rxrpc_process_local_events(local);
347 again = true;
348 }
349 } while (again);
350}
351
352/*
353 * Destroy a local endpoint after the RCU grace period expires.
354 */
355static void rxrpc_local_rcu(struct rcu_head *rcu)
356{
357 struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);
358
359 _enter("%d", local->debug_id);
360
361 ASSERT(!work_pending(&local->processor));
362
363 _net("DESTROY LOCAL %d", local->debug_id);
364 kfree(local);
365 _leave("");
366}
367
368/*
369 * Verify the local endpoint list is empty by this point.
370 */
371void __exit rxrpc_destroy_all_locals(void)
372{
373 struct rxrpc_local *local;
374
375 _enter("");
376
377 if (list_empty(&rxrpc_local_endpoints))
378 return;
379
380 mutex_lock(&rxrpc_local_mutex);
381 list_for_each_entry(local, &rxrpc_local_endpoints, link) {
382 pr_err("AF_RXRPC: Leaked local %p {%d}\n",
383 local, atomic_read(&local->usage));
384 }
385 mutex_unlock(&rxrpc_local_mutex);
386 BUG();
387}
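
rxrpc_lookup_local() keeps the endpoint list sorted by comparison key and inserts new entries at the cursor where the scan stopped, which also lets it step over a dying record whose usage count has already hit zero. A compressed sketch of that lookup-or-create walk; the my_* names are hypothetical and the dying object's own teardown path is assumed to tolerate being unlinked early, as rxrpc_local_destroyer() does via list_del_init():

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/atomic.h>

    struct my_obj {
            struct list_head link;
            atomic_t usage;
            long key;
    };

    static LIST_HEAD(my_objs);
    static DEFINE_MUTEX(my_objs_lock);

    /* Take a ref only if the object isn't already dying. */
    static bool my_get_maybe(struct my_obj *o)
    {
            return atomic_inc_not_zero(&o->usage);
    }

    static struct my_obj *my_lookup_or_create(long key)
    {
            struct list_head *cursor;
            struct my_obj *o;

            mutex_lock(&my_objs_lock);
            for (cursor = my_objs.next; cursor != &my_objs;
                 cursor = cursor->next) {
                    o = list_entry(cursor, struct my_obj, link);
                    if (o->key < key)
                            continue;
                    if (o->key > key)
                            break;          /* insertion point found */
                    if (my_get_maybe(o))
                            goto out;       /* reuse the live object */
                    /* Dying object: unlink it, insert a fresh one here. */
                    cursor = cursor->next;
                    list_del_init(&o->link);
                    break;
            }

            o = kzalloc(sizeof(*o), GFP_KERNEL);
            if (o) {
                    atomic_set(&o->usage, 1);
                    o->key = key;
                    list_add_tail(&o->link, cursor); /* keeps list sorted */
            }
    out:
            mutex_unlock(&my_objs_lock);
            return o;
    }
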
diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c
index 1afe9876e79f..bdc5e42fe600 100644
--- a/net/rxrpc/misc.c
+++ b/net/rxrpc/misc.c
@@ -15,6 +15,12 @@
15#include "ar-internal.h" 15#include "ar-internal.h"
16 16
17/* 17/*
18 * The maximum listening backlog queue size that may be set on a socket by
19 * listen().
20 */
21unsigned int rxrpc_max_backlog __read_mostly = 10;
22
23/*
18 * How long to wait before scheduling ACK generation after seeing a 24 * How long to wait before scheduling ACK generation after seeing a
19 * packet with RXRPC_REQUEST_ACK set (in jiffies). 25 * packet with RXRPC_REQUEST_ACK set (in jiffies).
20 */ 26 */
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/output.c
index 51cb10062a8d..f4bda06b7d2d 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/output.c
@@ -9,6 +9,8 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/net.h> 14#include <linux/net.h>
13#include <linux/gfp.h> 15#include <linux/gfp.h>
14#include <linux/skbuff.h> 16#include <linux/skbuff.h>
@@ -30,13 +32,14 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
30/* 32/*
31 * extract control messages from the sendmsg() control buffer 33 * extract control messages from the sendmsg() control buffer
32 */ 34 */
33static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg, 35static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
34 unsigned long *user_call_ID, 36 unsigned long *user_call_ID,
35 enum rxrpc_command *command, 37 enum rxrpc_command *command,
36 u32 *abort_code, 38 u32 *abort_code,
37 bool server) 39 bool *_exclusive)
38{ 40{
39 struct cmsghdr *cmsg; 41 struct cmsghdr *cmsg;
42 bool got_user_ID = false;
40 int len; 43 int len;
41 44
42 *command = RXRPC_CMD_SEND_DATA; 45 *command = RXRPC_CMD_SEND_DATA;
@@ -68,6 +71,7 @@ static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg,
68 CMSG_DATA(cmsg); 71 CMSG_DATA(cmsg);
69 } 72 }
70 _debug("User Call ID %lx", *user_call_ID); 73 _debug("User Call ID %lx", *user_call_ID);
74 got_user_ID = true;
71 break; 75 break;
72 76
73 case RXRPC_ABORT: 77 case RXRPC_ABORT:
@@ -88,15 +92,20 @@ static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg,
88 *command = RXRPC_CMD_ACCEPT; 92 *command = RXRPC_CMD_ACCEPT;
89 if (len != 0) 93 if (len != 0)
90 return -EINVAL; 94 return -EINVAL;
91 if (!server)
92 return -EISCONN;
93 break; 95 break;
94 96
97 case RXRPC_EXCLUSIVE_CALL:
98 *_exclusive = true;
99 if (len != 0)
100 return -EINVAL;
101 break;
95 default: 102 default:
96 return -EINVAL; 103 return -EINVAL;
97 } 104 }
98 } 105 }
99 106
107 if (!got_user_ID)
108 return -EINVAL;
100 _leave(" = 0"); 109 _leave(" = 0");
101 return 0; 110 return 0;
102} 111}
@@ -124,55 +133,78 @@ static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code)
124} 133}
125 134
126/* 135/*
136 * Create a new client call for sendmsg().
137 */
138static struct rxrpc_call *
139rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
140 unsigned long user_call_ID, bool exclusive)
141{
142 struct rxrpc_conn_parameters cp;
143 struct rxrpc_call *call;
144 struct key *key;
145
146 DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name);
147
148 _enter("");
149
150 if (!msg->msg_name)
151 return ERR_PTR(-EDESTADDRREQ);
152
153 key = rx->key;
154 if (key && !rx->key->payload.data[0])
155 key = NULL;
156
157 memset(&cp, 0, sizeof(cp));
158 cp.local = rx->local;
159 cp.key = rx->key;
160 cp.security_level = rx->min_sec_level;
161 cp.exclusive = rx->exclusive | exclusive;
162 cp.service_id = srx->srx_service;
163 call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, GFP_KERNEL);
164
165 _leave(" = %p\n", call);
166 return call;
167}
168
169/*
127 * send a message forming part of a client call through an RxRPC socket 170 * send a message forming part of a client call through an RxRPC socket
128 * - caller holds the socket locked 171 * - caller holds the socket locked
129 * - the socket may be either a client socket or a server socket 172 * - the socket may be either a client socket or a server socket
130 */ 173 */
131int rxrpc_client_sendmsg(struct rxrpc_sock *rx, struct rxrpc_transport *trans, 174int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
132 struct msghdr *msg, size_t len)
133{ 175{
134 struct rxrpc_conn_bundle *bundle;
135 enum rxrpc_command cmd; 176 enum rxrpc_command cmd;
136 struct rxrpc_call *call; 177 struct rxrpc_call *call;
137 unsigned long user_call_ID = 0; 178 unsigned long user_call_ID = 0;
138 struct key *key; 179 bool exclusive = false;
139 u16 service_id;
140 u32 abort_code = 0; 180 u32 abort_code = 0;
141 int ret; 181 int ret;
142 182
143 _enter(""); 183 _enter("");
144 184
145 ASSERT(trans != NULL); 185 ret = rxrpc_sendmsg_cmsg(msg, &user_call_ID, &cmd, &abort_code,
146 186 &exclusive);
147 ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code,
148 false);
149 if (ret < 0) 187 if (ret < 0)
150 return ret; 188 return ret;
151 189
152 bundle = NULL; 190 if (cmd == RXRPC_CMD_ACCEPT) {
153 if (trans) { 191 if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
154 service_id = rx->srx.srx_service; 192 return -EINVAL;
155 if (msg->msg_name) { 193 call = rxrpc_accept_call(rx, user_call_ID);
156 DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, 194 if (IS_ERR(call))
157 msg->msg_name); 195 return PTR_ERR(call);
158 service_id = srx->srx_service; 196 rxrpc_put_call(call);
159 } 197 return 0;
160 key = rx->key;
161 if (key && !rx->key->payload.data[0])
162 key = NULL;
163 bundle = rxrpc_get_bundle(rx, trans, key, service_id,
164 GFP_KERNEL);
165 if (IS_ERR(bundle))
166 return PTR_ERR(bundle);
167 } 198 }
168 199
169 call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID, 200 call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
170 abort_code == 0, GFP_KERNEL); 201 if (!call) {
171 if (trans) 202 if (cmd != RXRPC_CMD_SEND_DATA)
172 rxrpc_put_bundle(trans, bundle); 203 return -EBADSLT;
173 if (IS_ERR(call)) { 204 call = rxrpc_new_client_call_for_sendmsg(rx, msg, user_call_ID,
174 _leave(" = %ld", PTR_ERR(call)); 205 exclusive);
175 return PTR_ERR(call); 206 if (IS_ERR(call))
207 return PTR_ERR(call);
176 } 208 }
177 209
178 _debug("CALL %d USR %lx ST %d on CONN %p", 210 _debug("CALL %d USR %lx ST %d on CONN %p",
@@ -180,14 +212,21 @@ int rxrpc_client_sendmsg(struct rxrpc_sock *rx, struct rxrpc_transport *trans,
180 212
181 if (call->state >= RXRPC_CALL_COMPLETE) { 213 if (call->state >= RXRPC_CALL_COMPLETE) {
182 /* it's too late for this call */ 214 /* it's too late for this call */
183 ret = -ESHUTDOWN; 215 ret = -ECONNRESET;
184 } else if (cmd == RXRPC_CMD_SEND_ABORT) { 216 } else if (cmd == RXRPC_CMD_SEND_ABORT) {
185 rxrpc_send_abort(call, abort_code); 217 rxrpc_send_abort(call, abort_code);
218 ret = 0;
186 } else if (cmd != RXRPC_CMD_SEND_DATA) { 219 } else if (cmd != RXRPC_CMD_SEND_DATA) {
187 ret = -EINVAL; 220 ret = -EINVAL;
188 } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) { 221 } else if (!call->in_clientflag &&
222 call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
189 /* request phase complete for this client call */ 223 /* request phase complete for this client call */
190 ret = -EPROTO; 224 ret = -EPROTO;
225 } else if (call->in_clientflag &&
226 call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
227 call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
228 /* Reply phase not begun or not complete for service call. */
229 ret = -EPROTO;
191 } else { 230 } else {
192 ret = rxrpc_send_data(rx, call, msg, len); 231 ret = rxrpc_send_data(rx, call, msg, len);
193 } 232 }
@@ -266,70 +305,9 @@ void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code)
266EXPORT_SYMBOL(rxrpc_kernel_abort_call); 305EXPORT_SYMBOL(rxrpc_kernel_abort_call);
267 306
268/* 307/*
269 * send a message through a server socket
270 * - caller holds the socket locked
271 */
272int rxrpc_server_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
273{
274 enum rxrpc_command cmd;
275 struct rxrpc_call *call;
276 unsigned long user_call_ID = 0;
277 u32 abort_code = 0;
278 int ret;
279
280 _enter("");
281
282 ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code,
283 true);
284 if (ret < 0)
285 return ret;
286
287 if (cmd == RXRPC_CMD_ACCEPT) {
288 call = rxrpc_accept_call(rx, user_call_ID);
289 if (IS_ERR(call))
290 return PTR_ERR(call);
291 rxrpc_put_call(call);
292 return 0;
293 }
294
295 call = rxrpc_find_server_call(rx, user_call_ID);
296 if (!call)
297 return -EBADSLT;
298 if (call->state >= RXRPC_CALL_COMPLETE) {
299 ret = -ESHUTDOWN;
300 goto out;
301 }
302
303 switch (cmd) {
304 case RXRPC_CMD_SEND_DATA:
305 if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
306 call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
307 call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
308 /* Tx phase not yet begun for this call */
309 ret = -EPROTO;
310 break;
311 }
312
313 ret = rxrpc_send_data(rx, call, msg, len);
314 break;
315
316 case RXRPC_CMD_SEND_ABORT:
317 rxrpc_send_abort(call, abort_code);
318 break;
319 default:
320 BUG();
321 }
322
323 out:
324 rxrpc_put_call(call);
325 _leave(" = %d", ret);
326 return ret;
327}
328
329/*
330 * send a packet through the transport endpoint 308 * send a packet through the transport endpoint
331 */ 309 */
332int rxrpc_send_packet(struct rxrpc_transport *trans, struct sk_buff *skb) 310int rxrpc_send_data_packet(struct rxrpc_connection *conn, struct sk_buff *skb)
333{ 311{
334 struct kvec iov[1]; 312 struct kvec iov[1];
335 struct msghdr msg; 313 struct msghdr msg;
@@ -340,30 +318,30 @@ int rxrpc_send_packet(struct rxrpc_transport *trans, struct sk_buff *skb)
340 iov[0].iov_base = skb->head; 318 iov[0].iov_base = skb->head;
341 iov[0].iov_len = skb->len; 319 iov[0].iov_len = skb->len;
342 320
343 msg.msg_name = &trans->peer->srx.transport.sin; 321 msg.msg_name = &conn->params.peer->srx.transport;
344 msg.msg_namelen = sizeof(trans->peer->srx.transport.sin); 322 msg.msg_namelen = conn->params.peer->srx.transport_len;
345 msg.msg_control = NULL; 323 msg.msg_control = NULL;
346 msg.msg_controllen = 0; 324 msg.msg_controllen = 0;
347 msg.msg_flags = 0; 325 msg.msg_flags = 0;
348 326
349 /* send the packet with the don't fragment bit set if we currently 327 /* send the packet with the don't fragment bit set if we currently
350 * think it's small enough */ 328 * think it's small enough */
351 if (skb->len - sizeof(struct rxrpc_wire_header) < trans->peer->maxdata) { 329 if (skb->len - sizeof(struct rxrpc_wire_header) < conn->params.peer->maxdata) {
352 down_read(&trans->local->defrag_sem); 330 down_read(&conn->params.local->defrag_sem);
353 /* send the packet by UDP 331 /* send the packet by UDP
354 * - returns -EMSGSIZE if UDP would have to fragment the packet 332 * - returns -EMSGSIZE if UDP would have to fragment the packet
355 * to go out of the interface 333 * to go out of the interface
356 * - in which case, we'll have processed the ICMP error 334 * - in which case, we'll have processed the ICMP error
357 * message and update the peer record 335 * message and update the peer record
358 */ 336 */
359 ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1, 337 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 1,
360 iov[0].iov_len); 338 iov[0].iov_len);
361 339
362 up_read(&trans->local->defrag_sem); 340 up_read(&conn->params.local->defrag_sem);
363 if (ret == -EMSGSIZE) 341 if (ret == -EMSGSIZE)
364 goto send_fragmentable; 342 goto send_fragmentable;
365 343
366 _leave(" = %d [%u]", ret, trans->peer->maxdata); 344 _leave(" = %d [%u]", ret, conn->params.peer->maxdata);
367 return ret; 345 return ret;
368 } 346 }
369 347
@@ -371,21 +349,28 @@ send_fragmentable:
371 /* attempt to send this message with fragmentation enabled */ 349 /* attempt to send this message with fragmentation enabled */
372 _debug("send fragment"); 350 _debug("send fragment");
373 351
374 down_write(&trans->local->defrag_sem); 352 down_write(&conn->params.local->defrag_sem);
375 opt = IP_PMTUDISC_DONT; 353
376 ret = kernel_setsockopt(trans->local->socket, SOL_IP, IP_MTU_DISCOVER, 354 switch (conn->params.local->srx.transport.family) {
377 (char *) &opt, sizeof(opt)); 355 case AF_INET:
378 if (ret == 0) { 356 opt = IP_PMTUDISC_DONT;
379 ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1, 357 ret = kernel_setsockopt(conn->params.local->socket,
380 iov[0].iov_len); 358 SOL_IP, IP_MTU_DISCOVER,
381 359 (char *)&opt, sizeof(opt));
382 opt = IP_PMTUDISC_DO; 360 if (ret == 0) {
383 kernel_setsockopt(trans->local->socket, SOL_IP, 361 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 1,
384 IP_MTU_DISCOVER, (char *) &opt, sizeof(opt)); 362 iov[0].iov_len);
363
364 opt = IP_PMTUDISC_DO;
365 kernel_setsockopt(conn->params.local->socket, SOL_IP,
366 IP_MTU_DISCOVER,
367 (char *)&opt, sizeof(opt));
368 }
369 break;
385 } 370 }
386 371
387 up_write(&trans->local->defrag_sem); 372 up_write(&conn->params.local->defrag_sem);
388 _leave(" = %d [frag %u]", ret, trans->peer->maxdata); 373 _leave(" = %d [frag %u]", ret, conn->params.peer->maxdata);
389 return ret; 374 return ret;
390} 375}
391 376
@@ -497,7 +482,7 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
497 if (try_to_del_timer_sync(&call->ack_timer) >= 0) { 482 if (try_to_del_timer_sync(&call->ack_timer) >= 0) {
498 /* the packet may be freed by rxrpc_process_call() before this 483 /* the packet may be freed by rxrpc_process_call() before this
499 * returns */ 484 * returns */
500 ret = rxrpc_send_packet(call->conn->trans, skb); 485 ret = rxrpc_send_data_packet(call->conn, skb);
501 _net("sent skb %p", skb); 486 _net("sent skb %p", skb);
502 } else { 487 } else {
503 _debug("failed to delete ACK timer"); 488 _debug("failed to delete ACK timer");
@@ -583,7 +568,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
583 goto maybe_error; 568 goto maybe_error;
584 } 569 }
585 570
586 max = call->conn->trans->peer->maxdata; 571 max = call->conn->params.peer->maxdata;
587 max -= call->conn->security_size; 572 max -= call->conn->security_size;
588 max &= ~(call->conn->size_align - 1UL); 573 max &= ~(call->conn->size_align - 1UL);
589 574
@@ -674,7 +659,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
674 659
675 seq = atomic_inc_return(&call->sequence); 660 seq = atomic_inc_return(&call->sequence);
676 661
677 sp->hdr.epoch = conn->epoch; 662 sp->hdr.epoch = conn->proto.epoch;
678 sp->hdr.cid = call->cid; 663 sp->hdr.cid = call->cid;
679 sp->hdr.callNumber = call->call_id; 664 sp->hdr.callNumber = call->call_id;
680 sp->hdr.seq = seq; 665 sp->hdr.seq = seq;
@@ -717,7 +702,9 @@ out:
717call_aborted: 702call_aborted:
718 rxrpc_free_skb(skb); 703 rxrpc_free_skb(skb);
719 if (call->state == RXRPC_CALL_NETWORK_ERROR) 704 if (call->state == RXRPC_CALL_NETWORK_ERROR)
720 ret = call->conn->trans->peer->net_error; 705 ret = call->error_report < RXRPC_LOCAL_ERROR_OFFSET ?
706 call->error_report :
707 call->error_report - RXRPC_LOCAL_ERROR_OFFSET;
721 else 708 else
722 ret = -ECONNABORTED; 709 ret = -ECONNABORTED;
723 _leave(" = %d", ret); 710 _leave(" = %d", ret);
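
In rxrpc_send_data_packet(), the socket keeps IP_PMTUDISC_DO set so that an oversized send fails fast with -EMSGSIZE instead of fragmenting; only then does the slow path flip the socket to IP_PMTUDISC_DONT, retransmit, and flip it back, under the write side of defrag_sem so concurrent senders never observe the relaxed setting. A sketch of the flip itself, assuming an IPv4 kernel UDP socket (helper name illustrative, the rwsem serialisation omitted):

    #include <linux/net.h>
    #include <linux/in.h>
    #include <net/sock.h>

    /* Retry a kernel_sendmsg() with fragmentation temporarily allowed.
     * Illustrative only; the real code also serialises senders so the
     * socket-wide option flip can't race ordinary sends. */
    static int my_send_fragmentable(struct socket *sock, struct msghdr *msg,
                                    struct kvec *iov, size_t len)
    {
            int opt, ret;

            opt = IP_PMTUDISC_DONT;
            ret = kernel_setsockopt(sock, SOL_IP, IP_MTU_DISCOVER,
                                    (char *)&opt, sizeof(opt));
            if (ret == 0) {
                    ret = kernel_sendmsg(sock, msg, iov, 1, len);

                    opt = IP_PMTUDISC_DO;
                    kernel_setsockopt(sock, SOL_IP, IP_MTU_DISCOVER,
                                      (char *)&opt, sizeof(opt));
            }
            return ret;
    }

The switch statement the patch introduces around this sequence leaves room for an AF_INET6 equivalent later without disturbing the IPv4 path.
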
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
new file mode 100644
index 000000000000..8940674b5e08
--- /dev/null
+++ b/net/rxrpc/peer_event.c
@@ -0,0 +1,281 @@
1/* Peer event handling, typically ICMP messages.
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/net.h>
14#include <linux/skbuff.h>
15#include <linux/errqueue.h>
16#include <linux/udp.h>
17#include <linux/in.h>
18#include <linux/in6.h>
19#include <linux/icmp.h>
20#include <net/sock.h>
21#include <net/af_rxrpc.h>
22#include <net/ip.h>
23#include "ar-internal.h"
24
25static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
26
27/*
28 * Find the peer associated with an ICMP packet.
29 */
30static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
31 const struct sk_buff *skb)
32{
33 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
34 struct sockaddr_rxrpc srx;
35
36 _enter("");
37
38 memset(&srx, 0, sizeof(srx));
39 srx.transport_type = local->srx.transport_type;
40 srx.transport.family = local->srx.transport.family;
41
42 /* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice
43 * versa?
44 */
45 switch (srx.transport.family) {
46 case AF_INET:
47 srx.transport.sin.sin_port = serr->port;
48 srx.transport_len = sizeof(struct sockaddr_in);
49 switch (serr->ee.ee_origin) {
50 case SO_EE_ORIGIN_ICMP:
51 _net("Rx ICMP");
52 memcpy(&srx.transport.sin.sin_addr,
53 skb_network_header(skb) + serr->addr_offset,
54 sizeof(struct in_addr));
55 break;
56 case SO_EE_ORIGIN_ICMP6:
57 _net("Rx ICMP6 on v4 sock");
58 memcpy(&srx.transport.sin.sin_addr,
59 skb_network_header(skb) + serr->addr_offset + 12,
60 sizeof(struct in_addr));
61 break;
62 default:
63 memcpy(&srx.transport.sin.sin_addr, &ip_hdr(skb)->saddr,
64 sizeof(struct in_addr));
65 break;
66 }
67 break;
68
69 default:
70 BUG();
71 }
72
73 return rxrpc_lookup_peer_rcu(local, &srx);
74}
75
76/*
77 * Handle an MTU/fragmentation problem.
78 */
79static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
80{
81 u32 mtu = serr->ee.ee_info;
82
83 _net("Rx ICMP Fragmentation Needed (%d)", mtu);
84
85 /* wind down the local interface MTU */
86 if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
87 peer->if_mtu = mtu;
88 _net("I/F MTU %u", mtu);
89 }
90
91 if (mtu == 0) {
92 /* they didn't give us a size, estimate one */
93 mtu = peer->if_mtu;
94 if (mtu > 1500) {
95 mtu >>= 1;
96 if (mtu < 1500)
97 mtu = 1500;
98 } else {
99 mtu -= 100;
100 if (mtu < peer->hdrsize)
101 mtu = peer->hdrsize + 4;
102 }
103 }
104
105 if (mtu < peer->mtu) {
106 spin_lock_bh(&peer->lock);
107 peer->mtu = mtu;
108 peer->maxdata = peer->mtu - peer->hdrsize;
109 spin_unlock_bh(&peer->lock);
110 _net("Net MTU %u (maxdata %u)",
111 peer->mtu, peer->maxdata);
112 }
113}
114
115/*
116 * Handle an error received on the local endpoint.
117 */
118void rxrpc_error_report(struct sock *sk)
119{
120 struct sock_exterr_skb *serr;
121 struct rxrpc_local *local = sk->sk_user_data;
122 struct rxrpc_peer *peer;
123 struct sk_buff *skb;
124
125 _enter("%p{%d}", sk, local->debug_id);
126
127 skb = sock_dequeue_err_skb(sk);
128 if (!skb) {
129 _leave("UDP socket errqueue empty");
130 return;
131 }
132 serr = SKB_EXT_ERR(skb);
133 if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
134 _leave("UDP empty message");
135 kfree_skb(skb);
136 return;
137 }
138
139 rxrpc_new_skb(skb);
140
141 rcu_read_lock();
142 peer = rxrpc_lookup_peer_icmp_rcu(local, skb);
143 if (peer && !rxrpc_get_peer_maybe(peer))
144 peer = NULL;
145 if (!peer) {
146 rcu_read_unlock();
147 rxrpc_free_skb(skb);
148 _leave(" [no peer]");
149 return;
150 }
151
152 if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
153 serr->ee.ee_type == ICMP_DEST_UNREACH &&
154 serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
155 rxrpc_adjust_mtu(peer, serr);
156 rcu_read_unlock();
157 rxrpc_free_skb(skb);
158 rxrpc_put_peer(peer);
159 _leave(" [MTU update]");
160 return;
161 }
162
163 rxrpc_store_error(peer, serr);
164 rcu_read_unlock();
165 rxrpc_free_skb(skb);
166
167 /* The ref we obtained is passed off to the work item */
168 rxrpc_queue_work(&peer->error_distributor);
169 _leave("");
170}
171
172/*
173 * Map an error report to error codes on the peer record.
174 */
175static void rxrpc_store_error(struct rxrpc_peer *peer,
176 struct sock_exterr_skb *serr)
177{
178 struct sock_extended_err *ee;
179 int err;
180
181 _enter("");
182
183 ee = &serr->ee;
184
185 _net("Rx Error o=%d t=%d c=%d e=%d",
186 ee->ee_origin, ee->ee_type, ee->ee_code, ee->ee_errno);
187
188 err = ee->ee_errno;
189
190 switch (ee->ee_origin) {
191 case SO_EE_ORIGIN_ICMP:
192 switch (ee->ee_type) {
193 case ICMP_DEST_UNREACH:
194 switch (ee->ee_code) {
195 case ICMP_NET_UNREACH:
196 _net("Rx Received ICMP Network Unreachable");
197 break;
198 case ICMP_HOST_UNREACH:
199 _net("Rx Received ICMP Host Unreachable");
200 break;
201 case ICMP_PORT_UNREACH:
202 _net("Rx Received ICMP Port Unreachable");
203 break;
204 case ICMP_NET_UNKNOWN:
205 _net("Rx Received ICMP Unknown Network");
206 break;
207 case ICMP_HOST_UNKNOWN:
208 _net("Rx Received ICMP Unknown Host");
209 break;
210 default:
211 _net("Rx Received ICMP DestUnreach code=%u",
212 ee->ee_code);
213 break;
214 }
215 break;
216
217 case ICMP_TIME_EXCEEDED:
218 _net("Rx Received ICMP TTL Exceeded");
219 break;
220
221 default:
222 _proto("Rx Received ICMP error { type=%u code=%u }",
223 ee->ee_type, ee->ee_code);
224 break;
225 }
226 break;
227
228 case SO_EE_ORIGIN_NONE:
229 case SO_EE_ORIGIN_LOCAL:
230 _proto("Rx Received local error { error=%d }", err);
231 err += RXRPC_LOCAL_ERROR_OFFSET;
232 break;
233
234 case SO_EE_ORIGIN_ICMP6:
235 default:
236 _proto("Rx Received error report { orig=%u }", ee->ee_origin);
237 break;
238 }
239
240 peer->error_report = err;
241}
242
243/*
244 * Distribute an error that occurred on a peer
245 */
246void rxrpc_peer_error_distributor(struct work_struct *work)
247{
248 struct rxrpc_peer *peer =
249 container_of(work, struct rxrpc_peer, error_distributor);
250 struct rxrpc_call *call;
251 int error_report;
252
253 _enter("");
254
255 error_report = READ_ONCE(peer->error_report);
256
257 _debug("ISSUE ERROR %d", error_report);
258
259 spin_lock_bh(&peer->lock);
260
261 while (!hlist_empty(&peer->error_targets)) {
262 call = hlist_entry(peer->error_targets.first,
263 struct rxrpc_call, error_link);
264 hlist_del_init(&call->error_link);
265
266 write_lock(&call->state_lock);
267 if (call->state != RXRPC_CALL_COMPLETE &&
268 call->state < RXRPC_CALL_NETWORK_ERROR) {
269 call->error_report = error_report;
270 call->state = RXRPC_CALL_NETWORK_ERROR;
271 set_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events);
272 rxrpc_queue_call(call);
273 }
274 write_unlock(&call->state_lock);
275 }
276
277 spin_unlock_bh(&peer->lock);
278
279 rxrpc_put_peer(peer);
280 _leave("");
281}
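
rxrpc_error_report() is built on the standard error-queue plumbing: sock_dequeue_err_skb() pops one queued error skb, SKB_EXT_ERR() exposes the attached sock_extended_err describing the ICMP or local error, and zero-length timestamping entries are discarded. A minimal sketch that drains the whole queue in a loop (my_* names are hypothetical; the rxrpc version above handles a single skb per callback):

    #include <net/sock.h>
    #include <linux/errqueue.h>
    #include <linux/skbuff.h>
    #include <linux/printk.h>

    static void my_handle_ee(struct sock_extended_err *ee)
    {
            pr_debug("err origin=%u errno=%u\n",
                     ee->ee_origin, ee->ee_errno);
    }

    /* Drain every pending entry on sk's error queue. */
    static void my_error_report(struct sock *sk)
    {
            struct sk_buff *skb;

            while ((skb = sock_dequeue_err_skb(sk)) != NULL) {
                    struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

                    /* Zero-length timestamping entries carry no error. */
                    if (skb->len == 0 &&
                        serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
                            kfree_skb(skb);
                            continue;
                    }
                    my_handle_ee(&serr->ee);
                    kfree_skb(skb);
            }
    }
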
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
new file mode 100644
index 000000000000..01d4930a11f7
--- /dev/null
+++ b/net/rxrpc/peer_object.c
@@ -0,0 +1,315 @@
1/* RxRPC remote transport endpoint record management
2 *
3 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/module.h>
15#include <linux/net.h>
16#include <linux/skbuff.h>
17#include <linux/udp.h>
18#include <linux/in.h>
19#include <linux/slab.h>
20#include <linux/hashtable.h>
21#include <net/sock.h>
22#include <net/af_rxrpc.h>
23#include <net/ip.h>
24#include <net/route.h>
25#include "ar-internal.h"
26
27static DEFINE_HASHTABLE(rxrpc_peer_hash, 10);
28static DEFINE_SPINLOCK(rxrpc_peer_hash_lock);
29
30/*
31 * Hash a peer key.
32 */
33static unsigned long rxrpc_peer_hash_key(struct rxrpc_local *local,
34 const struct sockaddr_rxrpc *srx)
35{
36 const u16 *p;
37 unsigned int i, size;
38 unsigned long hash_key;
39
40 _enter("");
41
42 hash_key = (unsigned long)local / __alignof__(*local);
43 hash_key += srx->transport_type;
44 hash_key += srx->transport_len;
45 hash_key += srx->transport.family;
46
47 switch (srx->transport.family) {
48 case AF_INET:
49 hash_key += (u16 __force)srx->transport.sin.sin_port;
50 size = sizeof(srx->transport.sin.sin_addr);
51 p = (u16 *)&srx->transport.sin.sin_addr;
52 break;
53 default:
54 WARN(1, "AF_RXRPC: Unsupported transport address family\n");
55 return 0;
56 }
57
58 /* Step through the peer address in 16-bit portions for speed */
59 for (i = 0; i < size; i += sizeof(*p), p++)
60 hash_key += *p;
61
62 _leave(" 0x%lx", hash_key);
63 return hash_key;
64}
65
66/*
67 * Compare a peer to a key. Return -ve, 0 or +ve to indicate less than, same
68 * or greater than.
69 *
70 * Unfortunately, the primitives in linux/hashtable.h don't allow for sorted
71 * buckets and mid-bucket insertion, so we don't make full use of this
72 * information at this point.
73 */
74static long rxrpc_peer_cmp_key(const struct rxrpc_peer *peer,
75 struct rxrpc_local *local,
76 const struct sockaddr_rxrpc *srx,
77 unsigned long hash_key)
78{
79 long diff;
80
81 diff = ((peer->hash_key - hash_key) ?:
82 ((unsigned long)peer->local - (unsigned long)local) ?:
83 (peer->srx.transport_type - srx->transport_type) ?:
84 (peer->srx.transport_len - srx->transport_len) ?:
85 (peer->srx.transport.family - srx->transport.family));
86 if (diff != 0)
87 return diff;
88
89 switch (srx->transport.family) {
90 case AF_INET:
91 return ((u16 __force)peer->srx.transport.sin.sin_port -
92 (u16 __force)srx->transport.sin.sin_port) ?:
93 memcmp(&peer->srx.transport.sin.sin_addr,
94 &srx->transport.sin.sin_addr,
95 sizeof(struct in_addr));
96 default:
97 BUG();
98 }
99}
100
101/*
102 * Look up a remote transport endpoint for the specified address using RCU.
103 */
104static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
105 struct rxrpc_local *local,
106 const struct sockaddr_rxrpc *srx,
107 unsigned long hash_key)
108{
109 struct rxrpc_peer *peer;
110
111 hash_for_each_possible_rcu(rxrpc_peer_hash, peer, hash_link, hash_key) {
112 if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0) {
113 if (atomic_read(&peer->usage) == 0)
114 return NULL;
115 return peer;
116 }
117 }
118
119 return NULL;
120}
121
122/*
123 * Look up a remote transport endpoint for the specified address using RCU.
124 */
125struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
126 const struct sockaddr_rxrpc *srx)
127{
128 struct rxrpc_peer *peer;
129 unsigned long hash_key = rxrpc_peer_hash_key(local, srx);
130
131 peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
132 if (peer) {
133 switch (srx->transport.family) {
134 case AF_INET:
135 _net("PEER %d {%d,%u,%pI4+%hu}",
136 peer->debug_id,
137 peer->srx.transport_type,
138 peer->srx.transport.family,
139 &peer->srx.transport.sin.sin_addr,
140 ntohs(peer->srx.transport.sin.sin_port));
141 break;
142 }
143
144 _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
145 }
146 return peer;
147}
148
149/*
150 * assess the MTU size for the network interface through which this peer is
151 * reached
152 */
153static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
154{
155 struct rtable *rt;
156 struct flowi4 fl4;
157
158 peer->if_mtu = 1500;
159
160 rt = ip_route_output_ports(&init_net, &fl4, NULL,
161 peer->srx.transport.sin.sin_addr.s_addr, 0,
162 htons(7000), htons(7001),
163 IPPROTO_UDP, 0, 0);
164 if (IS_ERR(rt)) {
165 _leave(" [route err %ld]", PTR_ERR(rt));
166 return;
167 }
168
169 peer->if_mtu = dst_mtu(&rt->dst);
170 dst_release(&rt->dst);
171
172 _leave(" [if_mtu %u]", peer->if_mtu);
173}
174
175/*
176 * Allocate a peer.
177 */
178struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
179{
180 struct rxrpc_peer *peer;
181
182 _enter("");
183
184 peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
185 if (peer) {
186 atomic_set(&peer->usage, 1);
187 peer->local = local;
188 INIT_HLIST_HEAD(&peer->error_targets);
189 INIT_WORK(&peer->error_distributor,
190 &rxrpc_peer_error_distributor);
191 peer->service_conns = RB_ROOT;
192 rwlock_init(&peer->conn_lock);
193 spin_lock_init(&peer->lock);
194 peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
195 }
196
197 _leave(" = %p", peer);
198 return peer;
199}
200
201/*
202 * Set up a new peer.
203 */
204static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
205 struct sockaddr_rxrpc *srx,
206 unsigned long hash_key,
207 gfp_t gfp)
208{
209 struct rxrpc_peer *peer;
210
211 _enter("");
212
213 peer = rxrpc_alloc_peer(local, gfp);
214 if (peer) {
215 peer->hash_key = hash_key;
216 memcpy(&peer->srx, srx, sizeof(*srx));
217
218 rxrpc_assess_MTU_size(peer);
219 peer->mtu = peer->if_mtu;
220
221 if (srx->transport.family == AF_INET) {
222 peer->hdrsize = sizeof(struct iphdr);
223 switch (srx->transport_type) {
224 case SOCK_DGRAM:
225 peer->hdrsize += sizeof(struct udphdr);
226 break;
227 default:
228 BUG();
229 break;
230 }
231 } else {
232 BUG();
233 }
234
235 peer->hdrsize += sizeof(struct rxrpc_wire_header);
236 peer->maxdata = peer->mtu - peer->hdrsize;
237 }
238
239 _leave(" = %p", peer);
240 return peer;
241}
242
243/*
244 * obtain a remote transport endpoint for the specified address
245 */
246struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
247 struct sockaddr_rxrpc *srx, gfp_t gfp)
248{
249 struct rxrpc_peer *peer, *candidate;
250 unsigned long hash_key = rxrpc_peer_hash_key(local, srx);
251
252 _enter("{%d,%d,%pI4+%hu}",
253 srx->transport_type,
254 srx->transport_len,
255 &srx->transport.sin.sin_addr,
256 ntohs(srx->transport.sin.sin_port));
257
258 /* search the peer list first */
259 rcu_read_lock();
260 peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
261 if (peer && !rxrpc_get_peer_maybe(peer))
262 peer = NULL;
263 rcu_read_unlock();
264
265 if (!peer) {
266 /* The peer is not yet present in hash - create a candidate
267 * for a new record and then redo the search.
268 */
269 candidate = rxrpc_create_peer(local, srx, hash_key, gfp);
270 if (!candidate) {
271 _leave(" = NULL [nomem]");
272 return NULL;
273 }
274
275 spin_lock(&rxrpc_peer_hash_lock);
276
277 /* Need to check that we aren't racing with someone else */
278 peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
279 if (peer && !rxrpc_get_peer_maybe(peer))
280 peer = NULL;
281 if (!peer)
282 hash_add_rcu(rxrpc_peer_hash,
283 &candidate->hash_link, hash_key);
284
285 spin_unlock(&rxrpc_peer_hash_lock);
286
287 if (peer)
288 kfree(candidate);
289 else
290 peer = candidate;
291 }
292
293 _net("PEER %d {%d,%pI4+%hu}",
294 peer->debug_id,
295 peer->srx.transport_type,
296 &peer->srx.transport.sin.sin_addr,
297 ntohs(peer->srx.transport.sin.sin_port));
298
299 _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
300 return peer;
301}
302
303/*
304 * Discard a ref on a remote peer record.
305 */
306void __rxrpc_put_peer(struct rxrpc_peer *peer)
307{
308 ASSERT(hlist_empty(&peer->error_targets));
309
310 spin_lock(&rxrpc_peer_hash_lock);
311 hash_del_rcu(&peer->hash_link);
312 spin_unlock(&rxrpc_peer_hash_lock);
313
314 kfree_rcu(peer, rcu);
315}
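
peer_object.c is a textbook RCU lookup-or-insert: search unlocked under rcu_read_lock() first; on a miss, allocate a candidate, repeat the search under the hash spinlock, and either publish the candidate with hash_add_rcu() or discard it because another thread won the race. A boiled-down sketch of the pattern (my_* names are hypothetical):

    #include <linux/hashtable.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>
    #include <linux/slab.h>
    #include <linux/atomic.h>

    struct my_peer {
            struct hlist_node hash_link;
            struct rcu_head rcu;
            atomic_t usage;
            unsigned long key;
    };

    static DEFINE_HASHTABLE(my_peer_hash, 10);
    static DEFINE_SPINLOCK(my_peer_hash_lock);

    /* Find a live peer and take a ref; treat usage==0 as absent. */
    static struct my_peer *my_find(unsigned long key)
    {
            struct my_peer *p;

            hash_for_each_possible_rcu(my_peer_hash, p, hash_link, key)
                    if (p->key == key && atomic_inc_not_zero(&p->usage))
                            return p;
            return NULL;
    }

    static struct my_peer *my_lookup_or_insert(unsigned long key)
    {
            struct my_peer *p, *candidate;

            rcu_read_lock();
            p = my_find(key);
            rcu_read_unlock();
            if (p)
                    return p;

            candidate = kzalloc(sizeof(*candidate), GFP_KERNEL);
            if (!candidate)
                    return NULL;
            atomic_set(&candidate->usage, 1);
            candidate->key = key;

            spin_lock(&my_peer_hash_lock);
            p = my_find(key);       /* re-check: did someone race in? */
            if (!p)
                    hash_add_rcu(my_peer_hash,
                                 &candidate->hash_link, key);
            spin_unlock(&my_peer_hash_lock);

            if (p) {
                    kfree(candidate);  /* lost the race; use the winner */
                    return p;
            }
            return candidate;
    }

rxrpc_get_peer_maybe() corresponds to the atomic_inc_not_zero() here: a record still visible in the hash may already have dropped to zero usage, in which case it must be treated as absent.
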
diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/proc.c
index 225163bc658d..500cdcdc843c 100644
--- a/net/rxrpc/ar-proc.c
+++ b/net/rxrpc/proc.c
@@ -46,7 +46,7 @@ static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
46 46
47static int rxrpc_call_seq_show(struct seq_file *seq, void *v) 47static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
48{ 48{
49 struct rxrpc_transport *trans; 49 struct rxrpc_connection *conn;
50 struct rxrpc_call *call; 50 struct rxrpc_call *call;
51 char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1]; 51 char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];
52 52
@@ -59,25 +59,28 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
59 } 59 }
60 60
61 call = list_entry(v, struct rxrpc_call, link); 61 call = list_entry(v, struct rxrpc_call, link);
62 trans = call->conn->trans;
63 62
64 sprintf(lbuff, "%pI4:%u", 63 sprintf(lbuff, "%pI4:%u",
65 &trans->local->srx.transport.sin.sin_addr, 64 &call->local->srx.transport.sin.sin_addr,
66 ntohs(trans->local->srx.transport.sin.sin_port)); 65 ntohs(call->local->srx.transport.sin.sin_port));
67 66
68 sprintf(rbuff, "%pI4:%u", 67 conn = call->conn;
69 &trans->peer->srx.transport.sin.sin_addr, 68 if (conn)
70 ntohs(trans->peer->srx.transport.sin.sin_port)); 69 sprintf(rbuff, "%pI4:%u",
70 &conn->params.peer->srx.transport.sin.sin_addr,
71 ntohs(conn->params.peer->srx.transport.sin.sin_port));
72 else
73 strcpy(rbuff, "no_connection");
71 74
72 seq_printf(seq, 75 seq_printf(seq,
73 "UDP %-22.22s %-22.22s %4x %08x %08x %s %3u" 76 "UDP %-22.22s %-22.22s %4x %08x %08x %s %3u"
74 " %-8.8s %08x %lx\n", 77 " %-8.8s %08x %lx\n",
75 lbuff, 78 lbuff,
76 rbuff, 79 rbuff,
77 call->conn->service_id, 80 call->service_id,
78 call->cid, 81 call->cid,
79 call->call_id, 82 call->call_id,
80 call->conn->in_clientflag ? "Svc" : "Clt", 83 call->in_clientflag ? "Svc" : "Clt",
81 atomic_read(&call->usage), 84 atomic_read(&call->usage),
82 rxrpc_call_states[call->state], 85 rxrpc_call_states[call->state],
83 call->remote_abort ?: call->local_abort, 86 call->remote_abort ?: call->local_abort,
@@ -129,7 +132,6 @@ static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
129static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) 132static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
130{ 133{
131 struct rxrpc_connection *conn; 134 struct rxrpc_connection *conn;
132 struct rxrpc_transport *trans;
133 char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1]; 135 char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];
134 136
135 if (v == &rxrpc_connections) { 137 if (v == &rxrpc_connections) {
@@ -142,28 +144,27 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
142 } 144 }
143 145
144 conn = list_entry(v, struct rxrpc_connection, link); 146 conn = list_entry(v, struct rxrpc_connection, link);
145 trans = conn->trans;
146 147
147 sprintf(lbuff, "%pI4:%u", 148 sprintf(lbuff, "%pI4:%u",
148 &trans->local->srx.transport.sin.sin_addr, 149 &conn->params.local->srx.transport.sin.sin_addr,
149 ntohs(trans->local->srx.transport.sin.sin_port)); 150 ntohs(conn->params.local->srx.transport.sin.sin_port));
150 151
151 sprintf(rbuff, "%pI4:%u", 152 sprintf(rbuff, "%pI4:%u",
152 &trans->peer->srx.transport.sin.sin_addr, 153 &conn->params.peer->srx.transport.sin.sin_addr,
153 ntohs(trans->peer->srx.transport.sin.sin_port)); 154 ntohs(conn->params.peer->srx.transport.sin.sin_port));
154 155
155 seq_printf(seq, 156 seq_printf(seq,
156 "UDP %-22.22s %-22.22s %4x %08x %08x %s %3u" 157 "UDP %-22.22s %-22.22s %4x %08x %08x %s %3u"
157 " %s %08x %08x %08x\n", 158 " %s %08x %08x %08x\n",
158 lbuff, 159 lbuff,
159 rbuff, 160 rbuff,
160 conn->service_id, 161 conn->params.service_id,
161 conn->cid, 162 conn->proto.cid,
162 conn->call_counter, 163 conn->call_counter,
163 conn->in_clientflag ? "Svc" : "Clt", 164 rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
164 atomic_read(&conn->usage), 165 atomic_read(&conn->usage),
165 rxrpc_conn_states[conn->state], 166 rxrpc_conn_states[conn->state],
166 key_serial(conn->key), 167 key_serial(conn->params.key),
167 atomic_read(&conn->serial), 168 atomic_read(&conn->serial),
168 atomic_read(&conn->hi_serial)); 169 atomic_read(&conn->hi_serial));
169 170
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/recvmsg.c
index 160f0927aa3e..a3fa2ed85d63 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -9,6 +9,8 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/net.h> 14#include <linux/net.h>
13#include <linux/skbuff.h> 15#include <linux/skbuff.h>
14#include <linux/export.h> 16#include <linux/export.h>
@@ -145,9 +147,9 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
145 if (!continue_call) { 147 if (!continue_call) {
146 if (msg->msg_name) { 148 if (msg->msg_name) {
147 size_t len = 149 size_t len =
148 sizeof(call->conn->trans->peer->srx); 150 sizeof(call->conn->params.peer->srx);
149 memcpy(msg->msg_name, 151 memcpy(msg->msg_name,
150 &call->conn->trans->peer->srx, len); 152 &call->conn->params.peer->srx, len);
151 msg->msg_namelen = len; 153 msg->msg_namelen = len;
152 } 154 }
153 sock_recv_timestamp(msg, &rx->sk, skb); 155 sock_recv_timestamp(msg, &rx->sk, skb);
@@ -203,7 +205,7 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
203 /* we transferred the whole data packet */ 205 /* we transferred the whole data packet */
204 if (sp->hdr.flags & RXRPC_LAST_PACKET) { 206 if (sp->hdr.flags & RXRPC_LAST_PACKET) {
205 _debug("last"); 207 _debug("last");
206 if (call->conn->out_clientflag) { 208 if (rxrpc_conn_is_client(call->conn)) {
207 /* last byte of reply received */ 209 /* last byte of reply received */
208 ret = copied; 210 ret = copied;
209 goto terminal_message; 211 goto terminal_message;
@@ -307,7 +309,7 @@ receive_non_data_message:
307 &abort_code); 309 &abort_code);
308 break; 310 break;
309 default: 311 default:
310 pr_err("RxRPC: Unknown packet mark %u\n", skb->mark); 312 pr_err("Unknown packet mark %u\n", skb->mark);
311 BUG(); 313 BUG();
312 break; 314 break;
313 } 315 }
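Direct tests of in_clientflag and out_clientflag give way to rxrpc_conn_is_service() and rxrpc_conn_is_client() here and in the /proc dump above. A minimal sketch of such predicates, assuming the peer's client-initiated flag now lives under conn->proto alongside epoch and cid:

/* Illustrative predicates; the real helpers live in ar-internal.h. */
static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
{
	return conn->proto.in_clientflag;	/* peer initiated, we serve */
}

static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
{
	return !rxrpc_conn_is_service(conn);
}

Naming the test makes the call sites read as intent rather than flag plumbing.
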
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index bab56ed649ba..23c05ec6fa28 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -9,6 +9,8 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <crypto/skcipher.h> 14#include <crypto/skcipher.h>
13#include <linux/module.h> 15#include <linux/module.h>
14#include <linux/net.h> 16#include <linux/net.h>
@@ -56,9 +58,9 @@ static int rxkad_init_connection_security(struct rxrpc_connection *conn)
56 struct rxrpc_key_token *token; 58 struct rxrpc_key_token *token;
57 int ret; 59 int ret;
58 60
59 _enter("{%d},{%x}", conn->debug_id, key_serial(conn->key)); 61 _enter("{%d},{%x}", conn->debug_id, key_serial(conn->params.key));
60 62
61 token = conn->key->payload.data[0]; 63 token = conn->params.key->payload.data[0];
62 conn->security_ix = token->security_index; 64 conn->security_ix = token->security_index;
63 65
64 ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC); 66 ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
@@ -72,7 +74,7 @@ static int rxkad_init_connection_security(struct rxrpc_connection *conn)
72 sizeof(token->kad->session_key)) < 0) 74 sizeof(token->kad->session_key)) < 0)
73 BUG(); 75 BUG();
74 76
75 switch (conn->security_level) { 77 switch (conn->params.security_level) {
76 case RXRPC_SECURITY_PLAIN: 78 case RXRPC_SECURITY_PLAIN:
77 break; 79 break;
78 case RXRPC_SECURITY_AUTH: 80 case RXRPC_SECURITY_AUTH:
@@ -113,14 +115,14 @@ static void rxkad_prime_packet_security(struct rxrpc_connection *conn)
113 115
114 _enter(""); 116 _enter("");
115 117
116 if (!conn->key) 118 if (!conn->params.key)
117 return; 119 return;
118 120
119 token = conn->key->payload.data[0]; 121 token = conn->params.key->payload.data[0];
120 memcpy(&iv, token->kad->session_key, sizeof(iv)); 122 memcpy(&iv, token->kad->session_key, sizeof(iv));
121 123
122 tmpbuf.x[0] = htonl(conn->epoch); 124 tmpbuf.x[0] = htonl(conn->proto.epoch);
123 tmpbuf.x[1] = htonl(conn->cid); 125 tmpbuf.x[1] = htonl(conn->proto.cid);
124 tmpbuf.x[2] = 0; 126 tmpbuf.x[2] = 0;
125 tmpbuf.x[3] = htonl(conn->security_ix); 127 tmpbuf.x[3] = htonl(conn->security_ix);
126 128
@@ -218,7 +220,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
218 rxkhdr.checksum = 0; 220 rxkhdr.checksum = 0;
219 221
220 /* encrypt from the session key */ 222 /* encrypt from the session key */
221 token = call->conn->key->payload.data[0]; 223 token = call->conn->params.key->payload.data[0];
222 memcpy(&iv, token->kad->session_key, sizeof(iv)); 224 memcpy(&iv, token->kad->session_key, sizeof(iv));
223 225
224 sg_init_one(&sg[0], sechdr, sizeof(rxkhdr)); 226 sg_init_one(&sg[0], sechdr, sizeof(rxkhdr));
@@ -275,13 +277,13 @@ static int rxkad_secure_packet(const struct rxrpc_call *call,
275 sp = rxrpc_skb(skb); 277 sp = rxrpc_skb(skb);
276 278
277 _enter("{%d{%x}},{#%u},%zu,", 279 _enter("{%d{%x}},{#%u},%zu,",
278 call->debug_id, key_serial(call->conn->key), sp->hdr.seq, 280 call->debug_id, key_serial(call->conn->params.key),
279 data_size); 281 sp->hdr.seq, data_size);
280 282
281 if (!call->conn->cipher) 283 if (!call->conn->cipher)
282 return 0; 284 return 0;
283 285
284 ret = key_validate(call->conn->key); 286 ret = key_validate(call->conn->params.key);
285 if (ret < 0) 287 if (ret < 0)
286 return ret; 288 return ret;
287 289
@@ -310,7 +312,7 @@ static int rxkad_secure_packet(const struct rxrpc_call *call,
310 y = 1; /* zero checksums are not permitted */ 312 y = 1; /* zero checksums are not permitted */
311 sp->hdr.cksum = y; 313 sp->hdr.cksum = y;
312 314
313 switch (call->conn->security_level) { 315 switch (call->conn->params.security_level) {
314 case RXRPC_SECURITY_PLAIN: 316 case RXRPC_SECURITY_PLAIN:
315 ret = 0; 317 ret = 0;
316 break; 318 break;
@@ -444,7 +446,7 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
444 skb_to_sgvec(skb, sg, 0, skb->len); 446 skb_to_sgvec(skb, sg, 0, skb->len);
445 447
446 /* decrypt from the session key */ 448 /* decrypt from the session key */
447 token = call->conn->key->payload.data[0]; 449 token = call->conn->params.key->payload.data[0];
448 memcpy(&iv, token->kad->session_key, sizeof(iv)); 450 memcpy(&iv, token->kad->session_key, sizeof(iv));
449 451
450 skcipher_request_set_tfm(req, call->conn->cipher); 452 skcipher_request_set_tfm(req, call->conn->cipher);
@@ -514,7 +516,7 @@ static int rxkad_verify_packet(const struct rxrpc_call *call,
514 sp = rxrpc_skb(skb); 516 sp = rxrpc_skb(skb);
515 517
516 _enter("{%d{%x}},{#%u}", 518 _enter("{%d{%x}},{#%u}",
517 call->debug_id, key_serial(call->conn->key), sp->hdr.seq); 519 call->debug_id, key_serial(call->conn->params.key), sp->hdr.seq);
518 520
519 if (!call->conn->cipher) 521 if (!call->conn->cipher)
520 return 0; 522 return 0;
@@ -555,7 +557,7 @@ static int rxkad_verify_packet(const struct rxrpc_call *call,
555 return -EPROTO; 557 return -EPROTO;
556 } 558 }
557 559
558 switch (call->conn->security_level) { 560 switch (call->conn->params.security_level) {
559 case RXRPC_SECURITY_PLAIN: 561 case RXRPC_SECURITY_PLAIN:
560 ret = 0; 562 ret = 0;
561 break; 563 break;
@@ -587,9 +589,9 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
587 u32 serial; 589 u32 serial;
588 int ret; 590 int ret;
589 591
590 _enter("{%d,%x}", conn->debug_id, key_serial(conn->key)); 592 _enter("{%d,%x}", conn->debug_id, key_serial(conn->params.key));
591 593
592 ret = key_validate(conn->key); 594 ret = key_validate(conn->params.key);
593 if (ret < 0) 595 if (ret < 0)
594 return ret; 596 return ret;
595 597
@@ -600,14 +602,14 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
600 challenge.min_level = htonl(0); 602 challenge.min_level = htonl(0);
601 challenge.__padding = 0; 603 challenge.__padding = 0;
602 604
603 msg.msg_name = &conn->trans->peer->srx.transport.sin; 605 msg.msg_name = &conn->params.peer->srx.transport.sin;
604 msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin); 606 msg.msg_namelen = sizeof(conn->params.peer->srx.transport.sin);
605 msg.msg_control = NULL; 607 msg.msg_control = NULL;
606 msg.msg_controllen = 0; 608 msg.msg_controllen = 0;
607 msg.msg_flags = 0; 609 msg.msg_flags = 0;
608 610
609 whdr.epoch = htonl(conn->epoch); 611 whdr.epoch = htonl(conn->proto.epoch);
610 whdr.cid = htonl(conn->cid); 612 whdr.cid = htonl(conn->proto.cid);
611 whdr.callNumber = 0; 613 whdr.callNumber = 0;
612 whdr.seq = 0; 614 whdr.seq = 0;
613 whdr.type = RXRPC_PACKET_TYPE_CHALLENGE; 615 whdr.type = RXRPC_PACKET_TYPE_CHALLENGE;
@@ -615,7 +617,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
615 whdr.userStatus = 0; 617 whdr.userStatus = 0;
616 whdr.securityIndex = conn->security_ix; 618 whdr.securityIndex = conn->security_ix;
617 whdr._rsvd = 0; 619 whdr._rsvd = 0;
618 whdr.serviceId = htons(conn->service_id); 620 whdr.serviceId = htons(conn->params.service_id);
619 621
620 iov[0].iov_base = &whdr; 622 iov[0].iov_base = &whdr;
621 iov[0].iov_len = sizeof(whdr); 623 iov[0].iov_len = sizeof(whdr);
@@ -628,7 +630,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
628 whdr.serial = htonl(serial); 630 whdr.serial = htonl(serial);
629 _proto("Tx CHALLENGE %%%u", serial); 631 _proto("Tx CHALLENGE %%%u", serial);
630 632
631 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); 633 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
632 if (ret < 0) { 634 if (ret < 0) {
633 _debug("sendmsg failed: %d", ret); 635 _debug("sendmsg failed: %d", ret);
634 return -EAGAIN; 636 return -EAGAIN;
@@ -655,8 +657,8 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
655 657
656 _enter(""); 658 _enter("");
657 659
658 msg.msg_name = &conn->trans->peer->srx.transport.sin; 660 msg.msg_name = &conn->params.peer->srx.transport.sin;
659 msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin); 661 msg.msg_namelen = sizeof(conn->params.peer->srx.transport.sin);
660 msg.msg_control = NULL; 662 msg.msg_control = NULL;
661 msg.msg_controllen = 0; 663 msg.msg_controllen = 0;
662 msg.msg_flags = 0; 664 msg.msg_flags = 0;
@@ -682,7 +684,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
682 whdr.serial = htonl(serial); 684 whdr.serial = htonl(serial);
683 _proto("Tx RESPONSE %%%u", serial); 685 _proto("Tx RESPONSE %%%u", serial);
684 686
685 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len); 687 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 3, len);
686 if (ret < 0) { 688 if (ret < 0) {
687 _debug("sendmsg failed: %d", ret); 689 _debug("sendmsg failed: %d", ret);
688 return -EAGAIN; 690 return -EAGAIN;
@@ -769,14 +771,14 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
769 u32 version, nonce, min_level, abort_code; 771 u32 version, nonce, min_level, abort_code;
770 int ret; 772 int ret;
771 773
772 _enter("{%d,%x}", conn->debug_id, key_serial(conn->key)); 774 _enter("{%d,%x}", conn->debug_id, key_serial(conn->params.key));
773 775
774 if (!conn->key) { 776 if (!conn->params.key) {
775 _leave(" = -EPROTO [no key]"); 777 _leave(" = -EPROTO [no key]");
776 return -EPROTO; 778 return -EPROTO;
777 } 779 }
778 780
779 ret = key_validate(conn->key); 781 ret = key_validate(conn->params.key);
780 if (ret < 0) { 782 if (ret < 0) {
781 *_abort_code = RXKADEXPIRED; 783 *_abort_code = RXKADEXPIRED;
782 return ret; 784 return ret;
@@ -799,20 +801,20 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
799 goto protocol_error; 801 goto protocol_error;
800 802
801 abort_code = RXKADLEVELFAIL; 803 abort_code = RXKADLEVELFAIL;
802 if (conn->security_level < min_level) 804 if (conn->params.security_level < min_level)
803 goto protocol_error; 805 goto protocol_error;
804 806
805 token = conn->key->payload.data[0]; 807 token = conn->params.key->payload.data[0];
806 808
807 /* build the response packet */ 809 /* build the response packet */
808 memset(&resp, 0, sizeof(resp)); 810 memset(&resp, 0, sizeof(resp));
809 811
810 resp.version = htonl(RXKAD_VERSION); 812 resp.version = htonl(RXKAD_VERSION);
811 resp.encrypted.epoch = htonl(conn->epoch); 813 resp.encrypted.epoch = htonl(conn->proto.epoch);
812 resp.encrypted.cid = htonl(conn->cid); 814 resp.encrypted.cid = htonl(conn->proto.cid);
813 resp.encrypted.securityIndex = htonl(conn->security_ix); 815 resp.encrypted.securityIndex = htonl(conn->security_ix);
814 resp.encrypted.inc_nonce = htonl(nonce + 1); 816 resp.encrypted.inc_nonce = htonl(nonce + 1);
815 resp.encrypted.level = htonl(conn->security_level); 817 resp.encrypted.level = htonl(conn->params.security_level);
816 resp.kvno = htonl(token->kad->kvno); 818 resp.kvno = htonl(token->kad->kvno);
817 resp.ticket_len = htonl(token->kad->ticket_len); 819 resp.ticket_len = htonl(token->kad->ticket_len);
818 820
@@ -1094,9 +1096,9 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
1094 rxkad_decrypt_response(conn, &response, &session_key); 1096 rxkad_decrypt_response(conn, &response, &session_key);
1095 1097
1096 abort_code = RXKADSEALEDINCON; 1098 abort_code = RXKADSEALEDINCON;
1097 if (ntohl(response.encrypted.epoch) != conn->epoch) 1099 if (ntohl(response.encrypted.epoch) != conn->proto.epoch)
1098 goto protocol_error_free; 1100 goto protocol_error_free;
1099 if (ntohl(response.encrypted.cid) != conn->cid) 1101 if (ntohl(response.encrypted.cid) != conn->proto.cid)
1100 goto protocol_error_free; 1102 goto protocol_error_free;
1101 if (ntohl(response.encrypted.securityIndex) != conn->security_ix) 1103 if (ntohl(response.encrypted.securityIndex) != conn->security_ix)
1102 goto protocol_error_free; 1104 goto protocol_error_free;
@@ -1120,7 +1122,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
1120 level = ntohl(response.encrypted.level); 1122 level = ntohl(response.encrypted.level);
1121 if (level > RXRPC_SECURITY_ENCRYPT) 1123 if (level > RXRPC_SECURITY_ENCRYPT)
1122 goto protocol_error_free; 1124 goto protocol_error_free;
1123 conn->security_level = level; 1125 conn->params.security_level = level;
1124 1126
1125 /* create a key to hold the security data and expiration time - after 1127 /* create a key to hold the security data and expiration time - after
1126 * this the connection security can be handled in exactly the same way 1128 * this the connection security can be handled in exactly the same way
diff --git a/net/rxrpc/ar-security.c b/net/rxrpc/security.c
index d223253b22fa..814d285ff802 100644
--- a/net/rxrpc/ar-security.c
+++ b/net/rxrpc/security.c
@@ -76,7 +76,7 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
76{ 76{
77 const struct rxrpc_security *sec; 77 const struct rxrpc_security *sec;
78 struct rxrpc_key_token *token; 78 struct rxrpc_key_token *token;
79 struct key *key = conn->key; 79 struct key *key = conn->params.key;
80 int ret; 80 int ret;
81 81
82 _enter("{%d},{%x}", conn->debug_id, key_serial(key)); 82 _enter("{%d},{%x}", conn->debug_id, key_serial(key));
@@ -113,7 +113,7 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
113int rxrpc_init_server_conn_security(struct rxrpc_connection *conn) 113int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
114{ 114{
115 const struct rxrpc_security *sec; 115 const struct rxrpc_security *sec;
116 struct rxrpc_local *local = conn->trans->local; 116 struct rxrpc_local *local = conn->params.local;
117 struct rxrpc_sock *rx; 117 struct rxrpc_sock *rx;
118 struct key *key; 118 struct key *key;
119 key_ref_t kref; 119 key_ref_t kref;
@@ -121,7 +121,7 @@ int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
121 121
122 _enter(""); 122 _enter("");
123 123
124 sprintf(kdesc, "%u:%u", conn->service_id, conn->security_ix); 124 sprintf(kdesc, "%u:%u", conn->params.service_id, conn->security_ix);
125 125
126 sec = rxrpc_security_lookup(conn->security_ix); 126 sec = rxrpc_security_lookup(conn->security_ix);
127 if (!sec) { 127 if (!sec) {
@@ -132,7 +132,7 @@ int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
132 /* find the service */ 132 /* find the service */
133 read_lock_bh(&local->services_lock); 133 read_lock_bh(&local->services_lock);
134 list_for_each_entry(rx, &local->services, listen_link) { 134 list_for_each_entry(rx, &local->services, listen_link) {
135 if (rx->srx.srx_service == conn->service_id) 135 if (rx->srx.srx_service == conn->params.service_id)
136 goto found_service; 136 goto found_service;
137 } 137 }
138 138
diff --git a/net/rxrpc/ar-skbuff.c b/net/rxrpc/skbuff.c
index 62a267472fce..eee0cfd9ac8c 100644
--- a/net/rxrpc/ar-skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -9,6 +9,8 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/module.h> 14#include <linux/module.h>
13#include <linux/net.h> 15#include <linux/net.h>
14#include <linux/skbuff.h> 16#include <linux/skbuff.h>
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
index d20ed575acf4..03ad08774d4e 100644
--- a/net/rxrpc/sysctl.c
+++ b/net/rxrpc/sysctl.c
@@ -18,6 +18,7 @@ static struct ctl_table_header *rxrpc_sysctl_reg_table;
18static const unsigned int zero = 0; 18static const unsigned int zero = 0;
19static const unsigned int one = 1; 19static const unsigned int one = 1;
20static const unsigned int four = 4; 20static const unsigned int four = 4;
21static const unsigned int thirtytwo = 32;
21static const unsigned int n_65535 = 65535; 22static const unsigned int n_65535 = 65535;
22static const unsigned int n_max_acks = RXRPC_MAXACKS; 23static const unsigned int n_max_acks = RXRPC_MAXACKS;
23 24
@@ -89,16 +90,17 @@ static struct ctl_table rxrpc_sysctl_table[] = {
89 .proc_handler = proc_dointvec_minmax, 90 .proc_handler = proc_dointvec_minmax,
90 .extra1 = (void *)&one, 91 .extra1 = (void *)&one,
91 }, 92 },
93
94 /* Non-time values */
92 { 95 {
93 .procname = "transport_expiry", 96 .procname = "max_backlog",
94 .data = &rxrpc_transport_expiry, 97 .data = &rxrpc_max_backlog,
95 .maxlen = sizeof(unsigned int), 98 .maxlen = sizeof(unsigned int),
96 .mode = 0644, 99 .mode = 0644,
97 .proc_handler = proc_dointvec_minmax, 100 .proc_handler = proc_dointvec_minmax,
98 .extra1 = (void *)&one, 101 .extra1 = (void *)&four,
102 .extra2 = (void *)&thirtytwo,
99 }, 103 },
100
101 /* Non-time values */
102 { 104 {
103 .procname = "rx_window_size", 105 .procname = "rx_window_size",
104 .data = &rxrpc_rx_window_size, 106 .data = &rxrpc_rx_window_size,
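The relocated max_backlog entry is clamped by proc_dointvec_minmax: extra1 and extra2 bound writes to the range 4..32, and values outside the range are rejected with -EINVAL rather than saturated. The same pattern in isolation, with hypothetical names:

/* Hypothetical example of the minmax clamp pattern used above. */
static const unsigned int ex_min = 4;
static const unsigned int ex_max = 32;
static unsigned int ex_value = 8;

static struct ctl_table ex_table[] = {
	{
		.procname	= "example_knob",
		.data		= &ex_value,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= (void *)&ex_min,	/* reject writes below 4 */
		.extra2		= (void *)&ex_max,	/* reject writes above 32 */
	},
	{ }
};
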
diff --git a/net/rxrpc/utils.c b/net/rxrpc/utils.c
new file mode 100644
index 000000000000..f28122a15a24
--- /dev/null
+++ b/net/rxrpc/utils.c
@@ -0,0 +1,41 @@
1/* Utility routines
2 *
3 * Copyright (C) 2015 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/ip.h>
13#include <linux/udp.h>
14#include "ar-internal.h"
15
16/*
17 * Set up an RxRPC address from a socket buffer.
18 */
19void rxrpc_get_addr_from_skb(struct rxrpc_local *local,
20 const struct sk_buff *skb,
21 struct sockaddr_rxrpc *srx)
22{
23 memset(srx, 0, sizeof(*srx));
24 srx->transport_type = local->srx.transport_type;
25 srx->transport.family = local->srx.transport.family;
26
27 /* Can we see an ipv4 UDP packet on an ipv6 UDP socket? and vice
28 * versa?
29 */
30 switch (srx->transport.family) {
31 case AF_INET:
32 srx->transport.sin.sin_port = udp_hdr(skb)->source;
33 srx->transport_len = sizeof(struct sockaddr_in);
34 memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr,
35 sizeof(struct in_addr));
36 break;
37
38 default:
39 BUG();
40 }
41}
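rxrpc_get_addr_from_skb() fills a sockaddr_rxrpc from the source address of a received UDP packet; only AF_INET is handled so far, and anything else hits the BUG(). A hypothetical caller on the receive path might use it to find the sending peer; rxrpc_lookup_peer() is assumed here for illustration and is not part of this file:

struct sockaddr_rxrpc srx;
struct rxrpc_peer *peer;

rxrpc_get_addr_from_skb(local, skb, &srx);
peer = rxrpc_lookup_peer(local, &srx, GFP_NOIO);	/* assumed helper */
if (!peer)
	goto discard_packet;
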
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index c7a0b0d481c0..47ec2305f920 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -224,8 +224,8 @@ int tcf_hash_search(struct tc_action_net *tn, struct tc_action *a, u32 index)
224} 224}
225EXPORT_SYMBOL(tcf_hash_search); 225EXPORT_SYMBOL(tcf_hash_search);
226 226
227int tcf_hash_check(struct tc_action_net *tn, u32 index, struct tc_action *a, 227bool tcf_hash_check(struct tc_action_net *tn, u32 index, struct tc_action *a,
228 int bind) 228 int bind)
229{ 229{
230 struct tcf_hashinfo *hinfo = tn->hinfo; 230 struct tcf_hashinfo *hinfo = tn->hinfo;
231 struct tcf_common *p = NULL; 231 struct tcf_common *p = NULL;
@@ -235,9 +235,9 @@ int tcf_hash_check(struct tc_action_net *tn, u32 index, struct tc_action *a,
235 p->tcfc_refcnt++; 235 p->tcfc_refcnt++;
236 a->priv = p; 236 a->priv = p;
237 a->hinfo = hinfo; 237 a->hinfo = hinfo;
238 return 1; 238 return true;
239 } 239 }
240 return 0; 240 return false;
241} 241}
242EXPORT_SYMBOL(tcf_hash_check); 242EXPORT_SYMBOL(tcf_hash_check);
243 243
@@ -283,10 +283,11 @@ err2:
283 p->tcfc_index = index ? index : tcf_hash_new_index(tn); 283 p->tcfc_index = index ? index : tcf_hash_new_index(tn);
284 p->tcfc_tm.install = jiffies; 284 p->tcfc_tm.install = jiffies;
285 p->tcfc_tm.lastuse = jiffies; 285 p->tcfc_tm.lastuse = jiffies;
286 p->tcfc_tm.firstuse = 0;
286 if (est) { 287 if (est) {
287 err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats, 288 err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats,
288 &p->tcfc_rate_est, 289 &p->tcfc_rate_est,
289 &p->tcfc_lock, est); 290 &p->tcfc_lock, NULL, est);
290 if (err) { 291 if (err) {
291 free_percpu(p->cpu_qstats); 292 free_percpu(p->cpu_qstats);
292 goto err2; 293 goto err2;
@@ -503,8 +504,8 @@ nla_put_failure:
503} 504}
504EXPORT_SYMBOL(tcf_action_dump_1); 505EXPORT_SYMBOL(tcf_action_dump_1);
505 506
506int 507int tcf_action_dump(struct sk_buff *skb, struct list_head *actions,
507tcf_action_dump(struct sk_buff *skb, struct list_head *actions, int bind, int ref) 508 int bind, int ref)
508{ 509{
509 struct tc_action *a; 510 struct tc_action *a;
510 int err = -EINVAL; 511 int err = -EINVAL;
@@ -670,7 +671,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
670 if (err < 0) 671 if (err < 0)
671 goto errout; 672 goto errout;
672 673
673 if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 || 674 if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
674 gnet_stats_copy_rate_est(&d, &p->tcfc_bstats, 675 gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
675 &p->tcfc_rate_est) < 0 || 676 &p->tcfc_rate_est) < 0 ||
676 gnet_stats_copy_queue(&d, p->cpu_qstats, 677 gnet_stats_copy_queue(&d, p->cpu_qstats,
@@ -687,9 +688,9 @@ errout:
687 return -1; 688 return -1;
688} 689}
689 690
690static int 691static int tca_get_fill(struct sk_buff *skb, struct list_head *actions,
691tca_get_fill(struct sk_buff *skb, struct list_head *actions, u32 portid, u32 seq, 692 u32 portid, u32 seq, u16 flags, int event, int bind,
692 u16 flags, int event, int bind, int ref) 693 int ref)
693{ 694{
694 struct tcamsg *t; 695 struct tcamsg *t;
695 struct nlmsghdr *nlh; 696 struct nlmsghdr *nlh;
@@ -730,7 +731,8 @@ act_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
730 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 731 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
731 if (!skb) 732 if (!skb)
732 return -ENOBUFS; 733 return -ENOBUFS;
733 if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, 0, 0) <= 0) { 734 if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
735 0, 0) <= 0) {
734 kfree_skb(skb); 736 kfree_skb(skb);
735 return -EINVAL; 737 return -EINVAL;
736 } 738 }
@@ -838,7 +840,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
838 if (a.ops == NULL) /*some idjot trying to flush unknown action */ 840 if (a.ops == NULL) /*some idjot trying to flush unknown action */
839 goto err_out; 841 goto err_out;
840 842
841 nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0); 843 nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
844 sizeof(*t), 0);
842 if (!nlh) 845 if (!nlh)
843 goto out_module_put; 846 goto out_module_put;
844 t = nlmsg_data(nlh); 847 t = nlmsg_data(nlh);
@@ -1001,7 +1004,8 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n)
1001 u32 portid = skb ? NETLINK_CB(skb).portid : 0; 1004 u32 portid = skb ? NETLINK_CB(skb).portid : 0;
1002 int ret = 0, ovr = 0; 1005 int ret = 0, ovr = 0;
1003 1006
1004 if ((n->nlmsg_type != RTM_GETACTION) && !netlink_capable(skb, CAP_NET_ADMIN)) 1007 if ((n->nlmsg_type != RTM_GETACTION) &&
1008 !netlink_capable(skb, CAP_NET_ADMIN))
1005 return -EPERM; 1009 return -EPERM;
1006 1010
1007 ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL); 1011 ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
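Note the extra NULL slipped into gen_new_estimator() above, between the stats lock and the nlattr. Taken together with the qdisc changes at the end of this series, where a seqcount_t *running obtained from qdisc_root_sleeping_running() replaces the root lock, the new parameter is evidently a seqcount that lets estimator readers snapshot stats without taking the qdisc spinlock; actions still serialize with tcfc_lock, so they pass NULL. The prototype this implies:

/* Prototype inferred from the call sites in this diff. */
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
		      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
		      struct gnet_stats_rate_est64 *rate_est,
		      spinlock_t *stats_lock,
		      seqcount_t *running,	/* new: lockless stats reads */
		      struct nlattr *opt);

gnet_stats_copy_basic() grows a matching leading argument, which is why tcf_action_copy_stats() now calls it with an initial NULL.
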
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index c7123e01c2ca..f7b6cf49ea6f 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -154,10 +154,7 @@ static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
154 if (ret) 154 if (ret)
155 goto nla_put_failure; 155 goto nla_put_failure;
156 156
157 tm.install = jiffies_to_clock_t(jiffies - prog->tcf_tm.install); 157 tcf_tm_dump(&tm, &prog->tcf_tm);
158 tm.lastuse = jiffies_to_clock_t(jiffies - prog->tcf_tm.lastuse);
159 tm.expires = jiffies_to_clock_t(prog->tcf_tm.expires);
160
161 if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm, 158 if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
162 TCA_ACT_BPF_PAD)) 159 TCA_ACT_BPF_PAD))
163 goto nla_put_failure; 160 goto nla_put_failure;
@@ -172,7 +169,8 @@ nla_put_failure:
172static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = { 169static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
173 [TCA_ACT_BPF_PARMS] = { .len = sizeof(struct tc_act_bpf) }, 170 [TCA_ACT_BPF_PARMS] = { .len = sizeof(struct tc_act_bpf) },
174 [TCA_ACT_BPF_FD] = { .type = NLA_U32 }, 171 [TCA_ACT_BPF_FD] = { .type = NLA_U32 },
175 [TCA_ACT_BPF_NAME] = { .type = NLA_NUL_STRING, .len = ACT_BPF_NAME_LEN }, 172 [TCA_ACT_BPF_NAME] = { .type = NLA_NUL_STRING,
173 .len = ACT_BPF_NAME_LEN },
176 [TCA_ACT_BPF_OPS_LEN] = { .type = NLA_U16 }, 174 [TCA_ACT_BPF_OPS_LEN] = { .type = NLA_U16 },
177 [TCA_ACT_BPF_OPS] = { .type = NLA_BINARY, 175 [TCA_ACT_BPF_OPS] = { .type = NLA_BINARY,
178 .len = sizeof(struct sock_filter) * BPF_MAXINSNS }, 176 .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
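tcf_bpf_dump() is the first of many dump routines converted from three open-coded jiffies_to_clock_t() conversions to a single tcf_tm_dump() call. Folding in the act_police hunk further down, which also reports the new firstuse stamp, a definition consistent with what it replaces would be:

/* Reconstructed from the open-coded sequences this helper replaces. */
static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm)
{
	dtm->install  = jiffies_to_clock_t(jiffies - stm->install);
	dtm->lastuse  = jiffies_to_clock_t(jiffies - stm->lastuse);
	dtm->firstuse = jiffies_to_clock_t(jiffies - stm->firstuse);
	dtm->expires  = jiffies_to_clock_t(stm->expires);
}
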
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 2ba700c765e0..35a5270f289d 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -44,7 +44,7 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
44 int proto; 44 int proto;
45 45
46 spin_lock(&ca->tcf_lock); 46 spin_lock(&ca->tcf_lock);
47 ca->tcf_tm.lastuse = jiffies; 47 tcf_lastuse_update(&ca->tcf_tm);
48 bstats_update(&ca->tcf_bstats, skb); 48 bstats_update(&ca->tcf_bstats, skb);
49 49
50 if (skb->protocol == htons(ETH_P_IP)) { 50 if (skb->protocol == htons(ETH_P_IP)) {
@@ -160,9 +160,7 @@ static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
160 if (nla_put(skb, TCA_CONNMARK_PARMS, sizeof(opt), &opt)) 160 if (nla_put(skb, TCA_CONNMARK_PARMS, sizeof(opt), &opt))
161 goto nla_put_failure; 161 goto nla_put_failure;
162 162
163 t.install = jiffies_to_clock_t(jiffies - ci->tcf_tm.install); 163 tcf_tm_dump(&t, &ci->tcf_tm);
164 t.lastuse = jiffies_to_clock_t(jiffies - ci->tcf_tm.lastuse);
165 t.expires = jiffies_to_clock_t(ci->tcf_tm.expires);
166 if (nla_put_64bit(skb, TCA_CONNMARK_TM, sizeof(t), &t, 164 if (nla_put_64bit(skb, TCA_CONNMARK_TM, sizeof(t), &t,
167 TCA_CONNMARK_PAD)) 165 TCA_CONNMARK_PAD))
168 goto nla_put_failure; 166 goto nla_put_failure;
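The companion helper on the packet path, tcf_lastuse_update(), replaces the bare tm->lastuse = jiffies assignments throughout these actions. Since tcfc_tm.firstuse is now zeroed at creation in tcf_hash_create() and dumped alongside the other stamps, the helper plausibly records the first use as a side effect:

/* Plausible definition; the firstuse side effect is inferred. */
static inline void tcf_lastuse_update(struct tcf_t *tm)
{
	unsigned long now = jiffies;

	if (tm->lastuse != now)
		tm->lastuse = now;
	if (unlikely(!tm->firstuse))
		tm->firstuse = now;
}
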
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 28e934ed038a..dcd9ababd351 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -501,7 +501,7 @@ static int tcf_csum(struct sk_buff *skb,
501 u32 update_flags; 501 u32 update_flags;
502 502
503 spin_lock(&p->tcf_lock); 503 spin_lock(&p->tcf_lock);
504 p->tcf_tm.lastuse = jiffies; 504 tcf_lastuse_update(&p->tcf_tm);
505 bstats_update(&p->tcf_bstats, skb); 505 bstats_update(&p->tcf_bstats, skb);
506 action = p->tcf_action; 506 action = p->tcf_action;
507 update_flags = p->update_flags; 507 update_flags = p->update_flags;
@@ -546,9 +546,8 @@ static int tcf_csum_dump(struct sk_buff *skb,
546 546
547 if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt)) 547 if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
548 goto nla_put_failure; 548 goto nla_put_failure;
549 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); 549
550 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); 550 tcf_tm_dump(&t, &p->tcf_tm);
551 t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
552 if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD)) 551 if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
553 goto nla_put_failure; 552 goto nla_put_failure;
554 553
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index ec5cc8435238..19058a7f3e5c 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -162,7 +162,8 @@ static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
162 tm->lastuse = lastuse; 162 tm->lastuse = lastuse;
163} 163}
164 164
165static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 165static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a,
166 int bind, int ref)
166{ 167{
167 unsigned char *b = skb_tail_pointer(skb); 168 unsigned char *b = skb_tail_pointer(skb);
168 struct tcf_gact *gact = a->priv; 169 struct tcf_gact *gact = a->priv;
@@ -188,9 +189,7 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
188 goto nla_put_failure; 189 goto nla_put_failure;
189 } 190 }
190#endif 191#endif
191 t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install); 192 tcf_tm_dump(&t, &gact->tcf_tm);
192 t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse);
193 t.expires = jiffies_to_clock_t(gact->tcf_tm.expires);
194 if (nla_put_64bit(skb, TCA_GACT_TM, sizeof(t), &t, TCA_GACT_PAD)) 193 if (nla_put_64bit(skb, TCA_GACT_TM, sizeof(t), &t, TCA_GACT_PAD))
195 goto nla_put_failure; 194 goto nla_put_failure;
196 return skb->len; 195 return skb->len;
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index ea4a2fef1b71..845ab5119c05 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -428,7 +428,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
428 u16 ife_type = 0; 428 u16 ife_type = 0;
429 u8 *daddr = NULL; 429 u8 *daddr = NULL;
430 u8 *saddr = NULL; 430 u8 *saddr = NULL;
431 int ret = 0, exists = 0; 431 bool exists = false;
432 int ret = 0;
432 int err; 433 int err;
433 434
434 err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy); 435 err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy);
@@ -562,9 +563,7 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
562 if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt)) 563 if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
563 goto nla_put_failure; 564 goto nla_put_failure;
564 565
565 t.install = jiffies_to_clock_t(jiffies - ife->tcf_tm.install); 566 tcf_tm_dump(&t, &ife->tcf_tm);
566 t.lastuse = jiffies_to_clock_t(jiffies - ife->tcf_tm.lastuse);
567 t.expires = jiffies_to_clock_t(ife->tcf_tm.expires);
568 if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD)) 567 if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
569 goto nla_put_failure; 568 goto nla_put_failure;
570 569
@@ -632,7 +631,7 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
632 631
633 spin_lock(&ife->tcf_lock); 632 spin_lock(&ife->tcf_lock);
634 bstats_update(&ife->tcf_bstats, skb); 633 bstats_update(&ife->tcf_bstats, skb);
635 ife->tcf_tm.lastuse = jiffies; 634 tcf_lastuse_update(&ife->tcf_tm);
636 spin_unlock(&ife->tcf_lock); 635 spin_unlock(&ife->tcf_lock);
637 636
638 ifehdrln = ntohs(ifehdrln); 637 ifehdrln = ntohs(ifehdrln);
@@ -720,7 +719,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
720 719
721 spin_lock(&ife->tcf_lock); 720 spin_lock(&ife->tcf_lock);
722 bstats_update(&ife->tcf_bstats, skb); 721 bstats_update(&ife->tcf_bstats, skb);
723 ife->tcf_tm.lastuse = jiffies; 722 tcf_lastuse_update(&ife->tcf_tm);
724 723
725 if (!metalen) { /* no metadata to send */ 724 if (!metalen) { /* no metadata to send */
726 /* abuse overlimits to count when we allow packet 725 /* abuse overlimits to count when we allow packet
@@ -811,7 +810,7 @@ static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
811 pr_info_ratelimited("unknown failure(policy neither de/encode\n"); 810 pr_info_ratelimited("unknown failure(policy neither de/encode\n");
812 spin_lock(&ife->tcf_lock); 811 spin_lock(&ife->tcf_lock);
813 bstats_update(&ife->tcf_bstats, skb); 812 bstats_update(&ife->tcf_bstats, skb);
814 ife->tcf_tm.lastuse = jiffies; 813 tcf_lastuse_update(&ife->tcf_tm);
815 ife->tcf_qstats.drops++; 814 ife->tcf_qstats.drops++;
816 spin_unlock(&ife->tcf_lock); 815 spin_unlock(&ife->tcf_lock);
817 816
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index d4bd19ee5822..b8c50600697a 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -34,7 +34,8 @@ static int ipt_net_id;
34 34
35static int xt_net_id; 35static int xt_net_id;
36 36
37static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int hook) 37static int ipt_init_target(struct xt_entry_target *t, char *table,
38 unsigned int hook)
38{ 39{
39 struct xt_tgchk_param par; 40 struct xt_tgchk_param par;
40 struct xt_target *target; 41 struct xt_target *target;
@@ -96,7 +97,8 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
96 struct tcf_ipt *ipt; 97 struct tcf_ipt *ipt;
97 struct xt_entry_target *td, *t; 98 struct xt_entry_target *td, *t;
98 char *tname; 99 char *tname;
99 int ret = 0, err, exists = 0; 100 bool exists = false;
101 int ret = 0, err;
100 u32 hook = 0; 102 u32 hook = 0;
101 u32 index = 0; 103 u32 index = 0;
102 104
@@ -215,7 +217,7 @@ static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
215 217
216 spin_lock(&ipt->tcf_lock); 218 spin_lock(&ipt->tcf_lock);
217 219
218 ipt->tcf_tm.lastuse = jiffies; 220 tcf_lastuse_update(&ipt->tcf_tm);
219 bstats_update(&ipt->tcf_bstats, skb); 221 bstats_update(&ipt->tcf_bstats, skb);
220 222
221 /* yes, we have to worry about both in and out dev 223 /* yes, we have to worry about both in and out dev
@@ -245,7 +247,7 @@ static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
245 default: 247 default:
246 net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n", 248 net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
247 ret); 249 ret);
248 result = TC_POLICE_OK; 250 result = TC_ACT_OK;
249 break; 251 break;
250 } 252 }
251 spin_unlock(&ipt->tcf_lock); 253 spin_unlock(&ipt->tcf_lock);
@@ -253,7 +255,8 @@ static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
253 255
254} 256}
255 257
256static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 258static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
259 int ref)
257{ 260{
258 unsigned char *b = skb_tail_pointer(skb); 261 unsigned char *b = skb_tail_pointer(skb);
259 struct tcf_ipt *ipt = a->priv; 262 struct tcf_ipt *ipt = a->priv;
@@ -280,11 +283,11 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
280 nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) || 283 nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
281 nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname)) 284 nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
282 goto nla_put_failure; 285 goto nla_put_failure;
283 tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install); 286
284 tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse); 287 tcf_tm_dump(&tm, &ipt->tcf_tm);
285 tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires);
286 if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD)) 288 if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
287 goto nla_put_failure; 289 goto nla_put_failure;
290
288 kfree(t); 291 kfree(t);
289 return skb->len; 292 return skb->len;
290 293
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 128942bc9e42..5b135d357e1e 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -62,7 +62,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
62 struct tc_mirred *parm; 62 struct tc_mirred *parm;
63 struct tcf_mirred *m; 63 struct tcf_mirred *m;
64 struct net_device *dev; 64 struct net_device *dev;
65 int ret, ok_push = 0, exists = 0; 65 int ret, ok_push = 0;
66 bool exists = false;
66 67
67 if (nla == NULL) 68 if (nla == NULL)
68 return -EINVAL; 69 return -EINVAL;
@@ -157,7 +158,6 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
157 u32 at; 158 u32 at;
158 159
159 tcf_lastuse_update(&m->tcf_tm); 160 tcf_lastuse_update(&m->tcf_tm);
160
161 bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb); 161 bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
162 162
163 rcu_read_lock(); 163 rcu_read_lock();
@@ -219,9 +219,8 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, i
219 219
220 if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt)) 220 if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
221 goto nla_put_failure; 221 goto nla_put_failure;
222 t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install); 222
223 t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse); 223 tcf_tm_dump(&t, &m->tcf_tm);
224 t.expires = jiffies_to_clock_t(m->tcf_tm.expires);
225 if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD)) 224 if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
226 goto nla_put_failure; 225 goto nla_put_failure;
227 return skb->len; 226 return skb->len;
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index c0a879f940de..06ccb03f25da 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -103,7 +103,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
103 103
104 spin_lock(&p->tcf_lock); 104 spin_lock(&p->tcf_lock);
105 105
106 p->tcf_tm.lastuse = jiffies; 106 tcf_lastuse_update(&p->tcf_tm);
107 old_addr = p->old_addr; 107 old_addr = p->old_addr;
108 new_addr = p->new_addr; 108 new_addr = p->new_addr;
109 mask = p->mask; 109 mask = p->mask;
@@ -264,9 +264,8 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
264 264
265 if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt)) 265 if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
266 goto nla_put_failure; 266 goto nla_put_failure;
267 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); 267
268 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); 268 tcf_tm_dump(&t, &p->tcf_tm);
269 t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
270 if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD)) 269 if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD))
271 goto nla_put_failure; 270 goto nla_put_failure;
272 271
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index c6e18f230af6..82d3c1479029 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -121,7 +121,7 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
121 121
122 spin_lock(&p->tcf_lock); 122 spin_lock(&p->tcf_lock);
123 123
124 p->tcf_tm.lastuse = jiffies; 124 tcf_lastuse_update(&p->tcf_tm);
125 125
126 if (p->tcfp_nkeys > 0) { 126 if (p->tcfp_nkeys > 0) {
127 struct tc_pedit_key *tkey = p->tcfp_keys; 127 struct tc_pedit_key *tkey = p->tcfp_keys;
@@ -200,11 +200,11 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
200 200
201 if (nla_put(skb, TCA_PEDIT_PARMS, s, opt)) 201 if (nla_put(skb, TCA_PEDIT_PARMS, s, opt))
202 goto nla_put_failure; 202 goto nla_put_failure;
203 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); 203
204 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); 204 tcf_tm_dump(&t, &p->tcf_tm);
205 t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
206 if (nla_put_64bit(skb, TCA_PEDIT_TM, sizeof(t), &t, TCA_PEDIT_PAD)) 205 if (nla_put_64bit(skb, TCA_PEDIT_TM, sizeof(t), &t, TCA_PEDIT_PAD))
207 goto nla_put_failure; 206 goto nla_put_failure;
207
208 kfree(opt); 208 kfree(opt);
209 return skb->len; 209 return skb->len;
210 210
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index c557789765dc..1e8ede3955f4 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -115,9 +115,9 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
115 [TCA_POLICE_RESULT] = { .type = NLA_U32 }, 115 [TCA_POLICE_RESULT] = { .type = NLA_U32 },
116}; 116};
117 117
118static int tcf_act_police_locate(struct net *net, struct nlattr *nla, 118static int tcf_act_police_init(struct net *net, struct nlattr *nla,
119 struct nlattr *est, struct tc_action *a, 119 struct nlattr *est, struct tc_action *a,
120 int ovr, int bind) 120 int ovr, int bind)
121{ 121{
122 int ret = 0, err; 122 int ret = 0, err;
123 struct nlattr *tb[TCA_POLICE_MAX + 1]; 123 struct nlattr *tb[TCA_POLICE_MAX + 1];
@@ -182,7 +182,8 @@ override:
182 if (est) { 182 if (est) {
183 err = gen_replace_estimator(&police->tcf_bstats, NULL, 183 err = gen_replace_estimator(&police->tcf_bstats, NULL,
184 &police->tcf_rate_est, 184 &police->tcf_rate_est,
185 &police->tcf_lock, est); 185 &police->tcf_lock,
186 NULL, est);
186 if (err) 187 if (err)
187 goto failure_unlock; 188 goto failure_unlock;
188 } else if (tb[TCA_POLICE_AVRATE] && 189 } else if (tb[TCA_POLICE_AVRATE] &&
@@ -336,6 +337,7 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
336 337
337 t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install); 338 t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install);
338 t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse); 339 t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse);
340 t.firstuse = jiffies_to_clock_t(jiffies - police->tcf_tm.firstuse);
339 t.expires = jiffies_to_clock_t(police->tcf_tm.expires); 341 t.expires = jiffies_to_clock_t(police->tcf_tm.expires);
340 if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD)) 342 if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
341 goto nla_put_failure; 343 goto nla_put_failure;
@@ -364,7 +366,7 @@ static struct tc_action_ops act_police_ops = {
364 .owner = THIS_MODULE, 366 .owner = THIS_MODULE,
365 .act = tcf_act_police, 367 .act = tcf_act_police,
366 .dump = tcf_act_police_dump, 368 .dump = tcf_act_police_dump,
367 .init = tcf_act_police_locate, 369 .init = tcf_act_police_init,
368 .walk = tcf_act_police_walker, 370 .walk = tcf_act_police_walker,
369 .lookup = tcf_police_search, 371 .lookup = tcf_police_search,
370}; 372};
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index e42f8daca147..318328d34d12 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -35,7 +35,7 @@ static int tcf_simp(struct sk_buff *skb, const struct tc_action *a,
35 struct tcf_defact *d = a->priv; 35 struct tcf_defact *d = a->priv;
36 36
37 spin_lock(&d->tcf_lock); 37 spin_lock(&d->tcf_lock);
38 d->tcf_tm.lastuse = jiffies; 38 tcf_lastuse_update(&d->tcf_tm);
39 bstats_update(&d->tcf_bstats, skb); 39 bstats_update(&d->tcf_bstats, skb);
40 40
41 /* print policy string followed by _ then packet count 41 /* print policy string followed by _ then packet count
@@ -86,8 +86,9 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
86 struct nlattr *tb[TCA_DEF_MAX + 1]; 86 struct nlattr *tb[TCA_DEF_MAX + 1];
87 struct tc_defact *parm; 87 struct tc_defact *parm;
88 struct tcf_defact *d; 88 struct tcf_defact *d;
89 bool exists = false;
90 int ret = 0, err;
89 char *defdata; 91 char *defdata;
90 int ret = 0, err, exists = 0;
91 92
92 if (nla == NULL) 93 if (nla == NULL)
93 return -EINVAL; 94 return -EINVAL;
@@ -158,9 +159,8 @@ static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
158 if (nla_put(skb, TCA_DEF_PARMS, sizeof(opt), &opt) || 159 if (nla_put(skb, TCA_DEF_PARMS, sizeof(opt), &opt) ||
159 nla_put_string(skb, TCA_DEF_DATA, d->tcfd_defdata)) 160 nla_put_string(skb, TCA_DEF_DATA, d->tcfd_defdata))
160 goto nla_put_failure; 161 goto nla_put_failure;
161 t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); 162
162 t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); 163 tcf_tm_dump(&t, &d->tcf_tm);
163 t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
164 if (nla_put_64bit(skb, TCA_DEF_TM, sizeof(t), &t, TCA_DEF_PAD)) 164 if (nla_put_64bit(skb, TCA_DEF_TM, sizeof(t), &t, TCA_DEF_PAD))
165 goto nla_put_failure; 165 goto nla_put_failure;
166 return skb->len; 166 return skb->len;
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index e928802966bc..53d1486cddf7 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -37,7 +37,7 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
37 struct tcf_skbedit *d = a->priv; 37 struct tcf_skbedit *d = a->priv;
38 38
39 spin_lock(&d->tcf_lock); 39 spin_lock(&d->tcf_lock);
40 d->tcf_tm.lastuse = jiffies; 40 tcf_lastuse_update(&d->tcf_tm);
41 bstats_update(&d->tcf_bstats, skb); 41 bstats_update(&d->tcf_bstats, skb);
42 42
43 if (d->flags & SKBEDIT_F_PRIORITY) 43 if (d->flags & SKBEDIT_F_PRIORITY)
@@ -69,7 +69,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
69 struct tcf_skbedit *d; 69 struct tcf_skbedit *d;
70 u32 flags = 0, *priority = NULL, *mark = NULL; 70 u32 flags = 0, *priority = NULL, *mark = NULL;
71 u16 *queue_mapping = NULL; 71 u16 *queue_mapping = NULL;
72 int ret = 0, err, exists = 0; 72 bool exists = false;
73 int ret = 0, err;
73 74
74 if (nla == NULL) 75 if (nla == NULL)
75 return -EINVAL; 76 return -EINVAL;
@@ -168,9 +169,8 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
168 nla_put(skb, TCA_SKBEDIT_MARK, sizeof(d->mark), 169 nla_put(skb, TCA_SKBEDIT_MARK, sizeof(d->mark),
169 &d->mark)) 170 &d->mark))
170 goto nla_put_failure; 171 goto nla_put_failure;
171 t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); 172
172 t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); 173 tcf_tm_dump(&t, &d->tcf_tm);
173 t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
174 if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD)) 174 if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
175 goto nla_put_failure; 175 goto nla_put_failure;
176 return skb->len; 176 return skb->len;
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index ac4adc812c12..db9b7ed570ba 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -31,7 +31,7 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
31 int err; 31 int err;
32 32
33 spin_lock(&v->tcf_lock); 33 spin_lock(&v->tcf_lock);
34 v->tcf_tm.lastuse = jiffies; 34 tcf_lastuse_update(&v->tcf_tm);
35 bstats_update(&v->tcf_bstats, skb); 35 bstats_update(&v->tcf_bstats, skb);
36 action = v->tcf_action; 36 action = v->tcf_action;
37 37
@@ -77,8 +77,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
77 int action; 77 int action;
78 __be16 push_vid = 0; 78 __be16 push_vid = 0;
79 __be16 push_proto = 0; 79 __be16 push_proto = 0;
80 int ret = 0, exists = 0; 80 bool exists = false;
81 int err; 81 int ret = 0, err;
82 82
83 if (!nla) 83 if (!nla)
84 return -EINVAL; 84 return -EINVAL;
@@ -179,12 +179,11 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
179 179
180 if (v->tcfv_action == TCA_VLAN_ACT_PUSH && 180 if (v->tcfv_action == TCA_VLAN_ACT_PUSH &&
181 (nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, v->tcfv_push_vid) || 181 (nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, v->tcfv_push_vid) ||
182 nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL, v->tcfv_push_proto))) 182 nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL,
183 v->tcfv_push_proto)))
183 goto nla_put_failure; 184 goto nla_put_failure;
184 185
185 t.install = jiffies_to_clock_t(jiffies - v->tcf_tm.install); 186 tcf_tm_dump(&t, &v->tcf_tm);
186 t.lastuse = jiffies_to_clock_t(jiffies - v->tcf_tm.lastuse);
187 t.expires = jiffies_to_clock_t(v->tcf_tm.expires);
188 if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD)) 187 if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD))
189 goto nla_put_failure; 188 goto nla_put_failure;
190 return skb->len; 189 return skb->len;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index a75864d93142..843a716a4303 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -103,6 +103,17 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
103 struct nlmsghdr *n, struct tcf_proto *tp, 103 struct nlmsghdr *n, struct tcf_proto *tp,
104 unsigned long fh, int event); 104 unsigned long fh, int event);
105 105
106static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
107 struct nlmsghdr *n,
108 struct tcf_proto __rcu **chain, int event)
109{
110 struct tcf_proto __rcu **it_chain;
111 struct tcf_proto *tp;
112
113 for (it_chain = chain; (tp = rtnl_dereference(*it_chain)) != NULL;
114 it_chain = &tp->next)
115 tfilter_notify(net, oskb, n, tp, 0, event);
116}
106 117
107/* Select new prio value from the range, managed by kernel. */ 118/* Select new prio value from the range, managed by kernel. */
108 119
@@ -156,11 +167,23 @@ replay:
156 cl = 0; 167 cl = 0;
157 168
158 if (prio == 0) { 169 if (prio == 0) {
159 /* If no priority is given, user wants we allocated it. */ 170 switch (n->nlmsg_type) {
160 if (n->nlmsg_type != RTM_NEWTFILTER || 171 case RTM_DELTFILTER:
161 !(n->nlmsg_flags & NLM_F_CREATE)) 172 if (protocol || t->tcm_handle || tca[TCA_KIND])
173 return -ENOENT;
174 break;
175 case RTM_NEWTFILTER:
176 /* If no priority is provided by the user,
177 * we allocate one.
178 */
179 if (n->nlmsg_flags & NLM_F_CREATE) {
180 prio = TC_H_MAKE(0x80000000U, 0U);
181 break;
182 }
183 /* fall-through */
184 default:
162 return -ENOENT; 185 return -ENOENT;
163 prio = TC_H_MAKE(0x80000000U, 0U); 186 }
164 } 187 }
165 188
166 /* Find head of filter chain. */ 189 /* Find head of filter chain. */
@@ -200,6 +223,12 @@ replay:
200 err = -EINVAL; 223 err = -EINVAL;
201 if (chain == NULL) 224 if (chain == NULL)
202 goto errout; 225 goto errout;
226 if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
227 tfilter_notify_chain(net, skb, n, chain, RTM_DELTFILTER);
228 tcf_destroy_chain(chain);
229 err = 0;
230 goto errout;
231 }
203 232
204 /* Check the chain for existence of proto-tcf with this priority */ 233 /* Check the chain for existence of proto-tcf with this priority */
205 for (back = chain; 234 for (back = chain;
@@ -351,8 +380,9 @@ errout:
351 return err; 380 return err;
352} 381}
353 382
354static int tcf_fill_node(struct net *net, struct sk_buff *skb, struct tcf_proto *tp, 383static int tcf_fill_node(struct net *net, struct sk_buff *skb,
355 unsigned long fh, u32 portid, u32 seq, u16 flags, int event) 384 struct tcf_proto *tp, unsigned long fh, u32 portid,
385 u32 seq, u16 flags, int event)
356{ 386{
357 struct tcmsg *tcm; 387 struct tcmsg *tcm;
358 struct nlmsghdr *nlh; 388 struct nlmsghdr *nlh;
@@ -474,9 +504,11 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
474 TC_H_MIN(tcm->tcm_info) != tp->protocol) 504 TC_H_MIN(tcm->tcm_info) != tp->protocol)
475 continue; 505 continue;
476 if (t > s_t) 506 if (t > s_t)
477 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0])); 507 memset(&cb->args[1], 0,
508 sizeof(cb->args)-sizeof(cb->args[0]));
478 if (cb->args[1] == 0) { 509 if (cb->args[1] == 0) {
479 if (tcf_fill_node(net, skb, tp, 0, NETLINK_CB(cb->skb).portid, 510 if (tcf_fill_node(net, skb, tp, 0,
511 NETLINK_CB(cb->skb).portid,
480 cb->nlh->nlmsg_seq, NLM_F_MULTI, 512 cb->nlh->nlmsg_seq, NLM_F_MULTI,
481 RTM_NEWTFILTER) <= 0) 513 RTM_NEWTFILTER) <= 0)
482 break; 514 break;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index b3b7978f4182..5060801a2f6d 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -66,6 +66,7 @@ struct cls_fl_filter {
66 struct fl_flow_key key; 66 struct fl_flow_key key;
67 struct list_head list; 67 struct list_head list;
68 u32 handle; 68 u32 handle;
69 u32 flags;
69 struct rcu_head rcu; 70 struct rcu_head rcu;
70}; 71};
71 72
@@ -123,6 +124,9 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
123 struct fl_flow_key skb_key; 124 struct fl_flow_key skb_key;
124 struct fl_flow_key skb_mkey; 125 struct fl_flow_key skb_mkey;
125 126
127 if (!atomic_read(&head->ht.nelems))
128 return -1;
129
126 fl_clear_masked_range(&skb_key, &head->mask); 130 fl_clear_masked_range(&skb_key, &head->mask);
127 skb_key.indev_ifindex = skb->skb_iif; 131 skb_key.indev_ifindex = skb->skb_iif;
128 /* skb_flow_dissect() does not set n_proto in case an unknown protocol, 132 /* skb_flow_dissect() does not set n_proto in case an unknown protocol,
@@ -136,7 +140,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
136 f = rhashtable_lookup_fast(&head->ht, 140 f = rhashtable_lookup_fast(&head->ht,
137 fl_key_get_start(&skb_mkey, &head->mask), 141 fl_key_get_start(&skb_mkey, &head->mask),
138 head->ht_params); 142 head->ht_params);
139 if (f) { 143 if (f && !tc_skip_sw(f->flags)) {
140 *res = f->res; 144 *res = f->res;
141 return tcf_exts_exec(skb, &f->exts, res); 145 return tcf_exts_exec(skb, &f->exts, res);
142 } 146 }
@@ -183,19 +187,20 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie)
183 dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc); 187 dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
184} 188}
185 189
186static void fl_hw_replace_filter(struct tcf_proto *tp, 190static int fl_hw_replace_filter(struct tcf_proto *tp,
187 struct flow_dissector *dissector, 191 struct flow_dissector *dissector,
188 struct fl_flow_key *mask, 192 struct fl_flow_key *mask,
189 struct fl_flow_key *key, 193 struct fl_flow_key *key,
190 struct tcf_exts *actions, 194 struct tcf_exts *actions,
191 unsigned long cookie, u32 flags) 195 unsigned long cookie, u32 flags)
192{ 196{
193 struct net_device *dev = tp->q->dev_queue->dev; 197 struct net_device *dev = tp->q->dev_queue->dev;
194 struct tc_cls_flower_offload offload = {0}; 198 struct tc_cls_flower_offload offload = {0};
195 struct tc_to_netdev tc; 199 struct tc_to_netdev tc;
200 int err;
196 201
197 if (!tc_should_offload(dev, tp, flags)) 202 if (!tc_should_offload(dev, tp, flags))
198 return; 203 return tc_skip_sw(flags) ? -EINVAL : 0;
199 204
200 offload.command = TC_CLSFLOWER_REPLACE; 205 offload.command = TC_CLSFLOWER_REPLACE;
201 offload.cookie = cookie; 206 offload.cookie = cookie;
@@ -207,7 +212,12 @@ static void fl_hw_replace_filter(struct tcf_proto *tp,
207 tc.type = TC_SETUP_CLSFLOWER; 212 tc.type = TC_SETUP_CLSFLOWER;
208 tc.cls_flower = &offload; 213 tc.cls_flower = &offload;
209 214
210 dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc); 215 err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
216
217 if (tc_skip_sw(flags))
218 return err;
219
220 return 0;
211} 221}
212 222
213static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) 223static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
@@ -524,7 +534,6 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
524 struct cls_fl_filter *fnew; 534 struct cls_fl_filter *fnew;
525 struct nlattr *tb[TCA_FLOWER_MAX + 1]; 535 struct nlattr *tb[TCA_FLOWER_MAX + 1];
526 struct fl_flow_mask mask = {}; 536 struct fl_flow_mask mask = {};
527 u32 flags = 0;
528 int err; 537 int err;
529 538
530 if (!tca[TCA_OPTIONS]) 539 if (!tca[TCA_OPTIONS])
@@ -552,8 +561,14 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
552 } 561 }
553 fnew->handle = handle; 562 fnew->handle = handle;
554 563
555 if (tb[TCA_FLOWER_FLAGS]) 564 if (tb[TCA_FLOWER_FLAGS]) {
556 flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]); 565 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
566
567 if (!tc_flags_valid(fnew->flags)) {
568 err = -EINVAL;
569 goto errout;
570 }
571 }
557 572
558 err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr); 573 err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
559 if (err) 574 if (err)
@@ -563,19 +578,23 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
563 if (err) 578 if (err)
564 goto errout; 579 goto errout;
565 580
566 err = rhashtable_insert_fast(&head->ht, &fnew->ht_node, 581 if (!tc_skip_sw(fnew->flags)) {
567 head->ht_params); 582 err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
583 head->ht_params);
584 if (err)
585 goto errout;
586 }
587
588 err = fl_hw_replace_filter(tp,
589 &head->dissector,
590 &mask.key,
591 &fnew->key,
592 &fnew->exts,
593 (unsigned long)fnew,
594 fnew->flags);
568 if (err) 595 if (err)
569 goto errout; 596 goto errout;
570 597
571 fl_hw_replace_filter(tp,
572 &head->dissector,
573 &mask.key,
574 &fnew->key,
575 &fnew->exts,
576 (unsigned long)fnew,
577 flags);
578
579 if (fold) { 598 if (fold) {
580 rhashtable_remove_fast(&head->ht, &fold->ht_node, 599 rhashtable_remove_fast(&head->ht, &fold->ht_node,
581 head->ht_params); 600 head->ht_params);
@@ -734,6 +753,8 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
734 sizeof(key->tp.dst)))) 753 sizeof(key->tp.dst))))
735 goto nla_put_failure; 754 goto nla_put_failure;
736 755
756 nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags);
757
737 if (tcf_exts_dump(skb, &f->exts)) 758 if (tcf_exts_dump(skb, &f->exts))
738 goto nla_put_failure; 759 goto nla_put_failure;
739 760
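
Note: the flower changes above make hardware-offload failures visible. fl_hw_replace_filter now returns the ndo_setup_tc() error when the filter is marked skip_sw (there is no software fallback to fall back on), and fl_change rejects flag combinations that would leave a filter with neither path. A sketch of the flag helpers this relies on, assuming they match the usual definitions in include/net/pkt_cls.h:

/* Sketch of the flag helpers (assumed to match include/net/pkt_cls.h):
 * a filter may skip software, skip hardware, or neither -- never both.
 */
static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
		return false;	/* unknown bits */

	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;	/* both set: nowhere left to run */

	return true;
}

With that in place, skip_sw filters also stay out of the software rhashtable in fl_change, which is why the insert is now conditional.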
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index ddf047df5361..12ebde845523 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -95,8 +95,6 @@ static int tclass_notify(struct net *net, struct sk_buff *oskb,
95 Expected action: do not backoff, but wait until queue will clear. 95 Expected action: do not backoff, but wait until queue will clear.
96 NET_XMIT_CN - probably this packet enqueued, but another one dropped. 96 NET_XMIT_CN - probably this packet enqueued, but another one dropped.
97 Expected action: backoff or ignore 97 Expected action: backoff or ignore
98 NET_XMIT_POLICED - dropped by police.
99 Expected action: backoff or error to real-time apps.
100 98
101 Auxiliary routines: 99 Auxiliary routines:
102 100
@@ -583,7 +581,6 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
583 timer); 581 timer);
584 582
585 rcu_read_lock(); 583 rcu_read_lock();
586 qdisc_unthrottled(wd->qdisc);
587 __netif_schedule(qdisc_root(wd->qdisc)); 584 __netif_schedule(qdisc_root(wd->qdisc));
588 rcu_read_unlock(); 585 rcu_read_unlock();
589 586
@@ -598,15 +595,12 @@ void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
598} 595}
599EXPORT_SYMBOL(qdisc_watchdog_init); 596EXPORT_SYMBOL(qdisc_watchdog_init);
600 597
601void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle) 598void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
602{ 599{
603 if (test_bit(__QDISC_STATE_DEACTIVATED, 600 if (test_bit(__QDISC_STATE_DEACTIVATED,
604 &qdisc_root_sleeping(wd->qdisc)->state)) 601 &qdisc_root_sleeping(wd->qdisc)->state))
605 return; 602 return;
606 603
607 if (throttle)
608 qdisc_throttled(wd->qdisc);
609
610 if (wd->last_expires == expires) 604 if (wd->last_expires == expires)
611 return; 605 return;
612 606
@@ -620,7 +614,6 @@ EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
620void qdisc_watchdog_cancel(struct qdisc_watchdog *wd) 614void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
621{ 615{
622 hrtimer_cancel(&wd->timer); 616 hrtimer_cancel(&wd->timer);
623 qdisc_unthrottled(wd->qdisc);
624} 617}
625EXPORT_SYMBOL(qdisc_watchdog_cancel); 618EXPORT_SYMBOL(qdisc_watchdog_cancel);
626 619
@@ -982,7 +975,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
982 rcu_assign_pointer(sch->stab, stab); 975 rcu_assign_pointer(sch->stab, stab);
983 } 976 }
984 if (tca[TCA_RATE]) { 977 if (tca[TCA_RATE]) {
985 spinlock_t *root_lock; 978 seqcount_t *running;
986 979
987 err = -EOPNOTSUPP; 980 err = -EOPNOTSUPP;
988 if (sch->flags & TCQ_F_MQROOT) 981 if (sch->flags & TCQ_F_MQROOT)
@@ -991,14 +984,15 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
991 if ((sch->parent != TC_H_ROOT) && 984 if ((sch->parent != TC_H_ROOT) &&
992 !(sch->flags & TCQ_F_INGRESS) && 985 !(sch->flags & TCQ_F_INGRESS) &&
993 (!p || !(p->flags & TCQ_F_MQROOT))) 986 (!p || !(p->flags & TCQ_F_MQROOT)))
994 root_lock = qdisc_root_sleeping_lock(sch); 987 running = qdisc_root_sleeping_running(sch);
995 else 988 else
996 root_lock = qdisc_lock(sch); 989 running = &sch->running;
997 990
998 err = gen_new_estimator(&sch->bstats, 991 err = gen_new_estimator(&sch->bstats,
999 sch->cpu_bstats, 992 sch->cpu_bstats,
1000 &sch->rate_est, 993 &sch->rate_est,
1001 root_lock, 994 NULL,
995 running,
1002 tca[TCA_RATE]); 996 tca[TCA_RATE]);
1003 if (err) 997 if (err)
1004 goto err_out4; 998 goto err_out4;
@@ -1061,7 +1055,8 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
1061 gen_replace_estimator(&sch->bstats, 1055 gen_replace_estimator(&sch->bstats,
1062 sch->cpu_bstats, 1056 sch->cpu_bstats,
1063 &sch->rate_est, 1057 &sch->rate_est,
1064 qdisc_root_sleeping_lock(sch), 1058 NULL,
1059 qdisc_root_sleeping_running(sch),
1065 tca[TCA_RATE]); 1060 tca[TCA_RATE]);
1066 } 1061 }
1067out: 1062out:
@@ -1369,8 +1364,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
1369 goto nla_put_failure; 1364 goto nla_put_failure;
1370 1365
1371 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS, 1366 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1372 qdisc_root_sleeping_lock(q), &d, 1367 NULL, &d, TCA_PAD) < 0)
1373 TCA_PAD) < 0)
1374 goto nla_put_failure; 1368 goto nla_put_failure;
1375 1369
1376 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0) 1370 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
@@ -1381,7 +1375,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
1381 cpu_qstats = q->cpu_qstats; 1375 cpu_qstats = q->cpu_qstats;
1382 } 1376 }
1383 1377
1384 if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 || 1378 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
1379 &d, cpu_bstats, &q->bstats) < 0 ||
1385 gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 || 1380 gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
1386 gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0) 1381 gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
1387 goto nla_put_failure; 1382 goto nla_put_failure;
@@ -1684,8 +1679,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1684 goto nla_put_failure; 1679 goto nla_put_failure;
1685 1680
1686 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS, 1681 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1687 qdisc_root_sleeping_lock(q), &d, 1682 NULL, &d, TCA_PAD) < 0)
1688 TCA_PAD) < 0)
1689 goto nla_put_failure; 1683 goto nla_put_failure;
1690 1684
1691 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0) 1685 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
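
Note: the recurring qdisc_root_sleeping_lock() to qdisc_root_sleeping_running() substitution in this file (and in the per-qdisc patches below) moves rate-estimator and stats synchronization from the root qdisc spinlock to a seqcount: writers bump the sequence around bstats updates, and readers retry instead of taking the lock. A minimal reader sketch, assuming the standard seqcount API (the function name here is illustrative):

/* Minimal reader sketch (assumed names): sample byte/packet counters
 * consistently against a writer that brackets its updates with
 * write_seqcount_begin()/write_seqcount_end() on 'running'.
 */
static void sample_bstats(const seqcount_t *running,
			  const struct gnet_stats_basic_packed *b,
			  struct gnet_stats_basic_packed *snap)
{
	unsigned int seq;

	do {
		seq = read_seqcount_begin(running);
		snap->bytes = b->bytes;
		snap->packets = b->packets;
	} while (read_seqcount_retry(running, seq));
}

Passing NULL where the estimator previously took a lock pointer signals that the seqcount, not a spinlock, now protects the basic stats.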
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 1911af3ca7c0..481e4f12aeb4 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -357,16 +357,17 @@ static struct tcf_proto __rcu **atm_tc_find_tcf(struct Qdisc *sch,
357 357
358/* --------------------------- Qdisc operations ---------------------------- */ 358/* --------------------------- Qdisc operations ---------------------------- */
359 359
360static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch) 360static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
361 struct sk_buff **to_free)
361{ 362{
362 struct atm_qdisc_data *p = qdisc_priv(sch); 363 struct atm_qdisc_data *p = qdisc_priv(sch);
363 struct atm_flow_data *flow; 364 struct atm_flow_data *flow;
364 struct tcf_result res; 365 struct tcf_result res;
365 int result; 366 int result;
366 int ret = NET_XMIT_POLICED; 367 int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
367 368
368 pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p); 369 pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
369 result = TC_POLICE_OK; /* be nice to gcc */ 370 result = TC_ACT_OK; /* be nice to gcc */
370 flow = NULL; 371 flow = NULL;
371 if (TC_H_MAJ(skb->priority) != sch->handle || 372 if (TC_H_MAJ(skb->priority) != sch->handle ||
372 !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) { 373 !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) {
@@ -398,12 +399,12 @@ done:
398 switch (result) { 399 switch (result) {
399 case TC_ACT_QUEUED: 400 case TC_ACT_QUEUED:
400 case TC_ACT_STOLEN: 401 case TC_ACT_STOLEN:
401 kfree_skb(skb); 402 __qdisc_drop(skb, to_free);
402 return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; 403 return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
403 case TC_ACT_SHOT: 404 case TC_ACT_SHOT:
404 kfree_skb(skb); 405 __qdisc_drop(skb, to_free);
405 goto drop; 406 goto drop;
406 case TC_POLICE_RECLASSIFY: 407 case TC_ACT_RECLASSIFY:
407 if (flow->excess) 408 if (flow->excess)
408 flow = flow->excess; 409 flow = flow->excess;
409 else 410 else
@@ -413,7 +414,7 @@ done:
413#endif 414#endif
414 } 415 }
415 416
416 ret = qdisc_enqueue(skb, flow->q); 417 ret = qdisc_enqueue(skb, flow->q, to_free);
417 if (ret != NET_XMIT_SUCCESS) { 418 if (ret != NET_XMIT_SUCCESS) {
418drop: __maybe_unused 419drop: __maybe_unused
419 if (net_xmit_drop_count(ret)) { 420 if (net_xmit_drop_count(ret)) {
@@ -519,20 +520,6 @@ static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
519 return p->link.q->ops->peek(p->link.q); 520 return p->link.q->ops->peek(p->link.q);
520} 521}
521 522
522static unsigned int atm_tc_drop(struct Qdisc *sch)
523{
524 struct atm_qdisc_data *p = qdisc_priv(sch);
525 struct atm_flow_data *flow;
526 unsigned int len;
527
528 pr_debug("atm_tc_drop(sch %p,[qdisc %p])\n", sch, p);
529 list_for_each_entry(flow, &p->flows, list) {
530 if (flow->q->ops->drop && (len = flow->q->ops->drop(flow->q)))
531 return len;
532 }
533 return 0;
534}
535
536static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt) 523static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
537{ 524{
538 struct atm_qdisc_data *p = qdisc_priv(sch); 525 struct atm_qdisc_data *p = qdisc_priv(sch);
@@ -637,7 +624,8 @@ atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
637{ 624{
638 struct atm_flow_data *flow = (struct atm_flow_data *)arg; 625 struct atm_flow_data *flow = (struct atm_flow_data *)arg;
639 626
640 if (gnet_stats_copy_basic(d, NULL, &flow->bstats) < 0 || 627 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
628 d, NULL, &flow->bstats) < 0 ||
641 gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0) 629 gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
642 return -1; 630 return -1;
643 631
@@ -671,7 +659,6 @@ static struct Qdisc_ops atm_qdisc_ops __read_mostly = {
671 .enqueue = atm_tc_enqueue, 659 .enqueue = atm_tc_enqueue,
672 .dequeue = atm_tc_dequeue, 660 .dequeue = atm_tc_dequeue,
673 .peek = atm_tc_peek, 661 .peek = atm_tc_peek,
674 .drop = atm_tc_drop,
675 .init = atm_tc_init, 662 .init = atm_tc_init,
676 .reset = atm_tc_reset, 663 .reset = atm_tc_reset,
677 .destroy = atm_tc_destroy, 664 .destroy = atm_tc_destroy,
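
Note: atm_tc_enqueue is the first of many conversions in this series to the three-argument enqueue. Instead of calling kfree_skb() while holding the qdisc root lock, dropped skbs are chained onto *to_free and freed by the caller once the lock is released. The helpers are small; a sketch of their assumed shape from include/net/sch_generic.h:

/* Assumed batching helpers from include/net/sch_generic.h:
 * __qdisc_drop() only links the skb onto the caller's to_free list;
 * qdisc_drop() additionally accounts the drop and signals it.
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}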
diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c
index 3fee70d9814f..c98a61e980ba 100644
--- a/net/sched/sch_blackhole.c
+++ b/net/sched/sch_blackhole.c
@@ -17,9 +17,10 @@
17#include <linux/skbuff.h> 17#include <linux/skbuff.h>
18#include <net/pkt_sched.h> 18#include <net/pkt_sched.h>
19 19
20static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch) 20static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
21 struct sk_buff **to_free)
21{ 22{
22 qdisc_drop(skb, sch); 23 qdisc_drop(skb, sch, to_free);
23 return NET_XMIT_SUCCESS; 24 return NET_XMIT_SUCCESS;
24} 25}
25 26
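
Note: blackhole_enqueue shows the contract at its simplest; even a qdisc that discards everything routes the skb through to_free rather than freeing it inline. The payoff is on the caller side, where the accumulated batch is freed outside the root lock. A caller-side sketch, modeled on __dev_xmit_skb() (the wrapper name is hypothetical):

/* Hypothetical caller sketch (modeled on __dev_xmit_skb()): enqueue
 * under the root lock, then free the whole drop list after the lock
 * is released, so kfree_skb() work never serializes other senders.
 */
static int xmit_one(struct sk_buff *skb, struct Qdisc *q,
		    spinlock_t *root_lock)
{
	struct sk_buff *to_free = NULL;
	int rc;

	spin_lock(root_lock);
	rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
	spin_unlock(root_lock);

	if (unlikely(to_free))
		kfree_skb_list(to_free);	/* freed lock-free */
	return rc;
}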
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index baafddf229ce..beb554aa8cfb 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -80,10 +80,6 @@ struct cbq_class {
80 unsigned char priority; /* class priority */ 80 unsigned char priority; /* class priority */
81 unsigned char priority2; /* priority to be used after overlimit */ 81 unsigned char priority2; /* priority to be used after overlimit */
82 unsigned char ewma_log; /* time constant for idle time calculation */ 82 unsigned char ewma_log; /* time constant for idle time calculation */
83 unsigned char ovl_strategy;
84#ifdef CONFIG_NET_CLS_ACT
85 unsigned char police;
86#endif
87 83
88 u32 defmap; 84 u32 defmap;
89 85
@@ -94,10 +90,6 @@ struct cbq_class {
94 u32 avpkt; 90 u32 avpkt;
95 struct qdisc_rate_table *R_tab; 91 struct qdisc_rate_table *R_tab;
96 92
97 /* Overlimit strategy parameters */
98 void (*overlimit)(struct cbq_class *cl);
99 psched_tdiff_t penalty;
100
101 /* General scheduler (WRR) parameters */ 93 /* General scheduler (WRR) parameters */
102 long allot; 94 long allot;
103 long quantum; /* Allotment per WRR round */ 95 long quantum; /* Allotment per WRR round */
@@ -353,7 +345,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
353{ 345{
354 int toplevel = q->toplevel; 346 int toplevel = q->toplevel;
355 347
356 if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) { 348 if (toplevel > cl->level) {
357 psched_time_t now = psched_get_time(); 349 psched_time_t now = psched_get_time();
358 350
359 do { 351 do {
@@ -366,7 +358,8 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
366} 358}
367 359
368static int 360static int
369cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) 361cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
362 struct sk_buff **to_free)
370{ 363{
371 struct cbq_sched_data *q = qdisc_priv(sch); 364 struct cbq_sched_data *q = qdisc_priv(sch);
372 int uninitialized_var(ret); 365 int uninitialized_var(ret);
@@ -378,14 +371,11 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
378 if (cl == NULL) { 371 if (cl == NULL) {
379 if (ret & __NET_XMIT_BYPASS) 372 if (ret & __NET_XMIT_BYPASS)
380 qdisc_qstats_drop(sch); 373 qdisc_qstats_drop(sch);
381 kfree_skb(skb); 374 __qdisc_drop(skb, to_free);
382 return ret; 375 return ret;
383 } 376 }
384 377
385#ifdef CONFIG_NET_CLS_ACT 378 ret = qdisc_enqueue(skb, cl->q, to_free);
386 cl->q->__parent = sch;
387#endif
388 ret = qdisc_enqueue(skb, cl->q);
389 if (ret == NET_XMIT_SUCCESS) { 379 if (ret == NET_XMIT_SUCCESS) {
390 sch->q.qlen++; 380 sch->q.qlen++;
391 cbq_mark_toplevel(q, cl); 381 cbq_mark_toplevel(q, cl);
@@ -402,11 +392,8 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
402 return ret; 392 return ret;
403} 393}
404 394
405/* Overlimit actions */ 395/* Overlimit action: penalize leaf class by adding offtime */
406 396static void cbq_overlimit(struct cbq_class *cl)
407/* TC_CBQ_OVL_CLASSIC: (default) penalize leaf class by adding offtime */
408
409static void cbq_ovl_classic(struct cbq_class *cl)
410{ 397{
411 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); 398 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
412 psched_tdiff_t delay = cl->undertime - q->now; 399 psched_tdiff_t delay = cl->undertime - q->now;
@@ -456,99 +443,6 @@ static void cbq_ovl_classic(struct cbq_class *cl)
456 } 443 }
457} 444}
458 445
459/* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when
460 * they go overlimit
461 */
462
463static void cbq_ovl_rclassic(struct cbq_class *cl)
464{
465 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
466 struct cbq_class *this = cl;
467
468 do {
469 if (cl->level > q->toplevel) {
470 cl = NULL;
471 break;
472 }
473 } while ((cl = cl->borrow) != NULL);
474
475 if (cl == NULL)
476 cl = this;
477 cbq_ovl_classic(cl);
478}
479
480/* TC_CBQ_OVL_DELAY: delay until it will go to underlimit */
481
482static void cbq_ovl_delay(struct cbq_class *cl)
483{
484 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
485 psched_tdiff_t delay = cl->undertime - q->now;
486
487 if (test_bit(__QDISC_STATE_DEACTIVATED,
488 &qdisc_root_sleeping(cl->qdisc)->state))
489 return;
490
491 if (!cl->delayed) {
492 psched_time_t sched = q->now;
493 ktime_t expires;
494
495 delay += cl->offtime;
496 if (cl->avgidle < 0)
497 delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
498 if (cl->avgidle < cl->minidle)
499 cl->avgidle = cl->minidle;
500 cl->undertime = q->now + delay;
501
502 if (delay > 0) {
503 sched += delay + cl->penalty;
504 cl->penalized = sched;
505 cl->cpriority = TC_CBQ_MAXPRIO;
506 q->pmask |= (1<<TC_CBQ_MAXPRIO);
507
508 expires = ns_to_ktime(PSCHED_TICKS2NS(sched));
509 if (hrtimer_try_to_cancel(&q->delay_timer) &&
510 ktime_to_ns(ktime_sub(
511 hrtimer_get_expires(&q->delay_timer),
512 expires)) > 0)
513 hrtimer_set_expires(&q->delay_timer, expires);
514 hrtimer_restart(&q->delay_timer);
515 cl->delayed = 1;
516 cl->xstats.overactions++;
517 return;
518 }
519 delay = 1;
520 }
521 if (q->wd_expires == 0 || q->wd_expires > delay)
522 q->wd_expires = delay;
523}
524
525/* TC_CBQ_OVL_LOWPRIO: penalize class by lowering its priority band */
526
527static void cbq_ovl_lowprio(struct cbq_class *cl)
528{
529 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
530
531 cl->penalized = q->now + cl->penalty;
532
533 if (cl->cpriority != cl->priority2) {
534 cl->cpriority = cl->priority2;
535 q->pmask |= (1<<cl->cpriority);
536 cl->xstats.overactions++;
537 }
538 cbq_ovl_classic(cl);
539}
540
541/* TC_CBQ_OVL_DROP: penalize class by dropping */
542
543static void cbq_ovl_drop(struct cbq_class *cl)
544{
545 if (cl->q->ops->drop)
546 if (cl->q->ops->drop(cl->q))
547 cl->qdisc->q.qlen--;
548 cl->xstats.overactions++;
549 cbq_ovl_classic(cl);
550}
551
552static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio, 446static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
553 psched_time_t now) 447 psched_time_t now)
554{ 448{
@@ -620,45 +514,10 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
620 hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED); 514 hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
621 } 515 }
622 516
623 qdisc_unthrottled(sch);
624 __netif_schedule(qdisc_root(sch)); 517 __netif_schedule(qdisc_root(sch));
625 return HRTIMER_NORESTART; 518 return HRTIMER_NORESTART;
626} 519}
627 520
628#ifdef CONFIG_NET_CLS_ACT
629static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
630{
631 struct Qdisc *sch = child->__parent;
632 struct cbq_sched_data *q = qdisc_priv(sch);
633 struct cbq_class *cl = q->rx_class;
634
635 q->rx_class = NULL;
636
637 if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
638 int ret;
639
640 cbq_mark_toplevel(q, cl);
641
642 q->rx_class = cl;
643 cl->q->__parent = sch;
644
645 ret = qdisc_enqueue(skb, cl->q);
646 if (ret == NET_XMIT_SUCCESS) {
647 sch->q.qlen++;
648 if (!cl->next_alive)
649 cbq_activate_class(cl);
650 return 0;
651 }
652 if (net_xmit_drop_count(ret))
653 qdisc_qstats_drop(sch);
654 return 0;
655 }
656
657 qdisc_qstats_drop(sch);
658 return -1;
659}
660#endif
661
662/* 521/*
663 * It is mission critical procedure. 522 * It is mission critical procedure.
664 * 523 *
@@ -807,7 +666,7 @@ cbq_under_limit(struct cbq_class *cl)
807 cl = cl->borrow; 666 cl = cl->borrow;
808 if (!cl) { 667 if (!cl) {
809 this_cl->qstats.overlimits++; 668 this_cl->qstats.overlimits++;
810 this_cl->overlimit(this_cl); 669 cbq_overlimit(this_cl);
811 return NULL; 670 return NULL;
812 } 671 }
813 if (cl->level > q->toplevel) 672 if (cl->level > q->toplevel)
@@ -960,7 +819,6 @@ cbq_dequeue(struct Qdisc *sch)
960 if (skb) { 819 if (skb) {
961 qdisc_bstats_update(sch, skb); 820 qdisc_bstats_update(sch, skb);
962 sch->q.qlen--; 821 sch->q.qlen--;
963 qdisc_unthrottled(sch);
964 return skb; 822 return skb;
965 } 823 }
966 824
@@ -1166,31 +1024,6 @@ static void cbq_link_class(struct cbq_class *this)
1166 } 1024 }
1167} 1025}
1168 1026
1169static unsigned int cbq_drop(struct Qdisc *sch)
1170{
1171 struct cbq_sched_data *q = qdisc_priv(sch);
1172 struct cbq_class *cl, *cl_head;
1173 int prio;
1174 unsigned int len;
1175
1176 for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
1177 cl_head = q->active[prio];
1178 if (!cl_head)
1179 continue;
1180
1181 cl = cl_head;
1182 do {
1183 if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) {
1184 sch->q.qlen--;
1185 if (!cl->q->q.qlen)
1186 cbq_deactivate_class(cl);
1187 return len;
1188 }
1189 } while ((cl = cl->next_alive) != cl_head);
1190 }
1191 return 0;
1192}
1193
1194static void 1027static void
1195cbq_reset(struct Qdisc *sch) 1028cbq_reset(struct Qdisc *sch)
1196{ 1029{
@@ -1280,50 +1113,6 @@ static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
1280 return 0; 1113 return 0;
1281} 1114}
1282 1115
1283static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
1284{
1285 switch (ovl->strategy) {
1286 case TC_CBQ_OVL_CLASSIC:
1287 cl->overlimit = cbq_ovl_classic;
1288 break;
1289 case TC_CBQ_OVL_DELAY:
1290 cl->overlimit = cbq_ovl_delay;
1291 break;
1292 case TC_CBQ_OVL_LOWPRIO:
1293 if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO ||
1294 ovl->priority2 - 1 <= cl->priority)
1295 return -EINVAL;
1296 cl->priority2 = ovl->priority2 - 1;
1297 cl->overlimit = cbq_ovl_lowprio;
1298 break;
1299 case TC_CBQ_OVL_DROP:
1300 cl->overlimit = cbq_ovl_drop;
1301 break;
1302 case TC_CBQ_OVL_RCLASSIC:
1303 cl->overlimit = cbq_ovl_rclassic;
1304 break;
1305 default:
1306 return -EINVAL;
1307 }
1308 cl->penalty = ovl->penalty;
1309 return 0;
1310}
1311
1312#ifdef CONFIG_NET_CLS_ACT
1313static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
1314{
1315 cl->police = p->police;
1316
1317 if (cl->q->handle) {
1318 if (p->police == TC_POLICE_RECLASSIFY)
1319 cl->q->reshape_fail = cbq_reshape_fail;
1320 else
1321 cl->q->reshape_fail = NULL;
1322 }
1323 return 0;
1324}
1325#endif
1326
1327static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt) 1116static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
1328{ 1117{
1329 cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange); 1118 cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
@@ -1375,8 +1164,6 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
1375 q->link.priority = TC_CBQ_MAXPRIO - 1; 1164 q->link.priority = TC_CBQ_MAXPRIO - 1;
1376 q->link.priority2 = TC_CBQ_MAXPRIO - 1; 1165 q->link.priority2 = TC_CBQ_MAXPRIO - 1;
1377 q->link.cpriority = TC_CBQ_MAXPRIO - 1; 1166 q->link.cpriority = TC_CBQ_MAXPRIO - 1;
1378 q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
1379 q->link.overlimit = cbq_ovl_classic;
1380 q->link.allot = psched_mtu(qdisc_dev(sch)); 1167 q->link.allot = psched_mtu(qdisc_dev(sch));
1381 q->link.quantum = q->link.allot; 1168 q->link.quantum = q->link.allot;
1382 q->link.weight = q->link.R_tab->rate.rate; 1169 q->link.weight = q->link.R_tab->rate.rate;
@@ -1463,24 +1250,6 @@ nla_put_failure:
1463 return -1; 1250 return -1;
1464} 1251}
1465 1252
1466static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
1467{
1468 unsigned char *b = skb_tail_pointer(skb);
1469 struct tc_cbq_ovl opt;
1470
1471 opt.strategy = cl->ovl_strategy;
1472 opt.priority2 = cl->priority2 + 1;
1473 opt.pad = 0;
1474 opt.penalty = cl->penalty;
1475 if (nla_put(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt))
1476 goto nla_put_failure;
1477 return skb->len;
1478
1479nla_put_failure:
1480 nlmsg_trim(skb, b);
1481 return -1;
1482}
1483
1484static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) 1253static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
1485{ 1254{
1486 unsigned char *b = skb_tail_pointer(skb); 1255 unsigned char *b = skb_tail_pointer(skb);
@@ -1500,36 +1269,11 @@ nla_put_failure:
1500 return -1; 1269 return -1;
1501} 1270}
1502 1271
1503#ifdef CONFIG_NET_CLS_ACT
1504static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
1505{
1506 unsigned char *b = skb_tail_pointer(skb);
1507 struct tc_cbq_police opt;
1508
1509 if (cl->police) {
1510 opt.police = cl->police;
1511 opt.__res1 = 0;
1512 opt.__res2 = 0;
1513 if (nla_put(skb, TCA_CBQ_POLICE, sizeof(opt), &opt))
1514 goto nla_put_failure;
1515 }
1516 return skb->len;
1517
1518nla_put_failure:
1519 nlmsg_trim(skb, b);
1520 return -1;
1521}
1522#endif
1523
1524static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl) 1272static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
1525{ 1273{
1526 if (cbq_dump_lss(skb, cl) < 0 || 1274 if (cbq_dump_lss(skb, cl) < 0 ||
1527 cbq_dump_rate(skb, cl) < 0 || 1275 cbq_dump_rate(skb, cl) < 0 ||
1528 cbq_dump_wrr(skb, cl) < 0 || 1276 cbq_dump_wrr(skb, cl) < 0 ||
1529 cbq_dump_ovl(skb, cl) < 0 ||
1530#ifdef CONFIG_NET_CLS_ACT
1531 cbq_dump_police(skb, cl) < 0 ||
1532#endif
1533 cbq_dump_fopt(skb, cl) < 0) 1277 cbq_dump_fopt(skb, cl) < 0)
1534 return -1; 1278 return -1;
1535 return 0; 1279 return 0;
@@ -1600,7 +1344,8 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
1600 if (cl->undertime != PSCHED_PASTPERFECT) 1344 if (cl->undertime != PSCHED_PASTPERFECT)
1601 cl->xstats.undertime = cl->undertime - q->now; 1345 cl->xstats.undertime = cl->undertime - q->now;
1602 1346
1603 if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || 1347 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
1348 d, NULL, &cl->bstats) < 0 ||
1604 gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || 1349 gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
1605 gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0) 1350 gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
1606 return -1; 1351 return -1;
@@ -1618,11 +1363,6 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1618 &pfifo_qdisc_ops, cl->common.classid); 1363 &pfifo_qdisc_ops, cl->common.classid);
1619 if (new == NULL) 1364 if (new == NULL)
1620 return -ENOBUFS; 1365 return -ENOBUFS;
1621 } else {
1622#ifdef CONFIG_NET_CLS_ACT
1623 if (cl->police == TC_POLICE_RECLASSIFY)
1624 new->reshape_fail = cbq_reshape_fail;
1625#endif
1626 } 1366 }
1627 1367
1628 *old = qdisc_replace(sch, new, &cl->q); 1368 *old = qdisc_replace(sch, new, &cl->q);
@@ -1735,6 +1475,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
1735 if (err < 0) 1475 if (err < 0)
1736 return err; 1476 return err;
1737 1477
1478 if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE])
1479 return -EOPNOTSUPP;
1480
1738 if (cl) { 1481 if (cl) {
1739 /* Check parent */ 1482 /* Check parent */
1740 if (parentid) { 1483 if (parentid) {
@@ -1755,7 +1498,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
1755 if (tca[TCA_RATE]) { 1498 if (tca[TCA_RATE]) {
1756 err = gen_replace_estimator(&cl->bstats, NULL, 1499 err = gen_replace_estimator(&cl->bstats, NULL,
1757 &cl->rate_est, 1500 &cl->rate_est,
1758 qdisc_root_sleeping_lock(sch), 1501 NULL,
1502 qdisc_root_sleeping_running(sch),
1759 tca[TCA_RATE]); 1503 tca[TCA_RATE]);
1760 if (err) { 1504 if (err) {
1761 qdisc_put_rtab(rtab); 1505 qdisc_put_rtab(rtab);
@@ -1782,14 +1526,6 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
1782 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); 1526 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
1783 } 1527 }
1784 1528
1785 if (tb[TCA_CBQ_OVL_STRATEGY])
1786 cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
1787
1788#ifdef CONFIG_NET_CLS_ACT
1789 if (tb[TCA_CBQ_POLICE])
1790 cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
1791#endif
1792
1793 if (tb[TCA_CBQ_FOPT]) 1529 if (tb[TCA_CBQ_FOPT])
1794 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); 1530 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
1795 1531
@@ -1848,7 +1584,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
1848 1584
1849 if (tca[TCA_RATE]) { 1585 if (tca[TCA_RATE]) {
1850 err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, 1586 err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
1851 qdisc_root_sleeping_lock(sch), 1587 NULL,
1588 qdisc_root_sleeping_running(sch),
1852 tca[TCA_RATE]); 1589 tca[TCA_RATE]);
1853 if (err) { 1590 if (err) {
1854 kfree(cl); 1591 kfree(cl);
@@ -1884,13 +1621,6 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
1884 cl->maxidle = q->link.maxidle; 1621 cl->maxidle = q->link.maxidle;
1885 if (cl->avpkt == 0) 1622 if (cl->avpkt == 0)
1886 cl->avpkt = q->link.avpkt; 1623 cl->avpkt = q->link.avpkt;
1887 cl->overlimit = cbq_ovl_classic;
1888 if (tb[TCA_CBQ_OVL_STRATEGY])
1889 cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
1890#ifdef CONFIG_NET_CLS_ACT
1891 if (tb[TCA_CBQ_POLICE])
1892 cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
1893#endif
1894 if (tb[TCA_CBQ_FOPT]) 1624 if (tb[TCA_CBQ_FOPT])
1895 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); 1625 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
1896 sch_tree_unlock(sch); 1626 sch_tree_unlock(sch);
@@ -2035,7 +1765,6 @@ static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
2035 .enqueue = cbq_enqueue, 1765 .enqueue = cbq_enqueue,
2036 .dequeue = cbq_dequeue, 1766 .dequeue = cbq_dequeue,
2037 .peek = qdisc_peek_dequeued, 1767 .peek = qdisc_peek_dequeued,
2038 .drop = cbq_drop,
2039 .init = cbq_init, 1768 .init = cbq_init,
2040 .reset = cbq_reset, 1769 .reset = cbq_reset,
2041 .destroy = cbq_destroy, 1770 .destroy = cbq_destroy,
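
Note: the CBQ diet above removes every overlimit strategy except the classic one, along with the TC_POLICE_RECLASSIFY reshape machinery that was the last in-tree user of these hooks; configurations still carrying TCA_CBQ_OVL_STRATEGY or TCA_CBQ_POLICE now fail fast with -EOPNOTSUPP instead of being silently honored. Every overlimit event funnels through cbq_overlimit(), whose core is the offtime-adjusted delay. A sketch mirroring the math visible in the removed cbq_ovl_delay() above (the helper name is illustrative):

/* Sketch of the surviving policy, mirroring the removed
 * cbq_ovl_delay() math above: push the class's undertime out by
 * offtime, corrected by the EWMA of its negative idle time.
 */
static psched_tdiff_t cbq_offtime_delay(struct cbq_sched_data *q,
					struct cbq_class *cl)
{
	psched_tdiff_t delay = cl->undertime - q->now;

	delay += cl->offtime;
	if (cl->avgidle < 0)
		delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
	if (cl->avgidle < cl->minidle)
		cl->avgidle = cl->minidle;
	cl->undertime = q->now + delay;

	return delay;
}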
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 0a08c860eee4..3b6d5bd69101 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -115,7 +115,8 @@ static void choke_zap_tail_holes(struct choke_sched_data *q)
115} 115}
116 116
117/* Drop packet from queue array by creating a "hole" */ 117/* Drop packet from queue array by creating a "hole" */
118static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx) 118static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
119 struct sk_buff **to_free)
119{ 120{
120 struct choke_sched_data *q = qdisc_priv(sch); 121 struct choke_sched_data *q = qdisc_priv(sch);
121 struct sk_buff *skb = q->tab[idx]; 122 struct sk_buff *skb = q->tab[idx];
@@ -129,7 +130,7 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
129 130
130 qdisc_qstats_backlog_dec(sch, skb); 131 qdisc_qstats_backlog_dec(sch, skb);
131 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb)); 132 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
132 qdisc_drop(skb, sch); 133 qdisc_drop(skb, sch, to_free);
133 --sch->q.qlen; 134 --sch->q.qlen;
134} 135}
135 136
@@ -261,7 +262,8 @@ static bool choke_match_random(const struct choke_sched_data *q,
261 return choke_match_flow(oskb, nskb); 262 return choke_match_flow(oskb, nskb);
262} 263}
263 264
264static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) 265static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
266 struct sk_buff **to_free)
265{ 267{
266 int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; 268 int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
267 struct choke_sched_data *q = qdisc_priv(sch); 269 struct choke_sched_data *q = qdisc_priv(sch);
@@ -288,7 +290,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
288 /* Draw a packet at random from queue and compare flow */ 290 /* Draw a packet at random from queue and compare flow */
289 if (choke_match_random(q, skb, &idx)) { 291 if (choke_match_random(q, skb, &idx)) {
290 q->stats.matched++; 292 q->stats.matched++;
291 choke_drop_by_idx(sch, idx); 293 choke_drop_by_idx(sch, idx, to_free);
292 goto congestion_drop; 294 goto congestion_drop;
293 } 295 }
294 296
@@ -331,16 +333,16 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
331 } 333 }
332 334
333 q->stats.pdrop++; 335 q->stats.pdrop++;
334 return qdisc_drop(skb, sch); 336 return qdisc_drop(skb, sch, to_free);
335 337
336congestion_drop: 338congestion_drop:
337 qdisc_drop(skb, sch); 339 qdisc_drop(skb, sch, to_free);
338 return NET_XMIT_CN; 340 return NET_XMIT_CN;
339 341
340other_drop: 342other_drop:
341 if (ret & __NET_XMIT_BYPASS) 343 if (ret & __NET_XMIT_BYPASS)
342 qdisc_qstats_drop(sch); 344 qdisc_qstats_drop(sch);
343 kfree_skb(skb); 345 __qdisc_drop(skb, to_free);
344 return ret; 346 return ret;
345} 347}
346 348
@@ -365,22 +367,6 @@ static struct sk_buff *choke_dequeue(struct Qdisc *sch)
365 return skb; 367 return skb;
366} 368}
367 369
368static unsigned int choke_drop(struct Qdisc *sch)
369{
370 struct choke_sched_data *q = qdisc_priv(sch);
371 unsigned int len;
372
373 len = qdisc_queue_drop(sch);
374 if (len > 0)
375 q->stats.other++;
376 else {
377 if (!red_is_idling(&q->vars))
378 red_start_of_idle_period(&q->vars);
379 }
380
381 return len;
382}
383
384static void choke_reset(struct Qdisc *sch) 370static void choke_reset(struct Qdisc *sch)
385{ 371{
386 struct choke_sched_data *q = qdisc_priv(sch); 372 struct choke_sched_data *q = qdisc_priv(sch);
@@ -391,11 +377,11 @@ static void choke_reset(struct Qdisc *sch)
391 q->head = (q->head + 1) & q->tab_mask; 377 q->head = (q->head + 1) & q->tab_mask;
392 if (!skb) 378 if (!skb)
393 continue; 379 continue;
394 qdisc_qstats_backlog_dec(sch, skb); 380 rtnl_qdisc_drop(skb, sch);
395 --sch->q.qlen;
396 qdisc_drop(skb, sch);
397 } 381 }
398 382
383 sch->q.qlen = 0;
384 sch->qstats.backlog = 0;
399 memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); 385 memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
400 q->head = q->tail = 0; 386 q->head = q->tail = 0;
401 red_restart(&q->vars); 387 red_restart(&q->vars);
@@ -471,7 +457,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
471 dropped += qdisc_pkt_len(skb); 457 dropped += qdisc_pkt_len(skb);
472 qdisc_qstats_backlog_dec(sch, skb); 458 qdisc_qstats_backlog_dec(sch, skb);
473 --sch->q.qlen; 459 --sch->q.qlen;
474 qdisc_drop(skb, sch); 460 rtnl_qdisc_drop(skb, sch);
475 } 461 }
476 qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped); 462 qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
477 q->head = 0; 463 q->head = 0;
@@ -569,7 +555,6 @@ static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
569 .enqueue = choke_enqueue, 555 .enqueue = choke_enqueue,
570 .dequeue = choke_dequeue, 556 .dequeue = choke_dequeue,
571 .peek = choke_peek_head, 557 .peek = choke_peek_head,
572 .drop = choke_drop,
573 .init = choke_init, 558 .init = choke_init,
574 .destroy = choke_destroy, 559 .destroy = choke_destroy,
575 .reset = choke_reset, 560 .reset = choke_reset,
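
Note: choke_reset and choke_change run under RTNL rather than in the fast path, so they have no caller-owned to_free list to hand skbs back through; rtnl_qdisc_drop() covers that case by deferring the actual free until the RTNL section ends. Its assumed shape:

/* Assumed shape of the RTNL-context variant: freeing is deferred onto
 * a list drained when rtnl_unlock() runs, keeping long purge loops
 * from doing all their kfree_skb() work with the lock held.
 */
static inline int rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);	/* chain of one */
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}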
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index dddf3bb65a32..4002df3c7d9f 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -82,7 +82,8 @@ static void drop_func(struct sk_buff *skb, void *ctx)
82{ 82{
83 struct Qdisc *sch = ctx; 83 struct Qdisc *sch = ctx;
84 84
85 qdisc_drop(skb, sch); 85 kfree_skb(skb);
86 qdisc_qstats_drop(sch);
86} 87}
87 88
88static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch) 89static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
@@ -107,7 +108,8 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
107 return skb; 108 return skb;
108} 109}
109 110
110static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) 111static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
112 struct sk_buff **to_free)
111{ 113{
112 struct codel_sched_data *q; 114 struct codel_sched_data *q;
113 115
@@ -117,7 +119,7 @@ static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
117 } 119 }
118 q = qdisc_priv(sch); 120 q = qdisc_priv(sch);
119 q->drop_overlimit++; 121 q->drop_overlimit++;
120 return qdisc_drop(skb, sch); 122 return qdisc_drop(skb, sch, to_free);
121} 123}
122 124
123static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = { 125static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
@@ -174,7 +176,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
174 176
175 dropped += qdisc_pkt_len(skb); 177 dropped += qdisc_pkt_len(skb);
176 qdisc_qstats_backlog_dec(sch, skb); 178 qdisc_qstats_backlog_dec(sch, skb);
177 qdisc_drop(skb, sch); 179 rtnl_qdisc_drop(skb, sch);
178 } 180 }
179 qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped); 181 qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
180 182
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index bf8af2c43c2c..8af5c59eef84 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -91,7 +91,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
91 if (tca[TCA_RATE]) { 91 if (tca[TCA_RATE]) {
92 err = gen_replace_estimator(&cl->bstats, NULL, 92 err = gen_replace_estimator(&cl->bstats, NULL,
93 &cl->rate_est, 93 &cl->rate_est,
94 qdisc_root_sleeping_lock(sch), 94 NULL,
95 qdisc_root_sleeping_running(sch),
95 tca[TCA_RATE]); 96 tca[TCA_RATE]);
96 if (err) 97 if (err)
97 return err; 98 return err;
@@ -119,7 +120,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
119 120
120 if (tca[TCA_RATE]) { 121 if (tca[TCA_RATE]) {
121 err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est, 122 err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
122 qdisc_root_sleeping_lock(sch), 123 NULL,
124 qdisc_root_sleeping_running(sch),
123 tca[TCA_RATE]); 125 tca[TCA_RATE]);
124 if (err) { 126 if (err) {
125 qdisc_destroy(cl->qdisc); 127 qdisc_destroy(cl->qdisc);
@@ -279,7 +281,8 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
279 if (qlen) 281 if (qlen)
280 xstats.deficit = cl->deficit; 282 xstats.deficit = cl->deficit;
281 283
282 if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || 284 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
285 d, NULL, &cl->bstats) < 0 ||
283 gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || 286 gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
284 gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0) 287 gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
285 return -1; 288 return -1;
@@ -347,7 +350,8 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
347 return NULL; 350 return NULL;
348} 351}
349 352
350static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) 353static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
354 struct sk_buff **to_free)
351{ 355{
352 struct drr_sched *q = qdisc_priv(sch); 356 struct drr_sched *q = qdisc_priv(sch);
353 struct drr_class *cl; 357 struct drr_class *cl;
@@ -357,11 +361,11 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
357 if (cl == NULL) { 361 if (cl == NULL) {
358 if (err & __NET_XMIT_BYPASS) 362 if (err & __NET_XMIT_BYPASS)
359 qdisc_qstats_drop(sch); 363 qdisc_qstats_drop(sch);
360 kfree_skb(skb); 364 __qdisc_drop(skb, to_free);
361 return err; 365 return err;
362 } 366 }
363 367
364 err = qdisc_enqueue(skb, cl->qdisc); 368 err = qdisc_enqueue(skb, cl->qdisc, to_free);
365 if (unlikely(err != NET_XMIT_SUCCESS)) { 369 if (unlikely(err != NET_XMIT_SUCCESS)) {
366 if (net_xmit_drop_count(err)) { 370 if (net_xmit_drop_count(err)) {
367 cl->qstats.drops++; 371 cl->qstats.drops++;
@@ -420,27 +424,6 @@ out:
420 return NULL; 424 return NULL;
421} 425}
422 426
423static unsigned int drr_drop(struct Qdisc *sch)
424{
425 struct drr_sched *q = qdisc_priv(sch);
426 struct drr_class *cl;
427 unsigned int len;
428
429 list_for_each_entry(cl, &q->active, alist) {
430 if (cl->qdisc->ops->drop) {
431 len = cl->qdisc->ops->drop(cl->qdisc);
432 if (len > 0) {
433 sch->qstats.backlog -= len;
434 sch->q.qlen--;
435 if (cl->qdisc->q.qlen == 0)
436 list_del(&cl->alist);
437 return len;
438 }
439 }
440 }
441 return 0;
442}
443
444static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt) 427static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
445{ 428{
446 struct drr_sched *q = qdisc_priv(sch); 429 struct drr_sched *q = qdisc_priv(sch);
@@ -510,7 +493,6 @@ static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
510 .enqueue = drr_enqueue, 493 .enqueue = drr_enqueue,
511 .dequeue = drr_dequeue, 494 .dequeue = drr_dequeue,
512 .peek = qdisc_peek_dequeued, 495 .peek = qdisc_peek_dequeued,
513 .drop = drr_drop,
514 .init = drr_init_qdisc, 496 .init = drr_init_qdisc,
515 .reset = drr_reset_qdisc, 497 .reset = drr_reset_qdisc,
516 .destroy = drr_destroy_qdisc, 498 .destroy = drr_destroy_qdisc,
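
Note: for classful qdiscs like DRR the conversion is mostly mechanical. The to_free pointer is threaded down to the child via qdisc_enqueue(), so a drop anywhere in the hierarchy lands on the single list owned by the outermost caller. The wrapper, as assumed from include/net/sch_generic.h:

/* Assumed wrapper from include/net/sch_generic.h: child enqueues
 * inherit the caller's to_free list, so the whole qdisc tree shares
 * one deferred-free batch per transmit.
 */
static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}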
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 34b4ddaca27c..1308bbf460f7 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -191,7 +191,8 @@ static inline struct tcf_proto __rcu **dsmark_find_tcf(struct Qdisc *sch,
191 191
192/* --------------------------- Qdisc operations ---------------------------- */ 192/* --------------------------- Qdisc operations ---------------------------- */
193 193
194static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) 194static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
195 struct sk_buff **to_free)
195{ 196{
196 struct dsmark_qdisc_data *p = qdisc_priv(sch); 197 struct dsmark_qdisc_data *p = qdisc_priv(sch);
197 int err; 198 int err;
@@ -234,7 +235,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
234#ifdef CONFIG_NET_CLS_ACT 235#ifdef CONFIG_NET_CLS_ACT
235 case TC_ACT_QUEUED: 236 case TC_ACT_QUEUED:
236 case TC_ACT_STOLEN: 237 case TC_ACT_STOLEN:
237 kfree_skb(skb); 238 __qdisc_drop(skb, to_free);
238 return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; 239 return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
239 240
240 case TC_ACT_SHOT: 241 case TC_ACT_SHOT:
@@ -251,7 +252,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
251 } 252 }
252 } 253 }
253 254
254 err = qdisc_enqueue(skb, p->q); 255 err = qdisc_enqueue(skb, p->q, to_free);
255 if (err != NET_XMIT_SUCCESS) { 256 if (err != NET_XMIT_SUCCESS) {
256 if (net_xmit_drop_count(err)) 257 if (net_xmit_drop_count(err))
257 qdisc_qstats_drop(sch); 258 qdisc_qstats_drop(sch);
@@ -264,7 +265,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
264 return NET_XMIT_SUCCESS; 265 return NET_XMIT_SUCCESS;
265 266
266drop: 267drop:
267 qdisc_drop(skb, sch); 268 qdisc_drop(skb, sch, to_free);
268 return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; 269 return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
269} 270}
270 271
@@ -320,23 +321,6 @@ static struct sk_buff *dsmark_peek(struct Qdisc *sch)
320 return p->q->ops->peek(p->q); 321 return p->q->ops->peek(p->q);
321} 322}
322 323
323static unsigned int dsmark_drop(struct Qdisc *sch)
324{
325 struct dsmark_qdisc_data *p = qdisc_priv(sch);
326 unsigned int len;
327
328 pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
329
330 if (p->q->ops->drop == NULL)
331 return 0;
332
333 len = p->q->ops->drop(p->q);
334 if (len)
335 sch->q.qlen--;
336
337 return len;
338}
339
340static int dsmark_init(struct Qdisc *sch, struct nlattr *opt) 324static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
341{ 325{
342 struct dsmark_qdisc_data *p = qdisc_priv(sch); 326 struct dsmark_qdisc_data *p = qdisc_priv(sch);
@@ -489,7 +473,6 @@ static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
489 .enqueue = dsmark_enqueue, 473 .enqueue = dsmark_enqueue,
490 .dequeue = dsmark_dequeue, 474 .dequeue = dsmark_dequeue,
491 .peek = dsmark_peek, 475 .peek = dsmark_peek,
492 .drop = dsmark_drop,
493 .init = dsmark_init, 476 .init = dsmark_init,
494 .reset = dsmark_reset, 477 .reset = dsmark_reset,
495 .destroy = dsmark_destroy, 478 .destroy = dsmark_destroy,
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 2e4bd2c0a50c..baeed6a78d28 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -19,23 +19,26 @@
19 19
20/* 1 band FIFO pseudo-"scheduler" */ 20/* 1 band FIFO pseudo-"scheduler" */
21 21
22static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) 22static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
23 struct sk_buff **to_free)
23{ 24{
24 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit)) 25 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
25 return qdisc_enqueue_tail(skb, sch); 26 return qdisc_enqueue_tail(skb, sch);
26 27
27 return qdisc_reshape_fail(skb, sch); 28 return qdisc_drop(skb, sch, to_free);
28} 29}
29 30
30static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) 31static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
32 struct sk_buff **to_free)
31{ 33{
32 if (likely(skb_queue_len(&sch->q) < sch->limit)) 34 if (likely(skb_queue_len(&sch->q) < sch->limit))
33 return qdisc_enqueue_tail(skb, sch); 35 return qdisc_enqueue_tail(skb, sch);
34 36
35 return qdisc_reshape_fail(skb, sch); 37 return qdisc_drop(skb, sch, to_free);
36} 38}
37 39
38static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch) 40static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
41 struct sk_buff **to_free)
39{ 42{
40 unsigned int prev_backlog; 43 unsigned int prev_backlog;
41 44
@@ -44,7 +47,7 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
44 47
45 prev_backlog = sch->qstats.backlog; 48 prev_backlog = sch->qstats.backlog;
46 /* queue full, remove one skb to fulfill the limit */ 49 /* queue full, remove one skb to fulfill the limit */
47 __qdisc_queue_drop_head(sch, &sch->q); 50 __qdisc_queue_drop_head(sch, &sch->q, to_free);
48 qdisc_qstats_drop(sch); 51 qdisc_qstats_drop(sch);
49 qdisc_enqueue_tail(skb, sch); 52 qdisc_enqueue_tail(skb, sch);
50 53
@@ -103,7 +106,6 @@ struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
103 .enqueue = pfifo_enqueue, 106 .enqueue = pfifo_enqueue,
104 .dequeue = qdisc_dequeue_head, 107 .dequeue = qdisc_dequeue_head,
105 .peek = qdisc_peek_head, 108 .peek = qdisc_peek_head,
106 .drop = qdisc_queue_drop,
107 .init = fifo_init, 109 .init = fifo_init,
108 .reset = qdisc_reset_queue, 110 .reset = qdisc_reset_queue,
109 .change = fifo_init, 111 .change = fifo_init,
@@ -118,7 +120,6 @@ struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
118 .enqueue = bfifo_enqueue, 120 .enqueue = bfifo_enqueue,
119 .dequeue = qdisc_dequeue_head, 121 .dequeue = qdisc_dequeue_head,
120 .peek = qdisc_peek_head, 122 .peek = qdisc_peek_head,
121 .drop = qdisc_queue_drop,
122 .init = fifo_init, 123 .init = fifo_init,
123 .reset = qdisc_reset_queue, 124 .reset = qdisc_reset_queue,
124 .change = fifo_init, 125 .change = fifo_init,
@@ -133,7 +134,6 @@ struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
133 .enqueue = pfifo_tail_enqueue, 134 .enqueue = pfifo_tail_enqueue,
134 .dequeue = qdisc_dequeue_head, 135 .dequeue = qdisc_dequeue_head,
135 .peek = qdisc_peek_head, 136 .peek = qdisc_peek_head,
136 .drop = qdisc_queue_drop_head,
137 .init = fifo_init, 137 .init = fifo_init,
138 .reset = qdisc_reset_queue, 138 .reset = qdisc_reset_queue,
139 .change = fifo_init, 139 .change = fifo_init,
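
Note: with qdisc_reshape_fail() gone (its only remaining purpose was the CBQ police hook removed above), the FIFOs drop over-limit packets directly, and pfifo_head_drop evicts through a to_free-aware head-drop helper. A sketch of that helper under the same assumptions:

/* Sketch of the head-drop helper (assumed to match
 * include/net/sch_generic.h): dequeue the head, fix up backlog
 * accounting, and park the skb on to_free for deferred freeing.
 */
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct sk_buff_head *list,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}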
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 3c6a47d66a04..e5458b99e09c 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -368,18 +368,19 @@ static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
368 } 368 }
369} 369}
370 370
371static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch) 371static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
372 struct sk_buff **to_free)
372{ 373{
373 struct fq_sched_data *q = qdisc_priv(sch); 374 struct fq_sched_data *q = qdisc_priv(sch);
374 struct fq_flow *f; 375 struct fq_flow *f;
375 376
376 if (unlikely(sch->q.qlen >= sch->limit)) 377 if (unlikely(sch->q.qlen >= sch->limit))
377 return qdisc_drop(skb, sch); 378 return qdisc_drop(skb, sch, to_free);
378 379
379 f = fq_classify(skb, q); 380 f = fq_classify(skb, q);
380 if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) { 381 if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
381 q->stat_flows_plimit++; 382 q->stat_flows_plimit++;
382 return qdisc_drop(skb, sch); 383 return qdisc_drop(skb, sch, to_free);
383 } 384 }
384 385
385 f->qlen++; 386 f->qlen++;
@@ -445,8 +446,7 @@ begin:
445 if (!head->first) { 446 if (!head->first) {
446 if (q->time_next_delayed_flow != ~0ULL) 447 if (q->time_next_delayed_flow != ~0ULL)
447 qdisc_watchdog_schedule_ns(&q->watchdog, 448 qdisc_watchdog_schedule_ns(&q->watchdog,
448 q->time_next_delayed_flow, 449 q->time_next_delayed_flow);
449 false);
450 return NULL; 450 return NULL;
451 } 451 }
452 } 452 }
@@ -515,17 +515,25 @@ out:
515 return skb; 515 return skb;
516} 516}
517 517
518static void fq_flow_purge(struct fq_flow *flow)
519{
520 rtnl_kfree_skbs(flow->head, flow->tail);
521 flow->head = NULL;
522 flow->qlen = 0;
523}
524
518static void fq_reset(struct Qdisc *sch) 525static void fq_reset(struct Qdisc *sch)
519{ 526{
520 struct fq_sched_data *q = qdisc_priv(sch); 527 struct fq_sched_data *q = qdisc_priv(sch);
521 struct rb_root *root; 528 struct rb_root *root;
522 struct sk_buff *skb;
523 struct rb_node *p; 529 struct rb_node *p;
524 struct fq_flow *f; 530 struct fq_flow *f;
525 unsigned int idx; 531 unsigned int idx;
526 532
527 while ((skb = fq_dequeue_head(sch, &q->internal)) != NULL) 533 sch->q.qlen = 0;
528 kfree_skb(skb); 534 sch->qstats.backlog = 0;
535
536 fq_flow_purge(&q->internal);
529 537
530 if (!q->fq_root) 538 if (!q->fq_root)
531 return; 539 return;
@@ -536,8 +544,7 @@ static void fq_reset(struct Qdisc *sch)
536 f = container_of(p, struct fq_flow, fq_node); 544 f = container_of(p, struct fq_flow, fq_node);
537 rb_erase(p, root); 545 rb_erase(p, root);
538 546
539 while ((skb = fq_dequeue_head(sch, f)) != NULL) 547 fq_flow_purge(f);
540 kfree_skb(skb);
541 548
542 kmem_cache_free(fq_flow_cachep, f); 549 kmem_cache_free(fq_flow_cachep, f);
543 } 550 }
@@ -738,7 +745,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
738 if (!skb) 745 if (!skb)
739 break; 746 break;
740 drop_len += qdisc_pkt_len(skb); 747 drop_len += qdisc_pkt_len(skb);
741 kfree_skb(skb); 748 rtnl_kfree_skbs(skb, skb);
742 drop_count++; 749 drop_count++;
743 } 750 }
744 qdisc_tree_reduce_backlog(sch, drop_count, drop_len); 751 qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
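
Note: fq_flow_purge hands an entire flow's chain to rtnl_kfree_skbs(head, tail) in one call; since fq tracks both ends of each flow's list, no walk is needed. A plausible core of the deferral itself, assuming a single pending list drained when RTNL is released:

/* Plausible implementation (names assumed): splice the chain
 * [head..tail] onto a pending list that is handed to
 * kfree_skb_list() from rtnl_unlock()'s todo processing.
 */
static struct sk_buff *defer_kfree_skb_list;

void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}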
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index da250b2e06ae..a5ea0e9b6be4 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -139,7 +139,8 @@ static inline void flow_queue_add(struct fq_codel_flow *flow,
139 skb->next = NULL; 139 skb->next = NULL;
140} 140}
141 141
142static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets) 142static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
143 struct sk_buff **to_free)
143{ 144{
144 struct fq_codel_sched_data *q = qdisc_priv(sch); 145 struct fq_codel_sched_data *q = qdisc_priv(sch);
145 struct sk_buff *skb; 146 struct sk_buff *skb;
@@ -171,8 +172,8 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets)
171 do { 172 do {
172 skb = dequeue_head(flow); 173 skb = dequeue_head(flow);
173 len += qdisc_pkt_len(skb); 174 len += qdisc_pkt_len(skb);
174 mem += skb->truesize; 175 mem += get_codel_cb(skb)->mem_usage;
175 kfree_skb(skb); 176 __qdisc_drop(skb, to_free);
176 } while (++i < max_packets && len < threshold); 177 } while (++i < max_packets && len < threshold);
177 178
178 flow->dropped += i; 179 flow->dropped += i;
@@ -184,16 +185,8 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets)
184 return idx; 185 return idx;
185} 186}
186 187
187static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch) 188static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
188{ 189 struct sk_buff **to_free)
189 unsigned int prev_backlog;
190
191 prev_backlog = sch->qstats.backlog;
192 fq_codel_drop(sch, 1U);
193 return prev_backlog - sch->qstats.backlog;
194}
195
196static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
197{ 190{
198 struct fq_codel_sched_data *q = qdisc_priv(sch); 191 struct fq_codel_sched_data *q = qdisc_priv(sch);
199 unsigned int idx, prev_backlog, prev_qlen; 192 unsigned int idx, prev_backlog, prev_qlen;
@@ -206,7 +199,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
206 if (idx == 0) { 199 if (idx == 0) {
207 if (ret & __NET_XMIT_BYPASS) 200 if (ret & __NET_XMIT_BYPASS)
208 qdisc_qstats_drop(sch); 201 qdisc_qstats_drop(sch);
209 kfree_skb(skb); 202 __qdisc_drop(skb, to_free);
210 return ret; 203 return ret;
211 } 204 }
212 idx--; 205 idx--;
@@ -223,7 +216,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
223 flow->deficit = q->quantum; 216 flow->deficit = q->quantum;
224 flow->dropped = 0; 217 flow->dropped = 0;
225 } 218 }
226 q->memory_usage += skb->truesize; 219 get_codel_cb(skb)->mem_usage = skb->truesize;
220 q->memory_usage += get_codel_cb(skb)->mem_usage;
227 memory_limited = q->memory_usage > q->memory_limit; 221 memory_limited = q->memory_usage > q->memory_limit;
228 if (++sch->q.qlen <= sch->limit && !memory_limited) 222 if (++sch->q.qlen <= sch->limit && !memory_limited)
229 return NET_XMIT_SUCCESS; 223 return NET_XMIT_SUCCESS;
@@ -238,7 +232,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
238 * So instead of dropping a single packet, drop half of its backlog 232 * So instead of dropping a single packet, drop half of its backlog
239 * with a 64 packets limit to not add a too big cpu spike here. 233 * with a 64 packets limit to not add a too big cpu spike here.
240 */ 234 */
241 ret = fq_codel_drop(sch, q->drop_batch_size); 235 ret = fq_codel_drop(sch, q->drop_batch_size, to_free);
242 236
243 prev_qlen -= sch->q.qlen; 237 prev_qlen -= sch->q.qlen;
244 prev_backlog -= sch->qstats.backlog; 238 prev_backlog -= sch->qstats.backlog;
@@ -274,7 +268,7 @@ static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
274 if (flow->head) { 268 if (flow->head) {
275 skb = dequeue_head(flow); 269 skb = dequeue_head(flow);
276 q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb); 270 q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
277 q->memory_usage -= skb->truesize; 271 q->memory_usage -= get_codel_cb(skb)->mem_usage;
278 sch->q.qlen--; 272 sch->q.qlen--;
279 sch->qstats.backlog -= qdisc_pkt_len(skb); 273 sch->qstats.backlog -= qdisc_pkt_len(skb);
280 } 274 }
@@ -285,7 +279,8 @@ static void drop_func(struct sk_buff *skb, void *ctx)
285{ 279{
286 struct Qdisc *sch = ctx; 280 struct Qdisc *sch = ctx;
287 281
288 qdisc_drop(skb, sch); 282 kfree_skb(skb);
283 qdisc_qstats_drop(sch);
289} 284}
290 285
291static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch) 286static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
@@ -345,6 +340,12 @@ begin:
345 return skb; 340 return skb;
346} 341}
347 342
343static void fq_codel_flow_purge(struct fq_codel_flow *flow)
344{
345 rtnl_kfree_skbs(flow->head, flow->tail);
346 flow->head = NULL;
347}
348
348static void fq_codel_reset(struct Qdisc *sch) 349static void fq_codel_reset(struct Qdisc *sch)
349{ 350{
350 struct fq_codel_sched_data *q = qdisc_priv(sch); 351 struct fq_codel_sched_data *q = qdisc_priv(sch);
@@ -355,18 +356,13 @@ static void fq_codel_reset(struct Qdisc *sch)
355 for (i = 0; i < q->flows_cnt; i++) { 356 for (i = 0; i < q->flows_cnt; i++) {
356 struct fq_codel_flow *flow = q->flows + i; 357 struct fq_codel_flow *flow = q->flows + i;
357 358
358 while (flow->head) { 359 fq_codel_flow_purge(flow);
359 struct sk_buff *skb = dequeue_head(flow);
360
361 qdisc_qstats_backlog_dec(sch, skb);
362 kfree_skb(skb);
363 }
364
365 INIT_LIST_HEAD(&flow->flowchain); 360 INIT_LIST_HEAD(&flow->flowchain);
366 codel_vars_init(&flow->cvars); 361 codel_vars_init(&flow->cvars);
367 } 362 }
368 memset(q->backlogs, 0, q->flows_cnt * sizeof(u32)); 363 memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
369 sch->q.qlen = 0; 364 sch->q.qlen = 0;
365 sch->qstats.backlog = 0;
370 q->memory_usage = 0; 366 q->memory_usage = 0;
371} 367}
372 368
@@ -442,7 +438,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
442 struct sk_buff *skb = fq_codel_dequeue(sch); 438 struct sk_buff *skb = fq_codel_dequeue(sch);
443 439
444 q->cstats.drop_len += qdisc_pkt_len(skb); 440 q->cstats.drop_len += qdisc_pkt_len(skb);
445 kfree_skb(skb); 441 rtnl_kfree_skbs(skb, skb);
446 q->cstats.drop_count++; 442 q->cstats.drop_count++;
447 } 443 }
448 qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len); 444 qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
@@ -578,11 +574,13 @@ static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
578 st.qdisc_stats.memory_usage = q->memory_usage; 574 st.qdisc_stats.memory_usage = q->memory_usage;
579 st.qdisc_stats.drop_overmemory = q->drop_overmemory; 575 st.qdisc_stats.drop_overmemory = q->drop_overmemory;
580 576
577 sch_tree_lock(sch);
581 list_for_each(pos, &q->new_flows) 578 list_for_each(pos, &q->new_flows)
582 st.qdisc_stats.new_flows_len++; 579 st.qdisc_stats.new_flows_len++;
583 580
584 list_for_each(pos, &q->old_flows) 581 list_for_each(pos, &q->old_flows)
585 st.qdisc_stats.old_flows_len++; 582 st.qdisc_stats.old_flows_len++;
583 sch_tree_unlock(sch);
586 584
587 return gnet_stats_copy_app(d, &st, sizeof(st)); 585 return gnet_stats_copy_app(d, &st, sizeof(st));
588} 586}
@@ -636,7 +634,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
636 634
637 if (idx < q->flows_cnt) { 635 if (idx < q->flows_cnt) {
638 const struct fq_codel_flow *flow = &q->flows[idx]; 636 const struct fq_codel_flow *flow = &q->flows[idx];
639 const struct sk_buff *skb = flow->head; 637 const struct sk_buff *skb;
640 638
641 memset(&xstats, 0, sizeof(xstats)); 639 memset(&xstats, 0, sizeof(xstats));
642 xstats.type = TCA_FQ_CODEL_XSTATS_CLASS; 640 xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
@@ -654,9 +652,14 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
654 codel_time_to_us(delta) : 652 codel_time_to_us(delta) :
655 -codel_time_to_us(-delta); 653 -codel_time_to_us(-delta);
656 } 654 }
657 while (skb) { 655 if (flow->head) {
658 qs.qlen++; 656 sch_tree_lock(sch);
659 skb = skb->next; 657 skb = flow->head;
658 while (skb) {
659 qs.qlen++;
660 skb = skb->next;
661 }
662 sch_tree_unlock(sch);
660 } 663 }
661 qs.backlog = q->backlogs[idx]; 664 qs.backlog = q->backlogs[idx];
662 qs.drops = flow->dropped; 665 qs.drops = flow->dropped;
@@ -709,7 +712,6 @@ static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
709 .enqueue = fq_codel_enqueue, 712 .enqueue = fq_codel_enqueue,
710 .dequeue = fq_codel_dequeue, 713 .dequeue = fq_codel_dequeue,
711 .peek = qdisc_peek_dequeued, 714 .peek = qdisc_peek_dequeued,
712 .drop = fq_codel_qdisc_drop,
713 .init = fq_codel_init, 715 .init = fq_codel_init,
714 .reset = fq_codel_reset, 716 .reset = fq_codel_reset,
715 .destroy = fq_codel_destroy, 717 .destroy = fq_codel_destroy,
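
This is the first of many ops tables in the series to lose its .drop entry: with a to_free argument threaded through ->enqueue(), an overlimit qdisc drops inside enqueue and reports the freed packets back to the caller, so a separate drop callback is no longer needed. A compact sketch of the callee side (stand-in types and return codes, not the kernel's):

struct skb { struct skb *next; };
struct qdisc { unsigned int qlen, limit; };

enum { XMIT_SUCCESS = 0, XMIT_DROP = 1 };

/* __qdisc_drop() analogue: chain the packet for the caller to free
 * once it is out of the locked region. */
static int drop_via_to_free(struct skb *skb, struct skb **to_free)
{
    skb->next = *to_free;
    *to_free = skb;
    return XMIT_DROP;
}

static int sketch_enqueue(struct skb *skb, struct qdisc *q,
                          struct skb **to_free)
{
    if (q->qlen >= q->limit)
        return drop_via_to_free(skb, to_free);
    q->qlen++;               /* real code links skb into a queue here */
    return XMIT_SUCCESS;
}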
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index f9e0e9c03d0a..e95b67cd5718 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -77,6 +77,34 @@ static void try_bulk_dequeue_skb(struct Qdisc *q,
77 skb->next = NULL; 77 skb->next = NULL;
78} 78}
79 79
80/* This variant of try_bulk_dequeue_skb() makes sure
81 * all skbs in the chain are for the same txq
82 */
83static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
84 struct sk_buff *skb,
85 int *packets)
86{
87 int mapping = skb_get_queue_mapping(skb);
88 struct sk_buff *nskb;
89 int cnt = 0;
90
91 do {
92 nskb = q->dequeue(q);
93 if (!nskb)
94 break;
95 if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
96 q->skb_bad_txq = nskb;
97 qdisc_qstats_backlog_inc(q, nskb);
98 q->q.qlen++;
99 break;
100 }
101 skb->next = nskb;
102 skb = nskb;
103 } while (++cnt < 8);
104 (*packets) += cnt;
105 skb->next = NULL;
106}
107
80/* Note that dequeue_skb can possibly return a SKB list (via skb->next). 108/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
81 * A requeued skb (via q->gso_skb) can also be a SKB list. 109 * A requeued skb (via q->gso_skb) can also be a SKB list.
82 */ 110 */
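
try_bulk_dequeue_skb_slow() above chains further packets onto the first one only while they map to the same transmit queue, parks the first mismatch in q->skb_bad_txq for a later retry, and caps the burst at eight packets. A userspace sketch of that loop (illustrative types):

struct pkt { struct pkt *next; int txq; };

struct queue {
    struct pkt *(*dequeue)(struct queue *q);
    struct pkt *bad_txq;         /* parked packet for another txq */
};

/* Chain up to 8 follow-on packets that share first's tx queue; the
 * first mismatch is parked for a later retry (the kernel also
 * re-accounts it into qlen/backlog at this point). */
static int bulk_dequeue_same_txq(struct queue *q, struct pkt *first)
{
    int mapping = first->txq;
    struct pkt *tail = first;
    int cnt = 0;

    do {
        struct pkt *n = q->dequeue(q);

        if (!n)
            break;
        if (n->txq != mapping) {
            q->bad_txq = n;
            break;
        }
        tail->next = n;
        tail = n;
    } while (++cnt < 8);
    tail->next = NULL;
    return cnt + 1;              /* packets in the chain, incl. first */
}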
@@ -87,8 +115,9 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
87 const struct netdev_queue *txq = q->dev_queue; 115 const struct netdev_queue *txq = q->dev_queue;
88 116
89 *packets = 1; 117 *packets = 1;
90 *validate = true;
91 if (unlikely(skb)) { 118 if (unlikely(skb)) {
119 /* skb in gso_skb were already validated */
120 *validate = false;
92 /* check the reason of requeuing without tx lock first */ 121 /* check the reason of requeuing without tx lock first */
93 txq = skb_get_tx_queue(txq->dev, skb); 122 txq = skb_get_tx_queue(txq->dev, skb);
94 if (!netif_xmit_frozen_or_stopped(txq)) { 123 if (!netif_xmit_frozen_or_stopped(txq)) {
@@ -97,22 +126,37 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
97 q->q.qlen--; 126 q->q.qlen--;
98 } else 127 } else
99 skb = NULL; 128 skb = NULL;
100 /* skb in gso_skb were already validated */ 129 return skb;
101 *validate = false; 130 }
102 } else { 131 *validate = true;
103 if (!(q->flags & TCQ_F_ONETXQUEUE) || 132 skb = q->skb_bad_txq;
104 !netif_xmit_frozen_or_stopped(txq)) { 133 if (unlikely(skb)) {
105 skb = q->dequeue(q); 134 /* check the reason of requeuing without tx lock first */
106 if (skb && qdisc_may_bulk(q)) 135 txq = skb_get_tx_queue(txq->dev, skb);
107 try_bulk_dequeue_skb(q, skb, txq, packets); 136 if (!netif_xmit_frozen_or_stopped(txq)) {
137 q->skb_bad_txq = NULL;
138 qdisc_qstats_backlog_dec(q, skb);
139 q->q.qlen--;
140 goto bulk;
108 } 141 }
142 return NULL;
143 }
144 if (!(q->flags & TCQ_F_ONETXQUEUE) ||
145 !netif_xmit_frozen_or_stopped(txq))
146 skb = q->dequeue(q);
147 if (skb) {
148bulk:
149 if (qdisc_may_bulk(q))
150 try_bulk_dequeue_skb(q, skb, txq, packets);
151 else
152 try_bulk_dequeue_skb_slow(q, skb, packets);
109 } 153 }
110 return skb; 154 return skb;
111} 155}
112 156
113/* 157/*
114 * Transmit possibly several skbs, and handle the return status as 158 * Transmit possibly several skbs, and handle the return status as
115 * required. Holding the __QDISC___STATE_RUNNING bit guarantees that 159 * required. Owning running seqcount bit guarantees that
116 * only one CPU can execute this function. 160 * only one CPU can execute this function.
117 * 161 *
118 * Returns to the caller: 162 * Returns to the caller:
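
The restructured dequeue_skb() tries three sources in order: a requeued gso_skb (already validated, hence *validate = false), then the parked bad-txq packet (which re-enters the bulking path via the bulk label once its queue has recovered), and only then a fresh q->dequeue(). Roughly, with stand-in types:

#include <stdbool.h>
#include <stddef.h>

struct pkt { struct pkt *next; };
struct q {
    struct pkt *requeued;   /* gso_skb analogue, pre-validated */
    struct pkt *bad_txq;    /* parked for a stopped/frozen txq  */
    struct pkt *(*dequeue)(struct q *q);
    bool (*txq_stopped)(struct q *q, struct pkt *p);
};

static struct pkt *dequeue_one(struct q *q, bool *validate)
{
    struct pkt *p = q->requeued;

    *validate = true;
    if (p) {
        *validate = false;             /* already validated */
        if (q->txq_stopped(q, p))
            return NULL;               /* leave it requeued */
        q->requeued = NULL;
        return p;
    }
    p = q->bad_txq;
    if (p) {
        if (q->txq_stopped(q, p))
            return NULL;               /* leave it parked */
        q->bad_txq = NULL;
        return p;                      /* real code jumps to bulking */
    }
    return q->dequeue(q);
}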
@@ -165,7 +209,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
165/* 209/*
166 * NOTE: Called under qdisc_lock(q) with locally disabled BH. 210 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
167 * 211 *
168 * __QDISC___STATE_RUNNING guarantees only one CPU can process 212 * running seqcount guarantees only one CPU can process
169 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for 213 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
170 * this queue. 214 * this queue.
171 * 215 *
@@ -348,9 +392,10 @@ EXPORT_SYMBOL(netif_carrier_off);
348 cheaper. 392 cheaper.
349 */ 393 */
350 394
351static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc) 395static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
396 struct sk_buff **to_free)
352{ 397{
353 kfree_skb(skb); 398 __qdisc_drop(skb, to_free);
354 return NET_XMIT_CN; 399 return NET_XMIT_CN;
355} 400}
356 401
@@ -381,6 +426,7 @@ struct Qdisc noop_qdisc = {
381 .list = LIST_HEAD_INIT(noop_qdisc.list), 426 .list = LIST_HEAD_INIT(noop_qdisc.list),
382 .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), 427 .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
383 .dev_queue = &noop_netdev_queue, 428 .dev_queue = &noop_netdev_queue,
429 .running = SEQCNT_ZERO(noop_qdisc.running),
384 .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock), 430 .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
385}; 431};
386EXPORT_SYMBOL(noop_qdisc); 432EXPORT_SYMBOL(noop_qdisc);
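
noop_qdisc gains a .running seqcount here because the series replaces the __QDISC___STATE_RUNNING bit with a per-qdisc seqcount: the count is odd while some CPU owns the qdisc, and the same counter lets stats readers detect concurrent updates. A userspace analogue of the acquire/release side (the spirit of the mechanism only; the kernel's seqcount write side relies on the qdisc spinlock to serialize writers rather than on a CAS):

#include <stdatomic.h>
#include <stdbool.h>

/* Single-owner "running" guard: odd sequence means owned. */
struct running { atomic_uint seq; };

static bool run_begin(struct running *r)
{
    unsigned int s = atomic_load_explicit(&r->seq, memory_order_relaxed);

    if (s & 1)               /* already owned by another CPU */
        return false;
    return atomic_compare_exchange_strong(&r->seq, &s, s + 1);
}

static void run_end(struct running *r)
{
    atomic_fetch_add_explicit(&r->seq, 1, memory_order_release);
}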
@@ -438,7 +484,8 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
438 return priv->q + band; 484 return priv->q + band;
439} 485}
440 486
441static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc) 487static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
488 struct sk_buff **to_free)
442{ 489{
443 if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) { 490 if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
444 int band = prio2band[skb->priority & TC_PRIO_MAX]; 491 int band = prio2band[skb->priority & TC_PRIO_MAX];
@@ -450,7 +497,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
450 return __qdisc_enqueue_tail(skb, qdisc, list); 497 return __qdisc_enqueue_tail(skb, qdisc, list);
451 } 498 }
452 499
453 return qdisc_drop(skb, qdisc); 500 return qdisc_drop(skb, qdisc, to_free);
454} 501}
455 502
456static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc) 503static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
@@ -492,7 +539,7 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
492 struct pfifo_fast_priv *priv = qdisc_priv(qdisc); 539 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
493 540
494 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) 541 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
495 __qdisc_reset_queue(qdisc, band2list(priv, prio)); 542 __qdisc_reset_queue(band2list(priv, prio));
496 543
497 priv->bitmap = 0; 544 priv->bitmap = 0;
498 qdisc->qstats.backlog = 0; 545 qdisc->qstats.backlog = 0;
@@ -539,6 +586,7 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
539EXPORT_SYMBOL(pfifo_fast_ops); 586EXPORT_SYMBOL(pfifo_fast_ops);
540 587
541static struct lock_class_key qdisc_tx_busylock; 588static struct lock_class_key qdisc_tx_busylock;
589static struct lock_class_key qdisc_running_key;
542 590
543struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, 591struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
544 const struct Qdisc_ops *ops) 592 const struct Qdisc_ops *ops)
@@ -572,6 +620,10 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
572 lockdep_set_class(&sch->busylock, 620 lockdep_set_class(&sch->busylock,
573 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); 621 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
574 622
623 seqcount_init(&sch->running);
624 lockdep_set_class(&sch->running,
625 dev->qdisc_running_key ?: &qdisc_running_key);
626
575 sch->ops = ops; 627 sch->ops = ops;
576 sch->enqueue = ops->enqueue; 628 sch->enqueue = ops->enqueue;
577 sch->dequeue = ops->dequeue; 629 sch->dequeue = ops->dequeue;
@@ -616,11 +668,14 @@ void qdisc_reset(struct Qdisc *qdisc)
616 if (ops->reset) 668 if (ops->reset)
617 ops->reset(qdisc); 669 ops->reset(qdisc);
618 670
671 kfree_skb(qdisc->skb_bad_txq);
672 qdisc->skb_bad_txq = NULL;
673
619 if (qdisc->gso_skb) { 674 if (qdisc->gso_skb) {
620 kfree_skb_list(qdisc->gso_skb); 675 kfree_skb_list(qdisc->gso_skb);
621 qdisc->gso_skb = NULL; 676 qdisc->gso_skb = NULL;
622 qdisc->q.qlen = 0;
623 } 677 }
678 qdisc->q.qlen = 0;
624} 679}
625EXPORT_SYMBOL(qdisc_reset); 680EXPORT_SYMBOL(qdisc_reset);
626 681
@@ -659,6 +714,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
659 dev_put(qdisc_dev(qdisc)); 714 dev_put(qdisc_dev(qdisc));
660 715
661 kfree_skb_list(qdisc->gso_skb); 716 kfree_skb_list(qdisc->gso_skb);
717 kfree_skb(qdisc->skb_bad_txq);
662 /* 718 /*
663 * gen_estimator est_timer() might access qdisc->q.lock, 719 * gen_estimator est_timer() might access qdisc->q.lock,
664 * wait a RCU grace period before freeing qdisc. 720 * wait a RCU grace period before freeing qdisc.
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 80105109f756..c78a093c551a 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -149,7 +149,8 @@ static inline int gred_use_harddrop(struct gred_sched *t)
149 return t->red_flags & TC_RED_HARDDROP; 149 return t->red_flags & TC_RED_HARDDROP;
150} 150}
151 151
152static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) 152static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
153 struct sk_buff **to_free)
153{ 154{
154 struct gred_sched_data *q = NULL; 155 struct gred_sched_data *q = NULL;
155 struct gred_sched *t = qdisc_priv(sch); 156 struct gred_sched *t = qdisc_priv(sch);
@@ -237,10 +238,10 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
237 238
238 q->stats.pdrop++; 239 q->stats.pdrop++;
239drop: 240drop:
240 return qdisc_drop(skb, sch); 241 return qdisc_drop(skb, sch, to_free);
241 242
242congestion_drop: 243congestion_drop:
243 qdisc_drop(skb, sch); 244 qdisc_drop(skb, sch, to_free);
244 return NET_XMIT_CN; 245 return NET_XMIT_CN;
245} 246}
246 247
@@ -276,40 +277,6 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch)
276 return NULL; 277 return NULL;
277} 278}
278 279
279static unsigned int gred_drop(struct Qdisc *sch)
280{
281 struct sk_buff *skb;
282 struct gred_sched *t = qdisc_priv(sch);
283
284 skb = qdisc_dequeue_tail(sch);
285 if (skb) {
286 unsigned int len = qdisc_pkt_len(skb);
287 struct gred_sched_data *q;
288 u16 dp = tc_index_to_dp(skb);
289
290 if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
291 net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x while dropping, screwing up backlog\n",
292 tc_index_to_dp(skb));
293 } else {
294 q->backlog -= len;
295 q->stats.other++;
296
297 if (gred_wred_mode(t)) {
298 if (!sch->qstats.backlog)
299 red_start_of_idle_period(&t->wred_set);
300 } else {
301 if (!q->backlog)
302 red_start_of_idle_period(&q->vars);
303 }
304 }
305
306 qdisc_drop(skb, sch);
307 return len;
308 }
309
310 return 0;
311}
312
313static void gred_reset(struct Qdisc *sch) 280static void gred_reset(struct Qdisc *sch)
314{ 281{
315 int i; 282 int i;
@@ -623,7 +590,6 @@ static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
623 .enqueue = gred_enqueue, 590 .enqueue = gred_enqueue,
624 .dequeue = gred_dequeue, 591 .dequeue = gred_dequeue,
625 .peek = qdisc_peek_head, 592 .peek = qdisc_peek_head,
626 .drop = gred_drop,
627 .init = gred_init, 593 .init = gred_init,
628 .reset = gred_reset, 594 .reset = gred_reset,
629 .destroy = gred_destroy, 595 .destroy = gred_destroy,
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 1ac9f9f03fe3..8cb5eff7b79c 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1015,11 +1015,10 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1015 cur_time = psched_get_time(); 1015 cur_time = psched_get_time();
1016 1016
1017 if (tca[TCA_RATE]) { 1017 if (tca[TCA_RATE]) {
1018 spinlock_t *lock = qdisc_root_sleeping_lock(sch);
1019
1020 err = gen_replace_estimator(&cl->bstats, NULL, 1018 err = gen_replace_estimator(&cl->bstats, NULL,
1021 &cl->rate_est, 1019 &cl->rate_est,
1022 lock, 1020 NULL,
1021 qdisc_root_sleeping_running(sch),
1023 tca[TCA_RATE]); 1022 tca[TCA_RATE]);
1024 if (err) 1023 if (err)
1025 return err; 1024 return err;
@@ -1068,7 +1067,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1068 1067
1069 if (tca[TCA_RATE]) { 1068 if (tca[TCA_RATE]) {
1070 err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, 1069 err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
1071 qdisc_root_sleeping_lock(sch), 1070 NULL,
1071 qdisc_root_sleeping_running(sch),
1072 tca[TCA_RATE]); 1072 tca[TCA_RATE]);
1073 if (err) { 1073 if (err) {
1074 kfree(cl); 1074 kfree(cl);
@@ -1373,7 +1373,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
1373 xstats.work = cl->cl_total; 1373 xstats.work = cl->cl_total;
1374 xstats.rtwork = cl->cl_cumul; 1374 xstats.rtwork = cl->cl_cumul;
1375 1375
1376 if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || 1376 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
1377 gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || 1377 gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
1378 gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0) 1378 gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
1379 return -1; 1379 return -1;
@@ -1572,7 +1572,7 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
1572} 1572}
1573 1573
1574static int 1574static int
1575hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) 1575hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
1576{ 1576{
1577 struct hfsc_class *cl; 1577 struct hfsc_class *cl;
1578 int uninitialized_var(err); 1578 int uninitialized_var(err);
@@ -1581,11 +1581,11 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
1581 if (cl == NULL) { 1581 if (cl == NULL) {
1582 if (err & __NET_XMIT_BYPASS) 1582 if (err & __NET_XMIT_BYPASS)
1583 qdisc_qstats_drop(sch); 1583 qdisc_qstats_drop(sch);
1584 kfree_skb(skb); 1584 __qdisc_drop(skb, to_free);
1585 return err; 1585 return err;
1586 } 1586 }
1587 1587
1588 err = qdisc_enqueue(skb, cl->qdisc); 1588 err = qdisc_enqueue(skb, cl->qdisc, to_free);
1589 if (unlikely(err != NET_XMIT_SUCCESS)) { 1589 if (unlikely(err != NET_XMIT_SUCCESS)) {
1590 if (net_xmit_drop_count(err)) { 1590 if (net_xmit_drop_count(err)) {
1591 cl->qstats.drops++; 1591 cl->qstats.drops++;
@@ -1664,7 +1664,6 @@ hfsc_dequeue(struct Qdisc *sch)
1664 set_passive(cl); 1664 set_passive(cl);
1665 } 1665 }
1666 1666
1667 qdisc_unthrottled(sch);
1668 qdisc_bstats_update(sch, skb); 1667 qdisc_bstats_update(sch, skb);
1669 qdisc_qstats_backlog_dec(sch, skb); 1668 qdisc_qstats_backlog_dec(sch, skb);
1670 sch->q.qlen--; 1669 sch->q.qlen--;
@@ -1672,32 +1671,6 @@ hfsc_dequeue(struct Qdisc *sch)
1672 return skb; 1671 return skb;
1673} 1672}
1674 1673
1675static unsigned int
1676hfsc_drop(struct Qdisc *sch)
1677{
1678 struct hfsc_sched *q = qdisc_priv(sch);
1679 struct hfsc_class *cl;
1680 unsigned int len;
1681
1682 list_for_each_entry(cl, &q->droplist, dlist) {
1683 if (cl->qdisc->ops->drop != NULL &&
1684 (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
1685 if (cl->qdisc->q.qlen == 0) {
1686 update_vf(cl, 0, 0);
1687 set_passive(cl);
1688 } else {
1689 list_move_tail(&cl->dlist, &q->droplist);
1690 }
1691 cl->qstats.drops++;
1692 qdisc_qstats_drop(sch);
1693 sch->qstats.backlog -= len;
1694 sch->q.qlen--;
1695 return len;
1696 }
1697 }
1698 return 0;
1699}
1700
1701static const struct Qdisc_class_ops hfsc_class_ops = { 1674static const struct Qdisc_class_ops hfsc_class_ops = {
1702 .change = hfsc_change_class, 1675 .change = hfsc_change_class,
1703 .delete = hfsc_delete_class, 1676 .delete = hfsc_delete_class,
@@ -1724,7 +1697,6 @@ static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
1724 .enqueue = hfsc_enqueue, 1697 .enqueue = hfsc_enqueue,
1725 .dequeue = hfsc_dequeue, 1698 .dequeue = hfsc_dequeue,
1726 .peek = qdisc_peek_dequeued, 1699 .peek = qdisc_peek_dequeued,
1727 .drop = hfsc_drop,
1728 .cl_ops = &hfsc_class_ops, 1700 .cl_ops = &hfsc_class_ops,
1729 .priv_size = sizeof(struct hfsc_sched), 1701 .priv_size = sizeof(struct hfsc_sched),
1730 .owner = THIS_MODULE 1702 .owner = THIS_MODULE
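
All the estimator call sites in this series change shape the same way as the two hfsc hunks above: the qdisc_root_sleeping_lock() argument becomes NULL and a qdisc_root_sleeping_running() seqcount pointer is added, so rate estimators sample bstats serialized by the qdisc's running section rather than by the root spinlock. The before/after call shape, sketched (kernel types elided):

/* Before this series (stats-lock form):
 *   gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
 *                     qdisc_root_sleeping_lock(sch), tca[TCA_RATE]);
 *
 * After (the stats-lock slot is NULL, a running seqcount is added):
 *   gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
 *                     NULL, qdisc_root_sleeping_running(sch),
 *                     tca[TCA_RATE]);
 */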
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 13d6f83ec491..e3d0458af17b 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -345,7 +345,7 @@ static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb)
345 skb->next = NULL; 345 skb->next = NULL;
346} 346}
347 347
348static unsigned int hhf_drop(struct Qdisc *sch) 348static unsigned int hhf_drop(struct Qdisc *sch, struct sk_buff **to_free)
349{ 349{
350 struct hhf_sched_data *q = qdisc_priv(sch); 350 struct hhf_sched_data *q = qdisc_priv(sch);
351 struct wdrr_bucket *bucket; 351 struct wdrr_bucket *bucket;
@@ -359,25 +359,16 @@ static unsigned int hhf_drop(struct Qdisc *sch)
359 struct sk_buff *skb = dequeue_head(bucket); 359 struct sk_buff *skb = dequeue_head(bucket);
360 360
361 sch->q.qlen--; 361 sch->q.qlen--;
362 qdisc_qstats_drop(sch);
363 qdisc_qstats_backlog_dec(sch, skb); 362 qdisc_qstats_backlog_dec(sch, skb);
364 kfree_skb(skb); 363 qdisc_drop(skb, sch, to_free);
365 } 364 }
366 365
367 /* Return id of the bucket from which the packet was dropped. */ 366 /* Return id of the bucket from which the packet was dropped. */
368 return bucket - q->buckets; 367 return bucket - q->buckets;
369} 368}
370 369
371static unsigned int hhf_qdisc_drop(struct Qdisc *sch) 370static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
372{ 371 struct sk_buff **to_free)
373 unsigned int prev_backlog;
374
375 prev_backlog = sch->qstats.backlog;
376 hhf_drop(sch);
377 return prev_backlog - sch->qstats.backlog;
378}
379
380static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
381{ 372{
382 struct hhf_sched_data *q = qdisc_priv(sch); 373 struct hhf_sched_data *q = qdisc_priv(sch);
383 enum wdrr_bucket_idx idx; 374 enum wdrr_bucket_idx idx;
@@ -415,7 +406,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
415 /* Return Congestion Notification only if we dropped a packet from this 406 /* Return Congestion Notification only if we dropped a packet from this
416 * bucket. 407 * bucket.
417 */ 408 */
418 if (hhf_drop(sch) == idx) 409 if (hhf_drop(sch, to_free) == idx)
419 return NET_XMIT_CN; 410 return NET_XMIT_CN;
420 411
421 /* As we dropped a packet, better let upper stack know this. */ 412 /* As we dropped a packet, better let upper stack know this. */
@@ -473,7 +464,7 @@ static void hhf_reset(struct Qdisc *sch)
473 struct sk_buff *skb; 464 struct sk_buff *skb;
474 465
475 while ((skb = hhf_dequeue(sch)) != NULL) 466 while ((skb = hhf_dequeue(sch)) != NULL)
476 kfree_skb(skb); 467 rtnl_kfree_skbs(skb, skb);
477} 468}
478 469
479static void *hhf_zalloc(size_t sz) 470static void *hhf_zalloc(size_t sz)
@@ -583,7 +574,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
583 while (sch->q.qlen > sch->limit) { 574 while (sch->q.qlen > sch->limit) {
584 struct sk_buff *skb = hhf_dequeue(sch); 575 struct sk_buff *skb = hhf_dequeue(sch);
585 576
586 kfree_skb(skb); 577 rtnl_kfree_skbs(skb, skb);
587 } 578 }
588 qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, 579 qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
589 prev_backlog - sch->qstats.backlog); 580 prev_backlog - sch->qstats.backlog);
@@ -709,7 +700,6 @@ static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
709 .enqueue = hhf_enqueue, 700 .enqueue = hhf_enqueue,
710 .dequeue = hhf_dequeue, 701 .dequeue = hhf_dequeue,
711 .peek = qdisc_peek_dequeued, 702 .peek = qdisc_peek_dequeued,
712 .drop = hhf_qdisc_drop,
713 .init = hhf_init, 703 .init = hhf_init,
714 .reset = hhf_reset, 704 .reset = hhf_reset,
715 .destroy = hhf_destroy, 705 .destroy = hhf_destroy,
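
hhf keeps its overlimit dropper but folds it into enqueue: hhf_drop() now takes to_free as well, still returns the index of the bucket it dropped from, and the enqueue path signals NET_XMIT_CN only when its own bucket paid the price (otherwise the new packet was queued at a heavier bucket's expense). That decision, sketched with plain counters:

#include <stddef.h>

struct bucket { int len; };

/* Drop one packet from the longest bucket; report which one paid. */
static size_t drop_from_longest(struct bucket *b, size_t nbuckets)
{
    size_t victim = 0;

    for (size_t i = 1; i < nbuckets; i++)
        if (b[i].len > b[victim].len)
            victim = i;
    if (b[victim].len > 0)
        b[victim].len--;
    return victim;
}

static int enqueue_over_limit(struct bucket *b, size_t nbuckets,
                              size_t my_bucket)
{
    /* CN only when our own bucket was hit; otherwise the new packet
     * effectively displaced one from a heavier flow. */
    if (drop_from_longest(b, nbuckets) == my_bucket)
        return 2;            /* NET_XMIT_CN stand-in */
    return 0;                /* NET_XMIT_SUCCESS stand-in */
}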
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 62f9d8100c6e..91982d9784b3 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -117,7 +117,6 @@ struct htb_class {
117 * Written often fields 117 * Written often fields
118 */ 118 */
119 struct gnet_stats_basic_packed bstats; 119 struct gnet_stats_basic_packed bstats;
120 struct gnet_stats_queue qstats;
121 struct tc_htb_xstats xstats; /* our special stats */ 120 struct tc_htb_xstats xstats; /* our special stats */
122 121
123 /* token bucket parameters */ 122 /* token bucket parameters */
@@ -140,6 +139,8 @@ struct htb_class {
140 enum htb_cmode cmode; /* current mode of the class */ 139 enum htb_cmode cmode; /* current mode of the class */
141 struct rb_node pq_node; /* node for event queue */ 140 struct rb_node pq_node; /* node for event queue */
142 struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */ 141 struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
142
143 unsigned int drops ____cacheline_aligned_in_smp;
143}; 144};
144 145
145struct htb_level { 146struct htb_level {
@@ -569,7 +570,8 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
569 list_del_init(&cl->un.leaf.drop_list); 570 list_del_init(&cl->un.leaf.drop_list);
570} 571}
571 572
572static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) 573static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
574 struct sk_buff **to_free)
573{ 575{
574 int uninitialized_var(ret); 576 int uninitialized_var(ret);
575 struct htb_sched *q = qdisc_priv(sch); 577 struct htb_sched *q = qdisc_priv(sch);
@@ -581,19 +583,20 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
581 __skb_queue_tail(&q->direct_queue, skb); 583 __skb_queue_tail(&q->direct_queue, skb);
582 q->direct_pkts++; 584 q->direct_pkts++;
583 } else { 585 } else {
584 return qdisc_drop(skb, sch); 586 return qdisc_drop(skb, sch, to_free);
585 } 587 }
586#ifdef CONFIG_NET_CLS_ACT 588#ifdef CONFIG_NET_CLS_ACT
587 } else if (!cl) { 589 } else if (!cl) {
588 if (ret & __NET_XMIT_BYPASS) 590 if (ret & __NET_XMIT_BYPASS)
589 qdisc_qstats_drop(sch); 591 qdisc_qstats_drop(sch);
590 kfree_skb(skb); 592 __qdisc_drop(skb, to_free);
591 return ret; 593 return ret;
592#endif 594#endif
593 } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) { 595 } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q,
596 to_free)) != NET_XMIT_SUCCESS) {
594 if (net_xmit_drop_count(ret)) { 597 if (net_xmit_drop_count(ret)) {
595 qdisc_qstats_drop(sch); 598 qdisc_qstats_drop(sch);
596 cl->qstats.drops++; 599 cl->drops++;
597 } 600 }
598 return ret; 601 return ret;
599 } else { 602 } else {
@@ -889,7 +892,6 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
889 if (skb != NULL) { 892 if (skb != NULL) {
890ok: 893ok:
891 qdisc_bstats_update(sch, skb); 894 qdisc_bstats_update(sch, skb);
892 qdisc_unthrottled(sch);
893 qdisc_qstats_backlog_dec(sch, skb); 895 qdisc_qstats_backlog_dec(sch, skb);
894 sch->q.qlen--; 896 sch->q.qlen--;
895 return skb; 897 return skb;
@@ -929,38 +931,13 @@ ok:
929 } 931 }
930 qdisc_qstats_overlimit(sch); 932 qdisc_qstats_overlimit(sch);
931 if (likely(next_event > q->now)) 933 if (likely(next_event > q->now))
932 qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true); 934 qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
933 else 935 else
934 schedule_work(&q->work); 936 schedule_work(&q->work);
935fin: 937fin:
936 return skb; 938 return skb;
937} 939}
938 940
939/* try to drop from each class (by prio) until one succeeds */
940static unsigned int htb_drop(struct Qdisc *sch)
941{
942 struct htb_sched *q = qdisc_priv(sch);
943 int prio;
944
945 for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
946 struct list_head *p;
947 list_for_each(p, q->drops + prio) {
948 struct htb_class *cl = list_entry(p, struct htb_class,
949 un.leaf.drop_list);
950 unsigned int len;
951 if (cl->un.leaf.q->ops->drop &&
952 (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
953 sch->qstats.backlog -= len;
954 sch->q.qlen--;
955 if (!cl->un.leaf.q->q.qlen)
956 htb_deactivate(q, cl);
957 return len;
958 }
959 }
960 }
961 return 0;
962}
963
964/* reset all classes */ 941/* reset all classes */
965/* always called under BH & queue lock */ 942/* always called under BH & queue lock */
966static void htb_reset(struct Qdisc *sch) 943static void htb_reset(struct Qdisc *sch)
@@ -983,7 +960,7 @@ static void htb_reset(struct Qdisc *sch)
983 } 960 }
984 } 961 }
985 qdisc_watchdog_cancel(&q->watchdog); 962 qdisc_watchdog_cancel(&q->watchdog);
986 __skb_queue_purge(&q->direct_queue); 963 __qdisc_reset_queue(&q->direct_queue);
987 sch->q.qlen = 0; 964 sch->q.qlen = 0;
988 sch->qstats.backlog = 0; 965 sch->qstats.backlog = 0;
989 memset(q->hlevel, 0, sizeof(q->hlevel)); 966 memset(q->hlevel, 0, sizeof(q->hlevel));
@@ -1136,16 +1113,22 @@ static int
1136htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) 1113htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1137{ 1114{
1138 struct htb_class *cl = (struct htb_class *)arg; 1115 struct htb_class *cl = (struct htb_class *)arg;
1116 struct gnet_stats_queue qs = {
1117 .drops = cl->drops,
1118 };
1139 __u32 qlen = 0; 1119 __u32 qlen = 0;
1140 1120
1141 if (!cl->level && cl->un.leaf.q) 1121 if (!cl->level && cl->un.leaf.q) {
1142 qlen = cl->un.leaf.q->q.qlen; 1122 qlen = cl->un.leaf.q->q.qlen;
1123 qs.backlog = cl->un.leaf.q->qstats.backlog;
1124 }
1143 cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens); 1125 cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
1144 cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens); 1126 cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
1145 1127
1146 if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || 1128 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
1129 d, NULL, &cl->bstats) < 0 ||
1147 gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 || 1130 gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
1148 gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0) 1131 gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
1149 return -1; 1132 return -1;
1150 1133
1151 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); 1134 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
@@ -1258,7 +1241,7 @@ static void htb_destroy(struct Qdisc *sch)
1258 htb_destroy_class(sch, cl); 1241 htb_destroy_class(sch, cl);
1259 } 1242 }
1260 qdisc_class_hash_destroy(&q->clhash); 1243 qdisc_class_hash_destroy(&q->clhash);
1261 __skb_queue_purge(&q->direct_queue); 1244 __qdisc_reset_queue(&q->direct_queue);
1262} 1245}
1263 1246
1264static int htb_delete(struct Qdisc *sch, unsigned long arg) 1247static int htb_delete(struct Qdisc *sch, unsigned long arg)
@@ -1397,7 +1380,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1397 if (htb_rate_est || tca[TCA_RATE]) { 1380 if (htb_rate_est || tca[TCA_RATE]) {
1398 err = gen_new_estimator(&cl->bstats, NULL, 1381 err = gen_new_estimator(&cl->bstats, NULL,
1399 &cl->rate_est, 1382 &cl->rate_est,
1400 qdisc_root_sleeping_lock(sch), 1383 NULL,
1384 qdisc_root_sleeping_running(sch),
1401 tca[TCA_RATE] ? : &est.nla); 1385 tca[TCA_RATE] ? : &est.nla);
1402 if (err) { 1386 if (err) {
1403 kfree(cl); 1387 kfree(cl);
@@ -1459,11 +1443,10 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1459 parent->children++; 1443 parent->children++;
1460 } else { 1444 } else {
1461 if (tca[TCA_RATE]) { 1445 if (tca[TCA_RATE]) {
1462 spinlock_t *lock = qdisc_root_sleeping_lock(sch);
1463
1464 err = gen_replace_estimator(&cl->bstats, NULL, 1446 err = gen_replace_estimator(&cl->bstats, NULL,
1465 &cl->rate_est, 1447 &cl->rate_est,
1466 lock, 1448 NULL,
1449 qdisc_root_sleeping_running(sch),
1467 tca[TCA_RATE]); 1450 tca[TCA_RATE]);
1468 if (err) 1451 if (err)
1469 return err; 1452 return err;
@@ -1601,7 +1584,6 @@ static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
1601 .enqueue = htb_enqueue, 1584 .enqueue = htb_enqueue,
1602 .dequeue = htb_dequeue, 1585 .dequeue = htb_dequeue,
1603 .peek = qdisc_peek_dequeued, 1586 .peek = qdisc_peek_dequeued,
1604 .drop = htb_drop,
1605 .init = htb_init, 1587 .init = htb_init,
1606 .reset = htb_reset, 1588 .reset = htb_reset,
1607 .destroy = htb_destroy, 1589 .destroy = htb_destroy,
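
htb stops keeping a full gnet_stats_queue per class: it keeps only a drop counter, placed on its own cache line because it is written on the fast path, and reassembles a queue-stats struct at dump time from that counter plus the leaf qdisc's backlog. Sketch (illustrative types; the alignment attribute stands in for ____cacheline_aligned_in_smp):

struct queue_stats { unsigned int drops, backlog; };

struct htb_class_sketch {
    /* ... read-mostly configuration fields ... */
    unsigned int drops __attribute__((aligned(64)));  /* hot counter */
};

/* Dump-time reconstruction: combine the per-class drop counter with
 * the leaf qdisc's current backlog. */
static struct queue_stats dump_class_stats(const struct htb_class_sketch *cl,
                                           unsigned int leaf_backlog)
{
    struct queue_stats qs = { .drops = cl->drops, .backlog = leaf_backlog };

    return qs;
}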
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 56a77b878eb3..b9439827c172 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -199,7 +199,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
199 struct netdev_queue *dev_queue = mq_queue_get(sch, cl); 199 struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
200 200
201 sch = dev_queue->qdisc_sleeping; 201 sch = dev_queue->qdisc_sleeping;
202 if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 || 202 if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
203 gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0) 203 gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
204 return -1; 204 return -1;
205 return 0; 205 return 0;
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index b8002ce3d010..549c66359924 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -342,7 +342,8 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
342 * hold here is the lock on dev_queue->qdisc_sleeping 342 * hold here is the lock on dev_queue->qdisc_sleeping
343 * also acquired below. 343 * also acquired below.
344 */ 344 */
345 spin_unlock_bh(d->lock); 345 if (d->lock)
346 spin_unlock_bh(d->lock);
346 347
347 for (i = tc.offset; i < tc.offset + tc.count; i++) { 348 for (i = tc.offset; i < tc.offset + tc.count; i++) {
348 struct netdev_queue *q = netdev_get_tx_queue(dev, i); 349 struct netdev_queue *q = netdev_get_tx_queue(dev, i);
@@ -359,15 +360,17 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
359 spin_unlock_bh(qdisc_lock(qdisc)); 360 spin_unlock_bh(qdisc_lock(qdisc));
360 } 361 }
361 /* Reclaim root sleeping lock before completing stats */ 362 /* Reclaim root sleeping lock before completing stats */
362 spin_lock_bh(d->lock); 363 if (d->lock)
363 if (gnet_stats_copy_basic(d, NULL, &bstats) < 0 || 364 spin_lock_bh(d->lock);
365 if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
364 gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0) 366 gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
365 return -1; 367 return -1;
366 } else { 368 } else {
367 struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl); 369 struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
368 370
369 sch = dev_queue->qdisc_sleeping; 371 sch = dev_queue->qdisc_sleeping;
370 if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 || 372 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
373 d, NULL, &sch->bstats) < 0 ||
371 gnet_stats_copy_queue(d, NULL, 374 gnet_stats_copy_queue(d, NULL,
372 &sch->qstats, sch->q.qlen) < 0) 375 &sch->qstats, sch->q.qlen) < 0)
373 return -1; 376 return -1;
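
Because stats dumps can now run under the running seqcount with no spinlock at all, mqprio has to treat d->lock as optional: the unlock/relock dance around the per-queue walk only happens when a lock was actually supplied. The guard pattern in userspace form:

#include <pthread.h>
#include <stddef.h>

struct dump_ctx { pthread_mutex_t *lock; /* may be NULL */ };

static void walk_tx_queues(struct dump_ctx *d)
{
    if (d->lock)                 /* lockless dump: nothing to drop */
        pthread_mutex_unlock(d->lock);

    /* ... per-queue bstats/qstats accumulation would go here ... */

    if (d->lock)                 /* reclaim before finishing the dump */
        pthread_mutex_lock(d->lock);
}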
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index bcdd54bb101c..9ffbb025b37e 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -65,7 +65,8 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
65} 65}
66 66
67static int 67static int
68multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch) 68multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
69 struct sk_buff **to_free)
69{ 70{
70 struct Qdisc *qdisc; 71 struct Qdisc *qdisc;
71 int ret; 72 int ret;
@@ -76,12 +77,12 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
76 77
77 if (ret & __NET_XMIT_BYPASS) 78 if (ret & __NET_XMIT_BYPASS)
78 qdisc_qstats_drop(sch); 79 qdisc_qstats_drop(sch);
79 kfree_skb(skb); 80 __qdisc_drop(skb, to_free);
80 return ret; 81 return ret;
81 } 82 }
82#endif 83#endif
83 84
84 ret = qdisc_enqueue(skb, qdisc); 85 ret = qdisc_enqueue(skb, qdisc, to_free);
85 if (ret == NET_XMIT_SUCCESS) { 86 if (ret == NET_XMIT_SUCCESS) {
86 sch->q.qlen++; 87 sch->q.qlen++;
87 return NET_XMIT_SUCCESS; 88 return NET_XMIT_SUCCESS;
@@ -151,27 +152,6 @@ static struct sk_buff *multiq_peek(struct Qdisc *sch)
151 152
152} 153}
153 154
154static unsigned int multiq_drop(struct Qdisc *sch)
155{
156 struct multiq_sched_data *q = qdisc_priv(sch);
157 int band;
158 unsigned int len;
159 struct Qdisc *qdisc;
160
161 for (band = q->bands - 1; band >= 0; band--) {
162 qdisc = q->queues[band];
163 if (qdisc->ops->drop) {
164 len = qdisc->ops->drop(qdisc);
165 if (len != 0) {
166 sch->q.qlen--;
167 return len;
168 }
169 }
170 }
171 return 0;
172}
173
174
175static void 155static void
176multiq_reset(struct Qdisc *sch) 156multiq_reset(struct Qdisc *sch)
177{ 157{
@@ -356,7 +336,8 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
356 struct Qdisc *cl_q; 336 struct Qdisc *cl_q;
357 337
358 cl_q = q->queues[cl - 1]; 338 cl_q = q->queues[cl - 1];
359 if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 || 339 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
340 d, NULL, &cl_q->bstats) < 0 ||
360 gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0) 341 gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
361 return -1; 342 return -1;
362 343
@@ -415,7 +396,6 @@ static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
415 .enqueue = multiq_enqueue, 396 .enqueue = multiq_enqueue,
416 .dequeue = multiq_dequeue, 397 .dequeue = multiq_dequeue,
417 .peek = multiq_peek, 398 .peek = multiq_peek,
418 .drop = multiq_drop,
419 .init = multiq_init, 399 .init = multiq_init,
420 .reset = multiq_reset, 400 .reset = multiq_reset,
421 .destroy = multiq_destroy, 401 .destroy = multiq_destroy,
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 178f1630a036..aaaf02175338 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -368,9 +368,7 @@ static void tfifo_reset(struct Qdisc *sch)
368 struct sk_buff *skb = netem_rb_to_skb(p); 368 struct sk_buff *skb = netem_rb_to_skb(p);
369 369
370 rb_erase(p, &q->t_root); 370 rb_erase(p, &q->t_root);
371 skb->next = NULL; 371 rtnl_kfree_skbs(skb, skb);
372 skb->prev = NULL;
373 kfree_skb(skb);
374 } 372 }
375} 373}
376 374
@@ -399,7 +397,8 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
399 * when we statistically choose to corrupt one, we instead segment it, returning 397 * when we statistically choose to corrupt one, we instead segment it, returning
400 * the first packet to be corrupted, and re-enqueue the remaining frames 398 * the first packet to be corrupted, and re-enqueue the remaining frames
401 */ 399 */
402static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch) 400static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
401 struct sk_buff **to_free)
403{ 402{
404 struct sk_buff *segs; 403 struct sk_buff *segs;
405 netdev_features_t features = netif_skb_features(skb); 404 netdev_features_t features = netif_skb_features(skb);
@@ -407,7 +406,7 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
407 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); 406 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
408 407
409 if (IS_ERR_OR_NULL(segs)) { 408 if (IS_ERR_OR_NULL(segs)) {
410 qdisc_reshape_fail(skb, sch); 409 qdisc_drop(skb, sch, to_free);
411 return NULL; 410 return NULL;
412 } 411 }
413 consume_skb(skb); 412 consume_skb(skb);
@@ -420,7 +419,8 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
420 * NET_XMIT_DROP: queue length didn't change. 419 * NET_XMIT_DROP: queue length didn't change.
421 * NET_XMIT_SUCCESS: one skb was queued. 420 * NET_XMIT_SUCCESS: one skb was queued.
422 */ 421 */
423static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) 422static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
423 struct sk_buff **to_free)
424{ 424{
425 struct netem_sched_data *q = qdisc_priv(sch); 425 struct netem_sched_data *q = qdisc_priv(sch);
426 /* We don't fill cb now as skb_unshare() may invalidate it */ 426 /* We don't fill cb now as skb_unshare() may invalidate it */
@@ -445,7 +445,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
445 } 445 }
446 if (count == 0) { 446 if (count == 0) {
447 qdisc_qstats_drop(sch); 447 qdisc_qstats_drop(sch);
448 kfree_skb(skb); 448 __qdisc_drop(skb, to_free);
449 return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; 449 return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
450 } 450 }
451 451
@@ -465,7 +465,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
465 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ 465 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
466 466
467 q->duplicate = 0; 467 q->duplicate = 0;
468 rootq->enqueue(skb2, rootq); 468 rootq->enqueue(skb2, rootq, to_free);
469 q->duplicate = dupsave; 469 q->duplicate = dupsave;
470 } 470 }
471 471
@@ -477,7 +477,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
477 */ 477 */
478 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { 478 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
479 if (skb_is_gso(skb)) { 479 if (skb_is_gso(skb)) {
480 segs = netem_segment(skb, sch); 480 segs = netem_segment(skb, sch, to_free);
481 if (!segs) 481 if (!segs)
482 return NET_XMIT_DROP; 482 return NET_XMIT_DROP;
483 } else { 483 } else {
@@ -487,10 +487,14 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
487 skb = segs; 487 skb = segs;
488 segs = segs->next; 488 segs = segs->next;
489 489
490 if (!(skb = skb_unshare(skb, GFP_ATOMIC)) || 490 skb = skb_unshare(skb, GFP_ATOMIC);
491 (skb->ip_summed == CHECKSUM_PARTIAL && 491 if (unlikely(!skb)) {
492 skb_checksum_help(skb))) { 492 qdisc_qstats_drop(sch);
493 rc = qdisc_drop(skb, sch); 493 goto finish_segs;
494 }
495 if (skb->ip_summed == CHECKSUM_PARTIAL &&
496 skb_checksum_help(skb)) {
497 qdisc_drop(skb, sch, to_free);
494 goto finish_segs; 498 goto finish_segs;
495 } 499 }
496 500
@@ -499,7 +503,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
499 } 503 }
500 504
501 if (unlikely(skb_queue_len(&sch->q) >= sch->limit)) 505 if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
502 return qdisc_reshape_fail(skb, sch); 506 return qdisc_drop(skb, sch, to_free);
503 507
504 qdisc_qstats_backlog_inc(sch, skb); 508 qdisc_qstats_backlog_inc(sch, skb);
505 509
@@ -559,7 +563,7 @@ finish_segs:
559 segs->next = NULL; 563 segs->next = NULL;
560 qdisc_skb_cb(segs)->pkt_len = segs->len; 564 qdisc_skb_cb(segs)->pkt_len = segs->len;
561 last_len = segs->len; 565 last_len = segs->len;
562 rc = qdisc_enqueue(segs, sch); 566 rc = qdisc_enqueue(segs, sch, to_free);
563 if (rc != NET_XMIT_SUCCESS) { 567 if (rc != NET_XMIT_SUCCESS) {
564 if (net_xmit_drop_count(rc)) 568 if (net_xmit_drop_count(rc))
565 qdisc_qstats_drop(sch); 569 qdisc_qstats_drop(sch);
@@ -576,50 +580,17 @@ finish_segs:
576 return NET_XMIT_SUCCESS; 580 return NET_XMIT_SUCCESS;
577} 581}
578 582
579static unsigned int netem_drop(struct Qdisc *sch)
580{
581 struct netem_sched_data *q = qdisc_priv(sch);
582 unsigned int len;
583
584 len = qdisc_queue_drop(sch);
585
586 if (!len) {
587 struct rb_node *p = rb_first(&q->t_root);
588
589 if (p) {
590 struct sk_buff *skb = netem_rb_to_skb(p);
591
592 rb_erase(p, &q->t_root);
593 sch->q.qlen--;
594 skb->next = NULL;
595 skb->prev = NULL;
596 qdisc_qstats_backlog_dec(sch, skb);
597 kfree_skb(skb);
598 }
599 }
600 if (!len && q->qdisc && q->qdisc->ops->drop)
601 len = q->qdisc->ops->drop(q->qdisc);
602 if (len)
603 qdisc_qstats_drop(sch);
604
605 return len;
606}
607
608static struct sk_buff *netem_dequeue(struct Qdisc *sch) 583static struct sk_buff *netem_dequeue(struct Qdisc *sch)
609{ 584{
610 struct netem_sched_data *q = qdisc_priv(sch); 585 struct netem_sched_data *q = qdisc_priv(sch);
611 struct sk_buff *skb; 586 struct sk_buff *skb;
612 struct rb_node *p; 587 struct rb_node *p;
613 588
614 if (qdisc_is_throttled(sch))
615 return NULL;
616
617tfifo_dequeue: 589tfifo_dequeue:
618 skb = __skb_dequeue(&sch->q); 590 skb = __skb_dequeue(&sch->q);
619 if (skb) { 591 if (skb) {
620 qdisc_qstats_backlog_dec(sch, skb); 592 qdisc_qstats_backlog_dec(sch, skb);
621deliver: 593deliver:
622 qdisc_unthrottled(sch);
623 qdisc_bstats_update(sch, skb); 594 qdisc_bstats_update(sch, skb);
624 return skb; 595 return skb;
625 } 596 }
@@ -651,8 +622,11 @@ deliver:
651 622
652 if (q->qdisc) { 623 if (q->qdisc) {
653 unsigned int pkt_len = qdisc_pkt_len(skb); 624 unsigned int pkt_len = qdisc_pkt_len(skb);
654 int err = qdisc_enqueue(skb, q->qdisc); 625 struct sk_buff *to_free = NULL;
626 int err;
655 627
628 err = qdisc_enqueue(skb, q->qdisc, &to_free);
629 kfree_skb_list(to_free);
656 if (err != NET_XMIT_SUCCESS && 630 if (err != NET_XMIT_SUCCESS &&
657 net_xmit_drop_count(err)) { 631 net_xmit_drop_count(err)) {
658 qdisc_qstats_drop(sch); 632 qdisc_qstats_drop(sch);
@@ -1143,7 +1117,6 @@ static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
1143 .enqueue = netem_enqueue, 1117 .enqueue = netem_enqueue,
1144 .dequeue = netem_dequeue, 1118 .dequeue = netem_dequeue,
1145 .peek = qdisc_peek_dequeued, 1119 .peek = qdisc_peek_dequeued,
1146 .drop = netem_drop,
1147 .init = netem_init, 1120 .init = netem_init,
1148 .reset = netem_reset, 1121 .reset = netem_reset,
1149 .destroy = netem_destroy, 1122 .destroy = netem_destroy,
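
netem's dequeue path is the one caller in this section that consumes the new contract directly: it declares a local to_free list, passes it to qdisc_enqueue() on the child, and immediately flushes it with kfree_skb_list(). A self-contained sketch of that call-site shape (stub child that always drops):

#include <stdlib.h>

struct pkt { struct pkt *next; };

/* Stub child enqueue that always refuses the packet, chaining it
 * onto *to_free the way __qdisc_drop() would. */
static int child_enqueue(struct pkt *p, struct pkt **to_free)
{
    p->next = *to_free;
    *to_free = p;
    return 1;                        /* NET_XMIT_DROP stand-in */
}

static void free_pkt_list(struct pkt *head)
{
    while (head) {
        struct pkt *next = head->next;

        free(head);
        head = next;
    }
}

/* Caller-side shape used by netem_dequeue() above: collect, then
 * flush right away because no later flush point exists here. */
static int hand_to_child(struct pkt *p)
{
    struct pkt *to_free = NULL;
    int err = child_enqueue(p, &to_free);

    free_pkt_list(to_free);
    return err;
}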
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 71ae3b9629f9..a570b0bb254c 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -134,7 +134,8 @@ static bool drop_early(struct Qdisc *sch, u32 packet_size)
134 return false; 134 return false;
135} 135}
136 136
137static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) 137static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
138 struct sk_buff **to_free)
138{ 139{
139 struct pie_sched_data *q = qdisc_priv(sch); 140 struct pie_sched_data *q = qdisc_priv(sch);
140 bool enqueue = false; 141 bool enqueue = false;
@@ -166,7 +167,7 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
166 167
167out: 168out:
168 q->stats.dropped++; 169 q->stats.dropped++;
169 return qdisc_drop(skb, sch); 170 return qdisc_drop(skb, sch, to_free);
170} 171}
171 172
172static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = { 173static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
@@ -234,7 +235,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
234 235
235 dropped += qdisc_pkt_len(skb); 236 dropped += qdisc_pkt_len(skb);
236 qdisc_qstats_backlog_dec(sch, skb); 237 qdisc_qstats_backlog_dec(sch, skb);
237 qdisc_drop(skb, sch); 238 rtnl_qdisc_drop(skb, sch);
238 } 239 }
239 qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped); 240 qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
240 241
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index 5abfe44678d4..1c6cbab3e7b9 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -64,6 +64,8 @@ struct plug_sched_data {
64 */ 64 */
65 bool unplug_indefinite; 65 bool unplug_indefinite;
66 66
67 bool throttled;
68
67 /* Queue Limit in bytes */ 69 /* Queue Limit in bytes */
68 u32 limit; 70 u32 limit;
69 71
@@ -86,7 +88,8 @@ struct plug_sched_data {
86 u32 pkts_to_release; 88 u32 pkts_to_release;
87}; 89};
88 90
89static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch) 91static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch,
92 struct sk_buff **to_free)
90{ 93{
91 struct plug_sched_data *q = qdisc_priv(sch); 94 struct plug_sched_data *q = qdisc_priv(sch);
92 95
@@ -96,14 +99,14 @@ static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch)
96 return qdisc_enqueue_tail(skb, sch); 99 return qdisc_enqueue_tail(skb, sch);
97 } 100 }
98 101
99 return qdisc_reshape_fail(skb, sch); 102 return qdisc_drop(skb, sch, to_free);
100} 103}
101 104
102static struct sk_buff *plug_dequeue(struct Qdisc *sch) 105static struct sk_buff *plug_dequeue(struct Qdisc *sch)
103{ 106{
104 struct plug_sched_data *q = qdisc_priv(sch); 107 struct plug_sched_data *q = qdisc_priv(sch);
105 108
106 if (qdisc_is_throttled(sch)) 109 if (q->throttled)
107 return NULL; 110 return NULL;
108 111
109 if (!q->unplug_indefinite) { 112 if (!q->unplug_indefinite) {
@@ -111,7 +114,7 @@ static struct sk_buff *plug_dequeue(struct Qdisc *sch)
111 /* No more packets to dequeue. Block the queue 114 /* No more packets to dequeue. Block the queue
112 * and wait for the next release command. 115 * and wait for the next release command.
113 */ 116 */
114 qdisc_throttled(sch); 117 q->throttled = true;
115 return NULL; 118 return NULL;
116 } 119 }
117 q->pkts_to_release--; 120 q->pkts_to_release--;
@@ -141,7 +144,7 @@ static int plug_init(struct Qdisc *sch, struct nlattr *opt)
141 q->limit = ctl->limit; 144 q->limit = ctl->limit;
142 } 145 }
143 146
144 qdisc_throttled(sch); 147 q->throttled = true;
145 return 0; 148 return 0;
146} 149}
147 150
@@ -173,7 +176,7 @@ static int plug_change(struct Qdisc *sch, struct nlattr *opt)
173 q->pkts_last_epoch = q->pkts_current_epoch; 176 q->pkts_last_epoch = q->pkts_current_epoch;
174 q->pkts_current_epoch = 0; 177 q->pkts_current_epoch = 0;
175 if (q->unplug_indefinite) 178 if (q->unplug_indefinite)
176 qdisc_throttled(sch); 179 q->throttled = true;
177 q->unplug_indefinite = false; 180 q->unplug_indefinite = false;
178 break; 181 break;
179 case TCQ_PLUG_RELEASE_ONE: 182 case TCQ_PLUG_RELEASE_ONE:
@@ -182,7 +185,7 @@ static int plug_change(struct Qdisc *sch, struct nlattr *opt)
182 */ 185 */
183 q->pkts_to_release += q->pkts_last_epoch; 186 q->pkts_to_release += q->pkts_last_epoch;
184 q->pkts_last_epoch = 0; 187 q->pkts_last_epoch = 0;
185 qdisc_unthrottled(sch); 188 q->throttled = false;
186 netif_schedule_queue(sch->dev_queue); 189 netif_schedule_queue(sch->dev_queue);
187 break; 190 break;
188 case TCQ_PLUG_RELEASE_INDEFINITE: 191 case TCQ_PLUG_RELEASE_INDEFINITE:
@@ -190,7 +193,7 @@ static int plug_change(struct Qdisc *sch, struct nlattr *opt)
190 q->pkts_to_release = 0; 193 q->pkts_to_release = 0;
191 q->pkts_last_epoch = 0; 194 q->pkts_last_epoch = 0;
192 q->pkts_current_epoch = 0; 195 q->pkts_current_epoch = 0;
193 qdisc_unthrottled(sch); 196 q->throttled = false;
194 netif_schedule_queue(sch->dev_queue); 197 netif_schedule_queue(sch->dev_queue);
195 break; 198 break;
196 case TCQ_PLUG_LIMIT: 199 case TCQ_PLUG_LIMIT:
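
With qdisc_throttled()/qdisc_unthrottled() gone, plug keeps that state in its own private boolean, which dequeue checks and the plug/release commands flip. Condensed sketch, ignoring the indefinite-release mode (illustrative types):

#include <stdbool.h>
#include <stddef.h>

struct pkt;
struct plug { bool throttled; unsigned int pkts_to_release; };

static struct pkt *plug_dequeue_sketch(struct plug *q,
                                       struct pkt *(*deq)(void))
{
    if (q->throttled)                /* plugged: hold everything */
        return NULL;
    if (q->pkts_to_release == 0) {
        q->throttled = true;         /* block until the next release */
        return NULL;
    }
    q->pkts_to_release--;
    return deq();
}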
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index a356450b747b..8f575899adfa 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -67,7 +67,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
67} 67}
68 68
69static int 69static int
70prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) 70prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
71{ 71{
72 struct Qdisc *qdisc; 72 struct Qdisc *qdisc;
73 int ret; 73 int ret;
@@ -83,7 +83,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
83 } 83 }
84#endif 84#endif
85 85
86 ret = qdisc_enqueue(skb, qdisc); 86 ret = qdisc_enqueue(skb, qdisc, to_free);
87 if (ret == NET_XMIT_SUCCESS) { 87 if (ret == NET_XMIT_SUCCESS) {
88 qdisc_qstats_backlog_inc(sch, skb); 88 qdisc_qstats_backlog_inc(sch, skb);
89 sch->q.qlen++; 89 sch->q.qlen++;
@@ -127,25 +127,6 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch)
127 127
128} 128}
129 129
130static unsigned int prio_drop(struct Qdisc *sch)
131{
132 struct prio_sched_data *q = qdisc_priv(sch);
133 int prio;
134 unsigned int len;
135 struct Qdisc *qdisc;
136
137 for (prio = q->bands-1; prio >= 0; prio--) {
138 qdisc = q->queues[prio];
139 if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
140 sch->qstats.backlog -= len;
141 sch->q.qlen--;
142 return len;
143 }
144 }
145 return 0;
146}
147
148
149static void 130static void
150prio_reset(struct Qdisc *sch) 131prio_reset(struct Qdisc *sch)
151{ 132{
@@ -304,7 +285,8 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
304 struct Qdisc *cl_q; 285 struct Qdisc *cl_q;
305 286
306 cl_q = q->queues[cl - 1]; 287 cl_q = q->queues[cl - 1];
307 if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 || 288 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
289 d, NULL, &cl_q->bstats) < 0 ||
308 gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0) 290 gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
309 return -1; 291 return -1;
310 292
@@ -363,7 +345,6 @@ static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
363 .enqueue = prio_enqueue, 345 .enqueue = prio_enqueue,
364 .dequeue = prio_dequeue, 346 .dequeue = prio_dequeue,
365 .peek = prio_peek, 347 .peek = prio_peek,
366 .drop = prio_drop,
367 .init = prio_init, 348 .init = prio_init,
368 .reset = prio_reset, 349 .reset = prio_reset,
369 .destroy = prio_destroy, 350 .destroy = prio_destroy,
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index f18857febdad..f27ffee106f6 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -460,7 +460,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
460 if (tca[TCA_RATE]) { 460 if (tca[TCA_RATE]) {
461 err = gen_replace_estimator(&cl->bstats, NULL, 461 err = gen_replace_estimator(&cl->bstats, NULL,
462 &cl->rate_est, 462 &cl->rate_est,
463 qdisc_root_sleeping_lock(sch), 463 NULL,
464 qdisc_root_sleeping_running(sch),
464 tca[TCA_RATE]); 465 tca[TCA_RATE]);
465 if (err) 466 if (err)
466 return err; 467 return err;
@@ -486,7 +487,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
486 if (tca[TCA_RATE]) { 487 if (tca[TCA_RATE]) {
487 err = gen_new_estimator(&cl->bstats, NULL, 488 err = gen_new_estimator(&cl->bstats, NULL,
488 &cl->rate_est, 489 &cl->rate_est,
489 qdisc_root_sleeping_lock(sch), 490 NULL,
491 qdisc_root_sleeping_running(sch),
490 tca[TCA_RATE]); 492 tca[TCA_RATE]);
491 if (err) 493 if (err)
492 goto destroy_class; 494 goto destroy_class;
@@ -663,7 +665,8 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
663 xstats.weight = cl->agg->class_weight; 665 xstats.weight = cl->agg->class_weight;
664 xstats.lmax = cl->agg->lmax; 666 xstats.lmax = cl->agg->lmax;
665 667
666 if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || 668 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
669 d, NULL, &cl->bstats) < 0 ||
667 gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || 670 gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
668 gnet_stats_copy_queue(d, NULL, 671 gnet_stats_copy_queue(d, NULL,
669 &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0) 672 &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
@@ -1214,7 +1217,8 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
1214 return agg; 1217 return agg;
1215} 1218}
1216 1219
1217static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) 1220static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1221 struct sk_buff **to_free)
1218{ 1222{
1219 struct qfq_sched *q = qdisc_priv(sch); 1223 struct qfq_sched *q = qdisc_priv(sch);
1220 struct qfq_class *cl; 1224 struct qfq_class *cl;
@@ -1237,11 +1241,11 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
1237 qdisc_pkt_len(skb)); 1241 qdisc_pkt_len(skb));
1238 if (err) { 1242 if (err) {
1239 cl->qstats.drops++; 1243 cl->qstats.drops++;
1240 return qdisc_drop(skb, sch); 1244 return qdisc_drop(skb, sch, to_free);
1241 } 1245 }
1242 } 1246 }
1243 1247
1244 err = qdisc_enqueue(skb, cl->qdisc); 1248 err = qdisc_enqueue(skb, cl->qdisc, to_free);
1245 if (unlikely(err != NET_XMIT_SUCCESS)) { 1249 if (unlikely(err != NET_XMIT_SUCCESS)) {
1246 pr_debug("qfq_enqueue: enqueue failed %d\n", err); 1250 pr_debug("qfq_enqueue: enqueue failed %d\n", err);
1247 if (net_xmit_drop_count(err)) { 1251 if (net_xmit_drop_count(err)) {
@@ -1422,52 +1426,6 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
1422 qfq_deactivate_class(q, cl); 1426 qfq_deactivate_class(q, cl);
1423} 1427}
1424 1428
1425static unsigned int qfq_drop_from_slot(struct qfq_sched *q,
1426 struct hlist_head *slot)
1427{
1428 struct qfq_aggregate *agg;
1429 struct qfq_class *cl;
1430 unsigned int len;
1431
1432 hlist_for_each_entry(agg, slot, next) {
1433 list_for_each_entry(cl, &agg->active, alist) {
1434
1435 if (!cl->qdisc->ops->drop)
1436 continue;
1437
1438 len = cl->qdisc->ops->drop(cl->qdisc);
1439 if (len > 0) {
1440 if (cl->qdisc->q.qlen == 0)
1441 qfq_deactivate_class(q, cl);
1442
1443 return len;
1444 }
1445 }
1446 }
1447 return 0;
1448}
1449
1450static unsigned int qfq_drop(struct Qdisc *sch)
1451{
1452 struct qfq_sched *q = qdisc_priv(sch);
1453 struct qfq_group *grp;
1454 unsigned int i, j, len;
1455
1456 for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1457 grp = &q->groups[i];
1458 for (j = 0; j < QFQ_MAX_SLOTS; j++) {
1459 len = qfq_drop_from_slot(q, &grp->slots[j]);
1460 if (len > 0) {
1461 sch->q.qlen--;
1462 return len;
1463 }
1464 }
1465
1466 }
1467
1468 return 0;
1469}
1470
1471static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt) 1429static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
1472{ 1430{
1473 struct qfq_sched *q = qdisc_priv(sch); 1431 struct qfq_sched *q = qdisc_priv(sch);
@@ -1562,7 +1520,6 @@ static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
1562 .enqueue = qfq_enqueue, 1520 .enqueue = qfq_enqueue,
1563 .dequeue = qfq_dequeue, 1521 .dequeue = qfq_dequeue,
1564 .peek = qdisc_peek_dequeued, 1522 .peek = qdisc_peek_dequeued,
1565 .drop = qfq_drop,
1566 .init = qfq_init_qdisc, 1523 .init = qfq_init_qdisc,
1567 .reset = qfq_reset_qdisc, 1524 .reset = qfq_reset_qdisc,
1568 .destroy = qfq_destroy_qdisc, 1525 .destroy = qfq_destroy_qdisc,
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 91578bdd378c..249b2a18acbd 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -56,7 +56,8 @@ static inline int red_use_harddrop(struct red_sched_data *q)
56 return q->flags & TC_RED_HARDDROP; 56 return q->flags & TC_RED_HARDDROP;
57} 57}
58 58
59static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) 59static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
60 struct sk_buff **to_free)
60{ 61{
61 struct red_sched_data *q = qdisc_priv(sch); 62 struct red_sched_data *q = qdisc_priv(sch);
62 struct Qdisc *child = q->qdisc; 63 struct Qdisc *child = q->qdisc;
@@ -95,7 +96,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
95 break; 96 break;
96 } 97 }
97 98
98 ret = qdisc_enqueue(skb, child); 99 ret = qdisc_enqueue(skb, child, to_free);
99 if (likely(ret == NET_XMIT_SUCCESS)) { 100 if (likely(ret == NET_XMIT_SUCCESS)) {
100 qdisc_qstats_backlog_inc(sch, skb); 101 qdisc_qstats_backlog_inc(sch, skb);
101 sch->q.qlen++; 102 sch->q.qlen++;
@@ -106,7 +107,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
106 return ret; 107 return ret;
107 108
108congestion_drop: 109congestion_drop:
109 qdisc_drop(skb, sch); 110 qdisc_drop(skb, sch, to_free);
110 return NET_XMIT_CN; 111 return NET_XMIT_CN;
111} 112}
112 113
@@ -136,26 +137,6 @@ static struct sk_buff *red_peek(struct Qdisc *sch)
136 return child->ops->peek(child); 137 return child->ops->peek(child);
137} 138}
138 139
139static unsigned int red_drop(struct Qdisc *sch)
140{
141 struct red_sched_data *q = qdisc_priv(sch);
142 struct Qdisc *child = q->qdisc;
143 unsigned int len;
144
145 if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
146 q->stats.other++;
147 qdisc_qstats_drop(sch);
148 sch->qstats.backlog -= len;
149 sch->q.qlen--;
150 return len;
151 }
152
153 if (!red_is_idling(&q->vars))
154 red_start_of_idle_period(&q->vars);
155
156 return 0;
157}
158
159static void red_reset(struct Qdisc *sch) 140static void red_reset(struct Qdisc *sch)
160{ 141{
161 struct red_sched_data *q = qdisc_priv(sch); 142 struct red_sched_data *q = qdisc_priv(sch);
@@ -365,7 +346,6 @@ static struct Qdisc_ops red_qdisc_ops __read_mostly = {
365 .enqueue = red_enqueue, 346 .enqueue = red_enqueue,
366 .dequeue = red_dequeue, 347 .dequeue = red_dequeue,
367 .peek = red_peek, 348 .peek = red_peek,
368 .drop = red_drop,
369 .init = red_init, 349 .init = red_init,
370 .reset = red_reset, 350 .reset = red_reset,
371 .destroy = red_destroy, 351 .destroy = red_destroy,
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index c69611640fa5..add3cc7d37ec 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -275,7 +275,8 @@ static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
275 return false; 275 return false;
276} 276}
277 277
278static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) 278static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
279 struct sk_buff **to_free)
279{ 280{
280 281
281 struct sfb_sched_data *q = qdisc_priv(sch); 282 struct sfb_sched_data *q = qdisc_priv(sch);
@@ -397,7 +398,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
397 } 398 }
398 399
399enqueue: 400enqueue:
400 ret = qdisc_enqueue(skb, child); 401 ret = qdisc_enqueue(skb, child, to_free);
401 if (likely(ret == NET_XMIT_SUCCESS)) { 402 if (likely(ret == NET_XMIT_SUCCESS)) {
402 sch->q.qlen++; 403 sch->q.qlen++;
403 increment_qlen(skb, q); 404 increment_qlen(skb, q);
@@ -408,7 +409,7 @@ enqueue:
408 return ret; 409 return ret;
409 410
410drop: 411drop:
411 qdisc_drop(skb, sch); 412 qdisc_drop(skb, sch, to_free);
412 return NET_XMIT_CN; 413 return NET_XMIT_CN;
413other_drop: 414other_drop:
414 if (ret & __NET_XMIT_BYPASS) 415 if (ret & __NET_XMIT_BYPASS)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 498f0a2cb47f..7f195ed4d568 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -343,7 +343,7 @@ static int sfq_headdrop(const struct sfq_sched_data *q)
343} 343}
344 344
345static int 345static int
346sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) 346sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
347{ 347{
348 struct sfq_sched_data *q = qdisc_priv(sch); 348 struct sfq_sched_data *q = qdisc_priv(sch);
349 unsigned int hash, dropped; 349 unsigned int hash, dropped;
@@ -367,7 +367,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
367 if (x == SFQ_EMPTY_SLOT) { 367 if (x == SFQ_EMPTY_SLOT) {
368 x = q->dep[0].next; /* get a free slot */ 368 x = q->dep[0].next; /* get a free slot */
369 if (x >= SFQ_MAX_FLOWS) 369 if (x >= SFQ_MAX_FLOWS)
370 return qdisc_drop(skb, sch); 370 return qdisc_drop(skb, sch, to_free);
371 q->ht[hash] = x; 371 q->ht[hash] = x;
372 slot = &q->slots[x]; 372 slot = &q->slots[x];
373 slot->hash = hash; 373 slot->hash = hash;
@@ -424,14 +424,14 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
424 if (slot->qlen >= q->maxdepth) { 424 if (slot->qlen >= q->maxdepth) {
425congestion_drop: 425congestion_drop:
426 if (!sfq_headdrop(q)) 426 if (!sfq_headdrop(q))
427 return qdisc_drop(skb, sch); 427 return qdisc_drop(skb, sch, to_free);
428 428
429 /* We know we have at least one packet in queue */ 429 /* We know we have at least one packet in queue */
430 head = slot_dequeue_head(slot); 430 head = slot_dequeue_head(slot);
431 delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb); 431 delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
432 sch->qstats.backlog -= delta; 432 sch->qstats.backlog -= delta;
433 slot->backlog -= delta; 433 slot->backlog -= delta;
434 qdisc_drop(head, sch); 434 qdisc_drop(head, sch, to_free);
435 435
436 slot_queue_add(slot, skb); 436 slot_queue_add(slot, skb);
437 return NET_XMIT_CN; 437 return NET_XMIT_CN;
@@ -520,7 +520,7 @@ sfq_reset(struct Qdisc *sch)
520 struct sk_buff *skb; 520 struct sk_buff *skb;
521 521
522 while ((skb = sfq_dequeue(sch)) != NULL) 522 while ((skb = sfq_dequeue(sch)) != NULL)
523 kfree_skb(skb); 523 rtnl_kfree_skbs(skb, skb);
524} 524}
525 525
526/* 526/*
@@ -896,7 +896,6 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
896 .enqueue = sfq_enqueue, 896 .enqueue = sfq_enqueue,
897 .dequeue = sfq_dequeue, 897 .dequeue = sfq_dequeue,
898 .peek = qdisc_peek_dequeued, 898 .peek = qdisc_peek_dequeued,
899 .drop = sfq_drop,
900 .init = sfq_init, 899 .init = sfq_init,
901 .reset = sfq_reset, 900 .reset = sfq_reset,
902 .destroy = sfq_destroy, 901 .destroy = sfq_destroy,
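
One detail in the sfq hunk deserves a note: sfq_reset() now frees packets with rtnl_kfree_skbs() instead of kfree_skb(). Reset and destroy run with the RTNL held, and rtnl_kfree_skbs() parks the skb (its two arguments are the head and tail of a chain) on a deferred list that is freed in bulk once the RTNL is released, so flushing a long queue no longer stalls the lock holder. The pattern, sketched for a hypothetical qdisc that queues directly on sch->q:

    #include <net/sch_generic.h>

    static void toy_reset(struct Qdisc *sch)
    {
            struct sk_buff *skb;

            /* The actual kfree happens after rtnl_unlock(); head == tail
             * because skbs are handed over one at a time here.
             */
            while ((skb = __skb_dequeue(&sch->q)) != NULL)
                    rtnl_kfree_skbs(skb, skb);

            sch->qstats.backlog = 0;
    }
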
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 3161e491990b..303355c449ab 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -155,7 +155,8 @@ static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
155/* GSO packet is too big, segment it so that tbf can transmit 155/* GSO packet is too big, segment it so that tbf can transmit
156 * each segment in time 156 * each segment in time
157 */ 157 */
158static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch) 158static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
159 struct sk_buff **to_free)
159{ 160{
160 struct tbf_sched_data *q = qdisc_priv(sch); 161 struct tbf_sched_data *q = qdisc_priv(sch);
161 struct sk_buff *segs, *nskb; 162 struct sk_buff *segs, *nskb;
@@ -166,7 +167,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
166 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); 167 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
167 168
168 if (IS_ERR_OR_NULL(segs)) 169 if (IS_ERR_OR_NULL(segs))
169 return qdisc_reshape_fail(skb, sch); 170 return qdisc_drop(skb, sch, to_free);
170 171
171 nb = 0; 172 nb = 0;
172 while (segs) { 173 while (segs) {
@@ -174,7 +175,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
174 segs->next = NULL; 175 segs->next = NULL;
175 qdisc_skb_cb(segs)->pkt_len = segs->len; 176 qdisc_skb_cb(segs)->pkt_len = segs->len;
176 len += segs->len; 177 len += segs->len;
177 ret = qdisc_enqueue(segs, q->qdisc); 178 ret = qdisc_enqueue(segs, q->qdisc, to_free);
178 if (ret != NET_XMIT_SUCCESS) { 179 if (ret != NET_XMIT_SUCCESS) {
179 if (net_xmit_drop_count(ret)) 180 if (net_xmit_drop_count(ret))
180 qdisc_qstats_drop(sch); 181 qdisc_qstats_drop(sch);
@@ -190,17 +191,18 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
190 return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP; 191 return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
191} 192}
192 193
193static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch) 194static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
195 struct sk_buff **to_free)
194{ 196{
195 struct tbf_sched_data *q = qdisc_priv(sch); 197 struct tbf_sched_data *q = qdisc_priv(sch);
196 int ret; 198 int ret;
197 199
198 if (qdisc_pkt_len(skb) > q->max_size) { 200 if (qdisc_pkt_len(skb) > q->max_size) {
199 if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size) 201 if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
200 return tbf_segment(skb, sch); 202 return tbf_segment(skb, sch, to_free);
201 return qdisc_reshape_fail(skb, sch); 203 return qdisc_drop(skb, sch, to_free);
202 } 204 }
203 ret = qdisc_enqueue(skb, q->qdisc); 205 ret = qdisc_enqueue(skb, q->qdisc, to_free);
204 if (ret != NET_XMIT_SUCCESS) { 206 if (ret != NET_XMIT_SUCCESS) {
205 if (net_xmit_drop_count(ret)) 207 if (net_xmit_drop_count(ret))
206 qdisc_qstats_drop(sch); 208 qdisc_qstats_drop(sch);
@@ -212,19 +214,6 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
212 return NET_XMIT_SUCCESS; 214 return NET_XMIT_SUCCESS;
213} 215}
214 216
215static unsigned int tbf_drop(struct Qdisc *sch)
216{
217 struct tbf_sched_data *q = qdisc_priv(sch);
218 unsigned int len = 0;
219
220 if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
221 sch->qstats.backlog -= len;
222 sch->q.qlen--;
223 qdisc_qstats_drop(sch);
224 }
225 return len;
226}
227
228static bool tbf_peak_present(const struct tbf_sched_data *q) 217static bool tbf_peak_present(const struct tbf_sched_data *q)
229{ 218{
230 return q->peak.rate_bytes_ps; 219 return q->peak.rate_bytes_ps;
@@ -267,14 +256,12 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
267 q->ptokens = ptoks; 256 q->ptokens = ptoks;
268 qdisc_qstats_backlog_dec(sch, skb); 257 qdisc_qstats_backlog_dec(sch, skb);
269 sch->q.qlen--; 258 sch->q.qlen--;
270 qdisc_unthrottled(sch);
271 qdisc_bstats_update(sch, skb); 259 qdisc_bstats_update(sch, skb);
272 return skb; 260 return skb;
273 } 261 }
274 262
275 qdisc_watchdog_schedule_ns(&q->watchdog, 263 qdisc_watchdog_schedule_ns(&q->watchdog,
276 now + max_t(long, -toks, -ptoks), 264 now + max_t(long, -toks, -ptoks));
277 true);
278 265
279 /* Maybe we have a shorter packet in the queue, 266 /* Maybe we have a shorter packet in the queue,
280 which can be sent now. It sounds cool, 267 which can be sent now. It sounds cool,
@@ -559,7 +546,6 @@ static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
559 .enqueue = tbf_enqueue, 546 .enqueue = tbf_enqueue,
560 .dequeue = tbf_dequeue, 547 .dequeue = tbf_dequeue,
561 .peek = qdisc_peek_dequeued, 548 .peek = qdisc_peek_dequeued,
562 .drop = tbf_drop,
563 .init = tbf_init, 549 .init = tbf_init,
564 .reset = tbf_reset, 550 .reset = tbf_reset,
565 .destroy = tbf_destroy, 551 .destroy = tbf_destroy,
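
Two smaller API shifts ride along in the tbf hunks: the qdisc_reshape_fail() call sites become plain qdisc_drop() onto to_free, and the watchdog stops tracking a per-qdisc throttled bit, so qdisc_watchdog_schedule_ns() loses its boolean argument and qdisc_unthrottled() disappears from the dequeue path. A sketch of the new two-argument scheduling call in a rate-limited dequeue; toy_sched_data and next_tx_ns are invented for illustration:

    #include <linux/ktime.h>
    #include <net/pkt_sched.h>

    struct toy_sched_data {
            struct qdisc_watchdog watchdog;
            u64 next_tx_ns;         /* earliest permitted transmit time */
    };

    static struct sk_buff *toy_dequeue(struct Qdisc *sch)
    {
            struct toy_sched_data *q = qdisc_priv(sch);
            u64 now = ktime_get_ns();

            if (now < q->next_tx_ns) {
                    /* Only two arguments now: the throttle flag is gone */
                    qdisc_watchdog_schedule_ns(&q->watchdog, q->next_tx_ns);
                    return NULL;
            }
            return qdisc_dequeue_head(sch);
    }
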
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index e02687185a59..2cd9b4478b92 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -77,7 +77,7 @@ struct teql_sched_data {
77/* "teql*" qdisc routines */ 77/* "teql*" qdisc routines */
78 78
79static int 79static int
80teql_enqueue(struct sk_buff *skb, struct Qdisc *sch) 80teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
81{ 81{
82 struct net_device *dev = qdisc_dev(sch); 82 struct net_device *dev = qdisc_dev(sch);
83 struct teql_sched_data *q = qdisc_priv(sch); 83 struct teql_sched_data *q = qdisc_priv(sch);
@@ -87,7 +87,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
87 return NET_XMIT_SUCCESS; 87 return NET_XMIT_SUCCESS;
88 } 88 }
89 89
90 return qdisc_drop(skb, sch); 90 return qdisc_drop(skb, sch, to_free);
91} 91}
92 92
93static struct sk_buff * 93static struct sk_buff *
diff --git a/net/sctp/Makefile b/net/sctp/Makefile
index 0fca5824ad0e..6c4f7496cec6 100644
--- a/net/sctp/Makefile
+++ b/net/sctp/Makefile
@@ -11,7 +11,8 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
11 transport.o chunk.o sm_make_chunk.o ulpevent.o \ 11 transport.o chunk.o sm_make_chunk.o ulpevent.o \
12 inqueue.o outqueue.o ulpqueue.o \ 12 inqueue.o outqueue.o ulpqueue.o \
13 tsnmap.o bind_addr.o socket.o primitive.o \ 13 tsnmap.o bind_addr.o socket.o primitive.o \
14 output.o input.o debug.o ssnmap.o auth.o 14 output.o input.o debug.o ssnmap.o auth.o \
15 offload.o
15 16
16sctp_probe-y := probe.o 17sctp_probe-y := probe.o
17 18
diff --git a/net/sctp/input.c b/net/sctp/input.c
index a701527a9480..6f8e676d285e 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -112,7 +112,6 @@ int sctp_rcv(struct sk_buff *skb)
112 struct sctp_ep_common *rcvr; 112 struct sctp_ep_common *rcvr;
113 struct sctp_transport *transport = NULL; 113 struct sctp_transport *transport = NULL;
114 struct sctp_chunk *chunk; 114 struct sctp_chunk *chunk;
115 struct sctphdr *sh;
116 union sctp_addr src; 115 union sctp_addr src;
117 union sctp_addr dest; 116 union sctp_addr dest;
118 int family; 117 int family;
@@ -124,28 +123,29 @@ int sctp_rcv(struct sk_buff *skb)
124 123
125 __SCTP_INC_STATS(net, SCTP_MIB_INSCTPPACKS); 124 __SCTP_INC_STATS(net, SCTP_MIB_INSCTPPACKS);
126 125
127 if (skb_linearize(skb)) 126 /* If packet is too small to contain a single chunk, let's not
127 * waste time on it anymore.
128 */
129 if (skb->len < sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr) +
130 skb_transport_offset(skb))
128 goto discard_it; 131 goto discard_it;
129 132
130 sh = sctp_hdr(skb); 133 if (!pskb_may_pull(skb, sizeof(struct sctphdr)))
134 goto discard_it;
131 135
132 /* Pull up the IP and SCTP headers. */ 136 /* Pull up the IP header. */
133 __skb_pull(skb, skb_transport_offset(skb)); 137 __skb_pull(skb, skb_transport_offset(skb));
134 if (skb->len < sizeof(struct sctphdr))
135 goto discard_it;
136 138
137 skb->csum_valid = 0; /* Previous value not applicable */ 139 skb->csum_valid = 0; /* Previous value not applicable */
138 if (skb_csum_unnecessary(skb)) 140 if (skb_csum_unnecessary(skb))
139 __skb_decr_checksum_unnecessary(skb); 141 __skb_decr_checksum_unnecessary(skb);
140 else if (!sctp_checksum_disable && sctp_rcv_checksum(net, skb) < 0) 142 else if (!sctp_checksum_disable &&
143 !(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) &&
144 sctp_rcv_checksum(net, skb) < 0)
141 goto discard_it; 145 goto discard_it;
142 skb->csum_valid = 1; 146 skb->csum_valid = 1;
143 147
144 skb_pull(skb, sizeof(struct sctphdr)); 148 __skb_pull(skb, sizeof(struct sctphdr));
145
146 /* Make sure we at least have chunk headers worth of data left. */
147 if (skb->len < sizeof(struct sctp_chunkhdr))
148 goto discard_it;
149 149
150 family = ipver2af(ip_hdr(skb)->version); 150 family = ipver2af(ip_hdr(skb)->version);
151 af = sctp_get_af_specific(family); 151 af = sctp_get_af_specific(family);
@@ -230,7 +230,7 @@ int sctp_rcv(struct sk_buff *skb)
230 chunk->rcvr = rcvr; 230 chunk->rcvr = rcvr;
231 231
232 /* Remember the SCTP header. */ 232 /* Remember the SCTP header. */
233 chunk->sctp_hdr = sh; 233 chunk->sctp_hdr = sctp_hdr(skb);
234 234
235 /* Set the source and destination addresses of the incoming chunk. */ 235 /* Set the source and destination addresses of the incoming chunk. */
236 sctp_init_addrs(chunk, &src, &dest); 236 sctp_init_addrs(chunk, &src, &dest);
@@ -660,19 +660,23 @@ out_unlock:
660 */ 660 */
661static int sctp_rcv_ootb(struct sk_buff *skb) 661static int sctp_rcv_ootb(struct sk_buff *skb)
662{ 662{
663 sctp_chunkhdr_t *ch; 663 sctp_chunkhdr_t *ch, _ch;
664 __u8 *ch_end; 664 int ch_end, offset = 0;
665
666 ch = (sctp_chunkhdr_t *) skb->data;
667 665
668 /* Scan through all the chunks in the packet. */ 666 /* Scan through all the chunks in the packet. */
669 do { 667 do {
668 /* Make sure we have at least the header there */
669 if (offset + sizeof(sctp_chunkhdr_t) > skb->len)
670 break;
671
672 ch = skb_header_pointer(skb, offset, sizeof(*ch), &_ch);
673
670 /* Break out if chunk length is less than minimal. */ 674 /* Break out if chunk length is less than minimal. */
671 if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t)) 675 if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
672 break; 676 break;
673 677
674 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); 678 ch_end = offset + WORD_ROUND(ntohs(ch->length));
675 if (ch_end > skb_tail_pointer(skb)) 679 if (ch_end > skb->len)
676 break; 680 break;
677 681
678 /* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the 682 /* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the
@@ -697,8 +701,8 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
697 if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data) 701 if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
698 goto discard; 702 goto discard;
699 703
700 ch = (sctp_chunkhdr_t *) ch_end; 704 offset = ch_end;
701 } while (ch_end < skb_tail_pointer(skb)); 705 } while (ch_end < skb->len);
702 706
703 return 0; 707 return 0;
704 708
@@ -1173,6 +1177,17 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
1173{ 1177{
1174 sctp_chunkhdr_t *ch; 1178 sctp_chunkhdr_t *ch;
1175 1179
1180 /* We do not allow GSO frames here as we need to linearize and
1181 * then cannot guarantee frame boundaries. This shouldn't be an
1182 * issue as packets hitting this are mostly INIT or INIT-ACK and
1183 * those cannot be GSO-style frames anyway.
1184 */
1185 if ((skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP)
1186 return NULL;
1187
1188 if (skb_linearize(skb))
1189 return NULL;
1190
1176 ch = (sctp_chunkhdr_t *) skb->data; 1191 ch = (sctp_chunkhdr_t *) skb->data;
1177 1192
1178 /* The code below will attempt to walk the chunk and extract 1193 /* The code below will attempt to walk the chunk and extract
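
The input.c changes share one theme: stop assuming a linear skb. The fast path now checks only that the packet can hold the common header plus one chunk header, checksum verification is skipped for SKB_GSO_SCTP frames (their CRCs are handled per-segment elsewhere), and sctp_rcv_ootb() walks chunks by offset through skb_header_pointer() instead of dereferencing raw pointers into skb->data. That bounds-checked walk, condensed into a standalone sketch (handle_chunk is a placeholder):

    #include <net/sctp/sctp.h>

    static void walk_chunks(struct sk_buff *skb)
    {
            sctp_chunkhdr_t *ch, _ch;
            int offset = 0, ch_end;

            while (offset + sizeof(sctp_chunkhdr_t) <= skb->len) {
                    /* Copies into _ch only if the range is non-linear */
                    ch = skb_header_pointer(skb, offset, sizeof(*ch), &_ch);
                    if (!ch || ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
                            break;

                    ch_end = offset + WORD_ROUND(ntohs(ch->length));
                    if (ch_end > skb->len)
                            break;

                    /* handle_chunk(ch); */
                    offset = ch_end;
            }
    }
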
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 9d87bba0ff1d..edabbbdfca54 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -130,13 +130,25 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
130 * at this time. 130 * at this time.
131 */ 131 */
132 132
133 if ((chunk = queue->in_progress)) { 133 chunk = queue->in_progress;
134 if (chunk) {
134 /* There is a packet that we have been working on. 135 /* There is a packet that we have been working on.
135 * Any post processing work to do before we move on? 136 * Any post processing work to do before we move on?
136 */ 137 */
137 if (chunk->singleton || 138 if (chunk->singleton ||
138 chunk->end_of_packet || 139 chunk->end_of_packet ||
139 chunk->pdiscard) { 140 chunk->pdiscard) {
141 if (chunk->head_skb == chunk->skb) {
142 chunk->skb = skb_shinfo(chunk->skb)->frag_list;
143 goto new_skb;
144 }
145 if (chunk->skb->next) {
146 chunk->skb = chunk->skb->next;
147 goto new_skb;
148 }
149
150 if (chunk->head_skb)
151 chunk->skb = chunk->head_skb;
140 sctp_chunk_free(chunk); 152 sctp_chunk_free(chunk);
141 chunk = queue->in_progress = NULL; 153 chunk = queue->in_progress = NULL;
142 } else { 154 } else {
@@ -152,34 +164,64 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
152 if (!chunk) { 164 if (!chunk) {
153 struct list_head *entry; 165 struct list_head *entry;
154 166
167next_chunk:
155 /* Is the queue empty? */ 168 /* Is the queue empty? */
156 if (list_empty(&queue->in_chunk_list)) 169 entry = sctp_list_dequeue(&queue->in_chunk_list);
170 if (!entry)
157 return NULL; 171 return NULL;
158 172
159 entry = queue->in_chunk_list.next; 173 chunk = list_entry(entry, struct sctp_chunk, list);
160 chunk = queue->in_progress =
161 list_entry(entry, struct sctp_chunk, list);
162 list_del_init(entry);
163 174
164 /* This is the first chunk in the packet. */ 175 /* Linearize if it's not GSO */
165 chunk->singleton = 1; 176 if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) != SKB_GSO_SCTP &&
166 ch = (sctp_chunkhdr_t *) chunk->skb->data; 177 skb_is_nonlinear(chunk->skb)) {
167 chunk->data_accepted = 0; 178 if (skb_linearize(chunk->skb)) {
179 __SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
180 sctp_chunk_free(chunk);
181 goto next_chunk;
182 }
183
184 /* Update sctp_hdr as it probably changed */
185 chunk->sctp_hdr = sctp_hdr(chunk->skb);
186 }
187
188 if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) {
189 /* GSO-marked skbs without frags are handled
190 * normally
191 */
192 if (skb_shinfo(chunk->skb)->frag_list)
193 chunk->head_skb = chunk->skb;
194
195 /* skbs with "cover letter" */
196 if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len)
197 chunk->skb = skb_shinfo(chunk->skb)->frag_list;
198
199 if (WARN_ON(!chunk->skb)) {
200 __SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
201 sctp_chunk_free(chunk);
202 goto next_chunk;
203 }
204 }
168 205
169 if (chunk->asoc) 206 if (chunk->asoc)
170 sock_rps_save_rxhash(chunk->asoc->base.sk, chunk->skb); 207 sock_rps_save_rxhash(chunk->asoc->base.sk, chunk->skb);
208
209 queue->in_progress = chunk;
210
211new_skb:
212 /* This is the first chunk in the packet. */
213 ch = (sctp_chunkhdr_t *) chunk->skb->data;
214 chunk->singleton = 1;
215 chunk->data_accepted = 0;
216 chunk->pdiscard = 0;
217 chunk->auth = 0;
218 chunk->has_asconf = 0;
219 chunk->end_of_packet = 0;
220 chunk->ecn_ce_done = 0;
171 } 221 }
172 222
173 chunk->chunk_hdr = ch; 223 chunk->chunk_hdr = ch;
174 chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); 224 chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
175 /* In the unlikely case of an IP reassembly, the skb could be
176 * non-linear. If so, update chunk_end so that it doesn't go past
177 * the skb->tail.
178 */
179 if (unlikely(skb_is_nonlinear(chunk->skb))) {
180 if (chunk->chunk_end > skb_tail_pointer(chunk->skb))
181 chunk->chunk_end = skb_tail_pointer(chunk->skb);
182 }
183 skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t)); 225 skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
184 chunk->subh.v = NULL; /* Subheader is no longer valid. */ 226 chunk->subh.v = NULL; /* Subheader is no longer valid. */
185 227
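
With GSO, one sctp_chunk may now span several skbs: the outer GSO skb (remembered as chunk->head_skb) plus the original packets preserved on its frag_list. The rewritten sctp_inq_pop() therefore advances chunk->skb through those fragments before the chunk is freed, re-initializing the per-packet flags at the new_skb label for each one. The traversal order it implements, expressed as a hypothetical helper:

    #include <linux/skbuff.h>

    /* Next packet of a GSO frame, or NULL when exhausted */
    static struct sk_buff *next_packet(struct sk_buff *head_skb,
                                       struct sk_buff *cur)
    {
            /* From the head skb, descend into the preserved fragments */
            if (cur == head_skb)
                    return skb_shinfo(head_skb)->frag_list;

            /* Then walk the singly linked fragment list */
            return cur->next;
    }
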
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
new file mode 100644
index 000000000000..a37887b373a7
--- /dev/null
+++ b/net/sctp/offload.c
@@ -0,0 +1,98 @@
1/*
2 * sctp_offload - GRO/GSO Offloading for SCTP
3 *
4 * Copyright (C) 2015, Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/kernel.h>
20#include <linux/kprobes.h>
21#include <linux/socket.h>
22#include <linux/sctp.h>
23#include <linux/proc_fs.h>
24#include <linux/vmalloc.h>
25#include <linux/module.h>
26#include <linux/kfifo.h>
27#include <linux/time.h>
28#include <net/net_namespace.h>
29
30#include <linux/skbuff.h>
31#include <net/sctp/sctp.h>
32#include <net/sctp/checksum.h>
33#include <net/protocol.h>
34
35static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
36{
37 skb->ip_summed = CHECKSUM_NONE;
38 return sctp_compute_cksum(skb, skb_transport_offset(skb));
39}
40
41static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
42 netdev_features_t features)
43{
44 struct sk_buff *segs = ERR_PTR(-EINVAL);
45 struct sctphdr *sh;
46
47 sh = sctp_hdr(skb);
48 if (!pskb_may_pull(skb, sizeof(*sh)))
49 goto out;
50
51 __skb_pull(skb, sizeof(*sh));
52
53 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
54 /* Packet is from an untrusted source, reset gso_segs. */
55 struct skb_shared_info *pinfo = skb_shinfo(skb);
56 struct sk_buff *frag_iter;
57
58 pinfo->gso_segs = 0;
59 if (skb->len != skb->data_len) {
60 /* Means we have chunks in here too */
61 pinfo->gso_segs++;
62 }
63
64 skb_walk_frags(skb, frag_iter)
65 pinfo->gso_segs++;
66
67 segs = NULL;
68 goto out;
69 }
70
71 segs = skb_segment(skb, features | NETIF_F_HW_CSUM);
72 if (IS_ERR(segs))
73 goto out;
74
75 /* All that is left is update SCTP CRC if necessary */
76 if (!(features & NETIF_F_SCTP_CRC)) {
77 for (skb = segs; skb; skb = skb->next) {
78 if (skb->ip_summed == CHECKSUM_PARTIAL) {
79 sh = sctp_hdr(skb);
80 sh->checksum = sctp_gso_make_checksum(skb);
81 }
82 }
83 }
84
85out:
86 return segs;
87}
88
89static const struct net_offload sctp_offload = {
90 .callbacks = {
91 .gso_segment = sctp_gso_segment,
92 },
93};
94
95int __init sctp_offload_init(void)
96{
97 return inet_add_offload(&sctp_offload, IPPROTO_SCTP);
98}
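
For frames that are already segmented and pass skb_gso_ok(), sctp_gso_segment() only needs to report an honest gso_segs count: the head skb counts as one segment when it carries chunk bytes of its own (skb->len != skb->data_len, i.e. it is more than a "cover letter"), plus one per frag_list entry. The same arithmetic as a standalone helper, for illustration only:

    #include <linux/skbuff.h>

    static unsigned int count_gso_segs(const struct sk_buff *skb)
    {
            struct sk_buff *frag;
            unsigned int segs;

            /* Head counts only if some chunk data lives outside the
             * fragment list.
             */
            segs = (skb->len != skb->data_len) ? 1 : 0;

            skb_walk_frags(skb, frag)
                    segs++;

            return segs;
    }
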
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 9844fe573029..1541a91d6d9d 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -84,18 +84,42 @@ static void sctp_packet_reset(struct sctp_packet *packet)
84struct sctp_packet *sctp_packet_config(struct sctp_packet *packet, 84struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
85 __u32 vtag, int ecn_capable) 85 __u32 vtag, int ecn_capable)
86{ 86{
87 struct sctp_chunk *chunk = NULL; 87 struct sctp_transport *tp = packet->transport;
88 struct sctp_association *asoc = tp->asoc;
88 89
89 pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag); 90 pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
90 91
91 packet->vtag = vtag; 92 packet->vtag = vtag;
92 93
94 if (asoc && tp->dst) {
95 struct sock *sk = asoc->base.sk;
96
97 rcu_read_lock();
98 if (__sk_dst_get(sk) != tp->dst) {
99 dst_hold(tp->dst);
100 sk_setup_caps(sk, tp->dst);
101 }
102
103 if (sk_can_gso(sk)) {
104 struct net_device *dev = tp->dst->dev;
105
106 packet->max_size = dev->gso_max_size;
107 } else {
108 packet->max_size = asoc->pathmtu;
109 }
110 rcu_read_unlock();
111
112 } else {
113 packet->max_size = tp->pathmtu;
114 }
115
93 if (ecn_capable && sctp_packet_empty(packet)) { 116 if (ecn_capable && sctp_packet_empty(packet)) {
94 chunk = sctp_get_ecne_prepend(packet->transport->asoc); 117 struct sctp_chunk *chunk;
95 118
96 /* If there is a prepend chunk, stick it on the list before 119 /* If there is a prepend chunk, stick it on the list before
97 * any other chunks get appended. 120 * any other chunks get appended.
98 */ 121 */
122 chunk = sctp_get_ecne_prepend(asoc);
99 if (chunk) 123 if (chunk)
100 sctp_packet_append_chunk(packet, chunk); 124 sctp_packet_append_chunk(packet, chunk);
101 } 125 }
@@ -158,7 +182,8 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
158 sctp_xmit_t retval; 182 sctp_xmit_t retval;
159 int error = 0; 183 int error = 0;
160 184
161 pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk); 185 pr_debug("%s: packet:%p size:%Zu chunk:%p size:%d\n", __func__,
186 packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);
162 187
163 switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) { 188 switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
164 case SCTP_XMIT_PMTU_FULL: 189 case SCTP_XMIT_PMTU_FULL:
@@ -381,12 +406,15 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
381 struct sctp_transport *tp = packet->transport; 406 struct sctp_transport *tp = packet->transport;
382 struct sctp_association *asoc = tp->asoc; 407 struct sctp_association *asoc = tp->asoc;
383 struct sctphdr *sh; 408 struct sctphdr *sh;
384 struct sk_buff *nskb; 409 struct sk_buff *nskb = NULL, *head = NULL;
385 struct sctp_chunk *chunk, *tmp; 410 struct sctp_chunk *chunk, *tmp;
386 struct sock *sk; 411 struct sock *sk;
387 int err = 0; 412 int err = 0;
388 int padding; /* How much padding do we need? */ 413 int padding; /* How much padding do we need? */
414 int pkt_size;
389 __u8 has_data = 0; 415 __u8 has_data = 0;
416 int gso = 0;
417 int pktcount = 0;
390 struct dst_entry *dst; 418 struct dst_entry *dst;
391 unsigned char *auth = NULL; /* pointer to auth in skb data */ 419 unsigned char *auth = NULL; /* pointer to auth in skb data */
392 420
@@ -400,18 +428,37 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
400 chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list); 428 chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
401 sk = chunk->skb->sk; 429 sk = chunk->skb->sk;
402 430
403 /* Allocate the new skb. */ 431 /* Allocate the head skb, or main one if not in GSO */
404 nskb = alloc_skb(packet->size + MAX_HEADER, gfp); 432 if (packet->size > tp->pathmtu && !packet->ipfragok) {
405 if (!nskb) 433 if (sk_can_gso(sk)) {
434 gso = 1;
435 pkt_size = packet->overhead;
436 } else {
437 /* If this happens, we trash this packet and try
438 * to build a new one, hopefully correct this
439 * time. Application may notice this error.
440 */
441 pr_err_once("Trying to GSO but underlying device doesn't support it.");
442 goto nomem;
443 }
444 } else {
445 pkt_size = packet->size;
446 }
447 head = alloc_skb(pkt_size + MAX_HEADER, gfp);
448 if (!head)
406 goto nomem; 449 goto nomem;
450 if (gso) {
451 NAPI_GRO_CB(head)->last = head;
452 skb_shinfo(head)->gso_type = sk->sk_gso_type;
453 }
407 454
408 /* Make sure the outbound skb has enough header room reserved. */ 455 /* Make sure the outbound skb has enough header room reserved. */
409 skb_reserve(nskb, packet->overhead + MAX_HEADER); 456 skb_reserve(head, packet->overhead + MAX_HEADER);
410 457
411 /* Set the owning socket so that we know where to get the 458 /* Set the owning socket so that we know where to get the
412 * destination IP address. 459 * destination IP address.
413 */ 460 */
414 sctp_packet_set_owner_w(nskb, sk); 461 sctp_packet_set_owner_w(head, sk);
415 462
416 if (!sctp_transport_dst_check(tp)) { 463 if (!sctp_transport_dst_check(tp)) {
417 sctp_transport_route(tp, NULL, sctp_sk(sk)); 464 sctp_transport_route(tp, NULL, sctp_sk(sk));
@@ -422,11 +469,11 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
422 dst = dst_clone(tp->dst); 469 dst = dst_clone(tp->dst);
423 if (!dst) 470 if (!dst)
424 goto no_route; 471 goto no_route;
425 skb_dst_set(nskb, dst); 472 skb_dst_set(head, dst);
426 473
427 /* Build the SCTP header. */ 474 /* Build the SCTP header. */
428 sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr)); 475 sh = (struct sctphdr *)skb_push(head, sizeof(struct sctphdr));
429 skb_reset_transport_header(nskb); 476 skb_reset_transport_header(head);
430 sh->source = htons(packet->source_port); 477 sh->source = htons(packet->source_port);
431 sh->dest = htons(packet->destination_port); 478 sh->dest = htons(packet->destination_port);
432 479
@@ -441,90 +488,133 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
441 sh->vtag = htonl(packet->vtag); 488 sh->vtag = htonl(packet->vtag);
442 sh->checksum = 0; 489 sh->checksum = 0;
443 490
444 /**
445 * 6.10 Bundling
446 *
447 * An endpoint bundles chunks by simply including multiple
448 * chunks in one outbound SCTP packet. ...
449 */
450
451 /**
452 * 3.2 Chunk Field Descriptions
453 *
454 * The total length of a chunk (including Type, Length and
455 * Value fields) MUST be a multiple of 4 bytes. If the length
456 * of the chunk is not a multiple of 4 bytes, the sender MUST
457 * pad the chunk with all zero bytes and this padding is not
458 * included in the chunk length field. The sender should
459 * never pad with more than 3 bytes.
460 *
461 * [This whole comment explains WORD_ROUND() below.]
462 */
463
464 pr_debug("***sctp_transmit_packet***\n"); 491 pr_debug("***sctp_transmit_packet***\n");
465 492
466 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { 493 do {
467 list_del_init(&chunk->list); 494 /* Set up convenience variables... */
468 if (sctp_chunk_is_data(chunk)) { 495 chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
469 /* 6.3.1 C4) When data is in flight and when allowed 496 pktcount++;
470 * by rule C5, a new RTT measurement MUST be made each
471 * round trip. Furthermore, new RTT measurements
472 * SHOULD be made no more than once per round-trip
473 * for a given destination transport address.
474 */
475 497
476 if (!chunk->resent && !tp->rto_pending) { 498 /* Calculate packet size so it fits the PMTU. Leave
477 chunk->rtt_in_progress = 1; 499 * other chunks for the next packets.
478 tp->rto_pending = 1; 500 */
501 if (gso) {
502 pkt_size = packet->overhead;
503 list_for_each_entry(chunk, &packet->chunk_list, list) {
504 int padded = WORD_ROUND(chunk->skb->len);
505
506 if (pkt_size + padded > tp->pathmtu)
507 break;
508 pkt_size += padded;
479 } 509 }
480 510
481 has_data = 1; 511 /* Allocate a new skb. */
482 } 512 nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
513 if (!nskb)
514 goto nomem;
483 515
484 padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len; 516 /* Make sure the outbound skb has enough header
485 if (padding) 517 * room reserved.
486 memset(skb_put(chunk->skb, padding), 0, padding); 518 */
519 skb_reserve(nskb, packet->overhead + MAX_HEADER);
520 } else {
521 nskb = head;
522 }
487 523
488 /* if this is the auth chunk that we are adding, 524 /**
489 * store pointer where it will be added and put 525 * 3.2 Chunk Field Descriptions
490 * the auth into the packet. 526 *
527 * The total length of a chunk (including Type, Length and
528 * Value fields) MUST be a multiple of 4 bytes. If the length
529 * of the chunk is not a multiple of 4 bytes, the sender MUST
530 * pad the chunk with all zero bytes and this padding is not
531 * included in the chunk length field. The sender should
532 * never pad with more than 3 bytes.
533 *
534 * [This whole comment explains WORD_ROUND() below.]
491 */ 535 */
492 if (chunk == packet->auth)
493 auth = skb_tail_pointer(nskb);
494 536
495 memcpy(skb_put(nskb, chunk->skb->len), 537 pkt_size -= packet->overhead;
538 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
539 list_del_init(&chunk->list);
540 if (sctp_chunk_is_data(chunk)) {
541 /* 6.3.1 C4) When data is in flight and when allowed
542 * by rule C5, a new RTT measurement MUST be made each
543 * round trip. Furthermore, new RTT measurements
544 * SHOULD be made no more than once per round-trip
545 * for a given destination transport address.
546 */
547
548 if (!chunk->resent && !tp->rto_pending) {
549 chunk->rtt_in_progress = 1;
550 tp->rto_pending = 1;
551 }
552
553 has_data = 1;
554 }
555
556 padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len;
557 if (padding)
558 memset(skb_put(chunk->skb, padding), 0, padding);
559
560 /* if this is the auth chunk that we are adding,
561 * store pointer where it will be added and put
562 * the auth into the packet.
563 */
564 if (chunk == packet->auth)
565 auth = skb_tail_pointer(nskb);
566
567 memcpy(skb_put(nskb, chunk->skb->len),
496 chunk->skb->data, chunk->skb->len); 568 chunk->skb->data, chunk->skb->len);
497 569
498 pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, " 570 pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
499 "rtt_in_progress:%d\n", chunk, 571 chunk,
500 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)), 572 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
501 chunk->has_tsn ? "TSN" : "No TSN", 573 chunk->has_tsn ? "TSN" : "No TSN",
502 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0, 574 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
503 ntohs(chunk->chunk_hdr->length), chunk->skb->len, 575 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
504 chunk->rtt_in_progress); 576 chunk->rtt_in_progress);
505 577
506 /* 578 /* If this is a control chunk, this is our last
507 * If this is a control chunk, this is our last 579 * reference. Free data chunks after they've been
508 * reference. Free data chunks after they've been 580 * acknowledged or have failed.
509 * acknowledged or have failed. 581 * Re-queue auth chunks if needed.
510 */ 582 */
511 if (!sctp_chunk_is_data(chunk)) 583 pkt_size -= WORD_ROUND(chunk->skb->len);
512 sctp_chunk_free(chunk);
513 }
514 584
515 /* SCTP-AUTH, Section 6.2 585 if (chunk == packet->auth && !list_empty(&packet->chunk_list))
516 * The sender MUST calculate the MAC as described in RFC2104 [2] 586 list_add(&chunk->list, &packet->chunk_list);
517 * using the hash function H as described by the MAC Identifier and 587 else if (!sctp_chunk_is_data(chunk))
518 * the shared association key K based on the endpoint pair shared key 588 sctp_chunk_free(chunk);
519 * described by the shared key identifier. The 'data' used for the 589
520 * computation of the AUTH-chunk is given by the AUTH chunk with its 590 if (!pkt_size)
521 * HMAC field set to zero (as shown in Figure 6) followed by all 591 break;
522 * chunks that are placed after the AUTH chunk in the SCTP packet. 592 }
523 */ 593
524 if (auth) 594 /* SCTP-AUTH, Section 6.2
525 sctp_auth_calculate_hmac(asoc, nskb, 595 * The sender MUST calculate the MAC as described in RFC2104 [2]
526 (struct sctp_auth_chunk *)auth, 596 * using the hash function H as described by the MAC Identifier and
527 gfp); 597 * the shared association key K based on the endpoint pair shared key
598 * described by the shared key identifier. The 'data' used for the
599 * computation of the AUTH-chunk is given by the AUTH chunk with its
600 * HMAC field set to zero (as shown in Figure 6) followed by all
601 * chunks that are placed after the AUTH chunk in the SCTP packet.
602 */
603 if (auth)
604 sctp_auth_calculate_hmac(asoc, nskb,
605 (struct sctp_auth_chunk *)auth,
606 gfp);
607
608 if (!gso)
609 break;
610
611 if (skb_gro_receive(&head, nskb))
612 goto nomem;
613 nskb = NULL;
614 if (WARN_ON_ONCE(skb_shinfo(head)->gso_segs >=
615 sk->sk_gso_max_segs))
616 goto nomem;
617 } while (!list_empty(&packet->chunk_list));
528 618
529 /* 2) Calculate the Adler-32 checksum of the whole packet, 619 /* 2) Calculate the Adler-32 checksum of the whole packet,
530 * including the SCTP common header and all the 620 * including the SCTP common header and all the
@@ -532,16 +622,18 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
532 * 622 *
533 * Note: Adler-32 is no longer applicable, as it has been replaced 623 * Note: Adler-32 is no longer applicable, as it has been replaced
534 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>. 624 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
625 *
626 * If it's a GSO packet, it's postponed to sctp_skb_segment.
535 */ 627 */
536 if (!sctp_checksum_disable) { 628 if (!sctp_checksum_disable || gso) {
537 if (!(dst->dev->features & NETIF_F_SCTP_CRC) || 629 if (!gso && (!(dst->dev->features & NETIF_F_SCTP_CRC) ||
538 (dst_xfrm(dst) != NULL) || packet->ipfragok) { 630 dst_xfrm(dst) || packet->ipfragok)) {
539 sh->checksum = sctp_compute_cksum(nskb, 0); 631 sh->checksum = sctp_compute_cksum(head, 0);
540 } else { 632 } else {
541 /* no need to seed pseudo checksum for SCTP */ 633 /* no need to seed pseudo checksum for SCTP */
542 nskb->ip_summed = CHECKSUM_PARTIAL; 634 head->ip_summed = CHECKSUM_PARTIAL;
543 nskb->csum_start = skb_transport_header(nskb) - nskb->head; 635 head->csum_start = skb_transport_header(head) - head->head;
544 nskb->csum_offset = offsetof(struct sctphdr, checksum); 636 head->csum_offset = offsetof(struct sctphdr, checksum);
545 } 637 }
546 } 638 }
547 639
@@ -557,7 +649,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
557 * Note: The IPv6 layer checks this bit too, later 649 * Note: The IPv6 layer checks this bit too, later
558 * in transmission. See IP6_ECN_flow_xmit(). 650 * in transmission. See IP6_ECN_flow_xmit().
559 */ 651 */
560 tp->af_specific->ecn_capable(nskb->sk); 652 tp->af_specific->ecn_capable(sk);
561 653
562 /* Set up the IP options. */ 654 /* Set up the IP options. */
563 /* BUG: not implemented 655 /* BUG: not implemented
@@ -566,7 +658,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
566 658
567 /* Dump that on IP! */ 659 /* Dump that on IP! */
568 if (asoc) { 660 if (asoc) {
569 asoc->stats.opackets++; 661 asoc->stats.opackets += pktcount;
570 if (asoc->peer.last_sent_to != tp) 662 if (asoc->peer.last_sent_to != tp)
571 /* Considering the multiple CPU scenario, this is a 663 /* Considering the multiple CPU scenario, this is a
572 * "correcter" place for last_sent_to. --xguo 664 * "correcter" place for last_sent_to. --xguo
@@ -589,16 +681,36 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
589 } 681 }
590 } 682 }
591 683
592 pr_debug("***sctp_transmit_packet*** skb->len:%d\n", nskb->len); 684 pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);
685
686 if (gso) {
687 /* Cleanup our debris for IP stacks */
688 memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
689 sizeof(struct inet6_skb_parm)));
593 690
594 nskb->ignore_df = packet->ipfragok; 691 skb_shinfo(head)->gso_segs = pktcount;
595 tp->af_specific->sctp_xmit(nskb, tp); 692 skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
693
694 /* We have to refresh this in case we are xmiting to
695 * more than one transport at a time
696 */
697 rcu_read_lock();
698 if (__sk_dst_get(sk) != tp->dst) {
699 dst_hold(tp->dst);
700 sk_setup_caps(sk, tp->dst);
701 }
702 rcu_read_unlock();
703 }
704 head->ignore_df = packet->ipfragok;
705 tp->af_specific->sctp_xmit(head, tp);
596 706
597out: 707out:
598 sctp_packet_reset(packet); 708 sctp_packet_reset(packet);
599 return err; 709 return err;
600no_route: 710no_route:
601 kfree_skb(nskb); 711 kfree_skb(head);
712 if (nskb != head)
713 kfree_skb(nskb);
602 714
603 if (asoc) 715 if (asoc)
604 IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES); 716 IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
@@ -751,39 +863,63 @@ static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
751 struct sctp_chunk *chunk, 863 struct sctp_chunk *chunk,
752 u16 chunk_len) 864 u16 chunk_len)
753{ 865{
754 size_t psize; 866 size_t psize, pmtu;
755 size_t pmtu;
756 int too_big;
757 sctp_xmit_t retval = SCTP_XMIT_OK; 867 sctp_xmit_t retval = SCTP_XMIT_OK;
758 868
759 psize = packet->size; 869 psize = packet->size;
760 pmtu = ((packet->transport->asoc) ? 870 if (packet->transport->asoc)
761 (packet->transport->asoc->pathmtu) : 871 pmtu = packet->transport->asoc->pathmtu;
762 (packet->transport->pathmtu)); 872 else
763 873 pmtu = packet->transport->pathmtu;
764 too_big = (psize + chunk_len > pmtu);
765 874
766 /* Decide if we need to fragment or resubmit later. */ 875 /* Decide if we need to fragment or resubmit later. */
767 if (too_big) { 876 if (psize + chunk_len > pmtu) {
768 /* It's OK to fragmet at IP level if any one of the following 877 /* It's OK to fragment at IP level if any one of the following
769 * is true: 878 * is true:
770 * 1. The packet is empty (meaning this chunk is greater 879 * 1. The packet is empty (meaning this chunk is greater
771 * than the MTU) 880 * than the MTU)
772 * 2. The chunk we are adding is a control chunk 881 * 2. The packet doesn't have any data in it yet and data
773 * 3. The packet doesn't have any data in it yet and data 882 * requires authentication.
774 * requires authentication.
775 */ 883 */
776 if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) || 884 if (sctp_packet_empty(packet) ||
777 (!packet->has_data && chunk->auth)) { 885 (!packet->has_data && chunk->auth)) {
778 /* We no longer do re-fragmentation. 886 /* We no longer do re-fragmentation.
779 * Just fragment at the IP layer, if we 887 * Just fragment at the IP layer, if we
780 * actually hit this condition 888 * actually hit this condition
781 */ 889 */
782 packet->ipfragok = 1; 890 packet->ipfragok = 1;
783 } else { 891 goto out;
784 retval = SCTP_XMIT_PMTU_FULL;
785 } 892 }
893
894 /* It is also okay to fragment if the chunk we are
895 * adding is a control chunk, but only if current packet
896 * is not a GSO one otherwise it causes fragmentation of
897 * a large frame. So in this case we allow the
898 * fragmentation by forcing it to be in a new packet.
899 */
900 if (!sctp_chunk_is_data(chunk) && packet->has_data)
901 retval = SCTP_XMIT_PMTU_FULL;
902
903 if (psize + chunk_len > packet->max_size)
904 /* Hit GSO/PMTU limit, gotta flush */
905 retval = SCTP_XMIT_PMTU_FULL;
906
907 if (!packet->transport->burst_limited &&
908 psize + chunk_len > (packet->transport->cwnd >> 1))
909 /* Do not allow a single GSO packet to use more
910 * than half of cwnd.
911 */
912 retval = SCTP_XMIT_PMTU_FULL;
913
914 if (packet->transport->burst_limited &&
915 psize + chunk_len > (packet->transport->burst_limited >> 1))
916 /* Do not allow a single GSO packet to use more
917 * than half of original cwnd.
918 */
919 retval = SCTP_XMIT_PMTU_FULL;
920 /* Otherwise it will fit in the GSO packet */
786 } 921 }
787 922
923out:
788 return retval; 924 return retval;
789} 925}
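
The rewritten sctp_packet_will_fit() carries the GSO sizing policy: a data chunk may push the packet past the path MTU (the stack or NIC segments it later), but never past packet->max_size, and a single GSO packet is also capped at half the cwnd (or half the pre-burst-mitigation cwnd saved in burst_limited) so one super-packet cannot consume the whole congestion window. Condensed into a single predicate, reusing the field names above; this is a sketch, not the kernel function:

    #include <net/sctp/structs.h>

    /* true if the chunk may join the current GSO packet; false means
     * SCTP_XMIT_PMTU_FULL, i.e. flush and start a new packet.
     */
    static bool gso_chunk_fits(const struct sctp_packet *packet,
                               const struct sctp_transport *tp,
                               size_t psize, u16 chunk_len)
    {
            u32 cwnd = tp->burst_limited ? : tp->cwnd;

            if (psize + chunk_len > packet->max_size)
                    return false;           /* device gso_max_size cap */

            return psize + chunk_len <= (cwnd >> 1);
    }
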
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index d3d50daa248b..3b56ae55aba3 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1479,7 +1479,8 @@ static __init int sctp_init(void)
1479 INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain); 1479 INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
1480 } 1480 }
1481 1481
1482 if (sctp_transport_hashtable_init()) 1482 status = sctp_transport_hashtable_init();
1483 if (status)
1483 goto err_thash_alloc; 1484 goto err_thash_alloc;
1484 1485
1485 pr_info("Hash tables configured (bind %d/%d)\n", sctp_port_hashsize, 1486 pr_info("Hash tables configured (bind %d/%d)\n", sctp_port_hashsize,
@@ -1516,6 +1517,9 @@ static __init int sctp_init(void)
1516 if (status) 1517 if (status)
1517 goto err_v6_add_protocol; 1518 goto err_v6_add_protocol;
1518 1519
1520 if (sctp_offload_init() < 0)
1521 pr_crit("%s: Cannot add SCTP protocol offload\n", __func__);
1522
1519out: 1523out:
1520 return status; 1524 return status;
1521err_v6_add_protocol: 1525err_v6_add_protocol:
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index aa3712259368..12d45193357c 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -806,8 +806,10 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
806 806
807 /* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */ 807 /* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
808 if (sctp_state(asoc, SHUTDOWN_RECEIVED) && 808 if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
809 sctp_sstate(sk, ESTABLISHED)) 809 sctp_sstate(sk, ESTABLISHED)) {
810 sk->sk_state = SCTP_SS_CLOSING;
810 sk->sk_shutdown |= RCV_SHUTDOWN; 811 sk->sk_shutdown |= RCV_SHUTDOWN;
812 }
811 } 813 }
812 814
813 if (sctp_state(asoc, COOKIE_WAIT)) { 815 if (sctp_state(asoc, COOKIE_WAIT)) {
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 67154b848aa9..cdabbd8219b1 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4003,6 +4003,8 @@ static int sctp_init_sock(struct sock *sk)
4003 return -ESOCKTNOSUPPORT; 4003 return -ESOCKTNOSUPPORT;
4004 } 4004 }
4005 4005
4006 sk->sk_gso_type = SKB_GSO_SCTP;
4007
4006 /* Initialize default send parameters. These parameters can be 4008 /* Initialize default send parameters. These parameters can be
4007 * modified with the SCTP_DEFAULT_SEND_PARAM socket option. 4009 * modified with the SCTP_DEFAULT_SEND_PARAM socket option.
4008 */ 4010 */
@@ -4193,6 +4195,7 @@ static void sctp_shutdown(struct sock *sk, int how)
4193 return; 4195 return;
4194 4196
4195 if (how & SEND_SHUTDOWN) { 4197 if (how & SEND_SHUTDOWN) {
4198 sk->sk_state = SCTP_SS_CLOSING;
4196 ep = sctp_sk(sk)->ep; 4199 ep = sctp_sk(sk)->ep;
4197 if (!list_empty(&ep->asocs)) { 4200 if (!list_empty(&ep->asocs)) {
4198 asoc = list_entry(ep->asocs.next, 4201 asoc = list_entry(ep->asocs.next,
@@ -7564,10 +7567,13 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
7564 /* If the association on the newsk is already closed before accept() 7567 /* If the association on the newsk is already closed before accept()
7565 * is called, set RCV_SHUTDOWN flag. 7568 * is called, set RCV_SHUTDOWN flag.
7566 */ 7569 */
7567 if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) 7570 if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) {
7571 newsk->sk_state = SCTP_SS_CLOSED;
7568 newsk->sk_shutdown |= RCV_SHUTDOWN; 7572 newsk->sk_shutdown |= RCV_SHUTDOWN;
7573 } else {
7574 newsk->sk_state = SCTP_SS_ESTABLISHED;
7575 }
7569 7576
7570 newsk->sk_state = SCTP_SS_ESTABLISHED;
7571 release_sock(newsk); 7577 release_sock(newsk);
7572} 7578}
7573 7579
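
The socket.c and sm_sideeffect.c hunks introduce SCTP_SS_CLOSING as a distinct sk_state for TCP-style sockets that have sent or received SHUTDOWN while still holding an association; previously such sockets simply stayed in ESTABLISHED, so state dumps could not tell a half-closed socket from a live one. A hypothetical check built on the new state machine:

    #include <net/sctp/sctp.h>

    /* A TCP-style SCTP socket can still carry data in both directions
     * only while fully open: established, with neither shutdown
     * direction set.
     */
    static bool sctp_sock_fully_open(const struct sock *sk)
    {
            return sk->sk_state == SCTP_SS_ESTABLISHED &&
                   !(sk->sk_shutdown & (RCV_SHUTDOWN | SEND_SHUTDOWN));
    }
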
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index 57e460be4692..31b9f9c52974 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_TIPC) := tipc.o
6 6
7tipc-y += addr.o bcast.o bearer.o \ 7tipc-y += addr.o bcast.o bearer.o \
8 core.o link.o discover.o msg.o \ 8 core.o link.o discover.o msg.o \
9 name_distr.o subscr.o name_table.o net.o \ 9 name_distr.o subscr.o monitor.o name_table.o net.o \
10 netlink.o netlink_compat.o node.o socket.o eth_media.o \ 10 netlink.o netlink_compat.o node.o socket.o eth_media.o \
11 server.o socket.o 11 server.o socket.o
12 12
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index 93f7c983be33..64f4004a6fac 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -73,4 +73,5 @@ int tipc_addr_node_valid(u32 addr);
73int tipc_in_scope(u32 domain, u32 addr); 73int tipc_in_scope(u32 domain, u32 addr);
74int tipc_addr_scope(u32 domain); 74int tipc_addr_scope(u32 domain);
75char *tipc_addr_string_fill(char *string, u32 addr); 75char *tipc_addr_string_fill(char *string, u32 addr);
76
76#endif 77#endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index bf8f05c3eb82..8584cc48654c 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/bearer.c: TIPC bearer code 2 * net/tipc/bearer.c: TIPC bearer code
3 * 3 *
4 * Copyright (c) 1996-2006, 2013-2014, Ericsson AB 4 * Copyright (c) 1996-2006, 2013-2016, Ericsson AB
5 * Copyright (c) 2004-2006, 2010-2013, Wind River Systems 5 * Copyright (c) 2004-2006, 2010-2013, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -39,6 +39,7 @@
39#include "bearer.h" 39#include "bearer.h"
40#include "link.h" 40#include "link.h"
41#include "discover.h" 41#include "discover.h"
42#include "monitor.h"
42#include "bcast.h" 43#include "bcast.h"
43#include "netlink.h" 44#include "netlink.h"
44 45
@@ -313,6 +314,10 @@ restart:
313 rcu_assign_pointer(tn->bearer_list[bearer_id], b); 314 rcu_assign_pointer(tn->bearer_list[bearer_id], b);
314 if (skb) 315 if (skb)
315 tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr); 316 tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);
317
318 if (tipc_mon_create(net, bearer_id))
319 return -ENOMEM;
320
316 pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n", 321 pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
317 name, 322 name,
318 tipc_addr_string_fill(addr_string, disc_domain), priority); 323 tipc_addr_string_fill(addr_string, disc_domain), priority);
@@ -348,6 +353,7 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b)
348 tipc_disc_delete(b->link_req); 353 tipc_disc_delete(b->link_req);
349 RCU_INIT_POINTER(tn->bearer_list[bearer_id], NULL); 354 RCU_INIT_POINTER(tn->bearer_list[bearer_id], NULL);
350 kfree_rcu(b, rcu); 355 kfree_rcu(b, rcu);
356 tipc_mon_delete(net, bearer_id);
351} 357}
352 358
353int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, 359int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
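
bearer.c ties the new monitor's lifetime to the bearer's: tipc_mon_create() when a bearer is enabled, tipc_mon_delete() when it is disabled. The monitor internals live in the new monitor.c, which is not part of this hunk, so the sketch below only illustrates the assumed one-slot-per-bearer pattern with invented types:

    #include <linux/slab.h>

    #define TOY_MAX_BEARERS 3

    struct toy_monitor {
            int dummy;      /* stand-in for struct tipc_monitor state */
    };

    static struct toy_monitor *toy_monitors[TOY_MAX_BEARERS];

    static int toy_mon_create(int bearer_id)
    {
            if (toy_monitors[bearer_id])
                    return 0;       /* already set up for this bearer */

            toy_monitors[bearer_id] = kzalloc(sizeof(struct toy_monitor),
                                              GFP_ATOMIC);
            return toy_monitors[bearer_id] ? 0 : -ENOMEM;
    }

    static void toy_mon_delete(int bearer_id)
    {
            kfree(toy_monitors[bearer_id]);
            toy_monitors[bearer_id] = NULL;
    }
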
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index f686e41b5abb..0d337c7b6fad 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/bearer.h: Include file for TIPC bearer code 2 * net/tipc/bearer.h: Include file for TIPC bearer code
3 * 3 *
4 * Copyright (c) 1996-2006, 2013-2014, Ericsson AB 4 * Copyright (c) 1996-2006, 2013-2016, Ericsson AB
5 * Copyright (c) 2005, 2010-2011, Wind River Systems 5 * Copyright (c) 2005, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
diff --git a/net/tipc/core.c b/net/tipc/core.c
index fe1b062c4f18..236b043a4156 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -57,6 +57,7 @@ static int __net_init tipc_init_net(struct net *net)
57 57
58 tn->net_id = 4711; 58 tn->net_id = 4711;
59 tn->own_addr = 0; 59 tn->own_addr = 0;
60 tn->mon_threshold = TIPC_DEF_MON_THRESHOLD;
60 get_random_bytes(&tn->random, sizeof(int)); 61 get_random_bytes(&tn->random, sizeof(int));
61 INIT_LIST_HEAD(&tn->node_list); 62 INIT_LIST_HEAD(&tn->node_list);
62 spin_lock_init(&tn->node_list_lock); 63 spin_lock_init(&tn->node_list_lock);
diff --git a/net/tipc/core.h b/net/tipc/core.h
index eff58dc53aa1..a1845fb27d80 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -66,11 +66,13 @@ struct tipc_bc_base;
66struct tipc_link; 66struct tipc_link;
67struct tipc_name_table; 67struct tipc_name_table;
68struct tipc_server; 68struct tipc_server;
69struct tipc_monitor;
69 70
70#define TIPC_MOD_VER "2.0.0" 71#define TIPC_MOD_VER "2.0.0"
71 72
72#define NODE_HTABLE_SIZE 512 73#define NODE_HTABLE_SIZE 512
73#define MAX_BEARERS 3 74#define MAX_BEARERS 3
75#define TIPC_DEF_MON_THRESHOLD 32
74 76
75extern int tipc_net_id __read_mostly; 77extern int tipc_net_id __read_mostly;
76extern int sysctl_tipc_rmem[3] __read_mostly; 78extern int sysctl_tipc_rmem[3] __read_mostly;
@@ -88,6 +90,10 @@ struct tipc_net {
88 u32 num_nodes; 90 u32 num_nodes;
89 u32 num_links; 91 u32 num_links;
90 92
93 /* Neighbor monitoring list */
94 struct tipc_monitor *monitors[MAX_BEARERS];
95 int mon_threshold;
96
91 /* Bearer list */ 97 /* Bearer list */
92 struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1]; 98 struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
93 99
@@ -126,6 +132,11 @@ static inline struct list_head *tipc_nodes(struct net *net)
126 return &tipc_net(net)->node_list; 132 return &tipc_net(net)->node_list;
127} 133}
128 134
135static inline unsigned int tipc_hashfn(u32 addr)
136{
137 return addr & (NODE_HTABLE_SIZE - 1);
138}
139
129static inline u16 mod(u16 x) 140static inline u16 mod(u16 x)
130{ 141{
131 return x & 0xffffu; 142 return x & 0xffffu;
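
core.h reserves one struct tipc_monitor slot per bearer and sets a default threshold of 32 nodes, the cluster size at which full-mesh link probing is expected to hand over to the monitor's distributed supervision. The relocated tipc_hashfn() is the usual power-of-two mask: with NODE_HTABLE_SIZE at 512 it keeps the low nine bits of the node address. A toy user-space demonstration of the bucket math, assuming the same constants:

    #include <stdio.h>

    #define NODE_HTABLE_SIZE 512    /* must remain a power of two */

    static unsigned int tipc_hashfn(unsigned int addr)
    {
            return addr & (NODE_HTABLE_SIZE - 1);
    }

    int main(void)
    {
            /* TIPC address 1.1.10 <zone.cluster.node> is 0x0100100a */
            printf("bucket=%u\n", tipc_hashfn(0x0100100au));   /* 10 */
            return 0;
    }
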
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index ad9d477cc242..6b109a808d4c 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -135,9 +135,12 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
135 u16 caps = msg_node_capabilities(hdr); 135 u16 caps = msg_node_capabilities(hdr);
136 bool respond = false; 136 bool respond = false;
137 bool dupl_addr = false; 137 bool dupl_addr = false;
138 int err;
138 139
139 bearer->media->msg2addr(bearer, &maddr, msg_media_addr(hdr)); 140 err = bearer->media->msg2addr(bearer, &maddr, msg_media_addr(hdr));
140 kfree_skb(skb); 141 kfree_skb(skb);
142 if (err)
143 return;
141 144
142 /* Ensure message from node is valid and communication is permitted */ 145 /* Ensure message from node is valid and communication is permitted */
143 if (net_id != tn->net_id) 146 if (net_id != tn->net_id)
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 67b6ab9f4c8d..c1df33f878b2 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -42,6 +42,7 @@
42#include "name_distr.h" 42#include "name_distr.h"
43#include "discover.h" 43#include "discover.h"
44#include "netlink.h" 44#include "netlink.h"
45#include "monitor.h"
45 46
46#include <linux/pkt_sched.h> 47#include <linux/pkt_sched.h>
47 48
@@ -87,7 +88,6 @@ struct tipc_stats {
87 * @peer_bearer_id: bearer id used by link's peer endpoint 88 * @peer_bearer_id: bearer id used by link's peer endpoint
88 * @bearer_id: local bearer id used by link 89 * @bearer_id: local bearer id used by link
89 * @tolerance: minimum link continuity loss needed to reset link [in ms] 90 * @tolerance: minimum link continuity loss needed to reset link [in ms]
90 * @keepalive_intv: link keepalive timer interval
91 * @abort_limit: # of unacknowledged continuity probes needed to reset link 91 * @abort_limit: # of unacknowledged continuity probes needed to reset link
92 * @state: current state of link FSM 92 * @state: current state of link FSM
93 * @peer_caps: bitmap describing capabilities of peer node 93 * @peer_caps: bitmap describing capabilities of peer node
@@ -96,6 +96,7 @@ struct tipc_stats {
96 * @pmsg: convenience pointer to "proto_msg" field 96 * @pmsg: convenience pointer to "proto_msg" field
97 * @priority: current link priority 97 * @priority: current link priority
98 * @net_plane: current link network plane ('A' through 'H') 98 * @net_plane: current link network plane ('A' through 'H')
99 * @mon_state: cookie with information needed by link monitor
99 * @backlog_limit: backlog queue congestion thresholds (indexed by importance) 100 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
100 * @exp_msg_count: # of tunnelled messages expected during link changeover 101 * @exp_msg_count: # of tunnelled messages expected during link changeover
101 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset 102 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
@@ -131,7 +132,6 @@ struct tipc_link {
131 u32 peer_bearer_id; 132 u32 peer_bearer_id;
132 u32 bearer_id; 133 u32 bearer_id;
133 u32 tolerance; 134 u32 tolerance;
134 unsigned long keepalive_intv;
135 u32 abort_limit; 135 u32 abort_limit;
136 u32 state; 136 u32 state;
137 u16 peer_caps; 137 u16 peer_caps;
@@ -140,6 +140,7 @@ struct tipc_link {
140 char if_name[TIPC_MAX_IF_NAME]; 140 char if_name[TIPC_MAX_IF_NAME];
141 u32 priority; 141 u32 priority;
142 char net_plane; 142 char net_plane;
143 struct tipc_mon_state mon_state;
143 u16 rst_cnt; 144 u16 rst_cnt;
144 145
145 /* Failover/synch */ 146 /* Failover/synch */
@@ -711,18 +712,25 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
711 bool setup = false; 712 bool setup = false;
712 u16 bc_snt = l->bc_sndlink->snd_nxt - 1; 713 u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
713 u16 bc_acked = l->bc_rcvlink->acked; 714 u16 bc_acked = l->bc_rcvlink->acked;
714 715 struct tipc_mon_state *mstate = &l->mon_state;
715 link_profile_stats(l);
716 716
717 switch (l->state) { 717 switch (l->state) {
718 case LINK_ESTABLISHED: 718 case LINK_ESTABLISHED:
719 case LINK_SYNCHING: 719 case LINK_SYNCHING:
720 if (l->silent_intv_cnt > l->abort_limit)
721 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
722 mtyp = STATE_MSG; 720 mtyp = STATE_MSG;
721 link_profile_stats(l);
722 tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
723 if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
724 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
723 state = bc_acked != bc_snt; 725 state = bc_acked != bc_snt;
724 probe = l->silent_intv_cnt; 726 state |= l->bc_rcvlink->rcv_unacked;
725 l->silent_intv_cnt++; 727 state |= l->rcv_unacked;
728 state |= !skb_queue_empty(&l->transmq);
729 state |= !skb_queue_empty(&l->deferdq);
730 probe = mstate->probing;
731 probe |= l->silent_intv_cnt;
732 if (probe || mstate->monitoring)
733 l->silent_intv_cnt++;
726 break; 734 break;
727 case LINK_RESET: 735 case LINK_RESET:
728 setup = l->rst_cnt++ <= 4; 736 setup = l->rst_cnt++ <= 4;
@@ -833,6 +841,7 @@ void tipc_link_reset(struct tipc_link *l)
833 l->stats.recv_info = 0; 841 l->stats.recv_info = 0;
834 l->stale_count = 0; 842 l->stale_count = 0;
835 l->bc_peer_is_up = false; 843 l->bc_peer_is_up = false;
844 memset(&l->mon_state, 0, sizeof(l->mon_state));
836 tipc_link_reset_stats(l); 845 tipc_link_reset_stats(l);
837} 846}
838 847
@@ -1241,6 +1250,9 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1241 struct tipc_msg *hdr; 1250 struct tipc_msg *hdr;
1242 struct sk_buff_head *dfq = &l->deferdq; 1251 struct sk_buff_head *dfq = &l->deferdq;
1243 bool node_up = link_is_up(l->bc_rcvlink); 1252 bool node_up = link_is_up(l->bc_rcvlink);
1253 struct tipc_mon_state *mstate = &l->mon_state;
1254 int dlen = 0;
1255 void *data;
1244 1256
1245 /* Don't send protocol message during reset or link failover */ 1257 /* Don't send protocol message during reset or link failover */
1246 if (tipc_link_is_blocked(l)) 1258 if (tipc_link_is_blocked(l))
@@ -1253,12 +1265,13 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1253 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt; 1265 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1254 1266
1255 skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE, 1267 skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
1256 TIPC_MAX_IF_NAME, l->addr, 1268 tipc_max_domain_size, l->addr,
1257 tipc_own_addr(l->net), 0, 0, 0); 1269 tipc_own_addr(l->net), 0, 0, 0);
1258 if (!skb) 1270 if (!skb)
1259 return; 1271 return;
1260 1272
1261 hdr = buf_msg(skb); 1273 hdr = buf_msg(skb);
1274 data = msg_data(hdr);
1262 msg_set_session(hdr, l->session); 1275 msg_set_session(hdr, l->session);
1263 msg_set_bearer_id(hdr, l->bearer_id); 1276 msg_set_bearer_id(hdr, l->bearer_id);
1264 msg_set_net_plane(hdr, l->net_plane); 1277 msg_set_net_plane(hdr, l->net_plane);
@@ -1274,14 +1287,18 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1274 1287
1275 if (mtyp == STATE_MSG) { 1288 if (mtyp == STATE_MSG) {
1276 msg_set_seq_gap(hdr, rcvgap); 1289 msg_set_seq_gap(hdr, rcvgap);
1277 msg_set_size(hdr, INT_H_SIZE);
1278 msg_set_probe(hdr, probe); 1290 msg_set_probe(hdr, probe);
1291 tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
1292 msg_set_size(hdr, INT_H_SIZE + dlen);
1293 skb_trim(skb, INT_H_SIZE + dlen);
1279 l->stats.sent_states++; 1294 l->stats.sent_states++;
1280 l->rcv_unacked = 0; 1295 l->rcv_unacked = 0;
1281 } else { 1296 } else {
1282 /* RESET_MSG or ACTIVATE_MSG */ 1297 /* RESET_MSG or ACTIVATE_MSG */
1283 msg_set_max_pkt(hdr, l->advertised_mtu); 1298 msg_set_max_pkt(hdr, l->advertised_mtu);
1284 strcpy(msg_data(hdr), l->if_name); 1299 strcpy(data, l->if_name);
1300 msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1301 skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
1285 } 1302 }
1286 if (probe) 1303 if (probe)
1287 l->stats.sent_probes++; 1304 l->stats.sent_probes++;
@@ -1374,7 +1391,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1374 u16 peers_tol = msg_link_tolerance(hdr); 1391 u16 peers_tol = msg_link_tolerance(hdr);
1375 u16 peers_prio = msg_linkprio(hdr); 1392 u16 peers_prio = msg_linkprio(hdr);
1376 u16 rcv_nxt = l->rcv_nxt; 1393 u16 rcv_nxt = l->rcv_nxt;
1394 u16 dlen = msg_data_sz(hdr);
1377 int mtyp = msg_type(hdr); 1395 int mtyp = msg_type(hdr);
1396 void *data;
1378 char *if_name; 1397 char *if_name;
1379 int rc = 0; 1398 int rc = 0;
1380 1399
@@ -1384,6 +1403,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1384 if (tipc_own_addr(l->net) > msg_prevnode(hdr)) 1403 if (tipc_own_addr(l->net) > msg_prevnode(hdr))
1385 l->net_plane = msg_net_plane(hdr); 1404 l->net_plane = msg_net_plane(hdr);
1386 1405
1406 skb_linearize(skb);
1407 hdr = buf_msg(skb);
1408 data = msg_data(hdr);
1409
1387 switch (mtyp) { 1410 switch (mtyp) {
1388 case RESET_MSG: 1411 case RESET_MSG:
1389 1412
@@ -1394,8 +1417,6 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1394 /* fall thru' */ 1417 /* fall thru' */
1395 1418
1396 case ACTIVATE_MSG: 1419 case ACTIVATE_MSG:
1397 skb_linearize(skb);
1398 hdr = buf_msg(skb);
1399 1420
1400 /* Complete own link name with peer's interface name */ 1421 /* Complete own link name with peer's interface name */
1401 if_name = strrchr(l->name, ':') + 1; 1422 if_name = strrchr(l->name, ':') + 1;
@@ -1403,7 +1424,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1403 break; 1424 break;
1404 if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME) 1425 if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1405 break; 1426 break;
1406 strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME); 1427 strncpy(if_name, data, TIPC_MAX_IF_NAME);
1407 1428
1408 /* Update own tolerance if peer indicates a non-zero value */ 1429 /* Update own tolerance if peer indicates a non-zero value */
1409 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) 1430 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
@@ -1451,6 +1472,8 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1451 rc = TIPC_LINK_UP_EVT; 1472 rc = TIPC_LINK_UP_EVT;
1452 break; 1473 break;
1453 } 1474 }
1475 tipc_mon_rcv(l->net, data, dlen, l->addr,
1476 &l->mon_state, l->bearer_id);
1454 1477
1455 /* Send NACK if peer has sent pkts we haven't received yet */ 1478 /* Send NACK if peer has sent pkts we haven't received yet */
1456 if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l)) 1479 if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
new file mode 100644
index 000000000000..0d489e81fcca
--- /dev/null
+++ b/net/tipc/monitor.c
@@ -0,0 +1,651 @@
1/*
2 * net/tipc/monitor.c
3 *
4 * Copyright (c) 2016, Ericsson AB
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the names of the copyright holders nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * Alternatively, this software may be distributed under the terms of the
20 * GNU General Public License ("GPL") version 2 as published by the Free
21 * Software Foundation.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
34 */
35
36#include "core.h"
37#include "addr.h"
38#include "monitor.h"
39
40#define MAX_MON_DOMAIN 64
41#define MON_TIMEOUT 120000
42#define MAX_PEER_DOWN_EVENTS 4
43
44/* struct tipc_mon_domain: domain record to be transferred between peers
45 * @len: actual size of domain record
46 * @gen: current generation of sender's domain
47 * @ack_gen: most recent generation of self's domain acked by peer
48 * @member_cnt: number of domain member nodes described in this record
49 * @up_map: bit map indicating which of the members the sender considers up
50 * @members: identity of the domain members
51 */
52struct tipc_mon_domain {
53 u16 len;
54 u16 gen;
55 u16 ack_gen;
56 u16 member_cnt;
57 u64 up_map;
58 u32 members[MAX_MON_DOMAIN];
59};
60
61/* struct tipc_peer: state of a peer node and its domain
62 * @addr: tipc node identity of peer
63 * @domain: most recent domain record from peer
64 * @hash: position in hashed lookup list
65 * @list: position in linked list, in circular ascending order by 'addr'
66 * @applied: number of reported domain members applied on this monitor list
67 * @is_up: peer is up as seen from this node
68 * @is_head: peer is assigned domain head as seen from this node
69 * @is_local: peer is in local domain and should be continuously monitored
70 * @down_cnt: number of other peers which have reported this peer lost
71 *            since it was last confirmed up
72 */
73struct tipc_peer {
74 u32 addr;
75 struct tipc_mon_domain *domain;
76 struct hlist_node hash;
77 struct list_head list;
78 u8 applied;
79 u8 down_cnt;
80 bool is_up;
81 bool is_head;
82 bool is_local;
83};
84
85struct tipc_monitor {
86 struct hlist_head peers[NODE_HTABLE_SIZE];
87 int peer_cnt;
88 struct tipc_peer *self;
89 rwlock_t lock;
90 struct tipc_mon_domain cache;
91 u16 list_gen;
92 u16 dom_gen;
93 struct net *net;
94 struct timer_list timer;
95 unsigned long timer_intv;
96};
97
98static struct tipc_monitor *tipc_monitor(struct net *net, int bearer_id)
99{
100 return tipc_net(net)->monitors[bearer_id];
101}
102
103const int tipc_max_domain_size = sizeof(struct tipc_mon_domain);
104
105/* dom_rec_len(): actual length of domain record for transport
106 */
107static int dom_rec_len(struct tipc_mon_domain *dom, u16 mcnt)
108{
109 return ((void *)&dom->members - (void *)dom) + (mcnt * sizeof(u32));
110}
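
For orientation: with four u16 fields and one u64 ahead of the member array, the record header is 16 bytes under natural alignment, so dom_rec_len(dom, m) works out to 16 + 4 * m, and a full 64-member record is 272 bytes, the value exported above as tipc_max_domain_size. A standalone userspace sketch (illustrative, not part of the patch) checking that arithmetic:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Mirror of the wire record layout, assuming natural alignment */
struct mon_domain {
	uint16_t len;
	uint16_t gen;
	uint16_t ack_gen;
	uint16_t member_cnt;
	uint64_t up_map;
	uint32_t members[64];		/* MAX_MON_DOMAIN */
};

int main(void)
{
	/* header ends where the member array begins */
	assert(offsetof(struct mon_domain, members) == 16);
	/* full record: 16-byte header + 64 * 4 member ids = 272 bytes */
	assert(sizeof(struct mon_domain) == 272);
	return 0;
}
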
111
112/* dom_size() : calculate size of own domain based on number of peers
113 */
114static int dom_size(int peers)
115{
116 int i = 0;
117
118 while ((i * i) < peers)
119 i++;
120 return i < MAX_MON_DOMAIN ? i : MAX_MON_DOMAIN;
121}
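
dom_size() is an integer ceiling square root, capped at MAX_MON_DOMAIN: each node actively supervises only about sqrt(N) of its successors on the ring, which keeps per-node monitoring work sub-linear in cluster size. A standalone userspace sketch (illustrative only) of how the domain grows:

#include <stdio.h>

#define MAX_MON_DOMAIN 64

/* Smallest i with i * i >= peers, capped at MAX_MON_DOMAIN */
static int dom_size(int peers)
{
	int i = 0;

	while ((i * i) < peers)
		i++;
	return i < MAX_MON_DOMAIN ? i : MAX_MON_DOMAIN;
}

int main(void)
{
	int peers[] = { 1, 16, 100, 400, 1000 };
	int i;

	for (i = 0; i < 5; i++)
		printf("peers=%4d -> domain size=%2d\n",
		       peers[i], dom_size(peers[i]));
	return 0;
}
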
122
123static void map_set(u64 *up_map, int i, unsigned int v)
124{
125 *up_map &= ~(1ULL << i);
126 *up_map |= ((u64)v << i);
127}
128
129static int map_get(u64 up_map, int i)
130{
131	return (up_map & (1ULL << i)) >> i;
132}
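
map_set() and map_get() address individual bits of the 64-bit up_map. The 1ULL constants are load-bearing: a plain (1 << i) is an int shift and is undefined for i >= 32, while a domain can hold up to 64 members. A standalone sketch (illustrative only) exercising the full range:

#include <assert.h>
#include <stdint.h>

static void map_set(uint64_t *up_map, int i, unsigned int v)
{
	*up_map &= ~(1ULL << i);
	*up_map |= ((uint64_t)v << i);
}

static int map_get(uint64_t up_map, int i)
{
	return (up_map & (1ULL << i)) >> i;
}

int main(void)
{
	uint64_t map = 0;

	map_set(&map, 0, 1);
	map_set(&map, 63, 1);		/* needs the 64-bit shift */
	assert(map_get(map, 0) == 1 && map_get(map, 63) == 1);
	map_set(&map, 63, 0);
	assert(map_get(map, 63) == 0);
	return 0;
}
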
133
134static struct tipc_peer *peer_prev(struct tipc_peer *peer)
135{
136 return list_last_entry(&peer->list, struct tipc_peer, list);
137}
138
139static struct tipc_peer *peer_nxt(struct tipc_peer *peer)
140{
141 return list_first_entry(&peer->list, struct tipc_peer, list);
142}
143
144static struct tipc_peer *peer_head(struct tipc_peer *peer)
145{
146 while (!peer->is_head)
147 peer = peer_prev(peer);
148 return peer;
149}
150
151static struct tipc_peer *get_peer(struct tipc_monitor *mon, u32 addr)
152{
153 struct tipc_peer *peer;
154 unsigned int thash = tipc_hashfn(addr);
155
156 hlist_for_each_entry(peer, &mon->peers[thash], hash) {
157 if (peer->addr == addr)
158 return peer;
159 }
160 return NULL;
161}
162
163static struct tipc_peer *get_self(struct net *net, int bearer_id)
164{
165 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
166
167 return mon->self;
168}
169
170static inline bool tipc_mon_is_active(struct net *net, struct tipc_monitor *mon)
171{
172 struct tipc_net *tn = tipc_net(net);
173
174 return mon->peer_cnt > tn->mon_threshold;
175}
176
177/* mon_identify_lost_members() : identify and mark potentially lost members
178 */
179static void mon_identify_lost_members(struct tipc_peer *peer,
180 struct tipc_mon_domain *dom_bef,
181 int applied_bef)
182{
183 struct tipc_peer *member = peer;
184 struct tipc_mon_domain *dom_aft = peer->domain;
185 int applied_aft = peer->applied;
186 int i;
187
188 for (i = 0; i < applied_bef; i++) {
189 member = peer_nxt(member);
190
191 /* Do nothing if self or peer already see member as down */
192 if (!member->is_up || !map_get(dom_bef->up_map, i))
193 continue;
194
195 /* Loss of local node must be detected by active probing */
196 if (member->is_local)
197 continue;
198
199 /* Start probing if member was removed from applied domain */
200 if (!applied_aft || (applied_aft < i)) {
201 member->down_cnt = 1;
202 continue;
203 }
204
205 /* Member loss is confirmed if it is still in applied domain */
206 if (!map_get(dom_aft->up_map, i))
207 member->down_cnt++;
208 }
209}
210
211/* mon_apply_domain() : match a peer's domain record against monitor list
212 */
213static void mon_apply_domain(struct tipc_monitor *mon,
214 struct tipc_peer *peer)
215{
216 struct tipc_mon_domain *dom = peer->domain;
217 struct tipc_peer *member;
218 u32 addr;
219 int i;
220
221 if (!dom || !peer->is_up)
222 return;
223
224 /* Scan across domain members and match against monitor list */
225 peer->applied = 0;
226 member = peer_nxt(peer);
227 for (i = 0; i < dom->member_cnt; i++) {
228 addr = dom->members[i];
229 if (addr != member->addr)
230 return;
231 peer->applied++;
232 member = peer_nxt(member);
233 }
234}
235
236/* mon_update_local_domain() : update after peer addition/removal/up/down
237 */
238static void mon_update_local_domain(struct tipc_monitor *mon)
239{
240 struct tipc_peer *self = mon->self;
241 struct tipc_mon_domain *cache = &mon->cache;
242 struct tipc_mon_domain *dom = self->domain;
243 struct tipc_peer *peer = self;
244 u64 prev_up_map = dom->up_map;
245 u16 member_cnt, i;
246 bool diff;
247
248 /* Update local domain size based on current size of cluster */
249 member_cnt = dom_size(mon->peer_cnt) - 1;
250 self->applied = member_cnt;
251
252 /* Update native and cached outgoing local domain records */
253 dom->len = dom_rec_len(dom, member_cnt);
254 diff = dom->member_cnt != member_cnt;
255 dom->member_cnt = member_cnt;
256 for (i = 0; i < member_cnt; i++) {
257 peer = peer_nxt(peer);
258 diff |= dom->members[i] != peer->addr;
259 dom->members[i] = peer->addr;
260 map_set(&dom->up_map, i, peer->is_up);
261 cache->members[i] = htonl(peer->addr);
262 }
263 diff |= dom->up_map != prev_up_map;
264 if (!diff)
265 return;
266 dom->gen = ++mon->dom_gen;
267 cache->len = htons(dom->len);
268 cache->gen = htons(dom->gen);
269 cache->member_cnt = htons(member_cnt);
270 cache->up_map = cpu_to_be64(dom->up_map);
271 mon_apply_domain(mon, self);
272}
273
274/* mon_update_neighbors() : update preceding neighbors of added/removed peer
275 */
276static void mon_update_neighbors(struct tipc_monitor *mon,
277 struct tipc_peer *peer)
278{
279 int dz, i;
280
281 dz = dom_size(mon->peer_cnt);
282 for (i = 0; i < dz; i++) {
283 mon_apply_domain(mon, peer);
284 peer = peer_prev(peer);
285 }
286}
287
288/* mon_assign_roles() : reassign peer roles after a network change
289 * The monitor list is consistent at this stage; i.e., each peer is monitoring
290 * a set of domain members as matched between domain record and the monitor list
291 */
292static void mon_assign_roles(struct tipc_monitor *mon, struct tipc_peer *head)
293{
294 struct tipc_peer *peer = peer_nxt(head);
295 struct tipc_peer *self = mon->self;
296 int i = 0;
297
298 for (; peer != self; peer = peer_nxt(peer)) {
299 peer->is_local = false;
300
301 /* Update domain member */
302 if (i++ < head->applied) {
303 peer->is_head = false;
304 if (head == self)
305 peer->is_local = true;
306 continue;
307 }
308 /* Assign next domain head */
309 if (!peer->is_up)
310 continue;
311 if (peer->is_head)
312 break;
313 head = peer;
314 head->is_head = true;
315 i = 0;
316 }
317 mon->list_gen++;
318}
319
320void tipc_mon_remove_peer(struct net *net, u32 addr, int bearer_id)
321{
322 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
323 struct tipc_peer *self = get_self(net, bearer_id);
324 struct tipc_peer *peer, *prev, *head;
325
326 write_lock_bh(&mon->lock);
327 peer = get_peer(mon, addr);
328 if (!peer)
329 goto exit;
330 prev = peer_prev(peer);
331 list_del(&peer->list);
332 hlist_del(&peer->hash);
333 kfree(peer->domain);
334 kfree(peer);
335 mon->peer_cnt--;
336 head = peer_head(prev);
337 if (head == self)
338 mon_update_local_domain(mon);
339 mon_update_neighbors(mon, prev);
340
341 /* Revert to full-mesh monitoring if we reach threshold */
342 if (!tipc_mon_is_active(net, mon)) {
343 list_for_each_entry(peer, &self->list, list) {
344 kfree(peer->domain);
345 peer->domain = NULL;
346 peer->applied = 0;
347 }
348 }
349 mon_assign_roles(mon, head);
350exit:
351 write_unlock_bh(&mon->lock);
352}
353
354static bool tipc_mon_add_peer(struct tipc_monitor *mon, u32 addr,
355 struct tipc_peer **peer)
356{
357 struct tipc_peer *self = mon->self;
358 struct tipc_peer *cur, *prev, *p;
359
360 p = kzalloc(sizeof(*p), GFP_ATOMIC);
361 *peer = p;
362 if (!p)
363 return false;
364 p->addr = addr;
365
366 /* Add new peer to lookup list */
367 INIT_LIST_HEAD(&p->list);
368 hlist_add_head(&p->hash, &mon->peers[tipc_hashfn(addr)]);
369
370 /* Sort new peer into iterator list, in ascending circular order */
371 prev = self;
372 list_for_each_entry(cur, &self->list, list) {
373 if ((addr > prev->addr) && (addr < cur->addr))
374 break;
375 if (((addr < cur->addr) || (addr > prev->addr)) &&
376 (prev->addr > cur->addr))
377 break;
378 prev = cur;
379 }
380 list_add_tail(&p->list, &cur->list);
381 mon->peer_cnt++;
382 mon_update_neighbors(mon, p);
383 return true;
384}
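
The loop above keeps the iterator list in circular ascending order by address: a new peer fits either strictly between two neighbours, or at the single wrap-around point, recognisable by prev->addr > cur->addr, when it becomes the new minimum or maximum. A standalone sketch (illustrative only) of that predicate:

#include <stdio.h>

/* Mirrors the two break conditions in tipc_mon_add_peer() */
static int fits_between(unsigned int prev, unsigned int cur,
			unsigned int addr)
{
	if (addr > prev && addr < cur)
		return 1;			/* inside an ascending run */
	if ((addr < cur || addr > prev) && prev > cur)
		return 1;			/* at the wrap-around point */
	return 0;
}

int main(void)
{
	/* circular list 10 -> 20 -> 40 -> back to 10 */
	printf("%d\n", fits_between(20, 40, 30));	/* 1: between 20 and 40 */
	printf("%d\n", fits_between(40, 10, 50));	/* 1: new maximum */
	printf("%d\n", fits_between(40, 10, 5));	/* 1: new minimum */
	printf("%d\n", fits_between(10, 20, 30));	/* 0: belongs later */
	return 0;
}
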
385
386void tipc_mon_peer_up(struct net *net, u32 addr, int bearer_id)
387{
388 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
389 struct tipc_peer *self = get_self(net, bearer_id);
390 struct tipc_peer *peer, *head;
391
392 write_lock_bh(&mon->lock);
393 peer = get_peer(mon, addr);
394 if (!peer && !tipc_mon_add_peer(mon, addr, &peer))
395 goto exit;
396 peer->is_up = true;
397 head = peer_head(peer);
398 if (head == self)
399 mon_update_local_domain(mon);
400 mon_assign_roles(mon, head);
401exit:
402 write_unlock_bh(&mon->lock);
403}
404
405void tipc_mon_peer_down(struct net *net, u32 addr, int bearer_id)
406{
407 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
408 struct tipc_peer *self = get_self(net, bearer_id);
409 struct tipc_peer *peer, *head;
410 struct tipc_mon_domain *dom;
411 int applied;
412
413 write_lock_bh(&mon->lock);
414 peer = get_peer(mon, addr);
415 if (!peer) {
416 pr_warn("Mon: unknown link %x/%u DOWN\n", addr, bearer_id);
417 goto exit;
418 }
419 applied = peer->applied;
420 peer->applied = 0;
421 dom = peer->domain;
422 peer->domain = NULL;
423 if (peer->is_head)
424 mon_identify_lost_members(peer, dom, applied);
425 kfree(dom);
426 peer->is_up = false;
427 peer->is_head = false;
428 peer->is_local = false;
429 peer->down_cnt = 0;
430 head = peer_head(peer);
431 if (head == self)
432 mon_update_local_domain(mon);
433 mon_assign_roles(mon, head);
434exit:
435 write_unlock_bh(&mon->lock);
436}
437
438/* tipc_mon_rcv - process monitor domain event message
439 */
440void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
441 struct tipc_mon_state *state, int bearer_id)
442{
443 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
444 struct tipc_mon_domain *arrv_dom = data;
445 struct tipc_mon_domain dom_bef;
446 struct tipc_mon_domain *dom;
447 struct tipc_peer *peer;
448 u16 new_member_cnt = ntohs(arrv_dom->member_cnt);
449 int new_dlen = dom_rec_len(arrv_dom, new_member_cnt);
450 u16 new_gen = ntohs(arrv_dom->gen);
451 u16 acked_gen = ntohs(arrv_dom->ack_gen);
452 bool probing = state->probing;
453 int i, applied_bef;
454
455 state->probing = false;
456 if (!dlen)
457 return;
458
459 /* Sanity check received domain record */
460 if ((dlen < new_dlen) || ntohs(arrv_dom->len) != new_dlen) {
461 pr_warn_ratelimited("Received illegal domain record\n");
462 return;
463 }
464
465 /* Synch generation numbers with peer if link just came up */
466 if (!state->synched) {
467 state->peer_gen = new_gen - 1;
468 state->acked_gen = acked_gen;
469 state->synched = true;
470 }
471
472 if (more(acked_gen, state->acked_gen))
473 state->acked_gen = acked_gen;
474
475 /* Drop duplicate unless we are waiting for a probe response */
476 if (!more(new_gen, state->peer_gen) && !probing)
477 return;
478
479 write_lock_bh(&mon->lock);
480 peer = get_peer(mon, addr);
481 if (!peer || !peer->is_up)
482 goto exit;
483
484 /* Peer is confirmed, stop any ongoing probing */
485 peer->down_cnt = 0;
486
487 /* Task is done for duplicate record */
488 if (!more(new_gen, state->peer_gen))
489 goto exit;
490
491 state->peer_gen = new_gen;
492
493 /* Cache current domain record for later use */
494 dom_bef.member_cnt = 0;
495 dom = peer->domain;
496 if (dom)
497 memcpy(&dom_bef, dom, dom->len);
498
499 /* Transform and store received domain record */
500 if (!dom || (dom->len < new_dlen)) {
501 kfree(dom);
502 dom = kmalloc(new_dlen, GFP_ATOMIC);
503 peer->domain = dom;
504 if (!dom)
505 goto exit;
506 }
507 dom->len = new_dlen;
508 dom->gen = new_gen;
509 dom->member_cnt = new_member_cnt;
510 dom->up_map = be64_to_cpu(arrv_dom->up_map);
511 for (i = 0; i < new_member_cnt; i++)
512 dom->members[i] = ntohl(arrv_dom->members[i]);
513
514 /* Update peers affected by this domain record */
515 applied_bef = peer->applied;
516 mon_apply_domain(mon, peer);
517 mon_identify_lost_members(peer, &dom_bef, applied_bef);
518 mon_assign_roles(mon, peer_head(peer));
519exit:
520 write_unlock_bh(&mon->lock);
521}
522
523void tipc_mon_prep(struct net *net, void *data, int *dlen,
524 struct tipc_mon_state *state, int bearer_id)
525{
526 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
527 struct tipc_mon_domain *dom = data;
528 u16 gen = mon->dom_gen;
529 u16 len;
530
531 if (!tipc_mon_is_active(net, mon))
532 return;
533
534 /* Send only a dummy record with ack if peer has acked our last sent */
535 if (likely(state->acked_gen == gen)) {
536 len = dom_rec_len(dom, 0);
537 *dlen = len;
538 dom->len = htons(len);
539 dom->gen = htons(gen);
540 dom->ack_gen = htons(state->peer_gen);
541 dom->member_cnt = 0;
542 return;
543 }
544 /* Send the full record */
545 read_lock_bh(&mon->lock);
546 len = ntohs(mon->cache.len);
547 *dlen = len;
548 memcpy(data, &mon->cache, len);
549 read_unlock_bh(&mon->lock);
550 dom->ack_gen = htons(state->peer_gen);
551}
552
553void tipc_mon_get_state(struct net *net, u32 addr,
554 struct tipc_mon_state *state,
555 int bearer_id)
556{
557 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
558 struct tipc_peer *peer;
559
560	/* Use cached state if table has not changed */
561 if (!state->probing &&
562 (state->list_gen == mon->list_gen) &&
563 (state->acked_gen == mon->dom_gen))
564 return;
565
566 read_lock_bh(&mon->lock);
567 peer = get_peer(mon, addr);
568 if (peer) {
569 state->probing = state->acked_gen != mon->dom_gen;
570 state->probing |= peer->down_cnt;
571 state->reset |= peer->down_cnt >= MAX_PEER_DOWN_EVENTS;
572 state->monitoring = peer->is_local;
573 state->monitoring |= peer->is_head;
574 state->list_gen = mon->list_gen;
575 }
576 read_unlock_bh(&mon->lock);
577}
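
tipc_mon_get_state() condenses the monitor's view into three per-link flags consumed by tipc_link_timeout(): probing when our domain generation is still unacked or other peers report this one down, reset once MAX_PEER_DOWN_EVENTS independent loss reports have accumulated, and monitoring when the peer is in the local domain or acts as a domain head. A standalone userspace sketch (illustrative only) of that decision:

#include <stdbool.h>
#include <stdio.h>

#define MAX_PEER_DOWN_EVENTS 4

struct mon_state {
	bool probing;
	bool monitoring;
	bool reset;
};

static void get_state(struct mon_state *s, unsigned short acked_gen,
		      unsigned short dom_gen, int down_cnt,
		      bool is_local, bool is_head)
{
	s->probing = (acked_gen != dom_gen) || down_cnt;
	s->reset = down_cnt >= MAX_PEER_DOWN_EVENTS;
	s->monitoring = is_local || is_head;
}

int main(void)
{
	struct mon_state s;

	get_state(&s, 7, 8, 0, true, false);	/* generation unacked */
	printf("probing=%d reset=%d\n", s.probing, s.reset);
	get_state(&s, 8, 8, 4, false, true);	/* confirmed peer loss */
	printf("probing=%d reset=%d\n", s.probing, s.reset);
	return 0;
}
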
578
579static void mon_timeout(unsigned long m)
580{
581 struct tipc_monitor *mon = (void *)m;
582 struct tipc_peer *self;
583 int best_member_cnt = dom_size(mon->peer_cnt) - 1;
584
585 write_lock_bh(&mon->lock);
586 self = mon->self;
587 if (self && (best_member_cnt != self->applied)) {
588 mon_update_local_domain(mon);
589 mon_assign_roles(mon, self);
590 }
591 write_unlock_bh(&mon->lock);
592 mod_timer(&mon->timer, jiffies + mon->timer_intv);
593}
594
595int tipc_mon_create(struct net *net, int bearer_id)
596{
597 struct tipc_net *tn = tipc_net(net);
598 struct tipc_monitor *mon;
599 struct tipc_peer *self;
600 struct tipc_mon_domain *dom;
601
602 if (tn->monitors[bearer_id])
603 return 0;
604
605 mon = kzalloc(sizeof(*mon), GFP_ATOMIC);
606 self = kzalloc(sizeof(*self), GFP_ATOMIC);
607 dom = kzalloc(sizeof(*dom), GFP_ATOMIC);
608 if (!mon || !self || !dom) {
609 kfree(mon);
610 kfree(self);
611 kfree(dom);
612 return -ENOMEM;
613 }
614 tn->monitors[bearer_id] = mon;
615 rwlock_init(&mon->lock);
616 mon->net = net;
617 mon->peer_cnt = 1;
618 mon->self = self;
619 self->domain = dom;
620 self->addr = tipc_own_addr(net);
621 self->is_up = true;
622 self->is_head = true;
623 INIT_LIST_HEAD(&self->list);
624 setup_timer(&mon->timer, mon_timeout, (unsigned long)mon);
625 mon->timer_intv = msecs_to_jiffies(MON_TIMEOUT + (tn->random & 0xffff));
626 mod_timer(&mon->timer, jiffies + mon->timer_intv);
627 return 0;
628}
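
The timer interval is MON_TIMEOUT plus a fixed per-node jitter of (random & 0xffff) ms, i.e. somewhere between 120.0 s and roughly 185.5 s, so domain re-evaluation does not fire in lockstep across the cluster. A standalone sketch (illustrative only) of the computation:

#include <stdio.h>
#include <stdlib.h>

#define MON_TIMEOUT 120000		/* ms */

int main(void)
{
	unsigned int random = (unsigned int)rand();	/* per-node value */
	unsigned long intv = MON_TIMEOUT + (random & 0xffff);

	printf("monitor timer interval: %lu ms\n", intv);
	return 0;
}
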
629
630void tipc_mon_delete(struct net *net, int bearer_id)
631{
632 struct tipc_net *tn = tipc_net(net);
633 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
634 struct tipc_peer *self = get_self(net, bearer_id);
635 struct tipc_peer *peer, *tmp;
636
637 write_lock_bh(&mon->lock);
638 tn->monitors[bearer_id] = NULL;
639 list_for_each_entry_safe(peer, tmp, &self->list, list) {
640 list_del(&peer->list);
641 hlist_del(&peer->hash);
642 kfree(peer->domain);
643 kfree(peer);
644 }
645 mon->self = NULL;
646 write_unlock_bh(&mon->lock);
647 del_timer_sync(&mon->timer);
648 kfree(self->domain);
649 kfree(self);
650 kfree(mon);
651}
diff --git a/net/tipc/monitor.h b/net/tipc/monitor.h
new file mode 100644
index 000000000000..598459cbed5d
--- /dev/null
+++ b/net/tipc/monitor.h
@@ -0,0 +1,73 @@
1/*
2 * net/tipc/monitor.h
3 *
4 * Copyright (c) 2015, Ericsson AB
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the names of the copyright holders nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * Alternatively, this software may be distributed under the terms of the
20 * GNU General Public License ("GPL") version 2 as published by the Free
21 * Software Foundation.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
34 */
35
36#ifndef _TIPC_MONITOR_H
37#define _TIPC_MONITOR_H
38
39/* struct tipc_mon_state: link instance's cache of monitor list and domain state
40 * @list_gen: current generation of this node's monitor list
41 * @peer_gen: most recent domain generation received from peer
42 * @acked_gen: most recent generation of self's domain acked by peer
43 * @monitoring: this peer endpoint should be continuously monitored
44 * @probing: peer endpoint should be temporarily probed for potential loss
45 * @reset: link towards peer should be reset after too many peer-loss reports
46 * @synched: domain record's generation has been synched with peer after reset
47 */
48struct tipc_mon_state {
49 u16 list_gen;
50 u16 peer_gen;
51 u16 acked_gen;
52 bool monitoring :1;
53 bool probing :1;
54 bool reset :1;
55 bool synched :1;
56};
57
58int tipc_mon_create(struct net *net, int bearer_id);
59void tipc_mon_delete(struct net *net, int bearer_id);
60
61void tipc_mon_peer_up(struct net *net, u32 addr, int bearer_id);
62void tipc_mon_peer_down(struct net *net, u32 addr, int bearer_id);
63void tipc_mon_prep(struct net *net, void *data, int *dlen,
64 struct tipc_mon_state *state, int bearer_id);
65void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
66 struct tipc_mon_state *state, int bearer_id);
67void tipc_mon_get_state(struct net *net, u32 addr,
68 struct tipc_mon_state *state,
69 int bearer_id);
70void tipc_mon_remove_peer(struct net *net, u32 addr, int bearer_id);
71
72extern const int tipc_max_domain_size;
73#endif
diff --git a/net/tipc/node.c b/net/tipc/node.c
index e01e2c71b5a1..a3fc0a3f4077 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -40,6 +40,7 @@
40#include "name_distr.h" 40#include "name_distr.h"
41#include "socket.h" 41#include "socket.h"
42#include "bcast.h" 42#include "bcast.h"
43#include "monitor.h"
43#include "discover.h" 44#include "discover.h"
44#include "netlink.h" 45#include "netlink.h"
45 46
@@ -205,17 +206,6 @@ u16 tipc_node_get_capabilities(struct net *net, u32 addr)
205 return caps; 206 return caps;
206} 207}
207 208
208/*
209 * A trivial power-of-two bitmask technique is used for speed, since this
210 * operation is done for every incoming TIPC packet. The number of hash table
211 * entries has been chosen so that no hash chain exceeds 8 nodes and will
212 * usually be much smaller (typically only a single node).
213 */
214static unsigned int tipc_hashfn(u32 addr)
215{
216 return addr & (NODE_HTABLE_SIZE - 1);
217}
218
219static void tipc_node_kref_release(struct kref *kref) 209static void tipc_node_kref_release(struct kref *kref)
220{ 210{
221 struct tipc_node *n = container_of(kref, struct tipc_node, kref); 211 struct tipc_node *n = container_of(kref, struct tipc_node, kref);
@@ -279,6 +269,7 @@ static void tipc_node_write_unlock(struct tipc_node *n)
279 u32 addr = 0; 269 u32 addr = 0;
280 u32 flags = n->action_flags; 270 u32 flags = n->action_flags;
281 u32 link_id = 0; 271 u32 link_id = 0;
272 u32 bearer_id;
282 struct list_head *publ_list; 273 struct list_head *publ_list;
283 274
284 if (likely(!flags)) { 275 if (likely(!flags)) {
@@ -288,6 +279,7 @@ static void tipc_node_write_unlock(struct tipc_node *n)
288 279
289 addr = n->addr; 280 addr = n->addr;
290 link_id = n->link_id; 281 link_id = n->link_id;
282 bearer_id = link_id & 0xffff;
291 publ_list = &n->publ_list; 283 publ_list = &n->publ_list;
292 284
293 n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP | 285 n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
@@ -301,13 +293,16 @@ static void tipc_node_write_unlock(struct tipc_node *n)
301 if (flags & TIPC_NOTIFY_NODE_UP) 293 if (flags & TIPC_NOTIFY_NODE_UP)
302 tipc_named_node_up(net, addr); 294 tipc_named_node_up(net, addr);
303 295
304 if (flags & TIPC_NOTIFY_LINK_UP) 296 if (flags & TIPC_NOTIFY_LINK_UP) {
297 tipc_mon_peer_up(net, addr, bearer_id);
305 tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr, 298 tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
306 TIPC_NODE_SCOPE, link_id, addr); 299 TIPC_NODE_SCOPE, link_id, addr);
307 300 }
308 if (flags & TIPC_NOTIFY_LINK_DOWN) 301 if (flags & TIPC_NOTIFY_LINK_DOWN) {
302 tipc_mon_peer_down(net, addr, bearer_id);
309 tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr, 303 tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
310 link_id, addr); 304 link_id, addr);
305 }
311} 306}
312 307
313struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities) 308struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
@@ -378,14 +373,13 @@ static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
378{ 373{
379 unsigned long tol = tipc_link_tolerance(l); 374 unsigned long tol = tipc_link_tolerance(l);
380 unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4; 375 unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
381 unsigned long keepalive_intv = msecs_to_jiffies(intv);
382 376
383 /* Link with lowest tolerance determines timer interval */ 377 /* Link with lowest tolerance determines timer interval */
384 if (keepalive_intv < n->keepalive_intv) 378 if (intv < n->keepalive_intv)
385 n->keepalive_intv = keepalive_intv; 379 n->keepalive_intv = intv;
386 380
387 /* Ensure link's abort limit corresponds to current interval */ 381 /* Ensure link's abort limit corresponds to current tolerance */
388 tipc_link_set_abort_limit(l, tol / jiffies_to_msecs(n->keepalive_intv)); 382 tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
389} 383}
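
The hunk above changes n->keepalive_intv from jiffies to plain milliseconds. The interval itself is min(tolerance / 4, 500) ms, and the abort limit is the number of silent intervals that fit inside the tolerance. A standalone sketch (userspace; 1500 ms is TIPC's default link tolerance) of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long tol = 1500;	/* ms, default link tolerance */
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
	unsigned long abort_limit = tol / intv;

	printf("keepalive=%lu ms, abort after %lu silent intervals\n",
	       intv, abort_limit);
	return 0;
}
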
390 384
391static void tipc_node_delete(struct tipc_node *node) 385static void tipc_node_delete(struct tipc_node *node)
@@ -526,7 +520,7 @@ static void tipc_node_timeout(unsigned long data)
526 if (rc & TIPC_LINK_DOWN_EVT) 520 if (rc & TIPC_LINK_DOWN_EVT)
527 tipc_node_link_down(n, bearer_id, false); 521 tipc_node_link_down(n, bearer_id, false);
528 } 522 }
529 mod_timer(&n->timer, jiffies + n->keepalive_intv); 523 mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
530} 524}
531 525
532/** 526/**
@@ -692,6 +686,7 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
692 struct tipc_link *l = le->link; 686 struct tipc_link *l = le->link;
693 struct tipc_media_addr *maddr; 687 struct tipc_media_addr *maddr;
694 struct sk_buff_head xmitq; 688 struct sk_buff_head xmitq;
689 int old_bearer_id = bearer_id;
695 690
696 if (!l) 691 if (!l)
697 return; 692 return;
@@ -711,6 +706,8 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
711 tipc_link_fsm_evt(l, LINK_RESET_EVT); 706 tipc_link_fsm_evt(l, LINK_RESET_EVT);
712 } 707 }
713 tipc_node_write_unlock(n); 708 tipc_node_write_unlock(n);
709 if (delete)
710 tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
714 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr); 711 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
715 tipc_sk_rcv(n->net, &le->inputq); 712 tipc_sk_rcv(n->net, &le->inputq);
716} 713}
@@ -735,6 +732,7 @@ void tipc_node_check_dest(struct net *net, u32 onode,
735 bool accept_addr = false; 732 bool accept_addr = false;
736 bool reset = true; 733 bool reset = true;
737 char *if_name; 734 char *if_name;
735 unsigned long intv;
738 736
739 *dupl_addr = false; 737 *dupl_addr = false;
740 *respond = false; 738 *respond = false;
@@ -840,9 +838,11 @@ void tipc_node_check_dest(struct net *net, u32 onode,
840 le->link = l; 838 le->link = l;
841 n->link_cnt++; 839 n->link_cnt++;
842 tipc_node_calculate_timer(n, l); 840 tipc_node_calculate_timer(n, l);
843 if (n->link_cnt == 1) 841 if (n->link_cnt == 1) {
844 if (!mod_timer(&n->timer, jiffies + n->keepalive_intv)) 842 intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
843 if (!mod_timer(&n->timer, intv))
845 tipc_node_get(n); 844 tipc_node_get(n);
845 }
846 } 846 }
847 memcpy(&le->maddr, maddr, sizeof(*maddr)); 847 memcpy(&le->maddr, maddr, sizeof(*maddr));
848exit: 848exit:
@@ -950,7 +950,7 @@ static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
950 state = SELF_UP_PEER_UP; 950 state = SELF_UP_PEER_UP;
951 break; 951 break;
952 case SELF_LOST_CONTACT_EVT: 952 case SELF_LOST_CONTACT_EVT:
953 state = SELF_DOWN_PEER_LEAVING; 953 state = SELF_DOWN_PEER_DOWN;
954 break; 954 break;
955 case SELF_ESTABL_CONTACT_EVT: 955 case SELF_ESTABL_CONTACT_EVT:
956 case PEER_LOST_CONTACT_EVT: 956 case PEER_LOST_CONTACT_EVT:
@@ -969,7 +969,7 @@ static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
969 state = SELF_UP_PEER_UP; 969 state = SELF_UP_PEER_UP;
970 break; 970 break;
971 case PEER_LOST_CONTACT_EVT: 971 case PEER_LOST_CONTACT_EVT:
972 state = SELF_LEAVING_PEER_DOWN; 972 state = SELF_DOWN_PEER_DOWN;
973 break; 973 break;
974 case SELF_LOST_CONTACT_EVT: 974 case SELF_LOST_CONTACT_EVT:
975 case PEER_ESTABL_CONTACT_EVT: 975 case PEER_ESTABL_CONTACT_EVT:
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 272d20a795d5..215849ce453d 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -418,13 +418,12 @@ static struct outqueue_entry *tipc_alloc_entry(void *data, int len)
418 if (!entry) 418 if (!entry)
419 return NULL; 419 return NULL;
420 420
421 buf = kmalloc(len, GFP_ATOMIC); 421 buf = kmemdup(data, len, GFP_ATOMIC);
422 if (!buf) { 422 if (!buf) {
423 kfree(entry); 423 kfree(entry);
424 return NULL; 424 return NULL;
425 } 425 }
426 426
427 memcpy(buf, data, len);
428 entry->iov.iov_base = buf; 427 entry->iov.iov_base = buf;
429 entry->iov.iov_len = len; 428 entry->iov.iov_len = len;
430 429
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index c9cf2be3674a..b016c011970b 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -63,7 +63,7 @@
63 */ 63 */
64struct udp_media_addr { 64struct udp_media_addr {
65 __be16 proto; 65 __be16 proto;
66 __be16 udp_port; 66 __be16 port;
67 union { 67 union {
68 struct in_addr ipv4; 68 struct in_addr ipv4;
69 struct in6_addr ipv6; 69 struct in6_addr ipv6;
@@ -108,9 +108,9 @@ static int tipc_udp_addr2str(struct tipc_media_addr *a, char *buf, int size)
108 struct udp_media_addr *ua = (struct udp_media_addr *)&a->value; 108 struct udp_media_addr *ua = (struct udp_media_addr *)&a->value;
109 109
110 if (ntohs(ua->proto) == ETH_P_IP) 110 if (ntohs(ua->proto) == ETH_P_IP)
111 snprintf(buf, size, "%pI4:%u", &ua->ipv4, ntohs(ua->udp_port)); 111 snprintf(buf, size, "%pI4:%u", &ua->ipv4, ntohs(ua->port));
112 else if (ntohs(ua->proto) == ETH_P_IPV6) 112 else if (ntohs(ua->proto) == ETH_P_IPV6)
113 snprintf(buf, size, "%pI6:%u", &ua->ipv6, ntohs(ua->udp_port)); 113 snprintf(buf, size, "%pI6:%u", &ua->ipv6, ntohs(ua->port));
114 else 114 else
115 pr_err("Invalid UDP media address\n"); 115 pr_err("Invalid UDP media address\n");
116 return 0; 116 return 0;
@@ -178,8 +178,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
178 skb->dev = rt->dst.dev; 178 skb->dev = rt->dst.dev;
179 ttl = ip4_dst_hoplimit(&rt->dst); 179 ttl = ip4_dst_hoplimit(&rt->dst);
180 udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr, 180 udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr,
181 dst->ipv4.s_addr, 0, ttl, 0, src->udp_port, 181 dst->ipv4.s_addr, 0, ttl, 0, src->port,
182 dst->udp_port, false, true); 182 dst->port, false, true);
183#if IS_ENABLED(CONFIG_IPV6) 183#if IS_ENABLED(CONFIG_IPV6)
184 } else { 184 } else {
185 struct dst_entry *ndst; 185 struct dst_entry *ndst;
@@ -196,8 +196,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
196 ttl = ip6_dst_hoplimit(ndst); 196 ttl = ip6_dst_hoplimit(ndst);
197 err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, 197 err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb,
198 ndst->dev, &src->ipv6, 198 ndst->dev, &src->ipv6,
199 &dst->ipv6, 0, ttl, 0, src->udp_port, 199 &dst->ipv6, 0, ttl, 0, src->port,
200 dst->udp_port, false); 200 dst->port, false);
201#endif 201#endif
202 } 202 }
203 return err; 203 return err;
@@ -292,12 +292,12 @@ err:
292 292
293 ip4 = (struct sockaddr_in *)&sa_local; 293 ip4 = (struct sockaddr_in *)&sa_local;
294 local->proto = htons(ETH_P_IP); 294 local->proto = htons(ETH_P_IP);
295 local->udp_port = ip4->sin_port; 295 local->port = ip4->sin_port;
296 local->ipv4.s_addr = ip4->sin_addr.s_addr; 296 local->ipv4.s_addr = ip4->sin_addr.s_addr;
297 297
298 ip4 = (struct sockaddr_in *)&sa_remote; 298 ip4 = (struct sockaddr_in *)&sa_remote;
299 remote->proto = htons(ETH_P_IP); 299 remote->proto = htons(ETH_P_IP);
300 remote->udp_port = ip4->sin_port; 300 remote->port = ip4->sin_port;
301 remote->ipv4.s_addr = ip4->sin_addr.s_addr; 301 remote->ipv4.s_addr = ip4->sin_addr.s_addr;
302 return 0; 302 return 0;
303 303
@@ -312,13 +312,13 @@ err:
312 return -EINVAL; 312 return -EINVAL;
313 313
314 local->proto = htons(ETH_P_IPV6); 314 local->proto = htons(ETH_P_IPV6);
315 local->udp_port = ip6->sin6_port; 315 local->port = ip6->sin6_port;
316 memcpy(&local->ipv6, &ip6->sin6_addr, sizeof(struct in6_addr)); 316 memcpy(&local->ipv6, &ip6->sin6_addr, sizeof(struct in6_addr));
317 ub->ifindex = ip6->sin6_scope_id; 317 ub->ifindex = ip6->sin6_scope_id;
318 318
319 ip6 = (struct sockaddr_in6 *)&sa_remote; 319 ip6 = (struct sockaddr_in6 *)&sa_remote;
320 remote->proto = htons(ETH_P_IPV6); 320 remote->proto = htons(ETH_P_IPV6);
321 remote->udp_port = ip6->sin6_port; 321 remote->port = ip6->sin6_port;
322 memcpy(&remote->ipv6, &ip6->sin6_addr, sizeof(struct in6_addr)); 322 memcpy(&remote->ipv6, &ip6->sin6_addr, sizeof(struct in6_addr));
323 return 0; 323 return 0;
324#endif 324#endif
@@ -386,7 +386,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
386 err = -EAFNOSUPPORT; 386 err = -EAFNOSUPPORT;
387 goto err; 387 goto err;
388 } 388 }
389 udp_conf.local_udp_port = local.udp_port; 389 udp_conf.local_udp_port = local.port;
390 err = udp_sock_create(net, &udp_conf, &ub->ubsock); 390 err = udp_sock_create(net, &udp_conf, &ub->ubsock);
391 if (err) 391 if (err)
392 goto err; 392 goto err;
diff --git a/net/wireless/core.c b/net/wireless/core.c
index ecca3896b9f7..39d9abd309ea 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -748,6 +748,36 @@ int wiphy_register(struct wiphy *wiphy)
748 nl80211_send_reg_change_event(&request); 748 nl80211_send_reg_change_event(&request);
749 } 749 }
750 750
751 /* Check that nobody globally advertises any capabilities they do not
752 * advertise on all possible interface types.
753 */
754 if (wiphy->extended_capabilities_len &&
755 wiphy->num_iftype_ext_capab &&
756 wiphy->iftype_ext_capab) {
757 u8 supported_on_all, j;
758 const struct wiphy_iftype_ext_capab *capab;
759
760 capab = wiphy->iftype_ext_capab;
761 for (j = 0; j < wiphy->extended_capabilities_len; j++) {
762 if (capab[0].extended_capabilities_len > j)
763 supported_on_all =
764 capab[0].extended_capabilities[j];
765 else
766 supported_on_all = 0x00;
767 for (i = 1; i < wiphy->num_iftype_ext_capab; i++) {
768 if (j >= capab[i].extended_capabilities_len) {
769 supported_on_all = 0x00;
770 break;
771 }
772 supported_on_all &=
773 capab[i].extended_capabilities[j];
774 }
775 if (WARN_ON(wiphy->extended_capabilities[j] &
776 ~supported_on_all))
777 break;
778 }
779 }
780
751 rdev->wiphy.registered = true; 781 rdev->wiphy.registered = true;
752 rtnl_unlock(); 782 rtnl_unlock();
753 783
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 025b7a5d508b..a4d547f99f8d 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -214,7 +214,7 @@ struct cfg80211_event {
214 size_t req_ie_len; 214 size_t req_ie_len;
215 size_t resp_ie_len; 215 size_t resp_ie_len;
216 struct cfg80211_bss *bss; 216 struct cfg80211_bss *bss;
217 u16 status; 217 int status; /* -1 = failed; 0..65535 = status code */
218 } cr; 218 } cr;
219 struct { 219 struct {
220 const u8 *req_ie; 220 const u8 *req_ie;
@@ -374,7 +374,7 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
374void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, 374void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
375 const u8 *req_ie, size_t req_ie_len, 375 const u8 *req_ie, size_t req_ie_len,
376 const u8 *resp_ie, size_t resp_ie_len, 376 const u8 *resp_ie, size_t resp_ie_len,
377 u16 status, bool wextev, 377 int status, bool wextev,
378 struct cfg80211_bss *bss); 378 struct cfg80211_bss *bss);
379void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, 379void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
380 size_t ie_len, u16 reason, bool from_ap); 380 size_t ie_len, u16 reason, bool from_ap);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d7599014055d..c503e96bfd5a 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -167,6 +167,7 @@ __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs)
167 167
168 if (attrs[NL80211_ATTR_IFINDEX]) { 168 if (attrs[NL80211_ATTR_IFINDEX]) {
169 int ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]); 169 int ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]);
170
170 netdev = __dev_get_by_index(netns, ifindex); 171 netdev = __dev_get_by_index(netns, ifindex);
171 if (netdev) { 172 if (netdev) {
172 if (netdev->ieee80211_ptr) 173 if (netdev->ieee80211_ptr)
@@ -731,6 +732,7 @@ static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
731 732
732 if (tb[NL80211_KEY_DEFAULT_TYPES]) { 733 if (tb[NL80211_KEY_DEFAULT_TYPES]) {
733 struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES]; 734 struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES];
735
734 err = nla_parse_nested(kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1, 736 err = nla_parse_nested(kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1,
735 tb[NL80211_KEY_DEFAULT_TYPES], 737 tb[NL80211_KEY_DEFAULT_TYPES],
736 nl80211_key_default_policy); 738 nl80211_key_default_policy);
@@ -1264,7 +1266,7 @@ nl80211_send_mgmt_stypes(struct sk_buff *msg,
1264struct nl80211_dump_wiphy_state { 1266struct nl80211_dump_wiphy_state {
1265 s64 filter_wiphy; 1267 s64 filter_wiphy;
1266 long start; 1268 long start;
1267 long split_start, band_start, chan_start; 1269 long split_start, band_start, chan_start, capa_start;
1268 bool split; 1270 bool split;
1269}; 1271};
1270 1272
@@ -1382,6 +1384,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
1382 rdev->ops->get_antenna) { 1384 rdev->ops->get_antenna) {
1383 u32 tx_ant = 0, rx_ant = 0; 1385 u32 tx_ant = 0, rx_ant = 0;
1384 int res; 1386 int res;
1387
1385 res = rdev_get_antenna(rdev, &tx_ant, &rx_ant); 1388 res = rdev_get_antenna(rdev, &tx_ant, &rx_ant);
1386 if (!res) { 1389 if (!res) {
1387 if (nla_put_u32(msg, 1390 if (nla_put_u32(msg,
@@ -1761,6 +1764,47 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
1761 nla_nest_end(msg, nested); 1764 nla_nest_end(msg, nested);
1762 } 1765 }
1763 1766
1767 state->split_start++;
1768 break;
1769 case 13:
1770 if (rdev->wiphy.num_iftype_ext_capab &&
1771 rdev->wiphy.iftype_ext_capab) {
1772 struct nlattr *nested_ext_capab, *nested;
1773
1774 nested = nla_nest_start(msg,
1775 NL80211_ATTR_IFTYPE_EXT_CAPA);
1776 if (!nested)
1777 goto nla_put_failure;
1778
1779 for (i = state->capa_start;
1780 i < rdev->wiphy.num_iftype_ext_capab; i++) {
1781 const struct wiphy_iftype_ext_capab *capab;
1782
1783 capab = &rdev->wiphy.iftype_ext_capab[i];
1784
1785 nested_ext_capab = nla_nest_start(msg, i);
1786 if (!nested_ext_capab ||
1787 nla_put_u32(msg, NL80211_ATTR_IFTYPE,
1788 capab->iftype) ||
1789 nla_put(msg, NL80211_ATTR_EXT_CAPA,
1790 capab->extended_capabilities_len,
1791 capab->extended_capabilities) ||
1792 nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK,
1793 capab->extended_capabilities_len,
1794 capab->extended_capabilities_mask))
1795 goto nla_put_failure;
1796
1797 nla_nest_end(msg, nested_ext_capab);
1798 if (state->split)
1799 break;
1800 }
1801 nla_nest_end(msg, nested);
1802 if (i < rdev->wiphy.num_iftype_ext_capab) {
1803 state->capa_start = i + 1;
1804 break;
1805 }
1806 }
1807
1764 /* done */ 1808 /* done */
1765 state->split_start = 0; 1809 state->split_start = 0;
1766 break; 1810 break;
@@ -2116,7 +2160,6 @@ static int nl80211_set_wds_peer(struct sk_buff *skb, struct genl_info *info)
2116 return rdev_set_wds_peer(rdev, dev, bssid); 2160 return rdev_set_wds_peer(rdev, dev, bssid);
2117} 2161}
2118 2162
2119
2120static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) 2163static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
2121{ 2164{
2122 struct cfg80211_registered_device *rdev; 2165 struct cfg80211_registered_device *rdev;
@@ -2251,6 +2294,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
2251 if (info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX] && 2294 if (info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX] &&
2252 info->attrs[NL80211_ATTR_WIPHY_ANTENNA_RX]) { 2295 info->attrs[NL80211_ATTR_WIPHY_ANTENNA_RX]) {
2253 u32 tx_ant, rx_ant; 2296 u32 tx_ant, rx_ant;
2297
2254 if ((!rdev->wiphy.available_antennas_tx && 2298 if ((!rdev->wiphy.available_antennas_tx &&
2255 !rdev->wiphy.available_antennas_rx) || 2299 !rdev->wiphy.available_antennas_rx) ||
2256 !rdev->ops->set_antenna) 2300 !rdev->ops->set_antenna)
@@ -2919,6 +2963,7 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
2919 pairwise = !!mac_addr; 2963 pairwise = !!mac_addr;
2920 if (info->attrs[NL80211_ATTR_KEY_TYPE]) { 2964 if (info->attrs[NL80211_ATTR_KEY_TYPE]) {
2921 u32 kt = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]); 2965 u32 kt = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]);
2966
2922 if (kt >= NUM_NL80211_KEYTYPES) 2967 if (kt >= NUM_NL80211_KEYTYPES)
2923 return -EINVAL; 2968 return -EINVAL;
2924 if (kt != NL80211_KEYTYPE_GROUP && 2969 if (kt != NL80211_KEYTYPE_GROUP &&
@@ -3962,7 +4007,6 @@ static int nl80211_dump_station(struct sk_buff *skb,
3962 sta_idx++; 4007 sta_idx++;
3963 } 4008 }
3964 4009
3965
3966 out: 4010 out:
3967 cb->args[2] = sta_idx; 4011 cb->args[2] = sta_idx;
3968 err = skb->len; 4012 err = skb->len;
@@ -4763,7 +4807,6 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
4763 path_idx++; 4807 path_idx++;
4764 } 4808 }
4765 4809
4766
4767 out: 4810 out:
4768 cb->args[2] = path_idx; 4811 cb->args[2] = path_idx;
4769 err = skb->len; 4812 err = skb->len;
@@ -5053,7 +5096,6 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
5053 enum nl80211_user_reg_hint_type user_reg_hint_type; 5096 enum nl80211_user_reg_hint_type user_reg_hint_type;
5054 u32 owner_nlportid; 5097 u32 owner_nlportid;
5055 5098
5056
5057 /* 5099 /*
5058 * You should only get this when cfg80211 hasn't yet initialized 5100 * You should only get this when cfg80211 hasn't yet initialized
5059 * completely when built-in to the kernel right between the time 5101 * completely when built-in to the kernel right between the time
@@ -5262,7 +5304,6 @@ do { \
5262 } \ 5304 } \
5263} while (0) 5305} while (0)
5264 5306
5265
5266 if (!info->attrs[NL80211_ATTR_MESH_CONFIG]) 5307 if (!info->attrs[NL80211_ATTR_MESH_CONFIG])
5267 return -EINVAL; 5308 return -EINVAL;
5268 if (nla_parse_nested(tb, NL80211_MESHCONF_ATTR_MAX, 5309 if (nla_parse_nested(tb, NL80211_MESHCONF_ATTR_MAX,
@@ -5409,7 +5450,6 @@ static int nl80211_parse_mesh_setup(struct genl_info *info,
5409 IEEE80211_PATH_METRIC_VENDOR : 5450 IEEE80211_PATH_METRIC_VENDOR :
5410 IEEE80211_PATH_METRIC_AIRTIME; 5451 IEEE80211_PATH_METRIC_AIRTIME;
5411 5452
5412
5413 if (tb[NL80211_MESH_SETUP_IE]) { 5453 if (tb[NL80211_MESH_SETUP_IE]) {
5414 struct nlattr *ieattr = 5454 struct nlattr *ieattr =
5415 tb[NL80211_MESH_SETUP_IE]; 5455 tb[NL80211_MESH_SETUP_IE];
@@ -5796,10 +5836,8 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
5796 } 5836 }
5797 } 5837 }
5798 5838
5799 r = set_regdom(rd, REGD_SOURCE_CRDA); 5839 /* set_regdom takes ownership of rd */
5800 /* set_regdom took ownership */ 5840 return set_regdom(rd, REGD_SOURCE_CRDA);
5801 rd = NULL;
5802
5803 bad_reg: 5841 bad_reg:
5804 kfree(rd); 5842 kfree(rd);
5805 return r; 5843 return r;
@@ -6033,6 +6071,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
6033 /* all channels */ 6071 /* all channels */
6034 for (band = 0; band < NUM_NL80211_BANDS; band++) { 6072 for (band = 0; band < NUM_NL80211_BANDS; band++) {
6035 int j; 6073 int j;
6074
6036 if (!wiphy->bands[band]) 6075 if (!wiphy->bands[band])
6037 continue; 6076 continue;
6038 for (j = 0; j < wiphy->bands[band]->n_channels; j++) { 6077 for (j = 0; j < wiphy->bands[band]->n_channels; j++) {
@@ -6442,6 +6481,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
6442 /* all channels */ 6481 /* all channels */
6443 for (band = 0; band < NUM_NL80211_BANDS; band++) { 6482 for (band = 0; band < NUM_NL80211_BANDS; band++) {
6444 int j; 6483 int j;
6484
6445 if (!wiphy->bands[band]) 6485 if (!wiphy->bands[band])
6446 continue; 6486 continue;
6447 for (j = 0; j < wiphy->bands[band]->n_channels; j++) { 6487 for (j = 0; j < wiphy->bands[band]->n_channels; j++) {
@@ -6511,7 +6551,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
6511 nla_data(ssid), nla_len(ssid)); 6551 nla_data(ssid), nla_len(ssid));
6512 request->match_sets[i].ssid.ssid_len = 6552 request->match_sets[i].ssid.ssid_len =
6513 nla_len(ssid); 6553 nla_len(ssid);
6514 /* special attribute - old implemenation w/a */ 6554 /* special attribute - old implementation w/a */
6515 request->match_sets[i].rssi_thold = 6555 request->match_sets[i].rssi_thold =
6516 default_match_rssi; 6556 default_match_rssi;
6517 rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI]; 6557 rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI];
@@ -7204,6 +7244,7 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
7204 if (key.idx >= 0) { 7244 if (key.idx >= 0) {
7205 int i; 7245 int i;
7206 bool ok = false; 7246 bool ok = false;
7247
7207 for (i = 0; i < rdev->wiphy.n_cipher_suites; i++) { 7248 for (i = 0; i < rdev->wiphy.n_cipher_suites; i++) {
7208 if (key.p.cipher == rdev->wiphy.cipher_suites[i]) { 7249 if (key.p.cipher == rdev->wiphy.cipher_suites[i]) {
7209 ok = true; 7250 ok = true;
@@ -7282,6 +7323,7 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
7282 7323
7283 if (info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]) { 7324 if (info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]) {
7284 u16 proto; 7325 u16 proto;
7326
7285 proto = nla_get_u16( 7327 proto = nla_get_u16(
7286 info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]); 7328 info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]);
7287 settings->control_port_ethertype = cpu_to_be16(proto); 7329 settings->control_port_ethertype = cpu_to_be16(proto);
@@ -8435,6 +8477,7 @@ static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
8435 for (i = 0; i < rates_len; i++) { 8477 for (i = 0; i < rates_len; i++) {
8436 int rate = (rates[i] & 0x7f) * 5; 8478 int rate = (rates[i] & 0x7f) * 5;
8437 int ridx; 8479 int ridx;
8480
8438 for (ridx = 0; ridx < sband->n_bitrates; ridx++) { 8481 for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
8439 struct ieee80211_rate *srate = 8482 struct ieee80211_rate *srate =
8440 &sband->bitrates[ridx]; 8483 &sband->bitrates[ridx];
@@ -8743,7 +8786,6 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
8743 if (params.wait < NL80211_MIN_REMAIN_ON_CHANNEL_TIME || 8786 if (params.wait < NL80211_MIN_REMAIN_ON_CHANNEL_TIME ||
8744 params.wait > rdev->wiphy.max_remain_on_channel_duration) 8787 params.wait > rdev->wiphy.max_remain_on_channel_duration)
8745 return -EINVAL; 8788 return -EINVAL;
8746
8747 } 8789 }
8748 8790
8749 params.offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK]; 8791 params.offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK];
@@ -10590,7 +10632,6 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb)
10590} 10632}
10591EXPORT_SYMBOL_GPL(cfg80211_vendor_cmd_reply); 10633EXPORT_SYMBOL_GPL(cfg80211_vendor_cmd_reply);
10592 10634
10593
10594static int nl80211_set_qos_map(struct sk_buff *skb, 10635static int nl80211_set_qos_map(struct sk_buff *skb,
10595 struct genl_info *info) 10636 struct genl_info *info)
10596{ 10637{
@@ -10945,7 +10986,7 @@ static const struct genl_ops nl80211_ops[] = {
10945 .cmd = NL80211_CMD_SET_WIPHY, 10986 .cmd = NL80211_CMD_SET_WIPHY,
10946 .doit = nl80211_set_wiphy, 10987 .doit = nl80211_set_wiphy,
10947 .policy = nl80211_policy, 10988 .policy = nl80211_policy,
10948 .flags = GENL_ADMIN_PERM, 10989 .flags = GENL_UNS_ADMIN_PERM,
10949 .internal_flags = NL80211_FLAG_NEED_RTNL, 10990 .internal_flags = NL80211_FLAG_NEED_RTNL,
10950 }, 10991 },
10951 { 10992 {
@@ -10961,7 +11002,7 @@ static const struct genl_ops nl80211_ops[] = {
10961 .cmd = NL80211_CMD_SET_INTERFACE, 11002 .cmd = NL80211_CMD_SET_INTERFACE,
10962 .doit = nl80211_set_interface, 11003 .doit = nl80211_set_interface,
10963 .policy = nl80211_policy, 11004 .policy = nl80211_policy,
10964 .flags = GENL_ADMIN_PERM, 11005 .flags = GENL_UNS_ADMIN_PERM,
10965 .internal_flags = NL80211_FLAG_NEED_NETDEV | 11006 .internal_flags = NL80211_FLAG_NEED_NETDEV |
10966 NL80211_FLAG_NEED_RTNL, 11007 NL80211_FLAG_NEED_RTNL,
10967 }, 11008 },
@@ -10969,7 +11010,7 @@ static const struct genl_ops nl80211_ops[] = {
10969 .cmd = NL80211_CMD_NEW_INTERFACE, 11010 .cmd = NL80211_CMD_NEW_INTERFACE,
10970 .doit = nl80211_new_interface, 11011 .doit = nl80211_new_interface,
10971 .policy = nl80211_policy, 11012 .policy = nl80211_policy,
10972 .flags = GENL_ADMIN_PERM, 11013 .flags = GENL_UNS_ADMIN_PERM,
10973 .internal_flags = NL80211_FLAG_NEED_WIPHY | 11014 .internal_flags = NL80211_FLAG_NEED_WIPHY |
10974 NL80211_FLAG_NEED_RTNL, 11015 NL80211_FLAG_NEED_RTNL,
10975 }, 11016 },
@@ -10977,7 +11018,7 @@ static const struct genl_ops nl80211_ops[] = {
10977 .cmd = NL80211_CMD_DEL_INTERFACE, 11018 .cmd = NL80211_CMD_DEL_INTERFACE,
10978 .doit = nl80211_del_interface, 11019 .doit = nl80211_del_interface,
10979 .policy = nl80211_policy, 11020 .policy = nl80211_policy,
10980 .flags = GENL_ADMIN_PERM, 11021 .flags = GENL_UNS_ADMIN_PERM,
10981 .internal_flags = NL80211_FLAG_NEED_WDEV | 11022 .internal_flags = NL80211_FLAG_NEED_WDEV |
10982 NL80211_FLAG_NEED_RTNL, 11023 NL80211_FLAG_NEED_RTNL,
10983 }, 11024 },
@@ -10985,7 +11026,7 @@ static const struct genl_ops nl80211_ops[] = {
10985 .cmd = NL80211_CMD_GET_KEY, 11026 .cmd = NL80211_CMD_GET_KEY,
10986 .doit = nl80211_get_key, 11027 .doit = nl80211_get_key,
10987 .policy = nl80211_policy, 11028 .policy = nl80211_policy,
10988 .flags = GENL_ADMIN_PERM, 11029 .flags = GENL_UNS_ADMIN_PERM,
10989 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11030 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
10990 NL80211_FLAG_NEED_RTNL, 11031 NL80211_FLAG_NEED_RTNL,
10991 }, 11032 },
@@ -10993,7 +11034,7 @@ static const struct genl_ops nl80211_ops[] = {
10993 .cmd = NL80211_CMD_SET_KEY, 11034 .cmd = NL80211_CMD_SET_KEY,
10994 .doit = nl80211_set_key, 11035 .doit = nl80211_set_key,
10995 .policy = nl80211_policy, 11036 .policy = nl80211_policy,
10996 .flags = GENL_ADMIN_PERM, 11037 .flags = GENL_UNS_ADMIN_PERM,
10997 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11038 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
10998 NL80211_FLAG_NEED_RTNL | 11039 NL80211_FLAG_NEED_RTNL |
10999 NL80211_FLAG_CLEAR_SKB, 11040 NL80211_FLAG_CLEAR_SKB,
@@ -11002,7 +11043,7 @@ static const struct genl_ops nl80211_ops[] = {
11002 .cmd = NL80211_CMD_NEW_KEY, 11043 .cmd = NL80211_CMD_NEW_KEY,
11003 .doit = nl80211_new_key, 11044 .doit = nl80211_new_key,
11004 .policy = nl80211_policy, 11045 .policy = nl80211_policy,
11005 .flags = GENL_ADMIN_PERM, 11046 .flags = GENL_UNS_ADMIN_PERM,
11006 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11047 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11007 NL80211_FLAG_NEED_RTNL | 11048 NL80211_FLAG_NEED_RTNL |
11008 NL80211_FLAG_CLEAR_SKB, 11049 NL80211_FLAG_CLEAR_SKB,
@@ -11011,14 +11052,14 @@ static const struct genl_ops nl80211_ops[] = {
11011 .cmd = NL80211_CMD_DEL_KEY, 11052 .cmd = NL80211_CMD_DEL_KEY,
11012 .doit = nl80211_del_key, 11053 .doit = nl80211_del_key,
11013 .policy = nl80211_policy, 11054 .policy = nl80211_policy,
11014 .flags = GENL_ADMIN_PERM, 11055 .flags = GENL_UNS_ADMIN_PERM,
11015 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11056 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11016 NL80211_FLAG_NEED_RTNL, 11057 NL80211_FLAG_NEED_RTNL,
11017 }, 11058 },
11018 { 11059 {
11019 .cmd = NL80211_CMD_SET_BEACON, 11060 .cmd = NL80211_CMD_SET_BEACON,
11020 .policy = nl80211_policy, 11061 .policy = nl80211_policy,
11021 .flags = GENL_ADMIN_PERM, 11062 .flags = GENL_UNS_ADMIN_PERM,
11022 .doit = nl80211_set_beacon, 11063 .doit = nl80211_set_beacon,
11023 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11064 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11024 NL80211_FLAG_NEED_RTNL, 11065 NL80211_FLAG_NEED_RTNL,
@@ -11026,7 +11067,7 @@ static const struct genl_ops nl80211_ops[] = {
11026 { 11067 {
11027 .cmd = NL80211_CMD_START_AP, 11068 .cmd = NL80211_CMD_START_AP,
11028 .policy = nl80211_policy, 11069 .policy = nl80211_policy,
11029 .flags = GENL_ADMIN_PERM, 11070 .flags = GENL_UNS_ADMIN_PERM,
11030 .doit = nl80211_start_ap, 11071 .doit = nl80211_start_ap,
11031 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11072 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11032 NL80211_FLAG_NEED_RTNL, 11073 NL80211_FLAG_NEED_RTNL,
@@ -11034,7 +11075,7 @@ static const struct genl_ops nl80211_ops[] = {
11034 { 11075 {
11035 .cmd = NL80211_CMD_STOP_AP, 11076 .cmd = NL80211_CMD_STOP_AP,
11036 .policy = nl80211_policy, 11077 .policy = nl80211_policy,
11037 .flags = GENL_ADMIN_PERM, 11078 .flags = GENL_UNS_ADMIN_PERM,
11038 .doit = nl80211_stop_ap, 11079 .doit = nl80211_stop_ap,
11039 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11080 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11040 NL80211_FLAG_NEED_RTNL, 11081 NL80211_FLAG_NEED_RTNL,
@@ -11051,7 +11092,7 @@ static const struct genl_ops nl80211_ops[] = {
11051 .cmd = NL80211_CMD_SET_STATION, 11092 .cmd = NL80211_CMD_SET_STATION,
11052 .doit = nl80211_set_station, 11093 .doit = nl80211_set_station,
11053 .policy = nl80211_policy, 11094 .policy = nl80211_policy,
11054 .flags = GENL_ADMIN_PERM, 11095 .flags = GENL_UNS_ADMIN_PERM,
11055 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11096 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11056 NL80211_FLAG_NEED_RTNL, 11097 NL80211_FLAG_NEED_RTNL,
11057 }, 11098 },
@@ -11059,7 +11100,7 @@ static const struct genl_ops nl80211_ops[] = {
11059 .cmd = NL80211_CMD_NEW_STATION, 11100 .cmd = NL80211_CMD_NEW_STATION,
11060 .doit = nl80211_new_station, 11101 .doit = nl80211_new_station,
11061 .policy = nl80211_policy, 11102 .policy = nl80211_policy,
11062 .flags = GENL_ADMIN_PERM, 11103 .flags = GENL_UNS_ADMIN_PERM,
11063 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11104 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11064 NL80211_FLAG_NEED_RTNL, 11105 NL80211_FLAG_NEED_RTNL,
11065 }, 11106 },
@@ -11067,7 +11108,7 @@ static const struct genl_ops nl80211_ops[] = {
11067 .cmd = NL80211_CMD_DEL_STATION, 11108 .cmd = NL80211_CMD_DEL_STATION,
11068 .doit = nl80211_del_station, 11109 .doit = nl80211_del_station,
11069 .policy = nl80211_policy, 11110 .policy = nl80211_policy,
11070 .flags = GENL_ADMIN_PERM, 11111 .flags = GENL_UNS_ADMIN_PERM,
11071 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11112 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11072 NL80211_FLAG_NEED_RTNL, 11113 NL80211_FLAG_NEED_RTNL,
11073 }, 11114 },
@@ -11076,7 +11117,7 @@ static const struct genl_ops nl80211_ops[] = {
11076 .doit = nl80211_get_mpath, 11117 .doit = nl80211_get_mpath,
11077 .dumpit = nl80211_dump_mpath, 11118 .dumpit = nl80211_dump_mpath,
11078 .policy = nl80211_policy, 11119 .policy = nl80211_policy,
11079 .flags = GENL_ADMIN_PERM, 11120 .flags = GENL_UNS_ADMIN_PERM,
11080 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11121 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11081 NL80211_FLAG_NEED_RTNL, 11122 NL80211_FLAG_NEED_RTNL,
11082 }, 11123 },
@@ -11085,7 +11126,7 @@ static const struct genl_ops nl80211_ops[] = {
11085 .doit = nl80211_get_mpp, 11126 .doit = nl80211_get_mpp,
11086 .dumpit = nl80211_dump_mpp, 11127 .dumpit = nl80211_dump_mpp,
11087 .policy = nl80211_policy, 11128 .policy = nl80211_policy,
11088 .flags = GENL_ADMIN_PERM, 11129 .flags = GENL_UNS_ADMIN_PERM,
11089 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11130 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11090 NL80211_FLAG_NEED_RTNL, 11131 NL80211_FLAG_NEED_RTNL,
11091 }, 11132 },
@@ -11093,7 +11134,7 @@ static const struct genl_ops nl80211_ops[] = {
11093 .cmd = NL80211_CMD_SET_MPATH, 11134 .cmd = NL80211_CMD_SET_MPATH,
11094 .doit = nl80211_set_mpath, 11135 .doit = nl80211_set_mpath,
11095 .policy = nl80211_policy, 11136 .policy = nl80211_policy,
11096 .flags = GENL_ADMIN_PERM, 11137 .flags = GENL_UNS_ADMIN_PERM,
11097 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11138 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11098 NL80211_FLAG_NEED_RTNL, 11139 NL80211_FLAG_NEED_RTNL,
11099 }, 11140 },
@@ -11101,7 +11142,7 @@ static const struct genl_ops nl80211_ops[] = {
11101 .cmd = NL80211_CMD_NEW_MPATH, 11142 .cmd = NL80211_CMD_NEW_MPATH,
11102 .doit = nl80211_new_mpath, 11143 .doit = nl80211_new_mpath,
11103 .policy = nl80211_policy, 11144 .policy = nl80211_policy,
11104 .flags = GENL_ADMIN_PERM, 11145 .flags = GENL_UNS_ADMIN_PERM,
11105 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11146 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11106 NL80211_FLAG_NEED_RTNL, 11147 NL80211_FLAG_NEED_RTNL,
11107 }, 11148 },
@@ -11109,7 +11150,7 @@ static const struct genl_ops nl80211_ops[] = {
11109 .cmd = NL80211_CMD_DEL_MPATH, 11150 .cmd = NL80211_CMD_DEL_MPATH,
11110 .doit = nl80211_del_mpath, 11151 .doit = nl80211_del_mpath,
11111 .policy = nl80211_policy, 11152 .policy = nl80211_policy,
11112 .flags = GENL_ADMIN_PERM, 11153 .flags = GENL_UNS_ADMIN_PERM,
11113 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11154 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11114 NL80211_FLAG_NEED_RTNL, 11155 NL80211_FLAG_NEED_RTNL,
11115 }, 11156 },
@@ -11117,7 +11158,7 @@ static const struct genl_ops nl80211_ops[] = {
11117 .cmd = NL80211_CMD_SET_BSS, 11158 .cmd = NL80211_CMD_SET_BSS,
11118 .doit = nl80211_set_bss, 11159 .doit = nl80211_set_bss,
11119 .policy = nl80211_policy, 11160 .policy = nl80211_policy,
11120 .flags = GENL_ADMIN_PERM, 11161 .flags = GENL_UNS_ADMIN_PERM,
11121 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11162 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11122 NL80211_FLAG_NEED_RTNL, 11163 NL80211_FLAG_NEED_RTNL,
11123 }, 11164 },
@@ -11156,7 +11197,7 @@ static const struct genl_ops nl80211_ops[] = {
11156 .cmd = NL80211_CMD_SET_MESH_CONFIG, 11197 .cmd = NL80211_CMD_SET_MESH_CONFIG,
11157 .doit = nl80211_update_mesh_config, 11198 .doit = nl80211_update_mesh_config,
11158 .policy = nl80211_policy, 11199 .policy = nl80211_policy,
11159 .flags = GENL_ADMIN_PERM, 11200 .flags = GENL_UNS_ADMIN_PERM,
11160 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11201 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11161 NL80211_FLAG_NEED_RTNL, 11202 NL80211_FLAG_NEED_RTNL,
11162 }, 11203 },
@@ -11164,7 +11205,7 @@ static const struct genl_ops nl80211_ops[] = {
11164 .cmd = NL80211_CMD_TRIGGER_SCAN, 11205 .cmd = NL80211_CMD_TRIGGER_SCAN,
11165 .doit = nl80211_trigger_scan, 11206 .doit = nl80211_trigger_scan,
11166 .policy = nl80211_policy, 11207 .policy = nl80211_policy,
11167 .flags = GENL_ADMIN_PERM, 11208 .flags = GENL_UNS_ADMIN_PERM,
11168 .internal_flags = NL80211_FLAG_NEED_WDEV_UP | 11209 .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
11169 NL80211_FLAG_NEED_RTNL, 11210 NL80211_FLAG_NEED_RTNL,
11170 }, 11211 },
@@ -11172,7 +11213,7 @@ static const struct genl_ops nl80211_ops[] = {
11172 .cmd = NL80211_CMD_ABORT_SCAN, 11213 .cmd = NL80211_CMD_ABORT_SCAN,
11173 .doit = nl80211_abort_scan, 11214 .doit = nl80211_abort_scan,
11174 .policy = nl80211_policy, 11215 .policy = nl80211_policy,
11175 .flags = GENL_ADMIN_PERM, 11216 .flags = GENL_UNS_ADMIN_PERM,
11176 .internal_flags = NL80211_FLAG_NEED_WDEV_UP | 11217 .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
11177 NL80211_FLAG_NEED_RTNL, 11218 NL80211_FLAG_NEED_RTNL,
11178 }, 11219 },
@@ -11185,7 +11226,7 @@ static const struct genl_ops nl80211_ops[] = {
11185 .cmd = NL80211_CMD_START_SCHED_SCAN, 11226 .cmd = NL80211_CMD_START_SCHED_SCAN,
11186 .doit = nl80211_start_sched_scan, 11227 .doit = nl80211_start_sched_scan,
11187 .policy = nl80211_policy, 11228 .policy = nl80211_policy,
11188 .flags = GENL_ADMIN_PERM, 11229 .flags = GENL_UNS_ADMIN_PERM,
11189 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11230 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11190 NL80211_FLAG_NEED_RTNL, 11231 NL80211_FLAG_NEED_RTNL,
11191 }, 11232 },
@@ -11193,7 +11234,7 @@ static const struct genl_ops nl80211_ops[] = {
11193 .cmd = NL80211_CMD_STOP_SCHED_SCAN, 11234 .cmd = NL80211_CMD_STOP_SCHED_SCAN,
11194 .doit = nl80211_stop_sched_scan, 11235 .doit = nl80211_stop_sched_scan,
11195 .policy = nl80211_policy, 11236 .policy = nl80211_policy,
11196 .flags = GENL_ADMIN_PERM, 11237 .flags = GENL_UNS_ADMIN_PERM,
11197 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11238 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11198 NL80211_FLAG_NEED_RTNL, 11239 NL80211_FLAG_NEED_RTNL,
11199 }, 11240 },
@@ -11201,7 +11242,7 @@ static const struct genl_ops nl80211_ops[] = {
11201 .cmd = NL80211_CMD_AUTHENTICATE, 11242 .cmd = NL80211_CMD_AUTHENTICATE,
11202 .doit = nl80211_authenticate, 11243 .doit = nl80211_authenticate,
11203 .policy = nl80211_policy, 11244 .policy = nl80211_policy,
11204 .flags = GENL_ADMIN_PERM, 11245 .flags = GENL_UNS_ADMIN_PERM,
11205 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11246 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11206 NL80211_FLAG_NEED_RTNL | 11247 NL80211_FLAG_NEED_RTNL |
11207 NL80211_FLAG_CLEAR_SKB, 11248 NL80211_FLAG_CLEAR_SKB,
@@ -11210,7 +11251,7 @@ static const struct genl_ops nl80211_ops[] = {
11210 .cmd = NL80211_CMD_ASSOCIATE, 11251 .cmd = NL80211_CMD_ASSOCIATE,
11211 .doit = nl80211_associate, 11252 .doit = nl80211_associate,
11212 .policy = nl80211_policy, 11253 .policy = nl80211_policy,
11213 .flags = GENL_ADMIN_PERM, 11254 .flags = GENL_UNS_ADMIN_PERM,
11214 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11255 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11215 NL80211_FLAG_NEED_RTNL, 11256 NL80211_FLAG_NEED_RTNL,
11216 }, 11257 },
@@ -11218,7 +11259,7 @@ static const struct genl_ops nl80211_ops[] = {
11218 .cmd = NL80211_CMD_DEAUTHENTICATE, 11259 .cmd = NL80211_CMD_DEAUTHENTICATE,
11219 .doit = nl80211_deauthenticate, 11260 .doit = nl80211_deauthenticate,
11220 .policy = nl80211_policy, 11261 .policy = nl80211_policy,
11221 .flags = GENL_ADMIN_PERM, 11262 .flags = GENL_UNS_ADMIN_PERM,
11222 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11263 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11223 NL80211_FLAG_NEED_RTNL, 11264 NL80211_FLAG_NEED_RTNL,
11224 }, 11265 },
@@ -11226,7 +11267,7 @@ static const struct genl_ops nl80211_ops[] = {
11226 .cmd = NL80211_CMD_DISASSOCIATE, 11267 .cmd = NL80211_CMD_DISASSOCIATE,
11227 .doit = nl80211_disassociate, 11268 .doit = nl80211_disassociate,
11228 .policy = nl80211_policy, 11269 .policy = nl80211_policy,
11229 .flags = GENL_ADMIN_PERM, 11270 .flags = GENL_UNS_ADMIN_PERM,
11230 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11271 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11231 NL80211_FLAG_NEED_RTNL, 11272 NL80211_FLAG_NEED_RTNL,
11232 }, 11273 },
@@ -11234,7 +11275,7 @@ static const struct genl_ops nl80211_ops[] = {
11234 .cmd = NL80211_CMD_JOIN_IBSS, 11275 .cmd = NL80211_CMD_JOIN_IBSS,
11235 .doit = nl80211_join_ibss, 11276 .doit = nl80211_join_ibss,
11236 .policy = nl80211_policy, 11277 .policy = nl80211_policy,
11237 .flags = GENL_ADMIN_PERM, 11278 .flags = GENL_UNS_ADMIN_PERM,
11238 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11279 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11239 NL80211_FLAG_NEED_RTNL, 11280 NL80211_FLAG_NEED_RTNL,
11240 }, 11281 },
@@ -11242,7 +11283,7 @@ static const struct genl_ops nl80211_ops[] = {
11242 .cmd = NL80211_CMD_LEAVE_IBSS, 11283 .cmd = NL80211_CMD_LEAVE_IBSS,
11243 .doit = nl80211_leave_ibss, 11284 .doit = nl80211_leave_ibss,
11244 .policy = nl80211_policy, 11285 .policy = nl80211_policy,
11245 .flags = GENL_ADMIN_PERM, 11286 .flags = GENL_UNS_ADMIN_PERM,
11246 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11287 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11247 NL80211_FLAG_NEED_RTNL, 11288 NL80211_FLAG_NEED_RTNL,
11248 }, 11289 },
@@ -11252,7 +11293,7 @@ static const struct genl_ops nl80211_ops[] = {
11252 .doit = nl80211_testmode_do, 11293 .doit = nl80211_testmode_do,
11253 .dumpit = nl80211_testmode_dump, 11294 .dumpit = nl80211_testmode_dump,
11254 .policy = nl80211_policy, 11295 .policy = nl80211_policy,
11255 .flags = GENL_ADMIN_PERM, 11296 .flags = GENL_UNS_ADMIN_PERM,
11256 .internal_flags = NL80211_FLAG_NEED_WIPHY | 11297 .internal_flags = NL80211_FLAG_NEED_WIPHY |
11257 NL80211_FLAG_NEED_RTNL, 11298 NL80211_FLAG_NEED_RTNL,
11258 }, 11299 },
@@ -11261,7 +11302,7 @@ static const struct genl_ops nl80211_ops[] = {
11261 .cmd = NL80211_CMD_CONNECT, 11302 .cmd = NL80211_CMD_CONNECT,
11262 .doit = nl80211_connect, 11303 .doit = nl80211_connect,
11263 .policy = nl80211_policy, 11304 .policy = nl80211_policy,
11264 .flags = GENL_ADMIN_PERM, 11305 .flags = GENL_UNS_ADMIN_PERM,
11265 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11306 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11266 NL80211_FLAG_NEED_RTNL, 11307 NL80211_FLAG_NEED_RTNL,
11267 }, 11308 },
@@ -11269,7 +11310,7 @@ static const struct genl_ops nl80211_ops[] = {
11269 .cmd = NL80211_CMD_DISCONNECT, 11310 .cmd = NL80211_CMD_DISCONNECT,
11270 .doit = nl80211_disconnect, 11311 .doit = nl80211_disconnect,
11271 .policy = nl80211_policy, 11312 .policy = nl80211_policy,
11272 .flags = GENL_ADMIN_PERM, 11313 .flags = GENL_UNS_ADMIN_PERM,
11273 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11314 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11274 NL80211_FLAG_NEED_RTNL, 11315 NL80211_FLAG_NEED_RTNL,
11275 }, 11316 },
@@ -11277,7 +11318,7 @@ static const struct genl_ops nl80211_ops[] = {
11277 .cmd = NL80211_CMD_SET_WIPHY_NETNS, 11318 .cmd = NL80211_CMD_SET_WIPHY_NETNS,
11278 .doit = nl80211_wiphy_netns, 11319 .doit = nl80211_wiphy_netns,
11279 .policy = nl80211_policy, 11320 .policy = nl80211_policy,
11280 .flags = GENL_ADMIN_PERM, 11321 .flags = GENL_UNS_ADMIN_PERM,
11281 .internal_flags = NL80211_FLAG_NEED_WIPHY | 11322 .internal_flags = NL80211_FLAG_NEED_WIPHY |
11282 NL80211_FLAG_NEED_RTNL, 11323 NL80211_FLAG_NEED_RTNL,
11283 }, 11324 },
@@ -11290,7 +11331,7 @@ static const struct genl_ops nl80211_ops[] = {
11290 .cmd = NL80211_CMD_SET_PMKSA, 11331 .cmd = NL80211_CMD_SET_PMKSA,
11291 .doit = nl80211_setdel_pmksa, 11332 .doit = nl80211_setdel_pmksa,
11292 .policy = nl80211_policy, 11333 .policy = nl80211_policy,
11293 .flags = GENL_ADMIN_PERM, 11334 .flags = GENL_UNS_ADMIN_PERM,
11294 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11335 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11295 NL80211_FLAG_NEED_RTNL, 11336 NL80211_FLAG_NEED_RTNL,
11296 }, 11337 },
@@ -11298,7 +11339,7 @@ static const struct genl_ops nl80211_ops[] = {
11298 .cmd = NL80211_CMD_DEL_PMKSA, 11339 .cmd = NL80211_CMD_DEL_PMKSA,
11299 .doit = nl80211_setdel_pmksa, 11340 .doit = nl80211_setdel_pmksa,
11300 .policy = nl80211_policy, 11341 .policy = nl80211_policy,
11301 .flags = GENL_ADMIN_PERM, 11342 .flags = GENL_UNS_ADMIN_PERM,
11302 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11343 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11303 NL80211_FLAG_NEED_RTNL, 11344 NL80211_FLAG_NEED_RTNL,
11304 }, 11345 },
@@ -11306,7 +11347,7 @@ static const struct genl_ops nl80211_ops[] = {
11306 .cmd = NL80211_CMD_FLUSH_PMKSA, 11347 .cmd = NL80211_CMD_FLUSH_PMKSA,
11307 .doit = nl80211_flush_pmksa, 11348 .doit = nl80211_flush_pmksa,
11308 .policy = nl80211_policy, 11349 .policy = nl80211_policy,
11309 .flags = GENL_ADMIN_PERM, 11350 .flags = GENL_UNS_ADMIN_PERM,
11310 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11351 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11311 NL80211_FLAG_NEED_RTNL, 11352 NL80211_FLAG_NEED_RTNL,
11312 }, 11353 },
@@ -11314,7 +11355,7 @@ static const struct genl_ops nl80211_ops[] = {
11314 .cmd = NL80211_CMD_REMAIN_ON_CHANNEL, 11355 .cmd = NL80211_CMD_REMAIN_ON_CHANNEL,
11315 .doit = nl80211_remain_on_channel, 11356 .doit = nl80211_remain_on_channel,
11316 .policy = nl80211_policy, 11357 .policy = nl80211_policy,
11317 .flags = GENL_ADMIN_PERM, 11358 .flags = GENL_UNS_ADMIN_PERM,
11318 .internal_flags = NL80211_FLAG_NEED_WDEV_UP | 11359 .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
11319 NL80211_FLAG_NEED_RTNL, 11360 NL80211_FLAG_NEED_RTNL,
11320 }, 11361 },
@@ -11322,7 +11363,7 @@ static const struct genl_ops nl80211_ops[] = {
11322 .cmd = NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, 11363 .cmd = NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
11323 .doit = nl80211_cancel_remain_on_channel, 11364 .doit = nl80211_cancel_remain_on_channel,
11324 .policy = nl80211_policy, 11365 .policy = nl80211_policy,
11325 .flags = GENL_ADMIN_PERM, 11366 .flags = GENL_UNS_ADMIN_PERM,
11326 .internal_flags = NL80211_FLAG_NEED_WDEV_UP | 11367 .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
11327 NL80211_FLAG_NEED_RTNL, 11368 NL80211_FLAG_NEED_RTNL,
11328 }, 11369 },
@@ -11330,7 +11371,7 @@ static const struct genl_ops nl80211_ops[] = {
11330 .cmd = NL80211_CMD_SET_TX_BITRATE_MASK, 11371 .cmd = NL80211_CMD_SET_TX_BITRATE_MASK,
11331 .doit = nl80211_set_tx_bitrate_mask, 11372 .doit = nl80211_set_tx_bitrate_mask,
11332 .policy = nl80211_policy, 11373 .policy = nl80211_policy,
11333 .flags = GENL_ADMIN_PERM, 11374 .flags = GENL_UNS_ADMIN_PERM,
11334 .internal_flags = NL80211_FLAG_NEED_NETDEV | 11375 .internal_flags = NL80211_FLAG_NEED_NETDEV |
11335 NL80211_FLAG_NEED_RTNL, 11376 NL80211_FLAG_NEED_RTNL,
11336 }, 11377 },
@@ -11338,7 +11379,7 @@ static const struct genl_ops nl80211_ops[] = {
11338 .cmd = NL80211_CMD_REGISTER_FRAME, 11379 .cmd = NL80211_CMD_REGISTER_FRAME,
11339 .doit = nl80211_register_mgmt, 11380 .doit = nl80211_register_mgmt,
11340 .policy = nl80211_policy, 11381 .policy = nl80211_policy,
11341 .flags = GENL_ADMIN_PERM, 11382 .flags = GENL_UNS_ADMIN_PERM,
11342 .internal_flags = NL80211_FLAG_NEED_WDEV | 11383 .internal_flags = NL80211_FLAG_NEED_WDEV |
11343 NL80211_FLAG_NEED_RTNL, 11384 NL80211_FLAG_NEED_RTNL,
11344 }, 11385 },
@@ -11346,7 +11387,7 @@ static const struct genl_ops nl80211_ops[] = {
11346 .cmd = NL80211_CMD_FRAME, 11387 .cmd = NL80211_CMD_FRAME,
11347 .doit = nl80211_tx_mgmt, 11388 .doit = nl80211_tx_mgmt,
11348 .policy = nl80211_policy, 11389 .policy = nl80211_policy,
11349 .flags = GENL_ADMIN_PERM, 11390 .flags = GENL_UNS_ADMIN_PERM,
11350 .internal_flags = NL80211_FLAG_NEED_WDEV_UP | 11391 .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
11351 NL80211_FLAG_NEED_RTNL, 11392 NL80211_FLAG_NEED_RTNL,
11352 }, 11393 },
@@ -11354,7 +11395,7 @@ static const struct genl_ops nl80211_ops[] = {
11354 .cmd = NL80211_CMD_FRAME_WAIT_CANCEL, 11395 .cmd = NL80211_CMD_FRAME_WAIT_CANCEL,
11355 .doit = nl80211_tx_mgmt_cancel_wait, 11396 .doit = nl80211_tx_mgmt_cancel_wait,
11356 .policy = nl80211_policy, 11397 .policy = nl80211_policy,
11357 .flags = GENL_ADMIN_PERM, 11398 .flags = GENL_UNS_ADMIN_PERM,
11358 .internal_flags = NL80211_FLAG_NEED_WDEV_UP | 11399 .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
11359 NL80211_FLAG_NEED_RTNL, 11400 NL80211_FLAG_NEED_RTNL,
11360 }, 11401 },
@@ -11362,7 +11403,7 @@ static const struct genl_ops nl80211_ops[] = {
11362 .cmd = NL80211_CMD_SET_POWER_SAVE, 11403 .cmd = NL80211_CMD_SET_POWER_SAVE,
11363 .doit = nl80211_set_power_save, 11404 .doit = nl80211_set_power_save,
11364 .policy = nl80211_policy, 11405 .policy = nl80211_policy,
11365 .flags = GENL_ADMIN_PERM, 11406 .flags = GENL_UNS_ADMIN_PERM,
11366 .internal_flags = NL80211_FLAG_NEED_NETDEV | 11407 .internal_flags = NL80211_FLAG_NEED_NETDEV |
11367 NL80211_FLAG_NEED_RTNL, 11408 NL80211_FLAG_NEED_RTNL,
11368 }, 11409 },
@@ -11378,7 +11419,7 @@ static const struct genl_ops nl80211_ops[] = {
11378 .cmd = NL80211_CMD_SET_CQM, 11419 .cmd = NL80211_CMD_SET_CQM,
11379 .doit = nl80211_set_cqm, 11420 .doit = nl80211_set_cqm,
11380 .policy = nl80211_policy, 11421 .policy = nl80211_policy,
11381 .flags = GENL_ADMIN_PERM, 11422 .flags = GENL_UNS_ADMIN_PERM,
11382 .internal_flags = NL80211_FLAG_NEED_NETDEV | 11423 .internal_flags = NL80211_FLAG_NEED_NETDEV |
11383 NL80211_FLAG_NEED_RTNL, 11424 NL80211_FLAG_NEED_RTNL,
11384 }, 11425 },
@@ -11386,7 +11427,7 @@ static const struct genl_ops nl80211_ops[] = {
11386 .cmd = NL80211_CMD_SET_CHANNEL, 11427 .cmd = NL80211_CMD_SET_CHANNEL,
11387 .doit = nl80211_set_channel, 11428 .doit = nl80211_set_channel,
11388 .policy = nl80211_policy, 11429 .policy = nl80211_policy,
11389 .flags = GENL_ADMIN_PERM, 11430 .flags = GENL_UNS_ADMIN_PERM,
11390 .internal_flags = NL80211_FLAG_NEED_NETDEV | 11431 .internal_flags = NL80211_FLAG_NEED_NETDEV |
11391 NL80211_FLAG_NEED_RTNL, 11432 NL80211_FLAG_NEED_RTNL,
11392 }, 11433 },
@@ -11394,7 +11435,7 @@ static const struct genl_ops nl80211_ops[] = {
11394 .cmd = NL80211_CMD_SET_WDS_PEER, 11435 .cmd = NL80211_CMD_SET_WDS_PEER,
11395 .doit = nl80211_set_wds_peer, 11436 .doit = nl80211_set_wds_peer,
11396 .policy = nl80211_policy, 11437 .policy = nl80211_policy,
11397 .flags = GENL_ADMIN_PERM, 11438 .flags = GENL_UNS_ADMIN_PERM,
11398 .internal_flags = NL80211_FLAG_NEED_NETDEV | 11439 .internal_flags = NL80211_FLAG_NEED_NETDEV |
11399 NL80211_FLAG_NEED_RTNL, 11440 NL80211_FLAG_NEED_RTNL,
11400 }, 11441 },
@@ -11402,7 +11443,7 @@ static const struct genl_ops nl80211_ops[] = {
11402 .cmd = NL80211_CMD_JOIN_MESH, 11443 .cmd = NL80211_CMD_JOIN_MESH,
11403 .doit = nl80211_join_mesh, 11444 .doit = nl80211_join_mesh,
11404 .policy = nl80211_policy, 11445 .policy = nl80211_policy,
11405 .flags = GENL_ADMIN_PERM, 11446 .flags = GENL_UNS_ADMIN_PERM,
11406 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11447 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11407 NL80211_FLAG_NEED_RTNL, 11448 NL80211_FLAG_NEED_RTNL,
11408 }, 11449 },
@@ -11410,7 +11451,7 @@ static const struct genl_ops nl80211_ops[] = {
11410 .cmd = NL80211_CMD_LEAVE_MESH, 11451 .cmd = NL80211_CMD_LEAVE_MESH,
11411 .doit = nl80211_leave_mesh, 11452 .doit = nl80211_leave_mesh,
11412 .policy = nl80211_policy, 11453 .policy = nl80211_policy,
11413 .flags = GENL_ADMIN_PERM, 11454 .flags = GENL_UNS_ADMIN_PERM,
11414 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11455 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11415 NL80211_FLAG_NEED_RTNL, 11456 NL80211_FLAG_NEED_RTNL,
11416 }, 11457 },
@@ -11418,7 +11459,7 @@ static const struct genl_ops nl80211_ops[] = {
11418 .cmd = NL80211_CMD_JOIN_OCB, 11459 .cmd = NL80211_CMD_JOIN_OCB,
11419 .doit = nl80211_join_ocb, 11460 .doit = nl80211_join_ocb,
11420 .policy = nl80211_policy, 11461 .policy = nl80211_policy,
11421 .flags = GENL_ADMIN_PERM, 11462 .flags = GENL_UNS_ADMIN_PERM,
11422 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11463 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11423 NL80211_FLAG_NEED_RTNL, 11464 NL80211_FLAG_NEED_RTNL,
11424 }, 11465 },
@@ -11426,7 +11467,7 @@ static const struct genl_ops nl80211_ops[] = {
11426 .cmd = NL80211_CMD_LEAVE_OCB, 11467 .cmd = NL80211_CMD_LEAVE_OCB,
11427 .doit = nl80211_leave_ocb, 11468 .doit = nl80211_leave_ocb,
11428 .policy = nl80211_policy, 11469 .policy = nl80211_policy,
11429 .flags = GENL_ADMIN_PERM, 11470 .flags = GENL_UNS_ADMIN_PERM,
11430 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11471 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11431 NL80211_FLAG_NEED_RTNL, 11472 NL80211_FLAG_NEED_RTNL,
11432 }, 11473 },
@@ -11443,7 +11484,7 @@ static const struct genl_ops nl80211_ops[] = {
11443 .cmd = NL80211_CMD_SET_WOWLAN, 11484 .cmd = NL80211_CMD_SET_WOWLAN,
11444 .doit = nl80211_set_wowlan, 11485 .doit = nl80211_set_wowlan,
11445 .policy = nl80211_policy, 11486 .policy = nl80211_policy,
11446 .flags = GENL_ADMIN_PERM, 11487 .flags = GENL_UNS_ADMIN_PERM,
11447 .internal_flags = NL80211_FLAG_NEED_WIPHY | 11488 .internal_flags = NL80211_FLAG_NEED_WIPHY |
11448 NL80211_FLAG_NEED_RTNL, 11489 NL80211_FLAG_NEED_RTNL,
11449 }, 11490 },
@@ -11452,7 +11493,7 @@ static const struct genl_ops nl80211_ops[] = {
11452 .cmd = NL80211_CMD_SET_REKEY_OFFLOAD, 11493 .cmd = NL80211_CMD_SET_REKEY_OFFLOAD,
11453 .doit = nl80211_set_rekey_data, 11494 .doit = nl80211_set_rekey_data,
11454 .policy = nl80211_policy, 11495 .policy = nl80211_policy,
11455 .flags = GENL_ADMIN_PERM, 11496 .flags = GENL_UNS_ADMIN_PERM,
11456 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11497 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11457 NL80211_FLAG_NEED_RTNL | 11498 NL80211_FLAG_NEED_RTNL |
11458 NL80211_FLAG_CLEAR_SKB, 11499 NL80211_FLAG_CLEAR_SKB,
@@ -11461,7 +11502,7 @@ static const struct genl_ops nl80211_ops[] = {
11461 .cmd = NL80211_CMD_TDLS_MGMT, 11502 .cmd = NL80211_CMD_TDLS_MGMT,
11462 .doit = nl80211_tdls_mgmt, 11503 .doit = nl80211_tdls_mgmt,
11463 .policy = nl80211_policy, 11504 .policy = nl80211_policy,
11464 .flags = GENL_ADMIN_PERM, 11505 .flags = GENL_UNS_ADMIN_PERM,
11465 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11506 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11466 NL80211_FLAG_NEED_RTNL, 11507 NL80211_FLAG_NEED_RTNL,
11467 }, 11508 },
@@ -11469,7 +11510,7 @@ static const struct genl_ops nl80211_ops[] = {
11469 .cmd = NL80211_CMD_TDLS_OPER, 11510 .cmd = NL80211_CMD_TDLS_OPER,
11470 .doit = nl80211_tdls_oper, 11511 .doit = nl80211_tdls_oper,
11471 .policy = nl80211_policy, 11512 .policy = nl80211_policy,
11472 .flags = GENL_ADMIN_PERM, 11513 .flags = GENL_UNS_ADMIN_PERM,
11473 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11514 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11474 NL80211_FLAG_NEED_RTNL, 11515 NL80211_FLAG_NEED_RTNL,
11475 }, 11516 },
@@ -11477,7 +11518,7 @@ static const struct genl_ops nl80211_ops[] = {
11477 .cmd = NL80211_CMD_UNEXPECTED_FRAME, 11518 .cmd = NL80211_CMD_UNEXPECTED_FRAME,
11478 .doit = nl80211_register_unexpected_frame, 11519 .doit = nl80211_register_unexpected_frame,
11479 .policy = nl80211_policy, 11520 .policy = nl80211_policy,
11480 .flags = GENL_ADMIN_PERM, 11521 .flags = GENL_UNS_ADMIN_PERM,
11481 .internal_flags = NL80211_FLAG_NEED_NETDEV | 11522 .internal_flags = NL80211_FLAG_NEED_NETDEV |
11482 NL80211_FLAG_NEED_RTNL, 11523 NL80211_FLAG_NEED_RTNL,
11483 }, 11524 },
@@ -11485,7 +11526,7 @@ static const struct genl_ops nl80211_ops[] = {
11485 .cmd = NL80211_CMD_PROBE_CLIENT, 11526 .cmd = NL80211_CMD_PROBE_CLIENT,
11486 .doit = nl80211_probe_client, 11527 .doit = nl80211_probe_client,
11487 .policy = nl80211_policy, 11528 .policy = nl80211_policy,
11488 .flags = GENL_ADMIN_PERM, 11529 .flags = GENL_UNS_ADMIN_PERM,
11489 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11530 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11490 NL80211_FLAG_NEED_RTNL, 11531 NL80211_FLAG_NEED_RTNL,
11491 }, 11532 },
@@ -11493,7 +11534,7 @@ static const struct genl_ops nl80211_ops[] = {
11493 .cmd = NL80211_CMD_REGISTER_BEACONS, 11534 .cmd = NL80211_CMD_REGISTER_BEACONS,
11494 .doit = nl80211_register_beacons, 11535 .doit = nl80211_register_beacons,
11495 .policy = nl80211_policy, 11536 .policy = nl80211_policy,
11496 .flags = GENL_ADMIN_PERM, 11537 .flags = GENL_UNS_ADMIN_PERM,
11497 .internal_flags = NL80211_FLAG_NEED_WIPHY | 11538 .internal_flags = NL80211_FLAG_NEED_WIPHY |
11498 NL80211_FLAG_NEED_RTNL, 11539 NL80211_FLAG_NEED_RTNL,
11499 }, 11540 },
@@ -11501,7 +11542,7 @@ static const struct genl_ops nl80211_ops[] = {
11501 .cmd = NL80211_CMD_SET_NOACK_MAP, 11542 .cmd = NL80211_CMD_SET_NOACK_MAP,
11502 .doit = nl80211_set_noack_map, 11543 .doit = nl80211_set_noack_map,
11503 .policy = nl80211_policy, 11544 .policy = nl80211_policy,
11504 .flags = GENL_ADMIN_PERM, 11545 .flags = GENL_UNS_ADMIN_PERM,
11505 .internal_flags = NL80211_FLAG_NEED_NETDEV | 11546 .internal_flags = NL80211_FLAG_NEED_NETDEV |
11506 NL80211_FLAG_NEED_RTNL, 11547 NL80211_FLAG_NEED_RTNL,
11507 }, 11548 },
@@ -11509,7 +11550,7 @@ static const struct genl_ops nl80211_ops[] = {
11509 .cmd = NL80211_CMD_START_P2P_DEVICE, 11550 .cmd = NL80211_CMD_START_P2P_DEVICE,
11510 .doit = nl80211_start_p2p_device, 11551 .doit = nl80211_start_p2p_device,
11511 .policy = nl80211_policy, 11552 .policy = nl80211_policy,
11512 .flags = GENL_ADMIN_PERM, 11553 .flags = GENL_UNS_ADMIN_PERM,
11513 .internal_flags = NL80211_FLAG_NEED_WDEV | 11554 .internal_flags = NL80211_FLAG_NEED_WDEV |
11514 NL80211_FLAG_NEED_RTNL, 11555 NL80211_FLAG_NEED_RTNL,
11515 }, 11556 },
@@ -11517,7 +11558,7 @@ static const struct genl_ops nl80211_ops[] = {
11517 .cmd = NL80211_CMD_STOP_P2P_DEVICE, 11558 .cmd = NL80211_CMD_STOP_P2P_DEVICE,
11518 .doit = nl80211_stop_p2p_device, 11559 .doit = nl80211_stop_p2p_device,
11519 .policy = nl80211_policy, 11560 .policy = nl80211_policy,
11520 .flags = GENL_ADMIN_PERM, 11561 .flags = GENL_UNS_ADMIN_PERM,
11521 .internal_flags = NL80211_FLAG_NEED_WDEV_UP | 11562 .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
11522 NL80211_FLAG_NEED_RTNL, 11563 NL80211_FLAG_NEED_RTNL,
11523 }, 11564 },
@@ -11525,7 +11566,7 @@ static const struct genl_ops nl80211_ops[] = {
11525 .cmd = NL80211_CMD_SET_MCAST_RATE, 11566 .cmd = NL80211_CMD_SET_MCAST_RATE,
11526 .doit = nl80211_set_mcast_rate, 11567 .doit = nl80211_set_mcast_rate,
11527 .policy = nl80211_policy, 11568 .policy = nl80211_policy,
11528 .flags = GENL_ADMIN_PERM, 11569 .flags = GENL_UNS_ADMIN_PERM,
11529 .internal_flags = NL80211_FLAG_NEED_NETDEV | 11570 .internal_flags = NL80211_FLAG_NEED_NETDEV |
11530 NL80211_FLAG_NEED_RTNL, 11571 NL80211_FLAG_NEED_RTNL,
11531 }, 11572 },
@@ -11533,7 +11574,7 @@ static const struct genl_ops nl80211_ops[] = {
11533 .cmd = NL80211_CMD_SET_MAC_ACL, 11574 .cmd = NL80211_CMD_SET_MAC_ACL,
11534 .doit = nl80211_set_mac_acl, 11575 .doit = nl80211_set_mac_acl,
11535 .policy = nl80211_policy, 11576 .policy = nl80211_policy,
11536 .flags = GENL_ADMIN_PERM, 11577 .flags = GENL_UNS_ADMIN_PERM,
11537 .internal_flags = NL80211_FLAG_NEED_NETDEV | 11578 .internal_flags = NL80211_FLAG_NEED_NETDEV |
11538 NL80211_FLAG_NEED_RTNL, 11579 NL80211_FLAG_NEED_RTNL,
11539 }, 11580 },
@@ -11541,7 +11582,7 @@ static const struct genl_ops nl80211_ops[] = {
11541 .cmd = NL80211_CMD_RADAR_DETECT, 11582 .cmd = NL80211_CMD_RADAR_DETECT,
11542 .doit = nl80211_start_radar_detection, 11583 .doit = nl80211_start_radar_detection,
11543 .policy = nl80211_policy, 11584 .policy = nl80211_policy,
11544 .flags = GENL_ADMIN_PERM, 11585 .flags = GENL_UNS_ADMIN_PERM,
11545 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11586 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11546 NL80211_FLAG_NEED_RTNL, 11587 NL80211_FLAG_NEED_RTNL,
11547 }, 11588 },
@@ -11554,7 +11595,7 @@ static const struct genl_ops nl80211_ops[] = {
11554 .cmd = NL80211_CMD_UPDATE_FT_IES, 11595 .cmd = NL80211_CMD_UPDATE_FT_IES,
11555 .doit = nl80211_update_ft_ies, 11596 .doit = nl80211_update_ft_ies,
11556 .policy = nl80211_policy, 11597 .policy = nl80211_policy,
11557 .flags = GENL_ADMIN_PERM, 11598 .flags = GENL_UNS_ADMIN_PERM,
11558 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11599 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11559 NL80211_FLAG_NEED_RTNL, 11600 NL80211_FLAG_NEED_RTNL,
11560 }, 11601 },
@@ -11562,7 +11603,7 @@ static const struct genl_ops nl80211_ops[] = {
11562 .cmd = NL80211_CMD_CRIT_PROTOCOL_START, 11603 .cmd = NL80211_CMD_CRIT_PROTOCOL_START,
11563 .doit = nl80211_crit_protocol_start, 11604 .doit = nl80211_crit_protocol_start,
11564 .policy = nl80211_policy, 11605 .policy = nl80211_policy,
11565 .flags = GENL_ADMIN_PERM, 11606 .flags = GENL_UNS_ADMIN_PERM,
11566 .internal_flags = NL80211_FLAG_NEED_WDEV_UP | 11607 .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
11567 NL80211_FLAG_NEED_RTNL, 11608 NL80211_FLAG_NEED_RTNL,
11568 }, 11609 },
@@ -11570,7 +11611,7 @@ static const struct genl_ops nl80211_ops[] = {
11570 .cmd = NL80211_CMD_CRIT_PROTOCOL_STOP, 11611 .cmd = NL80211_CMD_CRIT_PROTOCOL_STOP,
11571 .doit = nl80211_crit_protocol_stop, 11612 .doit = nl80211_crit_protocol_stop,
11572 .policy = nl80211_policy, 11613 .policy = nl80211_policy,
11573 .flags = GENL_ADMIN_PERM, 11614 .flags = GENL_UNS_ADMIN_PERM,
11574 .internal_flags = NL80211_FLAG_NEED_WDEV_UP | 11615 .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
11575 NL80211_FLAG_NEED_RTNL, 11616 NL80211_FLAG_NEED_RTNL,
11576 }, 11617 },
@@ -11585,7 +11626,7 @@ static const struct genl_ops nl80211_ops[] = {
11585 .cmd = NL80211_CMD_SET_COALESCE, 11626 .cmd = NL80211_CMD_SET_COALESCE,
11586 .doit = nl80211_set_coalesce, 11627 .doit = nl80211_set_coalesce,
11587 .policy = nl80211_policy, 11628 .policy = nl80211_policy,
11588 .flags = GENL_ADMIN_PERM, 11629 .flags = GENL_UNS_ADMIN_PERM,
11589 .internal_flags = NL80211_FLAG_NEED_WIPHY | 11630 .internal_flags = NL80211_FLAG_NEED_WIPHY |
11590 NL80211_FLAG_NEED_RTNL, 11631 NL80211_FLAG_NEED_RTNL,
11591 }, 11632 },
@@ -11593,7 +11634,7 @@ static const struct genl_ops nl80211_ops[] = {
11593 .cmd = NL80211_CMD_CHANNEL_SWITCH, 11634 .cmd = NL80211_CMD_CHANNEL_SWITCH,
11594 .doit = nl80211_channel_switch, 11635 .doit = nl80211_channel_switch,
11595 .policy = nl80211_policy, 11636 .policy = nl80211_policy,
11596 .flags = GENL_ADMIN_PERM, 11637 .flags = GENL_UNS_ADMIN_PERM,
11597 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11638 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11598 NL80211_FLAG_NEED_RTNL, 11639 NL80211_FLAG_NEED_RTNL,
11599 }, 11640 },
@@ -11602,7 +11643,7 @@ static const struct genl_ops nl80211_ops[] = {
11602 .doit = nl80211_vendor_cmd, 11643 .doit = nl80211_vendor_cmd,
11603 .dumpit = nl80211_vendor_cmd_dump, 11644 .dumpit = nl80211_vendor_cmd_dump,
11604 .policy = nl80211_policy, 11645 .policy = nl80211_policy,
11605 .flags = GENL_ADMIN_PERM, 11646 .flags = GENL_UNS_ADMIN_PERM,
11606 .internal_flags = NL80211_FLAG_NEED_WIPHY | 11647 .internal_flags = NL80211_FLAG_NEED_WIPHY |
11607 NL80211_FLAG_NEED_RTNL, 11648 NL80211_FLAG_NEED_RTNL,
11608 }, 11649 },
@@ -11610,7 +11651,7 @@ static const struct genl_ops nl80211_ops[] = {
11610 .cmd = NL80211_CMD_SET_QOS_MAP, 11651 .cmd = NL80211_CMD_SET_QOS_MAP,
11611 .doit = nl80211_set_qos_map, 11652 .doit = nl80211_set_qos_map,
11612 .policy = nl80211_policy, 11653 .policy = nl80211_policy,
11613 .flags = GENL_ADMIN_PERM, 11654 .flags = GENL_UNS_ADMIN_PERM,
11614 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11655 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11615 NL80211_FLAG_NEED_RTNL, 11656 NL80211_FLAG_NEED_RTNL,
11616 }, 11657 },
@@ -11618,7 +11659,7 @@ static const struct genl_ops nl80211_ops[] = {
11618 .cmd = NL80211_CMD_ADD_TX_TS, 11659 .cmd = NL80211_CMD_ADD_TX_TS,
11619 .doit = nl80211_add_tx_ts, 11660 .doit = nl80211_add_tx_ts,
11620 .policy = nl80211_policy, 11661 .policy = nl80211_policy,
11621 .flags = GENL_ADMIN_PERM, 11662 .flags = GENL_UNS_ADMIN_PERM,
11622 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11663 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11623 NL80211_FLAG_NEED_RTNL, 11664 NL80211_FLAG_NEED_RTNL,
11624 }, 11665 },
@@ -11626,7 +11667,7 @@ static const struct genl_ops nl80211_ops[] = {
11626 .cmd = NL80211_CMD_DEL_TX_TS, 11667 .cmd = NL80211_CMD_DEL_TX_TS,
11627 .doit = nl80211_del_tx_ts, 11668 .doit = nl80211_del_tx_ts,
11628 .policy = nl80211_policy, 11669 .policy = nl80211_policy,
11629 .flags = GENL_ADMIN_PERM, 11670 .flags = GENL_UNS_ADMIN_PERM,
11630 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11671 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11631 NL80211_FLAG_NEED_RTNL, 11672 NL80211_FLAG_NEED_RTNL,
11632 }, 11673 },
@@ -11634,7 +11675,7 @@ static const struct genl_ops nl80211_ops[] = {
11634 .cmd = NL80211_CMD_TDLS_CHANNEL_SWITCH, 11675 .cmd = NL80211_CMD_TDLS_CHANNEL_SWITCH,
11635 .doit = nl80211_tdls_channel_switch, 11676 .doit = nl80211_tdls_channel_switch,
11636 .policy = nl80211_policy, 11677 .policy = nl80211_policy,
11637 .flags = GENL_ADMIN_PERM, 11678 .flags = GENL_UNS_ADMIN_PERM,
11638 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11679 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11639 NL80211_FLAG_NEED_RTNL, 11680 NL80211_FLAG_NEED_RTNL,
11640 }, 11681 },
@@ -11642,7 +11683,7 @@ static const struct genl_ops nl80211_ops[] = {
11642 .cmd = NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH, 11683 .cmd = NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH,
11643 .doit = nl80211_tdls_cancel_channel_switch, 11684 .doit = nl80211_tdls_cancel_channel_switch,
11644 .policy = nl80211_policy, 11685 .policy = nl80211_policy,
11645 .flags = GENL_ADMIN_PERM, 11686 .flags = GENL_UNS_ADMIN_PERM,
11646 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 11687 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
11647 NL80211_FLAG_NEED_RTNL, 11688 NL80211_FLAG_NEED_RTNL,
11648 }, 11689 },
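The long run of GENL_ADMIN_PERM -> GENL_UNS_ADMIN_PERM conversions above moves these nl80211 operations from requiring CAP_NET_ADMIN in the initial user namespace to accepting CAP_NET_ADMIN in the user namespace that owns the network namespace the request arrives in. Roughly, the distinction in the generic netlink receive path looks like this (a sketch of the check in net/netlink/genetlink.c; exact field names may differ by kernel version):

	/* GENL_ADMIN_PERM: CAP_NET_ADMIN in the init user namespace */
	if ((ops->flags & GENL_ADMIN_PERM) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	/* GENL_UNS_ADMIN_PERM: CAP_NET_ADMIN in the user namespace
	 * owning the receiving network namespace */
	if ((ops->flags & GENL_UNS_ADMIN_PERM) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;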
@@ -12092,7 +12133,7 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
12092 struct net_device *netdev, const u8 *bssid, 12133 struct net_device *netdev, const u8 *bssid,
12093 const u8 *req_ie, size_t req_ie_len, 12134 const u8 *req_ie, size_t req_ie_len,
12094 const u8 *resp_ie, size_t resp_ie_len, 12135 const u8 *resp_ie, size_t resp_ie_len,
12095 u16 status, gfp_t gfp) 12136 int status, gfp_t gfp)
12096{ 12137{
12097 struct sk_buff *msg; 12138 struct sk_buff *msg;
12098 void *hdr; 12139 void *hdr;
@@ -12110,7 +12151,10 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
12110 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 12151 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
12111 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || 12152 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
12112 (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) || 12153 (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) ||
12113 nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, status) || 12154 nla_put_u16(msg, NL80211_ATTR_STATUS_CODE,
12155 status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE :
12156 status) ||
12157 (status < 0 && nla_put_flag(msg, NL80211_ATTR_TIMED_OUT)) ||
12114 (req_ie && 12158 (req_ie &&
12115 nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) || 12159 nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
12116 (resp_ie && 12160 (resp_ie &&
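With status widened from u16 to int, a negative value now means the attempt failed internally (e.g. timed out) rather than carrying an IEEE 802.11 status code. The hunk above keeps old userspace working by reporting WLAN_STATUS_UNSPECIFIED_FAILURE in that case and adding the NL80211_ATTR_TIMED_OUT flag; condensed (error handling elided):

	/* status >= 0: 802.11 status code from the peer
	 * status <  0: internal failure, flagged as a timeout */
	nla_put_u16(msg, NL80211_ATTR_STATUS_CODE,
		    status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE : status);
	if (status < 0)
		nla_put_flag(msg, NL80211_ATTR_TIMED_OUT);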
@@ -12126,7 +12170,6 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
12126 nla_put_failure: 12170 nla_put_failure:
12127 genlmsg_cancel(msg, hdr); 12171 genlmsg_cancel(msg, hdr);
12128 nlmsg_free(msg); 12172 nlmsg_free(msg);
12129
12130} 12173}
12131 12174
12132void nl80211_send_roamed(struct cfg80211_registered_device *rdev, 12175void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
@@ -12165,7 +12208,6 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
12165 nla_put_failure: 12208 nla_put_failure:
12166 genlmsg_cancel(msg, hdr); 12209 genlmsg_cancel(msg, hdr);
12167 nlmsg_free(msg); 12210 nlmsg_free(msg);
12168
12169} 12211}
12170 12212
12171void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, 12213void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
@@ -12203,7 +12245,6 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
12203 nla_put_failure: 12245 nla_put_failure:
12204 genlmsg_cancel(msg, hdr); 12246 genlmsg_cancel(msg, hdr);
12205 nlmsg_free(msg); 12247 nlmsg_free(msg);
12206
12207} 12248}
12208 12249
12209void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, 12250void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
@@ -13545,7 +13586,6 @@ void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp)
13545 if (hdr) 13586 if (hdr)
13546 genlmsg_cancel(msg, hdr); 13587 genlmsg_cancel(msg, hdr);
13547 nlmsg_free(msg); 13588 nlmsg_free(msg);
13548
13549} 13589}
13550EXPORT_SYMBOL(cfg80211_crit_proto_stopped); 13590EXPORT_SYMBOL(cfg80211_crit_proto_stopped);
13551 13591
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 84d4edf1d545..a63f402b10b7 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -55,7 +55,7 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
55 struct net_device *netdev, const u8 *bssid, 55 struct net_device *netdev, const u8 *bssid,
56 const u8 *req_ie, size_t req_ie_len, 56 const u8 *req_ie, size_t req_ie_len,
57 const u8 *resp_ie, size_t resp_ie_len, 57 const u8 *resp_ie, size_t resp_ie_len,
58 u16 status, gfp_t gfp); 58 int status, gfp_t gfp);
59void nl80211_send_roamed(struct cfg80211_registered_device *rdev, 59void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
60 struct net_device *netdev, const u8 *bssid, 60 struct net_device *netdev, const u8 *bssid,
61 const u8 *req_ie, size_t req_ie_len, 61 const u8 *req_ie, size_t req_ie_len,
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 584fdc347221..add6824c44fd 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -244,9 +244,7 @@ void cfg80211_conn_work(struct work_struct *work)
244 if (cfg80211_conn_do_work(wdev)) { 244 if (cfg80211_conn_do_work(wdev)) {
245 __cfg80211_connect_result( 245 __cfg80211_connect_result(
246 wdev->netdev, bssid, 246 wdev->netdev, bssid,
247 NULL, 0, NULL, 0, 247 NULL, 0, NULL, 0, -1, false, NULL);
248 WLAN_STATUS_UNSPECIFIED_FAILURE,
249 false, NULL);
250 } 248 }
251 wdev_unlock(wdev); 249 wdev_unlock(wdev);
252 } 250 }
@@ -648,7 +646,7 @@ static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
648void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, 646void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
649 const u8 *req_ie, size_t req_ie_len, 647 const u8 *req_ie, size_t req_ie_len,
650 const u8 *resp_ie, size_t resp_ie_len, 648 const u8 *resp_ie, size_t resp_ie_len,
651 u16 status, bool wextev, 649 int status, bool wextev,
652 struct cfg80211_bss *bss) 650 struct cfg80211_bss *bss)
653{ 651{
654 struct wireless_dev *wdev = dev->ieee80211_ptr; 652 struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -757,7 +755,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
757void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid, 755void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
758 struct cfg80211_bss *bss, const u8 *req_ie, 756 struct cfg80211_bss *bss, const u8 *req_ie,
759 size_t req_ie_len, const u8 *resp_ie, 757 size_t req_ie_len, const u8 *resp_ie,
760 size_t resp_ie_len, u16 status, gfp_t gfp) 758 size_t resp_ie_len, int status, gfp_t gfp)
761{ 759{
762 struct wireless_dev *wdev = dev->ieee80211_ptr; 760 struct wireless_dev *wdev = dev->ieee80211_ptr;
763 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 761 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
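The SME now passes -1 instead of WLAN_STATUS_UNSPECIFIED_FAILURE when its own connect work fails, matching the int status convention above. A driver that can only tell that a connection attempt timed out could report it the same way (a sketch using the cfg80211_connect_bss() signature from the hunk above):

	/* Negative status selects the timed-out reporting path;
	 * no BSS entry and no response IEs are available. */
	cfg80211_connect_bss(dev, bssid, NULL,
			     req_ie, req_ie_len,	/* request IEs, if any */
			     NULL, 0,			/* no response IEs */
			     -1, GFP_KERNEL);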
diff --git a/samples/bpf/sockex2_user.c b/samples/bpf/sockex2_user.c
index 29a276d766fc..8a4085c2d117 100644
--- a/samples/bpf/sockex2_user.c
+++ b/samples/bpf/sockex2_user.c
@@ -5,6 +5,7 @@
5#include "bpf_load.h" 5#include "bpf_load.h"
6#include <unistd.h> 6#include <unistd.h>
7#include <arpa/inet.h> 7#include <arpa/inet.h>
8#include <sys/resource.h>
8 9
9struct pair { 10struct pair {
10 __u64 packets; 11 __u64 packets;
@@ -13,11 +14,13 @@ struct pair {
13 14
14int main(int ac, char **argv) 15int main(int ac, char **argv)
15{ 16{
17 struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
16 char filename[256]; 18 char filename[256];
17 FILE *f; 19 FILE *f;
18 int i, sock; 20 int i, sock;
19 21
20 snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); 22 snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
23 setrlimit(RLIMIT_MEMLOCK, &r);
21 24
22 if (load_bpf_file(filename)) { 25 if (load_bpf_file(filename)) {
23 printf("%s", bpf_log_buf); 26 printf("%s", bpf_log_buf);
diff --git a/samples/bpf/sockex3_user.c b/samples/bpf/sockex3_user.c
index 2617772d060d..d4184ab5f3ac 100644
--- a/samples/bpf/sockex3_user.c
+++ b/samples/bpf/sockex3_user.c
@@ -5,6 +5,7 @@
5#include "bpf_load.h" 5#include "bpf_load.h"
6#include <unistd.h> 6#include <unistd.h>
7#include <arpa/inet.h> 7#include <arpa/inet.h>
8#include <sys/resource.h>
8 9
9struct flow_keys { 10struct flow_keys {
10 __be32 src; 11 __be32 src;
@@ -23,11 +24,13 @@ struct pair {
23 24
24int main(int argc, char **argv) 25int main(int argc, char **argv)
25{ 26{
27 struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
26 char filename[256]; 28 char filename[256];
27 FILE *f; 29 FILE *f;
28 int i, sock; 30 int i, sock;
29 31
30 snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); 32 snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
33 setrlimit(RLIMIT_MEMLOCK, &r);
31 34
32 if (load_bpf_file(filename)) { 35 if (load_bpf_file(filename)) {
33 printf("%s", bpf_log_buf); 36 printf("%s", bpf_log_buf);
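Both sample loaders gain the same two lines: BPF map allocations are charged against RLIMIT_MEMLOCK, and the default limit is easily exceeded, so the limit is lifted before load_bpf_file(). The samples ignore the setrlimit() return value; a slightly stricter version of the same pattern would be:

	#include <sys/resource.h>
	#include <stdio.h>

	/* Lift the locked-memory limit so BPF map creation does not
	 * fail with EPERM; warn if the limit cannot be raised. */
	struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };

	if (setrlimit(RLIMIT_MEMLOCK, &r))
		perror("setrlimit(RLIMIT_MEMLOCK)");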
diff --git a/tools/virtio/ringtest/Makefile b/tools/virtio/ringtest/Makefile
index 6173adae9f08..877a8a4721b6 100644
--- a/tools/virtio/ringtest/Makefile
+++ b/tools/virtio/ringtest/Makefile
@@ -1,6 +1,6 @@
1all: 1all:
2 2
3all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder noring 3all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder ptr_ring noring
4 4
5CFLAGS += -Wall 5CFLAGS += -Wall
6CFLAGS += -pthread -O2 -ggdb 6CFLAGS += -pthread -O2 -ggdb
@@ -8,6 +8,7 @@ LDFLAGS += -pthread -O2 -ggdb
8 8
9main.o: main.c main.h 9main.o: main.c main.h
10ring.o: ring.c main.h 10ring.o: ring.c main.h
11ptr_ring.o: ptr_ring.c main.h ../../../include/linux/ptr_ring.h
11virtio_ring_0_9.o: virtio_ring_0_9.c main.h 12virtio_ring_0_9.o: virtio_ring_0_9.c main.h
12virtio_ring_poll.o: virtio_ring_poll.c virtio_ring_0_9.c main.h 13virtio_ring_poll.o: virtio_ring_poll.c virtio_ring_0_9.c main.h
13virtio_ring_inorder.o: virtio_ring_inorder.c virtio_ring_0_9.c main.h 14virtio_ring_inorder.o: virtio_ring_inorder.c virtio_ring_0_9.c main.h
@@ -15,6 +16,7 @@ ring: ring.o main.o
15virtio_ring_0_9: virtio_ring_0_9.o main.o 16virtio_ring_0_9: virtio_ring_0_9.o main.o
16virtio_ring_poll: virtio_ring_poll.o main.o 17virtio_ring_poll: virtio_ring_poll.o main.o
17virtio_ring_inorder: virtio_ring_inorder.o main.o 18virtio_ring_inorder: virtio_ring_inorder.o main.o
19ptr_ring: ptr_ring.o main.o
18noring: noring.o main.o 20noring: noring.o main.o
19clean: 21clean:
20 -rm main.o 22 -rm main.o
@@ -22,6 +24,7 @@ clean:
22 -rm virtio_ring_0_9.o virtio_ring_0_9 24 -rm virtio_ring_0_9.o virtio_ring_0_9
23 -rm virtio_ring_poll.o virtio_ring_poll 25 -rm virtio_ring_poll.o virtio_ring_poll
24 -rm virtio_ring_inorder.o virtio_ring_inorder 26 -rm virtio_ring_inorder.o virtio_ring_inorder
27 -rm ptr_ring.o ptr_ring
25 -rm noring.o noring 28 -rm noring.o noring
26 29
27.PHONY: all clean 30.PHONY: all clean
diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c
new file mode 100644
index 000000000000..74abd746ae91
--- /dev/null
+++ b/tools/virtio/ringtest/ptr_ring.c
@@ -0,0 +1,192 @@
1#define _GNU_SOURCE
2#include "main.h"
3#include <stdlib.h>
4#include <stdio.h>
5#include <string.h>
6#include <pthread.h>
7#include <malloc.h>
8#include <assert.h>
9#include <errno.h>
10#include <limits.h>
11
12#define SMP_CACHE_BYTES 64
13#define cache_line_size() SMP_CACHE_BYTES
14#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
15#define unlikely(x) (__builtin_expect(!!(x), 0))
16#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
17typedef pthread_spinlock_t spinlock_t;
18
19typedef int gfp_t;
20static void *kzalloc(unsigned size, gfp_t gfp)
21{
22 void *p = memalign(64, size);
23 if (!p)
24 return p;
25 memset(p, 0, size);
26
27 return p;
28}
29
30static void kfree(void *p)
31{
32 if (p)
33 free(p);
34}
35
36static void spin_lock_init(spinlock_t *lock)
37{
38 int r = pthread_spin_init(lock, 0);
39 assert(!r);
40}
41
42static void spin_lock(spinlock_t *lock)
43{
44 int ret = pthread_spin_lock(lock);
45 assert(!ret);
46}
47
48static void spin_unlock(spinlock_t *lock)
49{
50 int ret = pthread_spin_unlock(lock);
51 assert(!ret);
52}
53
54static void spin_lock_bh(spinlock_t *lock)
55{
56 spin_lock(lock);
57}
58
59static void spin_unlock_bh(spinlock_t *lock)
60{
61 spin_unlock(lock);
62}
63
64static void spin_lock_irq(spinlock_t *lock)
65{
66 spin_lock(lock);
67}
68
69static void spin_unlock_irq(spinlock_t *lock)
70{
71 spin_unlock(lock);
72}
73
74static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
75{
76 spin_lock(lock);
77}
78
79static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
80{
81 spin_unlock(lock);
82}
83
84#include "../../../include/linux/ptr_ring.h"
85
86static unsigned long long headcnt, tailcnt;
87static struct ptr_ring array ____cacheline_aligned_in_smp;
88
89/* implemented by ring */
90void alloc_ring(void)
91{
92 int ret = ptr_ring_init(&array, ring_size, 0);
93 assert(!ret);
94}
95
96/* guest side */
97int add_inbuf(unsigned len, void *buf, void *datap)
98{
99 int ret;
100
101 ret = __ptr_ring_produce(&array, buf);
102 if (ret >= 0) {
103 ret = 0;
104 headcnt++;
105 }
106
107 return ret;
108}
109
110/*
111 * ptr_ring API provides no way for producer to find out whether a given
112 * buffer was consumed. Our tests merely require that a successful get_buf
113 * implies that add_inbuf succeeded in the past and that add_inbuf will
114 * succeed again; fake it accordingly.
115 */
116void *get_buf(unsigned *lenp, void **bufp)
117{
118 void *datap;
119
120 if (tailcnt == headcnt || __ptr_ring_full(&array))
121 datap = NULL;
122 else {
123 datap = "Buffer\n";
124 ++tailcnt;
125 }
126
127 return datap;
128}
129
130void poll_used(void)
131{
132 void *b;
133
134 do {
135 if (tailcnt == headcnt || __ptr_ring_full(&array)) {
136 b = NULL;
137 barrier();
138 } else {
139 b = "Buffer\n";
140 }
141 } while (!b);
142}
143
144void disable_call()
145{
146 assert(0);
147}
148
149bool enable_call()
150{
151 assert(0);
152}
153
154void kick_available(void)
155{
156 assert(0);
157}
158
159/* host side */
160void disable_kick()
161{
162 assert(0);
163}
164
165bool enable_kick()
166{
167 assert(0);
168}
169
170void poll_avail(void)
171{
172 void *b;
173
174 do {
175 barrier();
176 b = __ptr_ring_peek(&array);
177 } while (!b);
178}
179
180bool use_buf(unsigned *lenp, void **bufp)
181{
182 void *ptr;
183
184 ptr = __ptr_ring_consume(&array);
185
186 return ptr;
187}
188
189void call_used(void)
190{
191 assert(0);
192}
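The shims above (kzalloc()/kfree(), a pthread-backed spinlock_t, and the cache-line macros) exist only so that include/linux/ptr_ring.h compiles unchanged in userspace; the ring itself is driven through the ringtest callbacks (alloc_ring, add_inbuf, poll_avail, use_buf). The same header can also be exercised directly; a minimal round-trip under the same shims might look like this (ring size 256 is arbitrary; the gfp argument is ignored by the userspace kzalloc()):

	static struct ptr_ring ring;

	static void smoke_test(void)
	{
		static char item[] = "Buffer\n";
		void *out;

		assert(!ptr_ring_init(&ring, 256, 0));	  /* 0 on success */
		assert(!__ptr_ring_produce(&ring, item)); /* 0 on success */
		out = __ptr_ring_consume(&ring);	  /* NULL when empty */
		assert(out == item);
	}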