author    Bob Peterson <rpeterso@redhat.com>  2017-03-24 09:22:07 -0400
committer Bob Peterson <rpeterso@redhat.com>  2017-04-03 09:08:58 -0400
commit    c369898759e0e143a513d022121290370f001d01 (patch)
tree      5c770207fcec81a9fac2380821a7fad02cdd32e2
parent    cc963a11b67b796c25c5b827b25d2bcc92ce1779 (diff)
parent    f9fe1c12d126f9887441fa5bb165046f30ddd4b5 (diff)
Merge commit 'f9fe1c1' into for-next
Andreas Gruenbacher added function rhashtable_lookup_get_insert_fast to
rhashtable.h. That patch went into the net-next tree. In a subsequent patch,
GFS2 makes use of this new function. Therefore, we needed a merge commit to
make the new function available to GFS2.

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
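For readers unfamiliar with the new helper, here is a minimal, hypothetical sketch of how rhashtable_lookup_get_insert_fast() is typically used: it either inserts the caller's object or hands back the entry already holding the key (or an ERR_PTR on failure). The demo_obj structure, demo_params table and demo_get() helper are invented for illustration and are not part of this merge; only the rhashtable call itself reflects the function added in net-next.

#include <linux/err.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>
#include <linux/stddef.h>

struct demo_obj {
	u64 key;
	struct rhash_head node;
};

static const struct rhashtable_params demo_params = {
	.key_len     = sizeof(u64),
	.key_offset  = offsetof(struct demo_obj, key),
	.head_offset = offsetof(struct demo_obj, node),
};

/* Look up the object for @key, allocating and inserting one if needed. */
static struct demo_obj *demo_get(struct rhashtable *ht, u64 key)
{
	struct demo_obj *new, *old;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);
	new->key = key;

	old = rhashtable_lookup_get_insert_fast(ht, &new->node, demo_params);
	if (!old)
		return new;	/* NULL: our object was inserted */

	kfree(new);
	return old;		/* existing entry, or ERR_PTR() on error */
}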
-rw-r--r--Documentation/devicetree/bindings/net/brcm,bcmgenet.txt19
-rw-r--r--Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt5
-rw-r--r--Documentation/devicetree/bindings/net/marvell-pp2.txt62
-rw-r--r--Documentation/devicetree/bindings/net/stmmac.txt75
-rw-r--r--Documentation/networking/ip-sysctl.txt13
-rw-r--r--Documentation/networking/ipvs-sysctl.txt68
-rw-r--r--Documentation/networking/mpls-sysctl.txt19
-rw-r--r--MAINTAINERS13
-rw-r--r--drivers/atm/ambassador.c5
-rw-r--r--drivers/hv/ring_buffer.c94
-rw-r--r--drivers/infiniband/hw/qedr/main.c5
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h3
-rw-r--r--drivers/infiniband/hw/qedr/qedr_cm.c3
-rw-r--r--drivers/infiniband/hw/qedr/qedr_hsi.h56
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c3
-rw-r--r--drivers/isdn/hardware/mISDN/Kconfig6
-rw-r--r--drivers/isdn/hardware/mISDN/hfc_multi_8xx.h2
-rw-r--r--drivers/net/bonding/bond_3ad.c20
-rw-r--r--drivers/net/bonding/bond_main.c53
-rw-r--r--drivers/net/dsa/mv88e6xxx/Makefile1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c661
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h11
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c300
-rw-r--r--drivers/net/dsa/mv88e6xxx/mv88e6xxx.h44
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c78
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h16
-rw-r--r--drivers/net/dummy.c15
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c3
-rw-r--r--drivers/net/ethernet/apm/Kconfig1
-rw-r--r--drivers/net/ethernet/apm/Makefile1
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/Kconfig11
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/Makefile6
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/enet.c71
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/enet.h43
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/mac.c116
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/mac.h110
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.c756
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.h75
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/ring.c81
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/ring.h119
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c3
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.h2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c74
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c7
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h5
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig8
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c103
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c34
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c51
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c292
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h16
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c15
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c62
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c7
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c19
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c14
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c126
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c73
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c10
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_iq.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h36
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c13
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.c39
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.h5
-rw-r--r--drivers/net/ethernet/ethoc.c4
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c182
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.h8
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c23
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.h10
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c76
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c6
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-scc.c6
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c320
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h1
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h32
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h65
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c457
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.h8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c338
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c163
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c131
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c10
-rw-r--r--drivers/net/ethernet/intel/i40evf/Makefile2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h65
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c31
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h3
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h33
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h29
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_client.c563
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_client.h166
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c31
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c89
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c13
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h58
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c65
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c514
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c168
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c6
-rw-r--r--drivers/net/ethernet/marvell/Kconfig4
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c61
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c1186
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c607
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c134
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h69
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c767
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h85
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c172
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c198
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c83
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c1297
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c4
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c41
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h159
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c1092
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h14
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c17
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c127
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c35
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_offload.c31
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c19
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c65
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h15
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c1566
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c186
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h825
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c56
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c184
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c48
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ptp.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h47
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c220
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.h11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c30
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h45
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c85
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c222
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c6
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c14
-rw-r--r--drivers/net/ethernet/realtek/8139too.c14
-rw-r--r--drivers/net/ethernet/realtek/r8169.c45
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c72
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c4
-rw-r--r--drivers/net/ethernet/sfc/tx.c4
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c14
-rw-r--r--drivers/net/ethernet/silan/sc92031.c83
-rw-r--r--drivers/net/ethernet/sis/sis190.c14
-rw-r--r--drivers/net/ethernet/sis/sis900.c18
-rw-r--r--drivers/net/ethernet/smsc/epic100.c16
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c51
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c98
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c45
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h77
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c371
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h103
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c373
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c225
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c56
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h49
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c1866
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c149
-rw-r--r--drivers/net/ethernet/sun/cassini.c98
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c27
-rw-r--r--drivers/net/ethernet/sun/niu.c37
-rw-r--r--drivers/net/ethernet/sun/sungem.c99
-rw-r--r--drivers/net/ethernet/sun/sunhme.c62
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c116
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c56
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.h27
-rw-r--r--drivers/net/ethernet/synopsys/Kconfig41
-rw-r--r--drivers/net/ethernet/synopsys/Makefile9
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-common.c736
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c648
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c3146
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c1334
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c80
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h746
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac.h651
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c12
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c51
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c24
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c14
-rw-r--r--drivers/net/ethernet/via/via-rhine.c14
-rw-r--r--drivers/net/ethernet/via/via-velocity.c62
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/fjes/fjes_ethtool.c19
-rw-r--r--drivers/net/gtp.c531
-rw-r--r--drivers/net/hyperv/hyperv_net.h5
-rw-r--r--drivers/net/hyperv/netvsc.c195
-rw-r--r--drivers/net/hyperv/netvsc_drv.c58
-rw-r--r--drivers/net/hyperv/rndis_filter.c15
-rw-r--r--drivers/net/loopback.c34
-rw-r--r--drivers/net/ntb_netdev.c25
-rw-r--r--drivers/net/phy/bcm-phy-lib.c6
-rw-r--r--drivers/net/phy/bcm7xxx.c215
-rw-r--r--drivers/net/phy/mdio-bcm-unimac.c3
-rw-r--r--drivers/net/phy/mdio-xgene.c2
-rw-r--r--drivers/net/tun.c24
-rw-r--r--drivers/net/usb/asix_devices.c12
-rw-r--r--drivers/net/usb/ax88179_178a.c14
-rw-r--r--drivers/net/usb/catc.c31
-rw-r--r--drivers/net/usb/cdc_ncm.c4
-rw-r--r--drivers/net/usb/dm9601.c4
-rw-r--r--drivers/net/usb/mcs7830.c4
-rw-r--r--drivers/net/usb/pegasus.c14
-rw-r--r--drivers/net/usb/r8152.c166
-rw-r--r--drivers/net/usb/rtl8150.c35
-rw-r--r--drivers/net/usb/sierra_net.c4
-rw-r--r--drivers/net/usb/smsc75xx.c4
-rw-r--r--drivers/net/usb/smsc95xx.c24
-rw-r--r--drivers/net/usb/sr9700.c4
-rw-r--r--drivers/net/usb/sr9800.c4
-rw-r--r--drivers/net/usb/usbnet.c19
-rw-r--r--drivers/net/vrf.c8
-rw-r--r--drivers/net/vxlan.c2
-rw-r--r--drivers/scsi/qedf/Makefile2
-rw-r--r--drivers/scsi/qedf/drv_fcoe_fw_funcs.c190
-rw-r--r--drivers/scsi/qedf/drv_fcoe_fw_funcs.h93
-rw-r--r--drivers/scsi/qedf/drv_scsi_fw_funcs.c44
-rw-r--r--drivers/scsi/qedf/drv_scsi_fw_funcs.h85
-rw-r--r--drivers/scsi/qedf/qedf.h23
-rw-r--r--drivers/scsi/qedf/qedf_els.c25
-rw-r--r--drivers/scsi/qedf/qedf_io.c670
-rw-r--r--drivers/scsi/qedi/Makefile2
-rw-r--r--drivers/scsi/qedi/qedi_fw.c1068
-rw-r--r--drivers/scsi/qedi/qedi_fw_api.c781
-rw-r--r--drivers/scsi/qedi/qedi_fw_iscsi.h117
-rw-r--r--drivers/scsi/qedi/qedi_fw_scsi.h55
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c12
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h2
-rw-r--r--drivers/scsi/qedi/qedi_version.h4
-rw-r--r--include/linux/bpf.h1
-rw-r--r--include/linux/bpf_verifier.h5
-rw-r--r--include/linux/brcmphy.h3
-rw-r--r--include/linux/etherdevice.h15
-rw-r--r--include/linux/ethtool.h2
-rw-r--r--include/linux/filter.h10
-rw-r--r--include/linux/hyperv.h96
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/netdevice.h2
-rw-r--r--include/linux/qed/common_hsi.h30
-rw-r--r--include/linux/qed/eth_common.h3
-rw-r--r--include/linux/qed/fcoe_common.h180
-rw-r--r--include/linux/qed/iscsi_common.h241
-rw-r--r--include/linux/qed/qed_if.h48
-rw-r--r--include/linux/qed/rdma_common.h3
-rw-r--r--include/linux/qed/roce_common.h17
-rw-r--r--include/linux/qed/storage_common.h30
-rw-r--r--include/linux/qed/tcp_common.h1
-rw-r--r--include/linux/rhashtable.h22
-rw-r--r--include/linux/stmmac.h41
-rw-r--r--include/linux/usb/usbnet.h8
-rw-r--r--include/net/dsa.h9
-rw-r--r--include/net/fib_rules.h1
-rw-r--r--include/net/ip_fib.h38
-rw-r--r--include/net/ip_vs.h16
-rw-r--r--include/net/mpls_iptunnel.h2
-rw-r--r--include/net/netfilter/nf_conntrack_expect.h4
-rw-r--r--include/net/netfilter/nf_conntrack_timeout.h3
-rw-r--r--include/net/netfilter/nf_tables.h12
-rw-r--r--include/net/netfilter/nft_fib.h2
-rw-r--r--include/net/netns/ipv4.h2
-rw-r--r--include/net/netns/mpls.h3
-rw-r--r--include/net/pkt_sched.h2
-rw-r--r--include/net/route.h6
-rw-r--r--include/net/sch_generic.h1
-rw-r--r--include/net/sctp/sm.h16
-rw-r--r--include/net/sctp/ulpevent.h8
-rw-r--r--include/net/secure_seq.h6
-rw-r--r--include/net/sock.h7
-rw-r--r--include/net/tc_act/tc_vlan.h5
-rw-r--r--include/net/tcp.h11
-rw-r--r--include/uapi/linux/ipv6.h1
-rw-r--r--include/uapi/linux/mpls_iptunnel.h2
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h26
-rw-r--r--include/uapi/linux/pkt_sched.h8
-rw-r--r--include/uapi/linux/rtnetlink.h2
-rw-r--r--include/uapi/linux/sctp.h31
-rw-r--r--include/uapi/linux/snmp.h1
-rw-r--r--kernel/bpf/arraymap.c29
-rw-r--r--kernel/bpf/hashtab.c31
-rw-r--r--kernel/bpf/syscall.c56
-rw-r--r--kernel/bpf/verifier.c129
-rw-r--r--net/8021q/vlan_dev.c3
-rw-r--r--net/atm/common.c22
-rw-r--r--net/bridge/br_netfilter_hooks.c3
-rw-r--r--net/bridge/netfilter/ebt_log.c34
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c6
-rw-r--r--net/core/drop_monitor.c5
-rw-r--r--net/core/ethtool.c1
-rw-r--r--net/core/fib_rules.c14
-rw-r--r--net/core/flow_dissector.c426
-rw-r--r--net/core/lwtunnel.c2
-rw-r--r--net/core/secure_seq.c13
-rw-r--r--net/core/sock.c91
-rw-r--r--net/decnet/af_decnet.c13
-rw-r--r--net/dsa/slave.c12
-rw-r--r--net/ipv4/Makefile2
-rw-r--r--net/ipv4/devinet.c32
-rw-r--r--net/ipv4/fib_notifier.c86
-rw-r--r--net/ipv4/fib_rules.c55
-rw-r--r--net/ipv4/fib_semantics.c11
-rw-r--r--net/ipv4/fib_trie.c108
-rw-r--r--net/ipv4/icmp.c19
-rw-r--r--net/ipv4/netfilter/arp_tables.c2
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c19
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c15
-rw-r--r--net/ipv4/netfilter/nf_reject_ipv4.c3
-rw-r--r--net/ipv4/netfilter/nft_fib_ipv4.c4
-rw-r--r--net/ipv4/proc.c1
-rw-r--r--net/ipv4/route.c92
-rw-r--r--net/ipv4/sysctl_net_ipv4.c16
-rw-r--r--net/ipv4/tcp_input.c36
-rw-r--r--net/ipv4/tcp_ipv4.c41
-rw-r--r--net/ipv4/tcp_metrics.c147
-rw-r--r--net/ipv4/tcp_minisocks.c22
-rw-r--r--net/ipv4/tcp_westwood.c4
-rw-r--r--net/ipv6/addrconf.c114
-rw-r--r--net/ipv6/netfilter/nft_fib_ipv6.c2
-rw-r--r--net/ipv6/tcp_ipv6.c32
-rw-r--r--net/ipv6/udp.c59
-rw-r--r--net/mpls/af_mpls.c98
-rw-r--r--net/mpls/internal.h7
-rw-r--r--net/mpls/mpls_iptunnel.c73
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c24
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c12
-rw-r--r--net/netfilter/ipvs/ip_vs_lblc.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_nq.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_tcp.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_rr.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_sed.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_wlc.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_wrr.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c57
-rw-r--r--net/netfilter/nf_conntrack_expect.c10
-rw-r--r--net/netfilter/nf_conntrack_netlink.c4
-rw-r--r--net/netfilter/nf_tables_api.c49
-rw-r--r--net/netfilter/nfnetlink_acct.c15
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c12
-rw-r--r--net/netfilter/nfnetlink_log.c14
-rw-r--r--net/netfilter/nft_compat.c8
-rw-r--r--net/netfilter/nft_counter.c3
-rw-r--r--net/netfilter/nft_ct.c171
-rw-r--r--net/netfilter/nft_dynset.c14
-rw-r--r--net/netfilter/nft_exthdr.c13
-rw-r--r--net/netfilter/nft_fib.c16
-rw-r--r--net/netfilter/nft_hash.c133
-rw-r--r--net/netfilter/nft_limit.c10
-rw-r--r--net/netfilter/nft_lookup.c14
-rw-r--r--net/netfilter/nft_masq.c4
-rw-r--r--net/netfilter/nft_meta.c4
-rw-r--r--net/netfilter/nft_nat.c4
-rw-r--r--net/netfilter/nft_objref.c14
-rw-r--r--net/netfilter/nft_quota.c3
-rw-r--r--net/netfilter/nft_redir.c4
-rw-r--r--net/netfilter/nft_reject.c5
-rw-r--r--net/netfilter/nft_reject_inet.c6
-rw-r--r--net/netfilter/nft_set_rbtree.c31
-rw-r--r--net/netfilter/xt_limit.c11
-rw-r--r--net/rds/ib_cm.c5
-rw-r--r--net/rds/ib_fmr.c38
-rw-r--r--net/rds/ib_mr.h2
-rw-r--r--net/sched/act_ife.c4
-rw-r--r--net/sched/sch_api.c42
-rw-r--r--net/sched/sch_cbq.c5
-rw-r--r--net/sched/sch_drr.c2
-rw-r--r--net/sched/sch_dsmark.c2
-rw-r--r--net/sched/sch_fq_codel.c2
-rw-r--r--net/sched/sch_generic.c2
-rw-r--r--net/sched/sch_hfsc.c4
-rw-r--r--net/sched/sch_htb.c2
-rw-r--r--net/sched/sch_mq.c2
-rw-r--r--net/sched/sch_mqprio.c41
-rw-r--r--net/sched/sch_multiq.c2
-rw-r--r--net/sched/sch_netem.c26
-rw-r--r--net/sched/sch_prio.c5
-rw-r--r--net/sched/sch_qfq.c2
-rw-r--r--net/sched/sch_red.c2
-rw-r--r--net/sched/sch_sfb.c2
-rw-r--r--net/sched/sch_tbf.c2
-rw-r--r--net/sctp/sm_statefuns.c15
-rw-r--r--net/sctp/socket.c81
-rw-r--r--net/sctp/stream.c396
-rw-r--r--net/sctp/sysctl.c7
-rw-r--r--net/sctp/ulpevent.c56
-rw-r--r--samples/bpf/map_perf_test_kern.c33
-rw-r--r--samples/bpf/map_perf_test_user.c32
-rwxr-xr-xtools/hv/bondvf.sh18
437 files changed, 28912 insertions, 10104 deletions
diff --git a/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt b/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
index 10587bdadbbe..26c77d985faf 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
+++ b/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
@@ -2,11 +2,14 @@
2 2
3Required properties: 3Required properties:
4- compatible: should contain one of "brcm,genet-v1", "brcm,genet-v2", 4- compatible: should contain one of "brcm,genet-v1", "brcm,genet-v2",
5 "brcm,genet-v3", "brcm,genet-v4". 5 "brcm,genet-v3", "brcm,genet-v4", "brcm,genet-v5".
6- reg: address and length of the register set for the device 6- reg: address and length of the register set for the device
7- interrupts: must be two cells, the first cell is the general purpose 7- interrupts and/or interrupts-extended: must be two cells, the first cell
8 interrupt line, while the second cell is the interrupt for the ring 8 is the general purpose interrupt line, while the second cell is the
9 RX and TX queues operating in ring mode 9 interrupt for the ring RX and TX queues operating in ring mode. An
10 optional third interrupt cell for Wake-on-LAN can be specified.
11 See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
12 for information on the property specifics.
10- phy-mode: see ethernet.txt file in the same directory 13- phy-mode: see ethernet.txt file in the same directory
11- #address-cells: should be 1 14- #address-cells: should be 1
12- #size-cells: should be 1 15- #size-cells: should be 1
@@ -29,15 +32,15 @@ Optional properties:
29 32
30Required child nodes: 33Required child nodes:
31 34
32- mdio bus node: this node should always be present regarless of the PHY 35- mdio bus node: this node should always be present regardless of the PHY
33 configuration of the GENET instance 36 configuration of the GENET instance
34 37
35MDIO bus node required properties: 38MDIO bus node required properties:
36 39
37- compatible: should contain one of "brcm,genet-mdio-v1", "brcm,genet-mdio-v2" 40- compatible: should contain one of "brcm,genet-mdio-v1", "brcm,genet-mdio-v2"
38 "brcm,genet-mdio-v3", "brcm,genet-mdio-v4", the version has to match the 41 "brcm,genet-mdio-v3", "brcm,genet-mdio-v4", "brcm,genet-mdio-v5", the version
39 parent node compatible property (e.g: brcm,genet-v4 pairs with 42 has to match the parent node compatible property (e.g: brcm,genet-v4 pairs
40 brcm,genet-mdio-v4) 43 with brcm,genet-mdio-v4)
41- reg: address and length relative to the parent node base register address 44- reg: address and length relative to the parent node base register address
42- #address-cells: address cell for MDIO bus addressing, should be 1 45- #address-cells: address cell for MDIO bus addressing, should be 1
43- #size-cells: size of the cells for MDIO bus addressing, should be 0 46- #size-cells: size of the cells for MDIO bus addressing, should be 0
diff --git a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
index ab0bb4247d14..4648948f7c3b 100644
--- a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
+++ b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
@@ -2,8 +2,9 @@
2 2
3Required properties: 3Required properties:
4- compatible: should one from "brcm,genet-mdio-v1", "brcm,genet-mdio-v2", 4- compatible: should one from "brcm,genet-mdio-v1", "brcm,genet-mdio-v2",
5 "brcm,genet-mdio-v3", "brcm,genet-mdio-v4" or "brcm,unimac-mdio" 5 "brcm,genet-mdio-v3", "brcm,genet-mdio-v4", "brcm,genet-mdio-v5" or
6- reg: address and length of the regsiter set for the device, first one is the 6 "brcm,unimac-mdio"
7- reg: address and length of the register set for the device, first one is the
7 base register, and the second one is optional and for indirect accesses to 8 base register, and the second one is optional and for indirect accesses to
8 larger than 16-bits MDIO transactions 9 larger than 16-bits MDIO transactions
9- reg-names: name(s) of the register must be "mdio" and optional "mdio_indir_rw" 10- reg-names: name(s) of the register must be "mdio" and optional "mdio_indir_rw"
diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt
index 4754364df4c6..6b4956beff8c 100644
--- a/Documentation/devicetree/bindings/net/marvell-pp2.txt
+++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt
@@ -1,17 +1,28 @@
1* Marvell Armada 375 Ethernet Controller (PPv2) 1* Marvell Armada 375 Ethernet Controller (PPv2.1)
2 Marvell Armada 7K/8K Ethernet Controller (PPv2.2)
2 3
3Required properties: 4Required properties:
4 5
5- compatible: should be "marvell,armada-375-pp2" 6- compatible: should be one of:
7 "marvell,armada-375-pp2"
8 "marvell,armada-7k-pp2"
6- reg: addresses and length of the register sets for the device. 9- reg: addresses and length of the register sets for the device.
7 Must contain the following register sets: 10 For "marvell,armada-375-pp2", must contain the following register
11 sets:
8 - common controller registers 12 - common controller registers
9 - LMS registers 13 - LMS registers
10 In addition, at least one port register set is required. 14 - one register area per Ethernet port
11- clocks: a pointer to the reference clocks for this device, consequently: 15 For "marvell,armada-7k-pp2", must contain the following register
12 - main controller clock 16 sets:
13 - GOP clock 17 - packet processor registers
14- clock-names: names of used clocks, must be "pp_clk" and "gop_clk". 18 - networking interfaces registers
19
20- clocks: pointers to the reference clocks for this device, consequently:
21 - main controller clock (for both armada-375-pp2 and armada-7k-pp2)
22 - GOP clock (for both armada-375-pp2 and armada-7k-pp2)
23 - MG clock (only for armada-7k-pp2)
24- clock-names: names of used clocks, must be "pp_clk", "gop_clk" and
25 "mg_clk" (the latter only for armada-7k-pp2).
15 26
16The ethernet ports are represented by subnodes. At least one port is 27The ethernet ports are represented by subnodes. At least one port is
17required. 28required.
@@ -19,8 +30,10 @@ required.
19Required properties (port): 30Required properties (port):
20 31
21- interrupts: interrupt for the port 32- interrupts: interrupt for the port
22- port-id: should be '0' or '1' for ethernet ports, and '2' for the 33- port-id: ID of the port from the MAC point of view
23 loopback port 34- gop-port-id: only for marvell,armada-7k-pp2, ID of the port from the
35 GOP (Group Of Ports) point of view. This ID is used to index the
36 per-port registers in the second register area.
24- phy-mode: See ethernet.txt file in the same directory 37- phy-mode: See ethernet.txt file in the same directory
25 38
26Optional properties (port): 39Optional properties (port):
@@ -29,7 +42,7 @@ Optional properties (port):
29- phy: a phandle to a phy node defining the PHY address (as the reg 42- phy: a phandle to a phy node defining the PHY address (as the reg
30 property, a single integer). 43 property, a single integer).
31 44
32Example: 45Example for marvell,armada-375-pp2:
33 46
34ethernet@f0000 { 47ethernet@f0000 {
35 compatible = "marvell,armada-375-pp2"; 48 compatible = "marvell,armada-375-pp2";
@@ -57,3 +70,30 @@ ethernet@f0000 {
57 phy-mode = "gmii"; 70 phy-mode = "gmii";
58 }; 71 };
59}; 72};
73
74Example for marvell,armada-7k-pp2:
75
76cpm_ethernet: ethernet@0 {
77 compatible = "marvell,armada-7k-pp22";
78 reg = <0x0 0x100000>, <0x129000 0xb000>;
79 clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>, <&cpm_syscon0 1 5>;
80 clock-names = "pp_clk", "gop_clk", "gp_clk";
81
82 eth0: eth0 {
83 interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
84 port-id = <0>;
85 gop-port-id = <0>;
86 };
87
88 eth1: eth1 {
89 interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
90 port-id = <1>;
91 gop-port-id = <2>;
92 };
93
94 eth2: eth2 {
95 interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
96 port-id = <2>;
97 gop-port-id = <3>;
98 };
99};
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index d3bfc2b30fb5..784d98862b52 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -28,9 +28,9 @@ Optional properties:
28 clocks may be specified in derived bindings. 28 clocks may be specified in derived bindings.
29- clock-names: One name for each entry in the clocks property, the 29- clock-names: One name for each entry in the clocks property, the
30 first one should be "stmmaceth" and the second one should be "pclk". 30 first one should be "stmmaceth" and the second one should be "pclk".
31- clk_ptp_ref: this is the PTP reference clock; in case of the PTP is 31- ptp_ref: this is the PTP reference clock; in case of the PTP is available
32 available this clock is used for programming the Timestamp Addend Register. 32 this clock is used for programming the Timestamp Addend Register. If not
33 If not passed then the system clock will be used and this is fine on some 33 passed then the system clock will be used and this is fine on some
34 platforms. 34 platforms.
35- tx-fifo-depth: See ethernet.txt file in the same directory 35- tx-fifo-depth: See ethernet.txt file in the same directory
36- rx-fifo-depth: See ethernet.txt file in the same directory 36- rx-fifo-depth: See ethernet.txt file in the same directory
@@ -72,7 +72,43 @@ Optional properties:
72 - snps,mb: mixed-burst 72 - snps,mb: mixed-burst
73 - snps,rb: rebuild INCRx Burst 73 - snps,rb: rebuild INCRx Burst
74- mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus. 74- mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus.
75 75- Multiple RX Queues parameters: below the list of all the parameters to
76 configure the multiple RX queues:
77 - snps,rx-queues-to-use: number of RX queues to be used in the driver
78 - Choose one of these RX scheduling algorithms:
79 - snps,rx-sched-sp: Strict priority
80 - snps,rx-sched-wsp: Weighted Strict priority
81 - For each RX queue
82 - Choose one of these modes:
83 - snps,dcb-algorithm: Queue to be enabled as DCB
84 - snps,avb-algorithm: Queue to be enabled as AVB
85 - snps,map-to-dma-channel: Channel to map
86 - Specifiy specific packet routing:
87 - snps,route-avcp: AV Untagged Control packets
88 - snps,route-ptp: PTP Packets
89 - snps,route-dcbcp: DCB Control Packets
90 - snps,route-up: Untagged Packets
91 - snps,route-multi-broad: Multicast & Broadcast Packets
92 - snps,priority: RX queue priority (Range: 0x0 to 0xF)
93- Multiple TX Queues parameters: below the list of all the parameters to
94 configure the multiple TX queues:
95 - snps,tx-queues-to-use: number of TX queues to be used in the driver
96 - Choose one of these TX scheduling algorithms:
97 - snps,tx-sched-wrr: Weighted Round Robin
98 - snps,tx-sched-wfq: Weighted Fair Queuing
99 - snps,tx-sched-dwrr: Deficit Weighted Round Robin
100 - snps,tx-sched-sp: Strict priority
101 - For each TX queue
102 - snps,weight: TX queue weight (if using a DCB weight algorithm)
103 - Choose one of these modes:
104 - snps,dcb-algorithm: TX queue will be working in DCB
105 - snps,avb-algorithm: TX queue will be working in AVB
106 - Configure Credit Base Shaper (if AVB Mode selected):
107 - snps,send_slope: enable Low Power Interface
108 - snps,idle_slope: unlock on WoL
109 - snps,high_credit: max write outstanding req. limit
110 - snps,low_credit: max read outstanding req. limit
111 - snps,priority: TX queue priority (Range: 0x0 to 0xF)
76Examples: 112Examples:
77 113
78 stmmac_axi_setup: stmmac-axi-config { 114 stmmac_axi_setup: stmmac-axi-config {
@@ -81,6 +117,35 @@ Examples:
81 snps,blen = <256 128 64 32 0 0 0>; 117 snps,blen = <256 128 64 32 0 0 0>;
82 }; 118 };
83 119
120 mtl_rx_setup: rx-queues-config {
121 snps,rx-queues-to-use = <1>;
122 snps,rx-sched-sp;
123 queue0 {
124 snps,dcb-algorithm;
125 snps,map-to-dma-channel = <0x0>;
126 snps,priority = <0x0>;
127 };
128 };
129
130 mtl_tx_setup: tx-queues-config {
131 snps,tx-queues-to-use = <2>;
132 snps,tx-sched-wrr;
133 queue0 {
134 snps,weight = <0x10>;
135 snps,dcb-algorithm;
136 snps,priority = <0x0>;
137 };
138
139 queue1 {
140 snps,avb-algorithm;
141 snps,send_slope = <0x1000>;
142 snps,idle_slope = <0x1000>;
143 snps,high_credit = <0x3E800>;
144 snps,low_credit = <0xFFC18000>;
145 snps,priority = <0x1>;
146 };
147 };
148
84 gmac0: ethernet@e0800000 { 149 gmac0: ethernet@e0800000 {
85 compatible = "st,spear600-gmac"; 150 compatible = "st,spear600-gmac";
86 reg = <0xe0800000 0x8000>; 151 reg = <0xe0800000 0x8000>;
@@ -104,4 +169,6 @@ Examples:
104 phy1: ethernet-phy@0 { 169 phy1: ethernet-phy@0 {
105 }; 170 };
106 }; 171 };
172 snps,mtl-rx-config = <&mtl_rx_setup>;
173 snps,mtl-tx-config = <&mtl_tx_setup>;
107 }; 174 };
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index ab0230461377..b57308e76b1d 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -73,6 +73,14 @@ fib_multipath_use_neigh - BOOLEAN
73 0 - disabled 73 0 - disabled
74 1 - enabled 74 1 - enabled
75 75
76fib_multipath_hash_policy - INTEGER
77 Controls which hash policy to use for multipath routes. Only valid
78 for kernels built with CONFIG_IP_ROUTE_MULTIPATH enabled.
79 Default: 0 (Layer 3)
80 Possible values:
81 0 - Layer 3
82 1 - Layer 4
83
76route/max_size - INTEGER 84route/max_size - INTEGER
77 Maximum number of routes allowed in the kernel. Increase 85 Maximum number of routes allowed in the kernel. Increase
78 this when using large numbers of interfaces and/or routes. 86 this when using large numbers of interfaces and/or routes.
@@ -640,11 +648,6 @@ tcp_tso_win_divisor - INTEGER
640 building larger TSO frames. 648 building larger TSO frames.
641 Default: 3 649 Default: 3
642 650
643tcp_tw_recycle - BOOLEAN
644 Enable fast recycling TIME-WAIT sockets. Default value is 0.
645 It should not be changed without advice/request of technical
646 experts.
647
648tcp_tw_reuse - BOOLEAN 651tcp_tw_reuse - BOOLEAN
649 Allow to reuse TIME-WAIT sockets for new connections when it is 652 Allow to reuse TIME-WAIT sockets for new connections when it is
650 safe from protocol viewpoint. Default value is 0. 653 safe from protocol viewpoint. Default value is 0.
diff --git a/Documentation/networking/ipvs-sysctl.txt b/Documentation/networking/ipvs-sysctl.txt
index e6b1c025fdd8..056898685d40 100644
--- a/Documentation/networking/ipvs-sysctl.txt
+++ b/Documentation/networking/ipvs-sysctl.txt
@@ -175,6 +175,14 @@ nat_icmp_send - BOOLEAN
175 for VS/NAT when the load balancer receives packets from real 175 for VS/NAT when the load balancer receives packets from real
176 servers but the connection entries don't exist. 176 servers but the connection entries don't exist.
177 177
178pmtu_disc - BOOLEAN
179 0 - disabled
180 not 0 - enabled (default)
181
182 By default, reject with FRAG_NEEDED all DF packets that exceed
183 the PMTU, irrespective of the forwarding method. For TUN method
184 the flag can be disabled to fragment such packets.
185
178secure_tcp - INTEGER 186secure_tcp - INTEGER
179 0 - disabled (default) 187 0 - disabled (default)
180 188
@@ -185,15 +193,59 @@ secure_tcp - INTEGER
185 The value definition is the same as that of drop_entry and 193 The value definition is the same as that of drop_entry and
186 drop_packet. 194 drop_packet.
187 195
188sync_threshold - INTEGER 196sync_threshold - vector of 2 INTEGERs: sync_threshold, sync_period
189 default 3 197 default 3 50
198
199 It sets synchronization threshold, which is the minimum number
200 of incoming packets that a connection needs to receive before
201 the connection will be synchronized. A connection will be
202 synchronized, every time the number of its incoming packets
203 modulus sync_period equals the threshold. The range of the
204 threshold is from 0 to sync_period.
205
206 When sync_period and sync_refresh_period are 0, send sync only
207 for state changes or only once when pkts matches sync_threshold
208
209sync_refresh_period - UNSIGNED INTEGER
210 default 0
211
212 In seconds, difference in reported connection timer that triggers
213 new sync message. It can be used to avoid sync messages for the
214 specified period (or half of the connection timeout if it is lower)
215 if connection state is not changed since last sync.
216
217 This is useful for normal connections with high traffic to reduce
218 sync rate. Additionally, retry sync_retries times with period of
219 sync_refresh_period/8.
220
221sync_retries - INTEGER
222 default 0
223
224 Defines sync retries with period of sync_refresh_period/8. Useful
225 to protect against loss of sync messages. The range of the
226 sync_retries is from 0 to 3.
227
228sync_qlen_max - UNSIGNED LONG
229
230 Hard limit for queued sync messages that are not sent yet. It
231 defaults to 1/32 of the memory pages but actually represents
232 number of messages. It will protect us from allocating large
233 parts of memory when the sending rate is lower than the queuing
234 rate.
235
236sync_sock_size - INTEGER
237 default 0
238
239 Configuration of SNDBUF (master) or RCVBUF (slave) socket limit.
240 Default value is 0 (preserve system defaults).
241
242sync_ports - INTEGER
243 default 1
190 244
191 It sets synchronization threshold, which is the minimum number 245 The number of threads that master and backup servers can use for
192 of incoming packets that a connection needs to receive before 246 sync traffic. Every thread will use single UDP port, thread 0 will
193 the connection will be synchronized. A connection will be 247 use the default port 8848 while last thread will use port
194 synchronized, every time the number of its incoming packets 248 8848+sync_ports-1.
195 modulus 50 equals the threshold. The range of the threshold is
196 from 0 to 49.
197 249
198snat_reroute - BOOLEAN 250snat_reroute - BOOLEAN
199 0 - disabled 251 0 - disabled
diff --git a/Documentation/networking/mpls-sysctl.txt b/Documentation/networking/mpls-sysctl.txt
index 15d8d16934fd..2f24a1912a48 100644
--- a/Documentation/networking/mpls-sysctl.txt
+++ b/Documentation/networking/mpls-sysctl.txt
@@ -19,6 +19,25 @@ platform_labels - INTEGER
19 Possible values: 0 - 1048575 19 Possible values: 0 - 1048575
20 Default: 0 20 Default: 0
21 21
22ip_ttl_propagate - BOOL
23 Control whether TTL is propagated from the IPv4/IPv6 header to
24 the MPLS header on imposing labels and propagated from the
25 MPLS header to the IPv4/IPv6 header on popping the last label.
26
27 If disabled, the MPLS transport network will appear as a
28 single hop to transit traffic.
29
30 0 - disabled / RFC 3443 [Short] Pipe Model
31 1 - enabled / RFC 3443 Uniform Model (default)
32
33default_ttl - BOOL
34 Default TTL value to use for MPLS packets where it cannot be
35 propagated from an IP header, either because one isn't present
36 or ip_ttl_propagate has been disabled.
37
38 Possible values: 1 - 255
39 Default: 255
40
22conf/<interface>/input - BOOL 41conf/<interface>/input - BOOL
23 Control whether packets can be input on this interface. 42 Control whether packets can be input on this interface.
24 43
diff --git a/MAINTAINERS b/MAINTAINERS
index c776906f67a9..632e76223cc5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -896,12 +896,19 @@ F: arch/arm64/boot/dts/apm/
896APPLIED MICRO (APM) X-GENE SOC ETHERNET DRIVER 896APPLIED MICRO (APM) X-GENE SOC ETHERNET DRIVER
897M: Iyappan Subramanian <isubramanian@apm.com> 897M: Iyappan Subramanian <isubramanian@apm.com>
898M: Keyur Chudgar <kchudgar@apm.com> 898M: Keyur Chudgar <kchudgar@apm.com>
899M: Quan Nguyen <qnguyen@apm.com>
899S: Supported 900S: Supported
900F: drivers/net/ethernet/apm/xgene/ 901F: drivers/net/ethernet/apm/xgene/
901F: drivers/net/phy/mdio-xgene.c 902F: drivers/net/phy/mdio-xgene.c
902F: Documentation/devicetree/bindings/net/apm-xgene-enet.txt 903F: Documentation/devicetree/bindings/net/apm-xgene-enet.txt
903F: Documentation/devicetree/bindings/net/apm-xgene-mdio.txt 904F: Documentation/devicetree/bindings/net/apm-xgene-mdio.txt
904 905
906APPLIED MICRO (APM) X-GENE SOC ETHERNET (V2) DRIVER
907M: Iyappan Subramanian <isubramanian@apm.com>
908M: Keyur Chudgar <kchudgar@apm.com>
909S: Supported
910F: drivers/net/ethernet/apm/xgene-v2/
911
905APPLIED MICRO (APM) X-GENE SOC PMU 912APPLIED MICRO (APM) X-GENE SOC PMU
906M: Tai Nguyen <ttnguyen@apm.com> 913M: Tai Nguyen <ttnguyen@apm.com>
907S: Supported 914S: Supported
@@ -11061,6 +11068,12 @@ F: include/linux/dma/dw.h
11061F: include/linux/platform_data/dma-dw.h 11068F: include/linux/platform_data/dma-dw.h
11062F: drivers/dma/dw/ 11069F: drivers/dma/dw/
11063 11070
11071SYNOPSYS DESIGNWARE ENTERPRISE ETHERNET DRIVER
11072M: Jie Deng <jiedeng@synopsys.com>
11073L: netdev@vger.kernel.org
11074S: Supported
11075F: drivers/net/ethernet/synopsys/
11076
11064SYNOPSYS DESIGNWARE I2C DRIVER 11077SYNOPSYS DESIGNWARE I2C DRIVER
11065M: Jarkko Nikula <jarkko.nikula@linux.intel.com> 11078M: Jarkko Nikula <jarkko.nikula@linux.intel.com>
11066R: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 11079R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 4a610795b585..906705e5f776 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -2267,9 +2267,8 @@ static int amb_probe(struct pci_dev *pci_dev,
2267 dev->atm_dev->ci_range.vpi_bits = NUM_VPI_BITS; 2267 dev->atm_dev->ci_range.vpi_bits = NUM_VPI_BITS;
2268 dev->atm_dev->ci_range.vci_bits = NUM_VCI_BITS; 2268 dev->atm_dev->ci_range.vci_bits = NUM_VCI_BITS;
2269 2269
2270 init_timer(&dev->housekeeping); 2270 setup_timer(&dev->housekeeping, do_housekeeping,
2271 dev->housekeeping.function = do_housekeeping; 2271 (unsigned long)dev);
2272 dev->housekeeping.data = (unsigned long) dev;
2273 mod_timer(&dev->housekeeping, jiffies); 2272 mod_timer(&dev->housekeeping, jiffies);
2274 2273
2275 // enable host interrupts 2274 // enable host interrupts
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 87799e81af97..c3f1a9e33cef 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -32,6 +32,8 @@
32 32
33#include "hyperv_vmbus.h" 33#include "hyperv_vmbus.h"
34 34
35#define VMBUS_PKT_TRAILER 8
36
35/* 37/*
36 * When we write to the ring buffer, check if the host needs to 38 * When we write to the ring buffer, check if the host needs to
37 * be signaled. Here is the details of this protocol: 39 * be signaled. Here is the details of this protocol:
@@ -336,6 +338,12 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
336 return 0; 338 return 0;
337} 339}
338 340
341static inline void
342init_cached_read_index(struct hv_ring_buffer_info *rbi)
343{
344 rbi->cached_read_index = rbi->ring_buffer->read_index;
345}
346
339int hv_ringbuffer_read(struct vmbus_channel *channel, 347int hv_ringbuffer_read(struct vmbus_channel *channel,
340 void *buffer, u32 buflen, u32 *buffer_actual_len, 348 void *buffer, u32 buflen, u32 *buffer_actual_len,
341 u64 *requestid, bool raw) 349 u64 *requestid, bool raw)
@@ -366,7 +374,8 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
366 return ret; 374 return ret;
367 } 375 }
368 376
369 init_cached_read_index(channel); 377 init_cached_read_index(inring_info);
378
370 next_read_location = hv_get_next_read_location(inring_info); 379 next_read_location = hv_get_next_read_location(inring_info);
371 next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc, 380 next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
372 sizeof(desc), 381 sizeof(desc),
@@ -410,3 +419,86 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
410 419
411 return ret; 420 return ret;
412} 421}
422
423/*
424 * Determine number of bytes available in ring buffer after
425 * the current iterator (priv_read_index) location.
426 *
427 * This is similar to hv_get_bytes_to_read but with private
428 * read index instead.
429 */
430static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
431{
432 u32 priv_read_loc = rbi->priv_read_index;
433 u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
434
435 if (write_loc >= priv_read_loc)
436 return write_loc - priv_read_loc;
437 else
438 return (rbi->ring_datasize - priv_read_loc) + write_loc;
439}
440
441/*
442 * Get first vmbus packet from ring buffer after read_index
443 *
444 * If ring buffer is empty, returns NULL and no other action needed.
445 */
446struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
447{
448 struct hv_ring_buffer_info *rbi = &channel->inbound;
449
450 /* set state for later hv_signal_on_read() */
451 init_cached_read_index(rbi);
452
453 if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
454 return NULL;
455
456 return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
457}
458EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
459
460/*
461 * Get next vmbus packet from ring buffer.
462 *
463 * Advances the current location (priv_read_index) and checks for more
464 * data. If the end of the ring buffer is reached, then return NULL.
465 */
466struct vmpacket_descriptor *
467__hv_pkt_iter_next(struct vmbus_channel *channel,
468 const struct vmpacket_descriptor *desc)
469{
470 struct hv_ring_buffer_info *rbi = &channel->inbound;
471 u32 packetlen = desc->len8 << 3;
472 u32 dsize = rbi->ring_datasize;
473
474 /* bump offset to next potential packet */
475 rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
476 if (rbi->priv_read_index >= dsize)
477 rbi->priv_read_index -= dsize;
478
479 /* more data? */
480 if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
481 return NULL;
482 else
483 return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
484}
485EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
486
487/*
488 * Update host ring buffer after iterating over packets.
489 */
490void hv_pkt_iter_close(struct vmbus_channel *channel)
491{
492 struct hv_ring_buffer_info *rbi = &channel->inbound;
493
494 /*
495 * Make sure all reads are done before we update the read index since
496 * the writer may start writing to the read area once the read index
497 * is updated.
498 */
499 virt_rmb();
500 rbi->ring_buffer->read_index = rbi->priv_read_index;
501
502 hv_signal_on_read(channel);
503}
504EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
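
As an aside (not part of this diff): a hypothetical consumer loop sketching how the iterator functions added above are intended to be used together. The demo_drain_channel() helper and the per-packet handling are placeholders; only hv_pkt_iter_first(), __hv_pkt_iter_next() and hv_pkt_iter_close() come from the code in this hunk, and their declarations are assumed to live in linux/hyperv.h.

#include <linux/hyperv.h>

/* Hypothetical consumer of the packet iterator exported above. */
static void demo_drain_channel(struct vmbus_channel *channel)
{
	struct vmpacket_descriptor *desc;

	for (desc = hv_pkt_iter_first(channel);
	     desc;
	     desc = __hv_pkt_iter_next(channel, desc)) {
		/* handle one packet; desc->len8 is its size in 8-byte units */
	}

	/* publish the updated read index and signal the host if required */
	hv_pkt_iter_close(channel);
}
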
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index b9b47e5cc8b3..ced0461d6e9f 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -587,9 +587,8 @@ void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
587#define EVENT_TYPE_CQ 1 587#define EVENT_TYPE_CQ 1
588#define EVENT_TYPE_QP 2 588#define EVENT_TYPE_QP 2
589 struct qedr_dev *dev = (struct qedr_dev *)context; 589 struct qedr_dev *dev = (struct qedr_dev *)context;
590 union event_ring_data *data = fw_handle; 590 struct regpair *async_handle = (struct regpair *)fw_handle;
591 u64 roce_handle64 = ((u64)data->roce_handle.hi << 32) + 591 u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
592 data->roce_handle.lo;
593 u8 event_type = EVENT_TYPE_NOT_DEFINED; 592 u8 event_type = EVENT_TYPE_NOT_DEFINED;
594 struct ib_event event; 593 struct ib_event event;
595 struct ib_cq *ibcq; 594 struct ib_cq *ibcq;
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index bb32e4792ec9..5cb9195513bd 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -38,7 +38,8 @@
38#include <linux/qed/qed_chain.h> 38#include <linux/qed/qed_chain.h>
39#include <linux/qed/qed_roce_if.h> 39#include <linux/qed/qed_roce_if.h>
40#include <linux/qed/qede_roce.h> 40#include <linux/qed/qede_roce.h>
41#include "qedr_hsi.h" 41#include <linux/qed/roce_common.h>
42#include "qedr_hsi_rdma.h"
42 43
43#define QEDR_MODULE_VERSION "8.10.10.0" 44#define QEDR_MODULE_VERSION "8.10.10.0"
44#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA" 45#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 699632893dd9..a6280ce3e2a5 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -43,14 +43,11 @@
43#include <rdma/ib_addr.h> 43#include <rdma/ib_addr.h>
44#include <rdma/ib_cache.h> 44#include <rdma/ib_cache.h>
45 45
46#include "qedr_hsi.h"
47#include <linux/qed/qed_if.h> 46#include <linux/qed/qed_if.h>
48#include <linux/qed/qed_roce_if.h> 47#include <linux/qed/qed_roce_if.h>
49#include "qedr.h" 48#include "qedr.h"
50#include "qedr_hsi.h"
51#include "verbs.h" 49#include "verbs.h"
52#include <rdma/qedr-abi.h> 50#include <rdma/qedr-abi.h>
53#include "qedr_hsi.h"
54#include "qedr_cm.h" 51#include "qedr_cm.h"
55 52
56void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info) 53void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi.h b/drivers/infiniband/hw/qedr/qedr_hsi.h
deleted file mode 100644
index 66d27521373f..000000000000
--- a/drivers/infiniband/hw/qedr/qedr_hsi.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef __QED_HSI_ROCE__
33#define __QED_HSI_ROCE__
34
35#include <linux/qed/common_hsi.h>
36#include <linux/qed/roce_common.h>
37#include "qedr_hsi_rdma.h"
38
39/* Affiliated asynchronous events / errors enumeration */
40enum roce_async_events_type {
41 ROCE_ASYNC_EVENT_NONE = 0,
42 ROCE_ASYNC_EVENT_COMM_EST = 1,
43 ROCE_ASYNC_EVENT_SQ_DRAINED,
44 ROCE_ASYNC_EVENT_SRQ_LIMIT,
45 ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
46 ROCE_ASYNC_EVENT_CQ_ERR,
47 ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
48 ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
49 ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
50 ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
51 ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
52 ROCE_ASYNC_EVENT_SRQ_EMPTY,
53 MAX_ROCE_ASYNC_EVENTS_TYPE
54};
55
56#endif /* __QED_HSI_ROCE__ */
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 6b3bb32803bd..2091902848e6 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -43,7 +43,8 @@
43#include <rdma/ib_addr.h> 43#include <rdma/ib_addr.h>
44#include <rdma/ib_cache.h> 44#include <rdma/ib_cache.h>
45 45
46#include "qedr_hsi.h" 46#include <linux/qed/common_hsi.h>
47#include "qedr_hsi_rdma.h"
47#include <linux/qed/qed_if.h> 48#include <linux/qed/qed_if.h>
48#include "qedr.h" 49#include "qedr.h"
49#include "verbs.h" 50#include "verbs.h"
diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig
index 09df54fc1fef..fda912b0833f 100644
--- a/drivers/isdn/hardware/mISDN/Kconfig
+++ b/drivers/isdn/hardware/mISDN/Kconfig
@@ -13,7 +13,7 @@ config MISDN_HFCPCI
13 13
14config MISDN_HFCMULTI 14config MISDN_HFCMULTI
15 tristate "Support for HFC multiport cards (HFC-4S/8S/E1)" 15 tristate "Support for HFC multiport cards (HFC-4S/8S/E1)"
16 depends on PCI || 8xx 16 depends on PCI || CPM1
17 depends on MISDN 17 depends on MISDN
18 help 18 help
19 Enable support for cards with Cologne Chip AG's HFC multiport 19 Enable support for cards with Cologne Chip AG's HFC multiport
@@ -27,8 +27,8 @@ config MISDN_HFCMULTI_8xx
27 bool "Support for XHFC embedded board in HFC multiport driver" 27 bool "Support for XHFC embedded board in HFC multiport driver"
28 depends on MISDN 28 depends on MISDN
29 depends on MISDN_HFCMULTI 29 depends on MISDN_HFCMULTI
30 depends on 8xx 30 depends on CPM1
31 default 8xx 31 default CPM1
32 help 32 help
33 Enable support for the XHFC embedded solution from Speech Design. 33 Enable support for the XHFC embedded solution from Speech Design.
34 34
diff --git a/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h b/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h
index 0eafe9f04fca..8a254747768e 100644
--- a/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h
+++ b/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h
@@ -6,7 +6,7 @@
6 * 6 *
7 */ 7 */
8 8
9#include <asm/8xx_immap.h> 9#include <asm/cpm1.h>
10 10
11/* Change this to the value used by your board */ 11/* Change this to the value used by your board */
12#ifndef IMAP_ADDR 12#ifndef IMAP_ADDR
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index edc70ffad660..508713b4e533 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -92,6 +92,7 @@ enum ad_link_speed_type {
92 AD_LINK_SPEED_2500MBPS, 92 AD_LINK_SPEED_2500MBPS,
93 AD_LINK_SPEED_10000MBPS, 93 AD_LINK_SPEED_10000MBPS,
94 AD_LINK_SPEED_20000MBPS, 94 AD_LINK_SPEED_20000MBPS,
95 AD_LINK_SPEED_25000MBPS,
95 AD_LINK_SPEED_40000MBPS, 96 AD_LINK_SPEED_40000MBPS,
96 AD_LINK_SPEED_56000MBPS, 97 AD_LINK_SPEED_56000MBPS,
97 AD_LINK_SPEED_100000MBPS, 98 AD_LINK_SPEED_100000MBPS,
@@ -260,6 +261,7 @@ static inline int __check_agg_selection_timer(struct port *port)
260 * %AD_LINK_SPEED_2500MBPS, 261 * %AD_LINK_SPEED_2500MBPS,
261 * %AD_LINK_SPEED_10000MBPS 262 * %AD_LINK_SPEED_10000MBPS
262 * %AD_LINK_SPEED_20000MBPS 263 * %AD_LINK_SPEED_20000MBPS
264 * %AD_LINK_SPEED_25000MBPS
263 * %AD_LINK_SPEED_40000MBPS 265 * %AD_LINK_SPEED_40000MBPS
264 * %AD_LINK_SPEED_56000MBPS 266 * %AD_LINK_SPEED_56000MBPS
265 * %AD_LINK_SPEED_100000MBPS 267 * %AD_LINK_SPEED_100000MBPS
@@ -302,6 +304,10 @@ static u16 __get_link_speed(struct port *port)
302 speed = AD_LINK_SPEED_20000MBPS; 304 speed = AD_LINK_SPEED_20000MBPS;
303 break; 305 break;
304 306
307 case SPEED_25000:
308 speed = AD_LINK_SPEED_25000MBPS;
309 break;
310
305 case SPEED_40000: 311 case SPEED_40000:
306 speed = AD_LINK_SPEED_40000MBPS; 312 speed = AD_LINK_SPEED_40000MBPS;
307 break; 313 break;
@@ -707,6 +713,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
707 case AD_LINK_SPEED_20000MBPS: 713 case AD_LINK_SPEED_20000MBPS:
708 bandwidth = nports * 20000; 714 bandwidth = nports * 20000;
709 break; 715 break;
716 case AD_LINK_SPEED_25000MBPS:
717 bandwidth = nports * 25000;
718 break;
710 case AD_LINK_SPEED_40000MBPS: 719 case AD_LINK_SPEED_40000MBPS:
711 bandwidth = nports * 40000; 720 bandwidth = nports * 40000;
712 break; 721 break;
@@ -1052,8 +1061,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1052 port->sm_rx_state = AD_RX_INITIALIZE; 1061 port->sm_rx_state = AD_RX_INITIALIZE;
1053 port->sm_vars |= AD_PORT_CHURNED; 1062 port->sm_vars |= AD_PORT_CHURNED;
1054 /* check if port is not enabled */ 1063 /* check if port is not enabled */
1055 } else if (!(port->sm_vars & AD_PORT_BEGIN) 1064 } else if (!(port->sm_vars & AD_PORT_BEGIN) && !port->is_enabled)
1056 && !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED))
1057 port->sm_rx_state = AD_RX_PORT_DISABLED; 1065 port->sm_rx_state = AD_RX_PORT_DISABLED;
1058 /* check if new lacpdu arrived */ 1066 /* check if new lacpdu arrived */
1059 else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) || 1067 else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) ||
@@ -1081,11 +1089,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1081 /* if no lacpdu arrived and no timer is on */ 1089 /* if no lacpdu arrived and no timer is on */
1082 switch (port->sm_rx_state) { 1090 switch (port->sm_rx_state) {
1083 case AD_RX_PORT_DISABLED: 1091 case AD_RX_PORT_DISABLED:
1084 if (port->sm_vars & AD_PORT_MOVED) 1092 if (port->is_enabled &&
1085 port->sm_rx_state = AD_RX_INITIALIZE; 1093 (port->sm_vars & AD_PORT_LACP_ENABLED))
1086 else if (port->is_enabled
1087 && (port->sm_vars
1088 & AD_PORT_LACP_ENABLED))
1089 port->sm_rx_state = AD_RX_EXPIRED; 1094 port->sm_rx_state = AD_RX_EXPIRED;
1090 else if (port->is_enabled 1095 else if (port->is_enabled
1091 && ((port->sm_vars 1096 && ((port->sm_vars
@@ -1115,7 +1120,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1115 port->sm_vars &= ~AD_PORT_SELECTED; 1120 port->sm_vars &= ~AD_PORT_SELECTED;
1116 __record_default(port); 1121 __record_default(port);
1117 port->actor_oper_port_state &= ~AD_STATE_EXPIRED; 1122 port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
1118 port->sm_vars &= ~AD_PORT_MOVED;
1119 port->sm_rx_state = AD_RX_PORT_DISABLED; 1123 port->sm_rx_state = AD_RX_PORT_DISABLED;
1120 1124
1121 /* Fall Through */ 1125 /* Fall Through */
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 8a4ba8b88e52..ba934020dfaa 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -201,12 +201,6 @@ atomic_t netpoll_block_tx = ATOMIC_INIT(0);
201 201
202unsigned int bond_net_id __read_mostly; 202unsigned int bond_net_id __read_mostly;
203 203
204static __be32 arp_target[BOND_MAX_ARP_TARGETS];
205static int arp_ip_count;
206static int bond_mode = BOND_MODE_ROUNDROBIN;
207static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
208static int lacp_fast;
209
210/*-------------------------- Forward declarations ---------------------------*/ 204/*-------------------------- Forward declarations ---------------------------*/
211 205
212static int bond_init(struct net_device *bond_dev); 206static int bond_init(struct net_device *bond_dev);
@@ -2575,10 +2569,8 @@ static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
2575 * arp is transmitted to generate traffic. see activebackup_arp_monitor for 2569 * arp is transmitted to generate traffic. see activebackup_arp_monitor for
2576 * arp monitoring in active backup mode. 2570 * arp monitoring in active backup mode.
2577 */ 2571 */
2578static void bond_loadbalance_arp_mon(struct work_struct *work) 2572static void bond_loadbalance_arp_mon(struct bonding *bond)
2579{ 2573{
2580 struct bonding *bond = container_of(work, struct bonding,
2581 arp_work.work);
2582 struct slave *slave, *oldcurrent; 2574 struct slave *slave, *oldcurrent;
2583 struct list_head *iter; 2575 struct list_head *iter;
2584 int do_failover = 0, slave_state_changed = 0; 2576 int do_failover = 0, slave_state_changed = 0;
@@ -2916,10 +2908,8 @@ check_state:
2916 return should_notify_rtnl; 2908 return should_notify_rtnl;
2917} 2909}
2918 2910
2919static void bond_activebackup_arp_mon(struct work_struct *work) 2911static void bond_activebackup_arp_mon(struct bonding *bond)
2920{ 2912{
2921 struct bonding *bond = container_of(work, struct bonding,
2922 arp_work.work);
2923 bool should_notify_peers = false; 2913 bool should_notify_peers = false;
2924 bool should_notify_rtnl = false; 2914 bool should_notify_rtnl = false;
2925 int delta_in_ticks; 2915 int delta_in_ticks;
@@ -2972,6 +2962,17 @@ re_arm:
2972 } 2962 }
2973} 2963}
2974 2964
2965static void bond_arp_monitor(struct work_struct *work)
2966{
2967 struct bonding *bond = container_of(work, struct bonding,
2968 arp_work.work);
2969
2970 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
2971 bond_activebackup_arp_mon(bond);
2972 else
2973 bond_loadbalance_arp_mon(bond);
2974}
2975
2975/*-------------------------- netdev event handling --------------------------*/ 2976/*-------------------------- netdev event handling --------------------------*/
2976 2977
2977/* Change device name */ 2978/* Change device name */
@@ -3228,10 +3229,7 @@ static void bond_work_init_all(struct bonding *bond)
3228 bond_resend_igmp_join_requests_delayed); 3229 bond_resend_igmp_join_requests_delayed);
3229 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); 3230 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
3230 INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor); 3231 INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
3231 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) 3232 INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
3232 INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
3233 else
3234 INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
3235 INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); 3233 INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
3236 INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler); 3234 INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
3237} 3235}
@@ -3266,8 +3264,6 @@ static int bond_open(struct net_device *bond_dev)
3266 } 3264 }
3267 } 3265 }
3268 3266
3269 bond_work_init_all(bond);
3270
3271 if (bond_is_lb(bond)) { 3267 if (bond_is_lb(bond)) {
3272 /* bond_alb_initialize must be called before the timer 3268 /* bond_alb_initialize must be called before the timer
3273 * is started. 3269 * is started.
@@ -4252,6 +4248,12 @@ static int bond_check_params(struct bond_params *params)
4252 int arp_all_targets_value; 4248 int arp_all_targets_value;
4253 u16 ad_actor_sys_prio = 0; 4249 u16 ad_actor_sys_prio = 0;
4254 u16 ad_user_port_key = 0; 4250 u16 ad_user_port_key = 0;
4251 __be32 arp_target[BOND_MAX_ARP_TARGETS];
4252 int arp_ip_count;
4253 int bond_mode = BOND_MODE_ROUNDROBIN;
4254 int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
4255 int lacp_fast = 0;
4256 int tlb_dynamic_lb = 0;
4255 4257
4256 /* Convert string parameters. */ 4258 /* Convert string parameters. */
4257 if (mode) { 4259 if (mode) {
@@ -4564,6 +4566,17 @@ static int bond_check_params(struct bond_params *params)
4564 } 4566 }
4565 ad_user_port_key = valptr->value; 4567 ad_user_port_key = valptr->value;
4566 4568
4569 if (bond_mode == BOND_MODE_TLB) {
4570 bond_opt_initstr(&newval, "default");
4571 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB),
4572 &newval);
4573 if (!valptr) {
4574 pr_err("Error: No tlb_dynamic_lb default value");
4575 return -EINVAL;
4576 }
4577 tlb_dynamic_lb = valptr->value;
4578 }
4579
4567 if (lp_interval == 0) { 4580 if (lp_interval == 0) {
4568 pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n", 4581 pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
4569 INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL); 4582 INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
@@ -4591,7 +4604,7 @@ static int bond_check_params(struct bond_params *params)
4591 params->min_links = min_links; 4604 params->min_links = min_links;
4592 params->lp_interval = lp_interval; 4605 params->lp_interval = lp_interval;
4593 params->packets_per_slave = packets_per_slave; 4606 params->packets_per_slave = packets_per_slave;
4594 params->tlb_dynamic_lb = 1; /* Default value */ 4607 params->tlb_dynamic_lb = tlb_dynamic_lb;
4595 params->ad_actor_sys_prio = ad_actor_sys_prio; 4608 params->ad_actor_sys_prio = ad_actor_sys_prio;
4596 eth_zero_addr(params->ad_actor_system); 4609 eth_zero_addr(params->ad_actor_system);
4597 params->ad_user_port_key = ad_user_port_key; 4610 params->ad_user_port_key = ad_user_port_key;
@@ -4687,6 +4700,8 @@ int bond_create(struct net *net, const char *name)
4687 4700
4688 netif_carrier_off(bond_dev); 4701 netif_carrier_off(bond_dev);
4689 4702
4703 bond_work_init_all(bond);
4704
4690 rtnl_unlock(); 4705 rtnl_unlock();
4691 if (res < 0) 4706 if (res < 0)
4692 bond_destructor(bond_dev); 4707 bond_destructor(bond_dev);
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index c36be318de1a..31d37a90cec7 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -1,5 +1,6 @@
1obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o 1obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
2mv88e6xxx-objs := chip.o 2mv88e6xxx-objs := chip.o
3mv88e6xxx-objs += global1.o 3mv88e6xxx-objs += global1.o
4mv88e6xxx-objs += global1_atu.o
4mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2.o 5mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2.o
5mv88e6xxx-objs += port.o 6mv88e6xxx-objs += port.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 03dc886ed3d6..2bca297d9296 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -687,11 +687,6 @@ static bool mv88e6xxx_6165_family(struct mv88e6xxx_chip *chip)
687 return chip->info->family == MV88E6XXX_FAMILY_6165; 687 return chip->info->family == MV88E6XXX_FAMILY_6165;
688} 688}
689 689
690static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip)
691{
692 return chip->info->family == MV88E6XXX_FAMILY_6320;
693}
694
695static bool mv88e6xxx_6341_family(struct mv88e6xxx_chip *chip) 690static bool mv88e6xxx_6341_family(struct mv88e6xxx_chip *chip)
696{ 691{
697 return chip->info->family == MV88E6XXX_FAMILY_6341; 692 return chip->info->family == MV88E6XXX_FAMILY_6341;
@@ -1066,11 +1061,6 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
1066 mutex_unlock(&chip->reg_lock); 1061 mutex_unlock(&chip->reg_lock);
1067} 1062}
1068 1063
1069static int _mv88e6xxx_atu_wait(struct mv88e6xxx_chip *chip)
1070{
1071 return mv88e6xxx_g1_wait(chip, GLOBAL_ATU_OP, GLOBAL_ATU_OP_BUSY);
1072}
1073
1074static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, 1064static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
1075 struct ethtool_eee *e) 1065 struct ethtool_eee *e)
1076{ 1066{
@@ -1130,122 +1120,6 @@ out:
1130 return err; 1120 return err;
1131} 1121}
1132 1122
1133static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_chip *chip, u16 fid, u16 cmd)
1134{
1135 u16 val;
1136 int err;
1137
1138 if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G1_ATU_FID)) {
1139 err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_FID, fid);
1140 if (err)
1141 return err;
1142 } else if (mv88e6xxx_num_databases(chip) == 256) {
1143 /* ATU DBNum[7:4] are located in ATU Control 15:12 */
1144 err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
1145 if (err)
1146 return err;
1147
1148 err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL,
1149 (val & 0xfff) | ((fid << 8) & 0xf000));
1150 if (err)
1151 return err;
1152
1153 /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
1154 cmd |= fid & 0xf;
1155 }
1156
1157 err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_OP, cmd);
1158 if (err)
1159 return err;
1160
1161 return _mv88e6xxx_atu_wait(chip);
1162}
1163
1164static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_chip *chip,
1165 struct mv88e6xxx_atu_entry *entry)
1166{
1167 u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
1168
1169 if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1170 unsigned int mask, shift;
1171
1172 if (entry->trunk) {
1173 data |= GLOBAL_ATU_DATA_TRUNK;
1174 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
1175 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
1176 } else {
1177 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
1178 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
1179 }
1180
1181 data |= (entry->portv_trunkid << shift) & mask;
1182 }
1183
1184 return mv88e6xxx_g1_write(chip, GLOBAL_ATU_DATA, data);
1185}
1186
1187static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_chip *chip,
1188 struct mv88e6xxx_atu_entry *entry,
1189 bool static_too)
1190{
1191 int op;
1192 int err;
1193
1194 err = _mv88e6xxx_atu_wait(chip);
1195 if (err)
1196 return err;
1197
1198 err = _mv88e6xxx_atu_data_write(chip, entry);
1199 if (err)
1200 return err;
1201
1202 if (entry->fid) {
1203 op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
1204 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
1205 } else {
1206 op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL :
1207 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
1208 }
1209
1210 return _mv88e6xxx_atu_cmd(chip, entry->fid, op);
1211}
1212
1213static int _mv88e6xxx_atu_flush(struct mv88e6xxx_chip *chip,
1214 u16 fid, bool static_too)
1215{
1216 struct mv88e6xxx_atu_entry entry = {
1217 .fid = fid,
1218 .state = 0, /* EntryState bits must be 0 */
1219 };
1220
1221 return _mv88e6xxx_atu_flush_move(chip, &entry, static_too);
1222}
1223
1224static int _mv88e6xxx_atu_move(struct mv88e6xxx_chip *chip, u16 fid,
1225 int from_port, int to_port, bool static_too)
1226{
1227 struct mv88e6xxx_atu_entry entry = {
1228 .trunk = false,
1229 .fid = fid,
1230 };
1231
1232 /* EntryState bits must be 0xF */
1233 entry.state = GLOBAL_ATU_DATA_STATE_MASK;
1234
1235 /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
1236 entry.portv_trunkid = (to_port & 0x0f) << 4;
1237 entry.portv_trunkid |= from_port & 0x0f;
1238
1239 return _mv88e6xxx_atu_flush_move(chip, &entry, static_too);
1240}
1241
1242static int _mv88e6xxx_atu_remove(struct mv88e6xxx_chip *chip, u16 fid,
1243 int port, bool static_too)
1244{
1245 /* Destination port 0xF means remove the entries */
1246 return _mv88e6xxx_atu_move(chip, fid, port, 0x0f, static_too);
1247}
1248
1249static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port) 1123static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
1250{ 1124{
1251 struct dsa_switch *ds = chip->ds; 1125 struct dsa_switch *ds = chip->ds;
@@ -1306,13 +1180,28 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
1306 netdev_err(ds->ports[port].netdev, "failed to update state\n"); 1180 netdev_err(ds->ports[port].netdev, "failed to update state\n");
1307} 1181}
1308 1182
1183static int mv88e6xxx_atu_setup(struct mv88e6xxx_chip *chip)
1184{
1185 int err;
1186
1187 err = mv88e6xxx_g1_atu_flush(chip, 0, true);
1188 if (err)
1189 return err;
1190
1191 err = mv88e6xxx_g1_atu_set_learn2all(chip, true);
1192 if (err)
1193 return err;
1194
1195 return mv88e6xxx_g1_atu_set_age_time(chip, 300000);
1196}
1197
1309static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port) 1198static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
1310{ 1199{
1311 struct mv88e6xxx_chip *chip = ds->priv; 1200 struct mv88e6xxx_chip *chip = ds->priv;
1312 int err; 1201 int err;
1313 1202
1314 mutex_lock(&chip->reg_lock); 1203 mutex_lock(&chip->reg_lock);
1315 err = _mv88e6xxx_atu_remove(chip, 0, port, false); 1204 err = mv88e6xxx_g1_atu_remove(chip, 0, port, false);
1316 mutex_unlock(&chip->reg_lock); 1205 mutex_unlock(&chip->reg_lock);
1317 1206
1318 if (err) 1207 if (err)
@@ -1662,7 +1551,7 @@ loadpurge:
1662 return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE); 1551 return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE);
1663} 1552}
1664 1553
1665static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid) 1554static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
1666{ 1555{
1667 DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID); 1556 DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
1668 struct mv88e6xxx_vtu_entry vlan; 1557 struct mv88e6xxx_vtu_entry vlan;
@@ -1703,7 +1592,7 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid)
1703 return -ENOSPC; 1592 return -ENOSPC;
1704 1593
1705 /* Clear the database */ 1594 /* Clear the database */
1706 return _mv88e6xxx_atu_flush(chip, *fid, true); 1595 return mv88e6xxx_g1_atu_flush(chip, *fid, true);
1707} 1596}
1708 1597
1709static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid, 1598static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
@@ -1716,7 +1605,7 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
1716 }; 1605 };
1717 int i, err; 1606 int i, err;
1718 1607
1719 err = _mv88e6xxx_fid_new(chip, &vlan.fid); 1608 err = mv88e6xxx_atu_new(chip, &vlan.fid);
1720 if (err) 1609 if (err)
1721 return err; 1610 return err;
1722 1611
@@ -1964,7 +1853,7 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
1964 if (err) 1853 if (err)
1965 return err; 1854 return err;
1966 1855
1967 return _mv88e6xxx_atu_remove(chip, vlan.fid, port, false); 1856 return mv88e6xxx_g1_atu_remove(chip, vlan.fid, port, false);
1968} 1857}
1969 1858
1970static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, 1859static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
@@ -2001,96 +1890,6 @@ unlock:
2001 return err; 1890 return err;
2002} 1891}
2003 1892
2004static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_chip *chip,
2005 const unsigned char *addr)
2006{
2007 int i, err;
2008
2009 for (i = 0; i < 3; i++) {
2010 err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_MAC_01 + i,
2011 (addr[i * 2] << 8) | addr[i * 2 + 1]);
2012 if (err)
2013 return err;
2014 }
2015
2016 return 0;
2017}
2018
2019static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_chip *chip,
2020 unsigned char *addr)
2021{
2022 u16 val;
2023 int i, err;
2024
2025 for (i = 0; i < 3; i++) {
2026 err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_MAC_01 + i, &val);
2027 if (err)
2028 return err;
2029
2030 addr[i * 2] = val >> 8;
2031 addr[i * 2 + 1] = val & 0xff;
2032 }
2033
2034 return 0;
2035}
2036
2037static int _mv88e6xxx_atu_load(struct mv88e6xxx_chip *chip,
2038 struct mv88e6xxx_atu_entry *entry)
2039{
2040 int ret;
2041
2042 ret = _mv88e6xxx_atu_wait(chip);
2043 if (ret < 0)
2044 return ret;
2045
2046 ret = _mv88e6xxx_atu_mac_write(chip, entry->mac);
2047 if (ret < 0)
2048 return ret;
2049
2050 ret = _mv88e6xxx_atu_data_write(chip, entry);
2051 if (ret < 0)
2052 return ret;
2053
2054 return _mv88e6xxx_atu_cmd(chip, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
2055}
2056
2057static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
2058 struct mv88e6xxx_atu_entry *entry);
2059
2060static int mv88e6xxx_atu_get(struct mv88e6xxx_chip *chip, int fid,
2061 const u8 *addr, struct mv88e6xxx_atu_entry *entry)
2062{
2063 struct mv88e6xxx_atu_entry next;
2064 int err;
2065
2066 memcpy(next.mac, addr, ETH_ALEN);
2067 eth_addr_dec(next.mac);
2068
2069 err = _mv88e6xxx_atu_mac_write(chip, next.mac);
2070 if (err)
2071 return err;
2072
2073 do {
2074 err = _mv88e6xxx_atu_getnext(chip, fid, &next);
2075 if (err)
2076 return err;
2077
2078 if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
2079 break;
2080
2081 if (ether_addr_equal(next.mac, addr)) {
2082 *entry = next;
2083 return 0;
2084 }
2085 } while (ether_addr_greater(addr, next.mac));
2086
2087 memset(entry, 0, sizeof(*entry));
2088 entry->fid = fid;
2089 ether_addr_copy(entry->mac, addr);
2090
2091 return 0;
2092}
2093
2094static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, 1893static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
2095 const unsigned char *addr, u16 vid, 1894 const unsigned char *addr, u16 vid,
2096 u8 state) 1895 u8 state)
@@ -2107,21 +1906,32 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
2107 if (err) 1906 if (err)
2108 return err; 1907 return err;
2109 1908
2110 err = mv88e6xxx_atu_get(chip, vlan.fid, addr, &entry); 1909 entry.state = GLOBAL_ATU_DATA_STATE_UNUSED;
1910 ether_addr_copy(entry.mac, addr);
1911 eth_addr_dec(entry.mac);
1912
1913 err = mv88e6xxx_g1_atu_getnext(chip, vlan.fid, &entry);
2111 if (err) 1914 if (err)
2112 return err; 1915 return err;
2113 1916
1917 /* Initialize a fresh ATU entry if it isn't found */
1918 if (entry.state == GLOBAL_ATU_DATA_STATE_UNUSED ||
1919 !ether_addr_equal(entry.mac, addr)) {
1920 memset(&entry, 0, sizeof(entry));
1921 ether_addr_copy(entry.mac, addr);
1922 }
1923
2114 /* Purge the ATU entry only if no port is using it anymore */ 1924 /* Purge the ATU entry only if no port is using it anymore */
2115 if (state == GLOBAL_ATU_DATA_STATE_UNUSED) { 1925 if (state == GLOBAL_ATU_DATA_STATE_UNUSED) {
2116 entry.portv_trunkid &= ~BIT(port); 1926 entry.portvec &= ~BIT(port);
2117 if (!entry.portv_trunkid) 1927 if (!entry.portvec)
2118 entry.state = GLOBAL_ATU_DATA_STATE_UNUSED; 1928 entry.state = GLOBAL_ATU_DATA_STATE_UNUSED;
2119 } else { 1929 } else {
2120 entry.portv_trunkid |= BIT(port); 1930 entry.portvec |= BIT(port);
2121 entry.state = state; 1931 entry.state = state;
2122 } 1932 }
2123 1933
2124 return _mv88e6xxx_atu_load(chip, &entry); 1934 return mv88e6xxx_g1_atu_loadpurge(chip, vlan.fid, &entry);
2125} 1935}
2126 1936
2127static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, 1937static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
@@ -2161,75 +1971,26 @@ static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
2161 return err; 1971 return err;
2162} 1972}
2163 1973
2164static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
2165 struct mv88e6xxx_atu_entry *entry)
2166{
2167 struct mv88e6xxx_atu_entry next = { 0 };
2168 u16 val;
2169 int err;
2170
2171 next.fid = fid;
2172
2173 err = _mv88e6xxx_atu_wait(chip);
2174 if (err)
2175 return err;
2176
2177 err = _mv88e6xxx_atu_cmd(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
2178 if (err)
2179 return err;
2180
2181 err = _mv88e6xxx_atu_mac_read(chip, next.mac);
2182 if (err)
2183 return err;
2184
2185 err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_DATA, &val);
2186 if (err)
2187 return err;
2188
2189 next.state = val & GLOBAL_ATU_DATA_STATE_MASK;
2190 if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
2191 unsigned int mask, shift;
2192
2193 if (val & GLOBAL_ATU_DATA_TRUNK) {
2194 next.trunk = true;
2195 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
2196 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
2197 } else {
2198 next.trunk = false;
2199 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
2200 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
2201 }
2202
2203 next.portv_trunkid = (val & mask) >> shift;
2204 }
2205
2206 *entry = next;
2207 return 0;
2208}
2209
2210static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, 1974static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip,
2211 u16 fid, u16 vid, int port, 1975 u16 fid, u16 vid, int port,
2212 struct switchdev_obj *obj, 1976 struct switchdev_obj *obj,
2213 int (*cb)(struct switchdev_obj *obj)) 1977 int (*cb)(struct switchdev_obj *obj))
2214{ 1978{
2215 struct mv88e6xxx_atu_entry addr = { 1979 struct mv88e6xxx_atu_entry addr;
2216 .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
2217 };
2218 int err; 1980 int err;
2219 1981
2220 err = _mv88e6xxx_atu_mac_write(chip, addr.mac); 1982 addr.state = GLOBAL_ATU_DATA_STATE_UNUSED;
2221 if (err) 1983 eth_broadcast_addr(addr.mac);
2222 return err;
2223 1984
2224 do { 1985 do {
2225 err = _mv88e6xxx_atu_getnext(chip, fid, &addr); 1986 err = mv88e6xxx_g1_atu_getnext(chip, fid, &addr);
2226 if (err) 1987 if (err)
2227 return err; 1988 return err;
2228 1989
2229 if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED) 1990 if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
2230 break; 1991 break;
2231 1992
2232 if (addr.trunk || (addr.portv_trunkid & BIT(port)) == 0) 1993 if (addr.trunk || (addr.portvec & BIT(port)) == 0)
2233 continue; 1994 continue;
2234 1995
2235 if (obj->id == SWITCHDEV_OBJ_ID_PORT_FDB) { 1996 if (obj->id == SWITCHDEV_OBJ_ID_PORT_FDB) {
@@ -2433,70 +2194,85 @@ static int mv88e6xxx_serdes_power_on(struct mv88e6xxx_chip *chip)
2433 return err; 2194 return err;
2434} 2195}
2435 2196
2436static int mv88e6xxx_setup_port_dsa(struct mv88e6xxx_chip *chip, int port, 2197static int mv88e6xxx_set_port_mode(struct mv88e6xxx_chip *chip, int port,
2437 int upstream_port) 2198 enum mv88e6xxx_frame_mode frame, u16 egress,
2199 u16 etype)
2438{ 2200{
2439 int err; 2201 int err;
2440 2202
2441 err = chip->info->ops->port_set_frame_mode( 2203 if (!chip->info->ops->port_set_frame_mode)
2442 chip, port, MV88E6XXX_FRAME_MODE_DSA); 2204 return -EOPNOTSUPP;
2205
2206 err = mv88e6xxx_port_set_egress_mode(chip, port, egress);
2207 if (err)
2208 return err;
2209
2210 err = chip->info->ops->port_set_frame_mode(chip, port, frame);
2443 if (err) 2211 if (err)
2444 return err; 2212 return err;
2445 2213
2446 return chip->info->ops->port_set_egress_unknowns( 2214 if (chip->info->ops->port_set_ether_type)
2447 chip, port, port == upstream_port); 2215 return chip->info->ops->port_set_ether_type(chip, port, etype);
2216
2217 return 0;
2448} 2218}
2449 2219
2450static int mv88e6xxx_setup_port_cpu(struct mv88e6xxx_chip *chip, int port) 2220static int mv88e6xxx_set_port_mode_normal(struct mv88e6xxx_chip *chip, int port)
2451{ 2221{
2452 int err; 2222 return mv88e6xxx_set_port_mode(chip, port, MV88E6XXX_FRAME_MODE_NORMAL,
2223 PORT_CONTROL_EGRESS_UNMODIFIED,
2224 PORT_ETH_TYPE_DEFAULT);
2225}
2453 2226
2454 switch (chip->info->tag_protocol) { 2227static int mv88e6xxx_set_port_mode_dsa(struct mv88e6xxx_chip *chip, int port)
2455 case DSA_TAG_PROTO_EDSA: 2228{
2456 err = chip->info->ops->port_set_frame_mode( 2229 return mv88e6xxx_set_port_mode(chip, port, MV88E6XXX_FRAME_MODE_DSA,
2457 chip, port, MV88E6XXX_FRAME_MODE_ETHERTYPE); 2230 PORT_CONTROL_EGRESS_UNMODIFIED,
2458 if (err) 2231 PORT_ETH_TYPE_DEFAULT);
2459 return err; 2232}
2460 2233
2461 err = mv88e6xxx_port_set_egress_mode( 2234static int mv88e6xxx_set_port_mode_edsa(struct mv88e6xxx_chip *chip, int port)
2462 chip, port, PORT_CONTROL_EGRESS_ADD_TAG); 2235{
2463 if (err) 2236 return mv88e6xxx_set_port_mode(chip, port,
2464 return err; 2237 MV88E6XXX_FRAME_MODE_ETHERTYPE,
2238 PORT_CONTROL_EGRESS_ADD_TAG, ETH_P_EDSA);
2239}
2465 2240
2466 if (chip->info->ops->port_set_ether_type) 2241static int mv88e6xxx_setup_port_mode(struct mv88e6xxx_chip *chip, int port)
2467 err = chip->info->ops->port_set_ether_type( 2242{
2468 chip, port, ETH_P_EDSA); 2243 if (dsa_is_dsa_port(chip->ds, port))
2469 break; 2244 return mv88e6xxx_set_port_mode_dsa(chip, port);
2470 2245
2471 case DSA_TAG_PROTO_DSA: 2246 if (dsa_is_normal_port(chip->ds, port))
2472 err = chip->info->ops->port_set_frame_mode( 2247 return mv88e6xxx_set_port_mode_normal(chip, port);
2473 chip, port, MV88E6XXX_FRAME_MODE_DSA);
2474 if (err)
2475 return err;
2476 2248
2477 err = mv88e6xxx_port_set_egress_mode( 2249 /* Setup CPU port mode depending on its supported tag format */
2478 chip, port, PORT_CONTROL_EGRESS_UNMODIFIED); 2250 if (chip->info->tag_protocol == DSA_TAG_PROTO_DSA)
2479 break; 2251 return mv88e6xxx_set_port_mode_dsa(chip, port);
2480 default:
2481 err = -EINVAL;
2482 }
2483 2252
2484 if (err) 2253 if (chip->info->tag_protocol == DSA_TAG_PROTO_EDSA)
2485 return err; 2254 return mv88e6xxx_set_port_mode_edsa(chip, port);
2486 2255
2487 return chip->info->ops->port_set_egress_unknowns(chip, port, true); 2256 return -EINVAL;
2488} 2257}
2489 2258
2490static int mv88e6xxx_setup_port_normal(struct mv88e6xxx_chip *chip, int port) 2259static int mv88e6xxx_setup_message_port(struct mv88e6xxx_chip *chip, int port)
2491{ 2260{
2492 int err; 2261 bool message = dsa_is_dsa_port(chip->ds, port);
2493 2262
2494 err = chip->info->ops->port_set_frame_mode( 2263 return mv88e6xxx_port_set_message_port(chip, port, message);
2495 chip, port, MV88E6XXX_FRAME_MODE_NORMAL); 2264}
2496 if (err)
2497 return err;
2498 2265
2499 return chip->info->ops->port_set_egress_unknowns(chip, port, false); 2266static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port)
2267{
2268 bool flood = port == dsa_upstream_port(chip->ds);
2269
2270 /* Upstream ports flood frames with unknown unicast or multicast DA */
2271 if (chip->info->ops->port_set_egress_floods)
2272 return chip->info->ops->port_set_egress_floods(chip, port,
2273 flood, flood);
2274
2275 return 0;
2500} 2276}
2501 2277
2502static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) 2278static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
@@ -2541,14 +2317,11 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
2541 if (err) 2317 if (err)
2542 return err; 2318 return err;
2543 2319
2544 if (dsa_is_cpu_port(ds, port)) { 2320 err = mv88e6xxx_setup_port_mode(chip, port);
2545 err = mv88e6xxx_setup_port_cpu(chip, port); 2321 if (err)
2546 } else if (dsa_is_dsa_port(ds, port)) { 2322 return err;
2547 err = mv88e6xxx_setup_port_dsa(chip, port, 2323
2548 dsa_upstream_port(ds)); 2324 err = mv88e6xxx_setup_egress_floods(chip, port);
2549 } else {
2550 err = mv88e6xxx_setup_port_normal(chip, port);
2551 }
2552 if (err) 2325 if (err)
2553 return err; 2326 return err;
2554 2327
@@ -2623,20 +2396,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
2623 return err; 2396 return err;
2624 } 2397 }
2625 2398
2626 if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || 2399 if (chip->info->ops->port_disable_learn_limit) {
2627 mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || 2400 err = chip->info->ops->port_disable_learn_limit(chip, port);
2628 mv88e6xxx_6320_family(chip) || mv88e6xxx_6341_family(chip)) { 2401 if (err)
2629 /* Port ATU control: disable limiting the number of 2402 return err;
2630 * address database entries that this port is allowed 2403 }
2631 * to use. 2404
2632 */ 2405 if (chip->info->ops->port_disable_pri_override) {
2633 err = mv88e6xxx_port_write(chip, port, PORT_ATU_CONTROL, 2406 err = chip->info->ops->port_disable_pri_override(chip, port);
2634 0x0000);
2635 /* Priority Override: disable DA, SA and VTU priority
2636 * override.
2637 */
2638 err = mv88e6xxx_port_write(chip, port, PORT_PRI_OVERRIDE,
2639 0x0000);
2640 if (err) 2407 if (err)
2641 return err; 2408 return err;
2642 } 2409 }
@@ -2653,10 +2420,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
2653 return err; 2420 return err;
2654 } 2421 }
2655 2422
2656 /* Port Control 1: disable trunking, disable sending 2423 err = mv88e6xxx_setup_message_port(chip, port);
2657 * learning messages to this port.
2658 */
2659 err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, 0x0000);
2660 if (err) 2424 if (err)
2661 return err; 2425 return err;
2662 2426
@@ -2697,33 +2461,6 @@ static int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
2697 return 0; 2461 return 0;
2698} 2462}
2699 2463
2700static int mv88e6xxx_g1_set_age_time(struct mv88e6xxx_chip *chip,
2701 unsigned int msecs)
2702{
2703 const unsigned int coeff = chip->info->age_time_coeff;
2704 const unsigned int min = 0x01 * coeff;
2705 const unsigned int max = 0xff * coeff;
2706 u8 age_time;
2707 u16 val;
2708 int err;
2709
2710 if (msecs < min || msecs > max)
2711 return -ERANGE;
2712
2713 /* Round to nearest multiple of coeff */
2714 age_time = (msecs + coeff / 2) / coeff;
2715
2716 err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
2717 if (err)
2718 return err;
2719
2720 /* AgeTime is 11:4 bits */
2721 val &= ~0xff0;
2722 val |= age_time << 4;
2723
2724 return mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
2725}
2726
2727static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds, 2464static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
2728 unsigned int ageing_time) 2465 unsigned int ageing_time)
2729{ 2466{
@@ -2731,7 +2468,7 @@ static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
2731 int err; 2468 int err;
2732 2469
2733 mutex_lock(&chip->reg_lock); 2470 mutex_lock(&chip->reg_lock);
2734 err = mv88e6xxx_g1_set_age_time(chip, ageing_time); 2471 err = mv88e6xxx_g1_atu_set_age_time(chip, ageing_time);
2735 mutex_unlock(&chip->reg_lock); 2472 mutex_unlock(&chip->reg_lock);
2736 2473
2737 return err; 2474 return err;
@@ -2774,24 +2511,6 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
2774 if (err < 0) 2511 if (err < 0)
2775 return err; 2512 return err;
2776 2513
2777 /* Set the default address aging time to 5 minutes, and
2778 * enable address learn messages to be sent to all message
2779 * ports.
2780 */
2781 err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL,
2782 GLOBAL_ATU_CONTROL_LEARN2ALL);
2783 if (err)
2784 return err;
2785
2786 err = mv88e6xxx_g1_set_age_time(chip, 300000);
2787 if (err)
2788 return err;
2789
2790 /* Clear all ATU entries */
2791 err = _mv88e6xxx_atu_flush(chip, 0, true);
2792 if (err)
2793 return err;
2794
2795 /* Configure the IP ToS mapping registers. */ 2514 /* Configure the IP ToS mapping registers. */
2796 err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_0, 0x0000); 2515 err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_0, 0x0000);
2797 if (err) 2516 if (err)
@@ -2872,6 +2591,10 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
2872 goto unlock; 2591 goto unlock;
2873 } 2592 }
2874 2593
2594 err = mv88e6xxx_atu_setup(chip);
2595 if (err)
2596 goto unlock;
2597
2875 /* Some generations have the configuration of sending reserved 2598 /* Some generations have the configuration of sending reserved
2876 * management frames to the CPU in global2, others in 2599 * management frames to the CPU in global2, others in
2877 * global1. Hence it does not fit the two setup functions 2600 * global1. Hence it does not fit the two setup functions
@@ -3101,10 +2824,12 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
3101 .port_set_speed = mv88e6185_port_set_speed, 2824 .port_set_speed = mv88e6185_port_set_speed,
3102 .port_tag_remap = mv88e6095_port_tag_remap, 2825 .port_tag_remap = mv88e6095_port_tag_remap,
3103 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 2826 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3104 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 2827 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3105 .port_set_ether_type = mv88e6351_port_set_ether_type, 2828 .port_set_ether_type = mv88e6351_port_set_ether_type,
3106 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 2829 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
3107 .port_pause_config = mv88e6097_port_pause_config, 2830 .port_pause_config = mv88e6097_port_pause_config,
2831 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
2832 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3108 .stats_snapshot = mv88e6xxx_g1_stats_snapshot, 2833 .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
3109 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 2834 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
3110 .stats_get_strings = mv88e6095_stats_get_strings, 2835 .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3127,7 +2852,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
3127 .port_set_duplex = mv88e6xxx_port_set_duplex, 2852 .port_set_duplex = mv88e6xxx_port_set_duplex,
3128 .port_set_speed = mv88e6185_port_set_speed, 2853 .port_set_speed = mv88e6185_port_set_speed,
3129 .port_set_frame_mode = mv88e6085_port_set_frame_mode, 2854 .port_set_frame_mode = mv88e6085_port_set_frame_mode,
3130 .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns, 2855 .port_set_egress_floods = mv88e6185_port_set_egress_floods,
3131 .port_set_upstream_port = mv88e6095_port_set_upstream_port, 2856 .port_set_upstream_port = mv88e6095_port_set_upstream_port,
3132 .stats_snapshot = mv88e6xxx_g1_stats_snapshot, 2857 .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
3133 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 2858 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3149,11 +2874,13 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
3149 .port_set_speed = mv88e6185_port_set_speed, 2874 .port_set_speed = mv88e6185_port_set_speed,
3150 .port_tag_remap = mv88e6095_port_tag_remap, 2875 .port_tag_remap = mv88e6095_port_tag_remap,
3151 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 2876 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3152 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 2877 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3153 .port_set_ether_type = mv88e6351_port_set_ether_type, 2878 .port_set_ether_type = mv88e6351_port_set_ether_type,
3154 .port_jumbo_config = mv88e6165_port_jumbo_config, 2879 .port_jumbo_config = mv88e6165_port_jumbo_config,
3155 .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting, 2880 .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
3156 .port_pause_config = mv88e6097_port_pause_config, 2881 .port_pause_config = mv88e6097_port_pause_config,
2882 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
2883 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3157 .stats_snapshot = mv88e6xxx_g1_stats_snapshot, 2884 .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
3158 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 2885 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
3159 .stats_get_strings = mv88e6095_stats_get_strings, 2886 .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3174,7 +2901,9 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
3174 .port_set_duplex = mv88e6xxx_port_set_duplex, 2901 .port_set_duplex = mv88e6xxx_port_set_duplex,
3175 .port_set_speed = mv88e6185_port_set_speed, 2902 .port_set_speed = mv88e6185_port_set_speed,
3176 .port_set_frame_mode = mv88e6085_port_set_frame_mode, 2903 .port_set_frame_mode = mv88e6085_port_set_frame_mode,
3177 .port_set_egress_unknowns = mv88e6085_port_set_egress_unknowns, 2904 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
2905 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
2906 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3178 .stats_snapshot = mv88e6xxx_g1_stats_snapshot, 2907 .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
3179 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 2908 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
3180 .stats_get_strings = mv88e6095_stats_get_strings, 2909 .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3196,7 +2925,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
3196 .port_set_speed = mv88e6185_port_set_speed, 2925 .port_set_speed = mv88e6185_port_set_speed,
3197 .port_tag_remap = mv88e6095_port_tag_remap, 2926 .port_tag_remap = mv88e6095_port_tag_remap,
3198 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 2927 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3199 .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns, 2928 .port_set_egress_floods = mv88e6185_port_set_egress_floods,
3200 .port_set_ether_type = mv88e6351_port_set_ether_type, 2929 .port_set_ether_type = mv88e6351_port_set_ether_type,
3201 .port_set_upstream_port = mv88e6095_port_set_upstream_port, 2930 .port_set_upstream_port = mv88e6095_port_set_upstream_port,
3202 .port_jumbo_config = mv88e6165_port_jumbo_config, 2931 .port_jumbo_config = mv88e6165_port_jumbo_config,
@@ -3225,11 +2954,13 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
3225 .port_set_speed = mv88e6185_port_set_speed, 2954 .port_set_speed = mv88e6185_port_set_speed,
3226 .port_tag_remap = mv88e6095_port_tag_remap, 2955 .port_tag_remap = mv88e6095_port_tag_remap,
3227 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 2956 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3228 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 2957 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3229 .port_set_ether_type = mv88e6351_port_set_ether_type, 2958 .port_set_ether_type = mv88e6351_port_set_ether_type,
3230 .port_jumbo_config = mv88e6165_port_jumbo_config, 2959 .port_jumbo_config = mv88e6165_port_jumbo_config,
3231 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 2960 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
3232 .port_pause_config = mv88e6097_port_pause_config, 2961 .port_pause_config = mv88e6097_port_pause_config,
2962 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
2963 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3233 .stats_snapshot = mv88e6xxx_g1_stats_snapshot, 2964 .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
3234 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 2965 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
3235 .stats_get_strings = mv88e6095_stats_get_strings, 2966 .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3249,6 +2980,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
3249 .port_set_link = mv88e6xxx_port_set_link, 2980 .port_set_link = mv88e6xxx_port_set_link,
3250 .port_set_duplex = mv88e6xxx_port_set_duplex, 2981 .port_set_duplex = mv88e6xxx_port_set_duplex,
3251 .port_set_speed = mv88e6185_port_set_speed, 2982 .port_set_speed = mv88e6185_port_set_speed,
2983 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
2984 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3252 .stats_snapshot = mv88e6xxx_g1_stats_snapshot, 2985 .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
3253 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 2986 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
3254 .stats_get_strings = mv88e6095_stats_get_strings, 2987 .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3271,11 +3004,13 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
3271 .port_set_speed = mv88e6185_port_set_speed, 3004 .port_set_speed = mv88e6185_port_set_speed,
3272 .port_tag_remap = mv88e6095_port_tag_remap, 3005 .port_tag_remap = mv88e6095_port_tag_remap,
3273 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3006 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3274 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 3007 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3275 .port_set_ether_type = mv88e6351_port_set_ether_type, 3008 .port_set_ether_type = mv88e6351_port_set_ether_type,
3276 .port_jumbo_config = mv88e6165_port_jumbo_config, 3009 .port_jumbo_config = mv88e6165_port_jumbo_config,
3277 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 3010 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
3278 .port_pause_config = mv88e6097_port_pause_config, 3011 .port_pause_config = mv88e6097_port_pause_config,
3012 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3013 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3279 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3014 .stats_snapshot = mv88e6320_g1_stats_snapshot,
3280 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 3015 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
3281 .stats_get_strings = mv88e6095_stats_get_strings, 3016 .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3300,11 +3035,13 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
3300 .port_set_speed = mv88e6352_port_set_speed, 3035 .port_set_speed = mv88e6352_port_set_speed,
3301 .port_tag_remap = mv88e6095_port_tag_remap, 3036 .port_tag_remap = mv88e6095_port_tag_remap,
3302 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3037 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3303 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 3038 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3304 .port_set_ether_type = mv88e6351_port_set_ether_type, 3039 .port_set_ether_type = mv88e6351_port_set_ether_type,
3305 .port_jumbo_config = mv88e6165_port_jumbo_config, 3040 .port_jumbo_config = mv88e6165_port_jumbo_config,
3306 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 3041 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
3307 .port_pause_config = mv88e6097_port_pause_config, 3042 .port_pause_config = mv88e6097_port_pause_config,
3043 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3044 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3308 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3045 .stats_snapshot = mv88e6320_g1_stats_snapshot,
3309 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 3046 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
3310 .stats_get_strings = mv88e6095_stats_get_strings, 3047 .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3327,11 +3064,13 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
3327 .port_set_speed = mv88e6185_port_set_speed, 3064 .port_set_speed = mv88e6185_port_set_speed,
3328 .port_tag_remap = mv88e6095_port_tag_remap, 3065 .port_tag_remap = mv88e6095_port_tag_remap,
3329 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3066 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3330 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 3067 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3331 .port_set_ether_type = mv88e6351_port_set_ether_type, 3068 .port_set_ether_type = mv88e6351_port_set_ether_type,
3332 .port_jumbo_config = mv88e6165_port_jumbo_config, 3069 .port_jumbo_config = mv88e6165_port_jumbo_config,
3333 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 3070 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
3334 .port_pause_config = mv88e6097_port_pause_config, 3071 .port_pause_config = mv88e6097_port_pause_config,
3072 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3073 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3335 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3074 .stats_snapshot = mv88e6320_g1_stats_snapshot,
3336 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 3075 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
3337 .stats_get_strings = mv88e6095_stats_get_strings, 3076 .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3356,11 +3095,13 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
3356 .port_set_speed = mv88e6352_port_set_speed, 3095 .port_set_speed = mv88e6352_port_set_speed,
3357 .port_tag_remap = mv88e6095_port_tag_remap, 3096 .port_tag_remap = mv88e6095_port_tag_remap,
3358 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3097 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3359 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 3098 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3360 .port_set_ether_type = mv88e6351_port_set_ether_type, 3099 .port_set_ether_type = mv88e6351_port_set_ether_type,
3361 .port_jumbo_config = mv88e6165_port_jumbo_config, 3100 .port_jumbo_config = mv88e6165_port_jumbo_config,
3362 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 3101 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
3363 .port_pause_config = mv88e6097_port_pause_config, 3102 .port_pause_config = mv88e6097_port_pause_config,
3103 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3104 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3364 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3105 .stats_snapshot = mv88e6320_g1_stats_snapshot,
3365 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 3106 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
3366 .stats_get_strings = mv88e6095_stats_get_strings, 3107 .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3381,7 +3122,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
3381 .port_set_duplex = mv88e6xxx_port_set_duplex, 3122 .port_set_duplex = mv88e6xxx_port_set_duplex,
3382 .port_set_speed = mv88e6185_port_set_speed, 3123 .port_set_speed = mv88e6185_port_set_speed,
3383 .port_set_frame_mode = mv88e6085_port_set_frame_mode, 3124 .port_set_frame_mode = mv88e6085_port_set_frame_mode,
3384 .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns, 3125 .port_set_egress_floods = mv88e6185_port_set_egress_floods,
3385 .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting, 3126 .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
3386 .port_set_upstream_port = mv88e6095_port_set_upstream_port, 3127 .port_set_upstream_port = mv88e6095_port_set_upstream_port,
3387 .stats_snapshot = mv88e6xxx_g1_stats_snapshot, 3128 .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
@@ -3410,9 +3151,11 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
3410 .port_set_speed = mv88e6390_port_set_speed, 3151 .port_set_speed = mv88e6390_port_set_speed,
3411 .port_tag_remap = mv88e6390_port_tag_remap, 3152 .port_tag_remap = mv88e6390_port_tag_remap,
3412 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3153 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3413 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 3154 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3414 .port_set_ether_type = mv88e6351_port_set_ether_type, 3155 .port_set_ether_type = mv88e6351_port_set_ether_type,
3415 .port_pause_config = mv88e6390_port_pause_config, 3156 .port_pause_config = mv88e6390_port_pause_config,
3157 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3158 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3416 .stats_snapshot = mv88e6390_g1_stats_snapshot, 3159 .stats_snapshot = mv88e6390_g1_stats_snapshot,
3417 .stats_set_histogram = mv88e6390_g1_stats_set_histogram, 3160 .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
3418 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3161 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3438,9 +3181,11 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
3438 .port_set_speed = mv88e6390x_port_set_speed, 3181 .port_set_speed = mv88e6390x_port_set_speed,
3439 .port_tag_remap = mv88e6390_port_tag_remap, 3182 .port_tag_remap = mv88e6390_port_tag_remap,
3440 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3183 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3441 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 3184 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3442 .port_set_ether_type = mv88e6351_port_set_ether_type, 3185 .port_set_ether_type = mv88e6351_port_set_ether_type,
3443 .port_pause_config = mv88e6390_port_pause_config, 3186 .port_pause_config = mv88e6390_port_pause_config,
3187 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3188 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3444 .stats_snapshot = mv88e6390_g1_stats_snapshot, 3189 .stats_snapshot = mv88e6390_g1_stats_snapshot,
3445 .stats_set_histogram = mv88e6390_g1_stats_set_histogram, 3190 .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
3446 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3191 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3466,9 +3211,11 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
3466 .port_set_speed = mv88e6390_port_set_speed, 3211 .port_set_speed = mv88e6390_port_set_speed,
3467 .port_tag_remap = mv88e6390_port_tag_remap, 3212 .port_tag_remap = mv88e6390_port_tag_remap,
3468 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3213 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3469 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 3214 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3470 .port_set_ether_type = mv88e6351_port_set_ether_type, 3215 .port_set_ether_type = mv88e6351_port_set_ether_type,
3471 .port_pause_config = mv88e6390_port_pause_config, 3216 .port_pause_config = mv88e6390_port_pause_config,
3217 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3218 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3472 .stats_snapshot = mv88e6390_g1_stats_snapshot, 3219 .stats_snapshot = mv88e6390_g1_stats_snapshot,
3473 .stats_set_histogram = mv88e6390_g1_stats_set_histogram, 3220 .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
3474 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3221 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3494,11 +3241,13 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
3494 .port_set_speed = mv88e6352_port_set_speed, 3241 .port_set_speed = mv88e6352_port_set_speed,
3495 .port_tag_remap = mv88e6095_port_tag_remap, 3242 .port_tag_remap = mv88e6095_port_tag_remap,
3496 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3243 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3497 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 3244 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3498 .port_set_ether_type = mv88e6351_port_set_ether_type, 3245 .port_set_ether_type = mv88e6351_port_set_ether_type,
3499 .port_jumbo_config = mv88e6165_port_jumbo_config, 3246 .port_jumbo_config = mv88e6165_port_jumbo_config,
3500 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 3247 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
3501 .port_pause_config = mv88e6097_port_pause_config, 3248 .port_pause_config = mv88e6097_port_pause_config,
3249 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3250 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3502 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3251 .stats_snapshot = mv88e6320_g1_stats_snapshot,
3503 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 3252 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
3504 .stats_get_strings = mv88e6095_stats_get_strings, 3253 .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3523,10 +3272,12 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
3523 .port_set_speed = mv88e6390_port_set_speed, 3272 .port_set_speed = mv88e6390_port_set_speed,
3524 .port_tag_remap = mv88e6390_port_tag_remap, 3273 .port_tag_remap = mv88e6390_port_tag_remap,
3525 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3274 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3526 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 3275 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3527 .port_set_ether_type = mv88e6351_port_set_ether_type, 3276 .port_set_ether_type = mv88e6351_port_set_ether_type,
3528 .port_pause_config = mv88e6390_port_pause_config, 3277 .port_pause_config = mv88e6390_port_pause_config,
3529 .port_set_cmode = mv88e6390x_port_set_cmode, 3278 .port_set_cmode = mv88e6390x_port_set_cmode,
3279 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3280 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3530 .stats_snapshot = mv88e6390_g1_stats_snapshot, 3281 .stats_snapshot = mv88e6390_g1_stats_snapshot,
3531 .stats_set_histogram = mv88e6390_g1_stats_set_histogram, 3282 .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
3532 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3283 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3551,11 +3302,13 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
3551 .port_set_speed = mv88e6185_port_set_speed, 3302 .port_set_speed = mv88e6185_port_set_speed,
3552 .port_tag_remap = mv88e6095_port_tag_remap, 3303 .port_tag_remap = mv88e6095_port_tag_remap,
3553 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3304 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3554 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 3305 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3555 .port_set_ether_type = mv88e6351_port_set_ether_type, 3306 .port_set_ether_type = mv88e6351_port_set_ether_type,
3556 .port_jumbo_config = mv88e6165_port_jumbo_config, 3307 .port_jumbo_config = mv88e6165_port_jumbo_config,
3557 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 3308 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
3558 .port_pause_config = mv88e6097_port_pause_config, 3309 .port_pause_config = mv88e6097_port_pause_config,
3310 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3311 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3559 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3312 .stats_snapshot = mv88e6320_g1_stats_snapshot,
3560 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3313 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
3561 .stats_get_strings = mv88e6320_stats_get_strings, 3314 .stats_get_strings = mv88e6320_stats_get_strings,
@@ -3578,11 +3331,13 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
3578 .port_set_speed = mv88e6185_port_set_speed, 3331 .port_set_speed = mv88e6185_port_set_speed,
3579 .port_tag_remap = mv88e6095_port_tag_remap, 3332 .port_tag_remap = mv88e6095_port_tag_remap,
3580 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3333 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3581 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 3334 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3582 .port_set_ether_type = mv88e6351_port_set_ether_type, 3335 .port_set_ether_type = mv88e6351_port_set_ether_type,
3583 .port_jumbo_config = mv88e6165_port_jumbo_config, 3336 .port_jumbo_config = mv88e6165_port_jumbo_config,
3584 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 3337 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
3585 .port_pause_config = mv88e6097_port_pause_config, 3338 .port_pause_config = mv88e6097_port_pause_config,
3339 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3340 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3586 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3341 .stats_snapshot = mv88e6320_g1_stats_snapshot,
3587 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3342 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
3588 .stats_get_strings = mv88e6320_stats_get_strings, 3343 .stats_get_strings = mv88e6320_stats_get_strings,
@@ -3603,11 +3358,13 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
3603 .port_set_speed = mv88e6185_port_set_speed, 3358 .port_set_speed = mv88e6185_port_set_speed,
3604 .port_tag_remap = mv88e6095_port_tag_remap, 3359 .port_tag_remap = mv88e6095_port_tag_remap,
3605 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3360 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3606 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 3361 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3607 .port_set_ether_type = mv88e6351_port_set_ether_type, 3362 .port_set_ether_type = mv88e6351_port_set_ether_type,
3608 .port_jumbo_config = mv88e6165_port_jumbo_config, 3363 .port_jumbo_config = mv88e6165_port_jumbo_config,
3609 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 3364 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
3610 .port_pause_config = mv88e6097_port_pause_config, 3365 .port_pause_config = mv88e6097_port_pause_config,
3366 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3367 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3611 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3368 .stats_snapshot = mv88e6320_g1_stats_snapshot,
3612 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 3369 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
3613 .stats_get_strings = mv88e6095_stats_get_strings, 3370 .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3630,11 +3387,13 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
3630 .port_set_speed = mv88e6185_port_set_speed, 3387 .port_set_speed = mv88e6185_port_set_speed,
3631 .port_tag_remap = mv88e6095_port_tag_remap, 3388 .port_tag_remap = mv88e6095_port_tag_remap,
3632 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3389 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3633 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 3390 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3634 .port_set_ether_type = mv88e6351_port_set_ether_type, 3391 .port_set_ether_type = mv88e6351_port_set_ether_type,
3635 .port_jumbo_config = mv88e6165_port_jumbo_config, 3392 .port_jumbo_config = mv88e6165_port_jumbo_config,
3636 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 3393 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
3637 .port_pause_config = mv88e6097_port_pause_config, 3394 .port_pause_config = mv88e6097_port_pause_config,
3395 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3396 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3638 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3397 .stats_snapshot = mv88e6320_g1_stats_snapshot,
3639 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 3398 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
3640 .stats_get_strings = mv88e6095_stats_get_strings, 3399 .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3659,11 +3418,13 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
3659 .port_set_speed = mv88e6352_port_set_speed, 3418 .port_set_speed = mv88e6352_port_set_speed,
3660 .port_tag_remap = mv88e6095_port_tag_remap, 3419 .port_tag_remap = mv88e6095_port_tag_remap,
3661 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3420 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3662 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 3421 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3663 .port_set_ether_type = mv88e6351_port_set_ether_type, 3422 .port_set_ether_type = mv88e6351_port_set_ether_type,
3664 .port_jumbo_config = mv88e6165_port_jumbo_config, 3423 .port_jumbo_config = mv88e6165_port_jumbo_config,
3665 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 3424 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
3666 .port_pause_config = mv88e6097_port_pause_config, 3425 .port_pause_config = mv88e6097_port_pause_config,
3426 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3427 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3667 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3428 .stats_snapshot = mv88e6320_g1_stats_snapshot,
3668 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 3429 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
3669 .stats_get_strings = mv88e6095_stats_get_strings, 3430 .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3688,11 +3449,13 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
3688 .port_set_speed = mv88e6390_port_set_speed, 3449 .port_set_speed = mv88e6390_port_set_speed,
3689 .port_tag_remap = mv88e6095_port_tag_remap, 3450 .port_tag_remap = mv88e6095_port_tag_remap,
3690 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3451 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3691 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 3452 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3692 .port_set_ether_type = mv88e6351_port_set_ether_type, 3453 .port_set_ether_type = mv88e6351_port_set_ether_type,
3693 .port_jumbo_config = mv88e6165_port_jumbo_config, 3454 .port_jumbo_config = mv88e6165_port_jumbo_config,
3694 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 3455 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
3695 .port_pause_config = mv88e6097_port_pause_config, 3456 .port_pause_config = mv88e6097_port_pause_config,
3457 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3458 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3696 .stats_snapshot = mv88e6390_g1_stats_snapshot, 3459 .stats_snapshot = mv88e6390_g1_stats_snapshot,
3697 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3460 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
3698 .stats_get_strings = mv88e6320_stats_get_strings, 3461 .stats_get_strings = mv88e6320_stats_get_strings,
@@ -3717,11 +3480,13 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
3717 .port_set_speed = mv88e6390_port_set_speed, 3480 .port_set_speed = mv88e6390_port_set_speed,
3718 .port_tag_remap = mv88e6095_port_tag_remap, 3481 .port_tag_remap = mv88e6095_port_tag_remap,
3719 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3482 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3720 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 3483 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3721 .port_set_ether_type = mv88e6351_port_set_ether_type, 3484 .port_set_ether_type = mv88e6351_port_set_ether_type,
3722 .port_jumbo_config = mv88e6165_port_jumbo_config, 3485 .port_jumbo_config = mv88e6165_port_jumbo_config,
3723 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 3486 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
3724 .port_pause_config = mv88e6097_port_pause_config, 3487 .port_pause_config = mv88e6097_port_pause_config,
3488 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3489 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3725 .stats_snapshot = mv88e6390_g1_stats_snapshot, 3490 .stats_snapshot = mv88e6390_g1_stats_snapshot,
3726 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3491 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
3727 .stats_get_strings = mv88e6320_stats_get_strings, 3492 .stats_get_strings = mv88e6320_stats_get_strings,
@@ -3746,12 +3511,14 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
3746 .port_set_speed = mv88e6390_port_set_speed, 3511 .port_set_speed = mv88e6390_port_set_speed,
3747 .port_tag_remap = mv88e6390_port_tag_remap, 3512 .port_tag_remap = mv88e6390_port_tag_remap,
3748 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3513 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3749 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 3514 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3750 .port_set_ether_type = mv88e6351_port_set_ether_type, 3515 .port_set_ether_type = mv88e6351_port_set_ether_type,
3751 .port_jumbo_config = mv88e6165_port_jumbo_config, 3516 .port_jumbo_config = mv88e6165_port_jumbo_config,
3752 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 3517 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
3753 .port_pause_config = mv88e6390_port_pause_config, 3518 .port_pause_config = mv88e6390_port_pause_config,
3754 .port_set_cmode = mv88e6390x_port_set_cmode, 3519 .port_set_cmode = mv88e6390x_port_set_cmode,
3520 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3521 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3755 .stats_snapshot = mv88e6390_g1_stats_snapshot, 3522 .stats_snapshot = mv88e6390_g1_stats_snapshot,
3756 .stats_set_histogram = mv88e6390_g1_stats_set_histogram, 3523 .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
3757 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3524 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3777,11 +3544,13 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
3777 .port_set_speed = mv88e6390x_port_set_speed, 3544 .port_set_speed = mv88e6390x_port_set_speed,
3778 .port_tag_remap = mv88e6390_port_tag_remap, 3545 .port_tag_remap = mv88e6390_port_tag_remap,
3779 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3546 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3780 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 3547 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3781 .port_set_ether_type = mv88e6351_port_set_ether_type, 3548 .port_set_ether_type = mv88e6351_port_set_ether_type,
3782 .port_jumbo_config = mv88e6165_port_jumbo_config, 3549 .port_jumbo_config = mv88e6165_port_jumbo_config,
3783 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 3550 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
3784 .port_pause_config = mv88e6390_port_pause_config, 3551 .port_pause_config = mv88e6390_port_pause_config,
3552 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3553 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3785 .stats_snapshot = mv88e6390_g1_stats_snapshot, 3554 .stats_snapshot = mv88e6390_g1_stats_snapshot,
3786 .stats_set_histogram = mv88e6390_g1_stats_set_histogram, 3555 .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
3787 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3556 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3807,9 +3576,11 @@ static const struct mv88e6xxx_ops mv88e6391_ops = {
3807 .port_set_speed = mv88e6390_port_set_speed, 3576 .port_set_speed = mv88e6390_port_set_speed,
3808 .port_tag_remap = mv88e6390_port_tag_remap, 3577 .port_tag_remap = mv88e6390_port_tag_remap,
3809 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3578 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3810 .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, 3579 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3811 .port_set_ether_type = mv88e6351_port_set_ether_type, 3580 .port_set_ether_type = mv88e6351_port_set_ether_type,
3812 .port_pause_config = mv88e6390_port_pause_config, 3581 .port_pause_config = mv88e6390_port_pause_config,
3582 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3583 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3813 .stats_snapshot = mv88e6390_g1_stats_snapshot, 3584 .stats_snapshot = mv88e6390_g1_stats_snapshot,
3814 .stats_set_histogram = mv88e6390_g1_stats_set_histogram, 3585 .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
3815 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3586 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3822,22 +3593,6 @@ static const struct mv88e6xxx_ops mv88e6391_ops = {
3822 .reset = mv88e6352_g1_reset, 3593 .reset = mv88e6352_g1_reset,
3823}; 3594};
3824 3595
3825static int mv88e6xxx_verify_madatory_ops(struct mv88e6xxx_chip *chip,
3826 const struct mv88e6xxx_ops *ops)
3827{
3828 if (!ops->port_set_frame_mode) {
3829 dev_err(chip->dev, "Missing port_set_frame_mode");
3830 return -EINVAL;
3831 }
3832
3833 if (!ops->port_set_egress_unknowns) {
3834 dev_err(chip->dev, "Missing port_set_egress_mode");
3835 return -EINVAL;
3836 }
3837
3838 return 0;
3839}
3840
3841static const struct mv88e6xxx_info mv88e6xxx_table[] = { 3596static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3842 [MV88E6085] = { 3597 [MV88E6085] = {
3843 .prod_num = PORT_SWITCH_ID_PROD_NUM_6085, 3598 .prod_num = PORT_SWITCH_ID_PROD_NUM_6085,
@@ -3849,6 +3604,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3849 .global1_addr = 0x1b, 3604 .global1_addr = 0x1b,
3850 .age_time_coeff = 15000, 3605 .age_time_coeff = 15000,
3851 .g1_irqs = 8, 3606 .g1_irqs = 8,
3607 .atu_move_port_mask = 0xf,
3852 .tag_protocol = DSA_TAG_PROTO_DSA, 3608 .tag_protocol = DSA_TAG_PROTO_DSA,
3853 .flags = MV88E6XXX_FLAGS_FAMILY_6097, 3609 .flags = MV88E6XXX_FLAGS_FAMILY_6097,
3854 .ops = &mv88e6085_ops, 3610 .ops = &mv88e6085_ops,
@@ -3864,6 +3620,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3864 .global1_addr = 0x1b, 3620 .global1_addr = 0x1b,
3865 .age_time_coeff = 15000, 3621 .age_time_coeff = 15000,
3866 .g1_irqs = 8, 3622 .g1_irqs = 8,
3623 .atu_move_port_mask = 0xf,
3867 .tag_protocol = DSA_TAG_PROTO_DSA, 3624 .tag_protocol = DSA_TAG_PROTO_DSA,
3868 .flags = MV88E6XXX_FLAGS_FAMILY_6095, 3625 .flags = MV88E6XXX_FLAGS_FAMILY_6095,
3869 .ops = &mv88e6095_ops, 3626 .ops = &mv88e6095_ops,
@@ -3879,6 +3636,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3879 .global1_addr = 0x1b, 3636 .global1_addr = 0x1b,
3880 .age_time_coeff = 15000, 3637 .age_time_coeff = 15000,
3881 .g1_irqs = 8, 3638 .g1_irqs = 8,
3639 .atu_move_port_mask = 0xf,
3882 .tag_protocol = DSA_TAG_PROTO_EDSA, 3640 .tag_protocol = DSA_TAG_PROTO_EDSA,
3883 .flags = MV88E6XXX_FLAGS_FAMILY_6097, 3641 .flags = MV88E6XXX_FLAGS_FAMILY_6097,
3884 .ops = &mv88e6097_ops, 3642 .ops = &mv88e6097_ops,
@@ -3894,6 +3652,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3894 .global1_addr = 0x1b, 3652 .global1_addr = 0x1b,
3895 .age_time_coeff = 15000, 3653 .age_time_coeff = 15000,
3896 .g1_irqs = 9, 3654 .g1_irqs = 9,
3655 .atu_move_port_mask = 0xf,
3897 .tag_protocol = DSA_TAG_PROTO_DSA, 3656 .tag_protocol = DSA_TAG_PROTO_DSA,
3898 .flags = MV88E6XXX_FLAGS_FAMILY_6165, 3657 .flags = MV88E6XXX_FLAGS_FAMILY_6165,
3899 .ops = &mv88e6123_ops, 3658 .ops = &mv88e6123_ops,
@@ -3909,6 +3668,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3909 .global1_addr = 0x1b, 3668 .global1_addr = 0x1b,
3910 .age_time_coeff = 15000, 3669 .age_time_coeff = 15000,
3911 .g1_irqs = 9, 3670 .g1_irqs = 9,
3671 .atu_move_port_mask = 0xf,
3912 .tag_protocol = DSA_TAG_PROTO_DSA, 3672 .tag_protocol = DSA_TAG_PROTO_DSA,
3913 .flags = MV88E6XXX_FLAGS_FAMILY_6185, 3673 .flags = MV88E6XXX_FLAGS_FAMILY_6185,
3914 .ops = &mv88e6131_ops, 3674 .ops = &mv88e6131_ops,
@@ -3924,6 +3684,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3924 .global1_addr = 0x1b, 3684 .global1_addr = 0x1b,
3925 .age_time_coeff = 15000, 3685 .age_time_coeff = 15000,
3926 .g1_irqs = 9, 3686 .g1_irqs = 9,
3687 .atu_move_port_mask = 0xf,
3927 .tag_protocol = DSA_TAG_PROTO_DSA, 3688 .tag_protocol = DSA_TAG_PROTO_DSA,
3928 .flags = MV88E6XXX_FLAGS_FAMILY_6165, 3689 .flags = MV88E6XXX_FLAGS_FAMILY_6165,
3929 .ops = &mv88e6161_ops, 3690 .ops = &mv88e6161_ops,
@@ -3939,6 +3700,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3939 .global1_addr = 0x1b, 3700 .global1_addr = 0x1b,
3940 .age_time_coeff = 15000, 3701 .age_time_coeff = 15000,
3941 .g1_irqs = 9, 3702 .g1_irqs = 9,
3703 .atu_move_port_mask = 0xf,
3942 .tag_protocol = DSA_TAG_PROTO_DSA, 3704 .tag_protocol = DSA_TAG_PROTO_DSA,
3943 .flags = MV88E6XXX_FLAGS_FAMILY_6165, 3705 .flags = MV88E6XXX_FLAGS_FAMILY_6165,
3944 .ops = &mv88e6165_ops, 3706 .ops = &mv88e6165_ops,
@@ -3954,6 +3716,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3954 .global1_addr = 0x1b, 3716 .global1_addr = 0x1b,
3955 .age_time_coeff = 15000, 3717 .age_time_coeff = 15000,
3956 .g1_irqs = 9, 3718 .g1_irqs = 9,
3719 .atu_move_port_mask = 0xf,
3957 .tag_protocol = DSA_TAG_PROTO_EDSA, 3720 .tag_protocol = DSA_TAG_PROTO_EDSA,
3958 .flags = MV88E6XXX_FLAGS_FAMILY_6351, 3721 .flags = MV88E6XXX_FLAGS_FAMILY_6351,
3959 .ops = &mv88e6171_ops, 3722 .ops = &mv88e6171_ops,
@@ -3969,6 +3732,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3969 .global1_addr = 0x1b, 3732 .global1_addr = 0x1b,
3970 .age_time_coeff = 15000, 3733 .age_time_coeff = 15000,
3971 .g1_irqs = 9, 3734 .g1_irqs = 9,
3735 .atu_move_port_mask = 0xf,
3972 .tag_protocol = DSA_TAG_PROTO_EDSA, 3736 .tag_protocol = DSA_TAG_PROTO_EDSA,
3973 .flags = MV88E6XXX_FLAGS_FAMILY_6352, 3737 .flags = MV88E6XXX_FLAGS_FAMILY_6352,
3974 .ops = &mv88e6172_ops, 3738 .ops = &mv88e6172_ops,
@@ -3984,6 +3748,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3984 .global1_addr = 0x1b, 3748 .global1_addr = 0x1b,
3985 .age_time_coeff = 15000, 3749 .age_time_coeff = 15000,
3986 .g1_irqs = 9, 3750 .g1_irqs = 9,
3751 .atu_move_port_mask = 0xf,
3987 .tag_protocol = DSA_TAG_PROTO_EDSA, 3752 .tag_protocol = DSA_TAG_PROTO_EDSA,
3988 .flags = MV88E6XXX_FLAGS_FAMILY_6351, 3753 .flags = MV88E6XXX_FLAGS_FAMILY_6351,
3989 .ops = &mv88e6175_ops, 3754 .ops = &mv88e6175_ops,
@@ -3999,6 +3764,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3999 .global1_addr = 0x1b, 3764 .global1_addr = 0x1b,
4000 .age_time_coeff = 15000, 3765 .age_time_coeff = 15000,
4001 .g1_irqs = 9, 3766 .g1_irqs = 9,
3767 .atu_move_port_mask = 0xf,
4002 .tag_protocol = DSA_TAG_PROTO_EDSA, 3768 .tag_protocol = DSA_TAG_PROTO_EDSA,
4003 .flags = MV88E6XXX_FLAGS_FAMILY_6352, 3769 .flags = MV88E6XXX_FLAGS_FAMILY_6352,
4004 .ops = &mv88e6176_ops, 3770 .ops = &mv88e6176_ops,
@@ -4014,6 +3780,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
4014 .global1_addr = 0x1b, 3780 .global1_addr = 0x1b,
4015 .age_time_coeff = 15000, 3781 .age_time_coeff = 15000,
4016 .g1_irqs = 8, 3782 .g1_irqs = 8,
3783 .atu_move_port_mask = 0xf,
4017 .tag_protocol = DSA_TAG_PROTO_EDSA, 3784 .tag_protocol = DSA_TAG_PROTO_EDSA,
4018 .flags = MV88E6XXX_FLAGS_FAMILY_6185, 3785 .flags = MV88E6XXX_FLAGS_FAMILY_6185,
4019 .ops = &mv88e6185_ops, 3786 .ops = &mv88e6185_ops,
@@ -4030,6 +3797,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
4030 .tag_protocol = DSA_TAG_PROTO_DSA, 3797 .tag_protocol = DSA_TAG_PROTO_DSA,
4031 .age_time_coeff = 3750, 3798 .age_time_coeff = 3750,
4032 .g1_irqs = 9, 3799 .g1_irqs = 9,
3800 .atu_move_port_mask = 0x1f,
4033 .flags = MV88E6XXX_FLAGS_FAMILY_6390, 3801 .flags = MV88E6XXX_FLAGS_FAMILY_6390,
4034 .ops = &mv88e6190_ops, 3802 .ops = &mv88e6190_ops,
4035 }, 3803 },
@@ -4044,6 +3812,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
4044 .global1_addr = 0x1b, 3812 .global1_addr = 0x1b,
4045 .age_time_coeff = 3750, 3813 .age_time_coeff = 3750,
4046 .g1_irqs = 9, 3814 .g1_irqs = 9,
3815 .atu_move_port_mask = 0x1f,
4047 .tag_protocol = DSA_TAG_PROTO_DSA, 3816 .tag_protocol = DSA_TAG_PROTO_DSA,
4048 .flags = MV88E6XXX_FLAGS_FAMILY_6390, 3817 .flags = MV88E6XXX_FLAGS_FAMILY_6390,
4049 .ops = &mv88e6190x_ops, 3818 .ops = &mv88e6190x_ops,
@@ -4059,6 +3828,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
4059 .global1_addr = 0x1b, 3828 .global1_addr = 0x1b,
4060 .age_time_coeff = 3750, 3829 .age_time_coeff = 3750,
4061 .g1_irqs = 9, 3830 .g1_irqs = 9,
3831 .atu_move_port_mask = 0x1f,
4062 .tag_protocol = DSA_TAG_PROTO_DSA, 3832 .tag_protocol = DSA_TAG_PROTO_DSA,
4063 .flags = MV88E6XXX_FLAGS_FAMILY_6390, 3833 .flags = MV88E6XXX_FLAGS_FAMILY_6390,
4064 .ops = &mv88e6391_ops, 3834 .ops = &mv88e6391_ops,
@@ -4074,6 +3844,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
4074 .global1_addr = 0x1b, 3844 .global1_addr = 0x1b,
4075 .age_time_coeff = 15000, 3845 .age_time_coeff = 15000,
4076 .g1_irqs = 9, 3846 .g1_irqs = 9,
3847 .atu_move_port_mask = 0xf,
4077 .tag_protocol = DSA_TAG_PROTO_EDSA, 3848 .tag_protocol = DSA_TAG_PROTO_EDSA,
4078 .flags = MV88E6XXX_FLAGS_FAMILY_6352, 3849 .flags = MV88E6XXX_FLAGS_FAMILY_6352,
4079 .ops = &mv88e6240_ops, 3850 .ops = &mv88e6240_ops,
@@ -4089,6 +3860,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
4089 .global1_addr = 0x1b, 3860 .global1_addr = 0x1b,
4090 .age_time_coeff = 3750, 3861 .age_time_coeff = 3750,
4091 .g1_irqs = 9, 3862 .g1_irqs = 9,
3863 .atu_move_port_mask = 0x1f,
4092 .tag_protocol = DSA_TAG_PROTO_DSA, 3864 .tag_protocol = DSA_TAG_PROTO_DSA,
4093 .flags = MV88E6XXX_FLAGS_FAMILY_6390, 3865 .flags = MV88E6XXX_FLAGS_FAMILY_6390,
4094 .ops = &mv88e6290_ops, 3866 .ops = &mv88e6290_ops,
@@ -4104,6 +3876,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
4104 .global1_addr = 0x1b, 3876 .global1_addr = 0x1b,
4105 .age_time_coeff = 15000, 3877 .age_time_coeff = 15000,
4106 .g1_irqs = 8, 3878 .g1_irqs = 8,
3879 .atu_move_port_mask = 0xf,
4107 .tag_protocol = DSA_TAG_PROTO_EDSA, 3880 .tag_protocol = DSA_TAG_PROTO_EDSA,
4108 .flags = MV88E6XXX_FLAGS_FAMILY_6320, 3881 .flags = MV88E6XXX_FLAGS_FAMILY_6320,
4109 .ops = &mv88e6320_ops, 3882 .ops = &mv88e6320_ops,
@@ -4119,6 +3892,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
4119 .global1_addr = 0x1b, 3892 .global1_addr = 0x1b,
4120 .age_time_coeff = 15000, 3893 .age_time_coeff = 15000,
4121 .g1_irqs = 8, 3894 .g1_irqs = 8,
3895 .atu_move_port_mask = 0xf,
4122 .tag_protocol = DSA_TAG_PROTO_EDSA, 3896 .tag_protocol = DSA_TAG_PROTO_EDSA,
4123 .flags = MV88E6XXX_FLAGS_FAMILY_6320, 3897 .flags = MV88E6XXX_FLAGS_FAMILY_6320,
4124 .ops = &mv88e6321_ops, 3898 .ops = &mv88e6321_ops,
@@ -4133,6 +3907,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
4133 .port_base_addr = 0x10, 3907 .port_base_addr = 0x10,
4134 .global1_addr = 0x1b, 3908 .global1_addr = 0x1b,
4135 .age_time_coeff = 3750, 3909 .age_time_coeff = 3750,
3910 .atu_move_port_mask = 0x1f,
4136 .tag_protocol = DSA_TAG_PROTO_EDSA, 3911 .tag_protocol = DSA_TAG_PROTO_EDSA,
4137 .flags = MV88E6XXX_FLAGS_FAMILY_6341, 3912 .flags = MV88E6XXX_FLAGS_FAMILY_6341,
4138 .ops = &mv88e6141_ops, 3913 .ops = &mv88e6141_ops,
@@ -4147,6 +3922,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
4147 .port_base_addr = 0x10, 3922 .port_base_addr = 0x10,
4148 .global1_addr = 0x1b, 3923 .global1_addr = 0x1b,
4149 .age_time_coeff = 3750, 3924 .age_time_coeff = 3750,
3925 .atu_move_port_mask = 0x1f,
4150 .tag_protocol = DSA_TAG_PROTO_EDSA, 3926 .tag_protocol = DSA_TAG_PROTO_EDSA,
4151 .flags = MV88E6XXX_FLAGS_FAMILY_6341, 3927 .flags = MV88E6XXX_FLAGS_FAMILY_6341,
4152 .ops = &mv88e6341_ops, 3928 .ops = &mv88e6341_ops,
@@ -4162,6 +3938,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
4162 .global1_addr = 0x1b, 3938 .global1_addr = 0x1b,
4163 .age_time_coeff = 15000, 3939 .age_time_coeff = 15000,
4164 .g1_irqs = 9, 3940 .g1_irqs = 9,
3941 .atu_move_port_mask = 0xf,
4165 .tag_protocol = DSA_TAG_PROTO_EDSA, 3942 .tag_protocol = DSA_TAG_PROTO_EDSA,
4166 .flags = MV88E6XXX_FLAGS_FAMILY_6351, 3943 .flags = MV88E6XXX_FLAGS_FAMILY_6351,
4167 .ops = &mv88e6350_ops, 3944 .ops = &mv88e6350_ops,
@@ -4177,6 +3954,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
4177 .global1_addr = 0x1b, 3954 .global1_addr = 0x1b,
4178 .age_time_coeff = 15000, 3955 .age_time_coeff = 15000,
4179 .g1_irqs = 9, 3956 .g1_irqs = 9,
3957 .atu_move_port_mask = 0xf,
4180 .tag_protocol = DSA_TAG_PROTO_EDSA, 3958 .tag_protocol = DSA_TAG_PROTO_EDSA,
4181 .flags = MV88E6XXX_FLAGS_FAMILY_6351, 3959 .flags = MV88E6XXX_FLAGS_FAMILY_6351,
4182 .ops = &mv88e6351_ops, 3960 .ops = &mv88e6351_ops,
@@ -4192,6 +3970,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
4192 .global1_addr = 0x1b, 3970 .global1_addr = 0x1b,
4193 .age_time_coeff = 15000, 3971 .age_time_coeff = 15000,
4194 .g1_irqs = 9, 3972 .g1_irqs = 9,
3973 .atu_move_port_mask = 0xf,
4195 .tag_protocol = DSA_TAG_PROTO_EDSA, 3974 .tag_protocol = DSA_TAG_PROTO_EDSA,
4196 .flags = MV88E6XXX_FLAGS_FAMILY_6352, 3975 .flags = MV88E6XXX_FLAGS_FAMILY_6352,
4197 .ops = &mv88e6352_ops, 3976 .ops = &mv88e6352_ops,
@@ -4206,6 +3985,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
4206 .global1_addr = 0x1b, 3985 .global1_addr = 0x1b,
4207 .age_time_coeff = 3750, 3986 .age_time_coeff = 3750,
4208 .g1_irqs = 9, 3987 .g1_irqs = 9,
3988 .atu_move_port_mask = 0x1f,
4209 .tag_protocol = DSA_TAG_PROTO_DSA, 3989 .tag_protocol = DSA_TAG_PROTO_DSA,
4210 .flags = MV88E6XXX_FLAGS_FAMILY_6390, 3990 .flags = MV88E6XXX_FLAGS_FAMILY_6390,
4211 .ops = &mv88e6390_ops, 3991 .ops = &mv88e6390_ops,
@@ -4220,6 +4000,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
4220 .global1_addr = 0x1b, 4000 .global1_addr = 0x1b,
4221 .age_time_coeff = 3750, 4001 .age_time_coeff = 3750,
4222 .g1_irqs = 9, 4002 .g1_irqs = 9,
4003 .atu_move_port_mask = 0x1f,
4223 .tag_protocol = DSA_TAG_PROTO_DSA, 4004 .tag_protocol = DSA_TAG_PROTO_DSA,
4224 .flags = MV88E6XXX_FLAGS_FAMILY_6390, 4005 .flags = MV88E6XXX_FLAGS_FAMILY_6390,
4225 .ops = &mv88e6390x_ops, 4006 .ops = &mv88e6390x_ops,
@@ -4472,6 +4253,8 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
4472 4253
4473 ds->priv = chip; 4254 ds->priv = chip;
4474 ds->ops = &mv88e6xxx_switch_ops; 4255 ds->ops = &mv88e6xxx_switch_ops;
4256 ds->ageing_time_min = chip->info->age_time_coeff;
4257 ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX;
4475 4258
4476 dev_set_drvdata(dev, ds); 4259 dev_set_drvdata(dev, ds);
4477 4260
@@ -4502,10 +4285,6 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
4502 4285
4503 chip->info = compat_info; 4286 chip->info = compat_info;
4504 4287
4505 err = mv88e6xxx_verify_madatory_ops(chip, chip->info->ops);
4506 if (err)
4507 return err;
4508
4509 err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr); 4288 err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
4510 if (err) 4289 if (err)
4511 return err; 4290 return err;
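
For context, the two ds->ageing_time_* assignments in the chip.c hunk above derive the bridge ageing-time bounds from the per-chip coefficient: the minimum is one coefficient step and the maximum is 255 steps (U8_MAX), matching the 8-bit AgeTime field programmed in global1_atu.c below. A standalone arithmetic sketch (illustrative only, not part of the patch; the coefficient values are copied from the mv88e6xxx_table[] entries above):

#include <stdio.h>

int main(void)
{
	/* age_time_coeff values seen in mv88e6xxx_table[]: ms per AgeTime step */
	unsigned int coeffs[] = { 15000, 3750 };

	for (int i = 0; i < 2; i++) {
		unsigned int min = coeffs[i];		/* 0x01 * coeff */
		unsigned int max = coeffs[i] * 255;	/* 0xff * coeff (U8_MAX) */

		printf("coeff %5u ms: ageing time %u ms .. %u ms\n",
		       coeffs[i], min, max);
	}
	return 0;
}
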
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 1aec7382c02d..eece7418e67d 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -38,4 +38,15 @@ int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
38int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port); 38int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
39int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); 39int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
40 40
41int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all);
42int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
43 unsigned int msecs);
44int mv88e6xxx_g1_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
45 struct mv88e6xxx_atu_entry *entry);
46int mv88e6xxx_g1_atu_loadpurge(struct mv88e6xxx_chip *chip, u16 fid,
47 struct mv88e6xxx_atu_entry *entry);
48int mv88e6xxx_g1_atu_flush(struct mv88e6xxx_chip *chip, u16 fid, bool all);
49int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
50 bool all);
51
41#endif /* _MV88E6XXX_GLOBAL1_H */ 52#endif /* _MV88E6XXX_GLOBAL1_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
new file mode 100644
index 000000000000..120b7f41a735
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -0,0 +1,300 @@
1/*
2 * Marvell 88E6xxx Address Translation Unit (ATU) support
3 *
4 * Copyright (c) 2008 Marvell Semiconductor
5 * Copyright (c) 2017 Savoir-faire Linux, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#include "mv88e6xxx.h"
14#include "global1.h"
15
16/* Offset 0x01: ATU FID Register */
17
18static int mv88e6xxx_g1_atu_fid_write(struct mv88e6xxx_chip *chip, u16 fid)
19{
20 return mv88e6xxx_g1_write(chip, GLOBAL_ATU_FID, fid & 0xfff);
21}
22
23/* Offset 0x0A: ATU Control Register */
24
25int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all)
26{
27 u16 val;
28 int err;
29
30 err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
31 if (err)
32 return err;
33
34 if (learn2all)
35 val |= GLOBAL_ATU_CONTROL_LEARN2ALL;
36 else
37 val &= ~GLOBAL_ATU_CONTROL_LEARN2ALL;
38
39 return mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
40}
41
42int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
43 unsigned int msecs)
44{
45 const unsigned int coeff = chip->info->age_time_coeff;
46 const unsigned int min = 0x01 * coeff;
47 const unsigned int max = 0xff * coeff;
48 u8 age_time;
49 u16 val;
50 int err;
51
52 if (msecs < min || msecs > max)
53 return -ERANGE;
54
55 /* Round to nearest multiple of coeff */
56 age_time = (msecs + coeff / 2) / coeff;
57
58 err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
59 if (err)
60 return err;
61
62 /* AgeTime is 11:4 bits */
63 val &= ~0xff0;
64 val |= age_time << 4;
65
66 return mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
67}
68
69/* Offset 0x0B: ATU Operation Register */
70
71static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip)
72{
73 return mv88e6xxx_g1_wait(chip, GLOBAL_ATU_OP, GLOBAL_ATU_OP_BUSY);
74}
75
76static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
77{
78 u16 val;
79 int err;
80
81 /* FID bits are dispatched all around gradually as more are supported */
82 if (mv88e6xxx_num_databases(chip) > 256) {
83 err = mv88e6xxx_g1_atu_fid_write(chip, fid);
84 if (err)
85 return err;
86 } else {
87 if (mv88e6xxx_num_databases(chip) > 16) {
88 /* ATU DBNum[7:4] are located in ATU Control 15:12 */
89 err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
90 if (err)
91 return err;
92
93 val = (val & 0x0fff) | ((fid << 8) & 0xf000);
94 err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
95 if (err)
96 return err;
97 }
98
99 /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
100 op |= fid & 0xf;
101 }
102
103 err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_OP, op);
104 if (err)
105 return err;
106
107 return mv88e6xxx_g1_atu_op_wait(chip);
108}
109
110/* Offset 0x0C: ATU Data Register */
111
112static int mv88e6xxx_g1_atu_data_read(struct mv88e6xxx_chip *chip,
113 struct mv88e6xxx_atu_entry *entry)
114{
115 u16 val;
116 int err;
117
118 err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_DATA, &val);
119 if (err)
120 return err;
121
122 entry->state = val & 0xf;
123 if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
124 if (val & GLOBAL_ATU_DATA_TRUNK)
125 entry->trunk = true;
126
127 entry->portvec = (val >> 4) & mv88e6xxx_port_mask(chip);
128 }
129
130 return 0;
131}
132
133static int mv88e6xxx_g1_atu_data_write(struct mv88e6xxx_chip *chip,
134 struct mv88e6xxx_atu_entry *entry)
135{
136 u16 data = entry->state & 0xf;
137
138 if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
139 if (entry->trunk)
140 data |= GLOBAL_ATU_DATA_TRUNK;
141
142 data |= (entry->portvec & mv88e6xxx_port_mask(chip)) << 4;
143 }
144
145 return mv88e6xxx_g1_write(chip, GLOBAL_ATU_DATA, data);
146}
147
148/* Offset 0x0D: ATU MAC Address Register Bytes 0 & 1
149 * Offset 0x0E: ATU MAC Address Register Bytes 2 & 3
150 * Offset 0x0F: ATU MAC Address Register Bytes 4 & 5
151 */
152
153static int mv88e6xxx_g1_atu_mac_read(struct mv88e6xxx_chip *chip,
154 struct mv88e6xxx_atu_entry *entry)
155{
156 u16 val;
157 int i, err;
158
159 for (i = 0; i < 3; i++) {
160 err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_MAC_01 + i, &val);
161 if (err)
162 return err;
163
164 entry->mac[i * 2] = val >> 8;
165 entry->mac[i * 2 + 1] = val & 0xff;
166 }
167
168 return 0;
169}
170
171static int mv88e6xxx_g1_atu_mac_write(struct mv88e6xxx_chip *chip,
172 struct mv88e6xxx_atu_entry *entry)
173{
174 u16 val;
175 int i, err;
176
177 for (i = 0; i < 3; i++) {
178 val = (entry->mac[i * 2] << 8) | entry->mac[i * 2 + 1];
179 err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_MAC_01 + i, val);
180 if (err)
181 return err;
182 }
183
184 return 0;
185}
186
187/* Address Translation Unit operations */
188
189int mv88e6xxx_g1_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
190 struct mv88e6xxx_atu_entry *entry)
191{
192 int err;
193
194 err = mv88e6xxx_g1_atu_op_wait(chip);
195 if (err)
196 return err;
197
198 /* Write the MAC address to iterate from only once */
199 if (entry->state == GLOBAL_ATU_DATA_STATE_UNUSED) {
200 err = mv88e6xxx_g1_atu_mac_write(chip, entry);
201 if (err)
202 return err;
203 }
204
205 err = mv88e6xxx_g1_atu_op(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
206 if (err)
207 return err;
208
209 err = mv88e6xxx_g1_atu_data_read(chip, entry);
210 if (err)
211 return err;
212
213 return mv88e6xxx_g1_atu_mac_read(chip, entry);
214}
215
216int mv88e6xxx_g1_atu_loadpurge(struct mv88e6xxx_chip *chip, u16 fid,
217 struct mv88e6xxx_atu_entry *entry)
218{
219 int err;
220
221 err = mv88e6xxx_g1_atu_op_wait(chip);
222 if (err)
223 return err;
224
225 err = mv88e6xxx_g1_atu_mac_write(chip, entry);
226 if (err)
227 return err;
228
229 err = mv88e6xxx_g1_atu_data_write(chip, entry);
230 if (err)
231 return err;
232
233 return mv88e6xxx_g1_atu_op(chip, fid, GLOBAL_ATU_OP_LOAD_DB);
234}
235
236static int mv88e6xxx_g1_atu_flushmove(struct mv88e6xxx_chip *chip, u16 fid,
237 struct mv88e6xxx_atu_entry *entry,
238 bool all)
239{
240 u16 op;
241 int err;
242
243 err = mv88e6xxx_g1_atu_op_wait(chip);
244 if (err)
245 return err;
246
247 err = mv88e6xxx_g1_atu_data_write(chip, entry);
248 if (err)
249 return err;
250
251 /* Flush/Move all or non-static entries from all or a given database */
252 if (all && fid)
253 op = GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB;
254 else if (fid)
255 op = GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
256 else if (all)
257 op = GLOBAL_ATU_OP_FLUSH_MOVE_ALL;
258 else
259 op = GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
260
261 return mv88e6xxx_g1_atu_op(chip, fid, op);
262}
263
264int mv88e6xxx_g1_atu_flush(struct mv88e6xxx_chip *chip, u16 fid, bool all)
265{
266 struct mv88e6xxx_atu_entry entry = {
267 .state = 0, /* Null EntryState means Flush */
268 };
269
270 return mv88e6xxx_g1_atu_flushmove(chip, fid, &entry, all);
271}
272
273static int mv88e6xxx_g1_atu_move(struct mv88e6xxx_chip *chip, u16 fid,
274 int from_port, int to_port, bool all)
275{
276 struct mv88e6xxx_atu_entry entry = { 0 };
277 unsigned long mask;
278 int shift;
279
280 if (!chip->info->atu_move_port_mask)
281 return -EOPNOTSUPP;
282
283 mask = chip->info->atu_move_port_mask;
284 shift = bitmap_weight(&mask, 16);
285
286 entry.state = 0xf, /* Full EntryState means Move */
287 entry.portvec = from_port & mask;
288 entry.portvec |= (to_port & mask) << shift;
289
290 return mv88e6xxx_g1_atu_flushmove(chip, fid, &entry, all);
291}
292
293int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
294 bool all)
295{
296 int from_port = port;
297 int to_port = chip->info->atu_move_port_mask;
298
299 return mv88e6xxx_g1_atu_move(chip, fid, from_port, to_port, all);
300}
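
The ATU Move encoding added above packs the source and destination ports into the PortVec field: FromPort occupies the low bits, ToPort the next field, and the width of each field is the population count of atu_move_port_mask (4 bits on most chips, 5 bits on the 6390 family). Writing the all-ones value as ToPort is what turns a Move into a Remove. The following userspace sketch (illustrative only; __builtin_popcount stands in for the kernel's bitmap_weight()) reproduces that packing:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the portvec packing used by mv88e6xxx_g1_atu_move()/_remove() */
static uint16_t atu_move_portvec(uint16_t mask, int from_port, int to_port)
{
	int shift = __builtin_popcount(mask);	/* bitmap_weight() equivalent */

	return (uint16_t)((from_port & mask) | ((to_port & mask) << shift));
}

int main(void)
{
	uint16_t mask = 0x1f;	/* 6390 family: 5-bit FromPort/ToPort fields */

	/* Move entries from port 2 to port 7 */
	printf("move 2->7: 0x%03x\n", atu_move_portvec(mask, 2, 7));
	/* Remove = move to the all-ones "port" */
	printf("remove p2: 0x%03x\n", atu_move_portvec(mask, 2, mask));
	return 0;
}
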
diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
index 6033f2f6260a..75be2c339a49 100644
--- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
@@ -132,18 +132,19 @@
132#define PORT_CONTROL_TAG_IF_BOTH BIT(6) 132#define PORT_CONTROL_TAG_IF_BOTH BIT(6)
133#define PORT_CONTROL_USE_IP BIT(5) 133#define PORT_CONTROL_USE_IP BIT(5)
134#define PORT_CONTROL_USE_TAG BIT(4) 134#define PORT_CONTROL_USE_TAG BIT(4)
135#define PORT_CONTROL_FORWARD_UNKNOWN_MC BIT(3)
136#define PORT_CONTROL_FORWARD_UNKNOWN BIT(2) 135#define PORT_CONTROL_FORWARD_UNKNOWN BIT(2)
137#define PORT_CONTROL_NOT_EGRESS_UNKNOWN_DA (0x0 << 2) 136#define PORT_CONTROL_EGRESS_FLOODS_MASK (0x3 << 2)
138#define PORT_CONTROL_NOT_EGRESS_UNKNOWN_MULTICAST_DA (0x1 << 2) 137#define PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_DA (0x0 << 2)
139#define PORT_CONTROL_NOT_EGRESS_UNKNOWN_UNITCAST_DA (0x2 << 2) 138#define PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_MC_DA (0x1 << 2)
140#define PORT_CONTROL_EGRESS_ALL_UNKNOWN_DA (0x3 << 2) 139#define PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_UC_DA (0x2 << 2)
140#define PORT_CONTROL_EGRESS_FLOODS_ALL_UNKNOWN_DA (0x3 << 2)
141#define PORT_CONTROL_STATE_MASK 0x03 141#define PORT_CONTROL_STATE_MASK 0x03
142#define PORT_CONTROL_STATE_DISABLED 0x00 142#define PORT_CONTROL_STATE_DISABLED 0x00
143#define PORT_CONTROL_STATE_BLOCKING 0x01 143#define PORT_CONTROL_STATE_BLOCKING 0x01
144#define PORT_CONTROL_STATE_LEARNING 0x02 144#define PORT_CONTROL_STATE_LEARNING 0x02
145#define PORT_CONTROL_STATE_FORWARDING 0x03 145#define PORT_CONTROL_STATE_FORWARDING 0x03
146#define PORT_CONTROL_1 0x05 146#define PORT_CONTROL_1 0x05
147#define PORT_CONTROL_1_MESSAGE_PORT BIT(15)
147#define PORT_CONTROL_1_FID_11_4_MASK (0xff << 0) 148#define PORT_CONTROL_1_FID_11_4_MASK (0xff << 0)
148#define PORT_BASE_VLAN 0x06 149#define PORT_BASE_VLAN 0x06
149#define PORT_BASE_VLAN_FID_3_0_MASK (0xf << 12) 150#define PORT_BASE_VLAN_FID_3_0_MASK (0xf << 12)
@@ -166,7 +167,6 @@
166#define PORT_CONTROL_2_DISCARD_UNTAGGED BIT(8) 167#define PORT_CONTROL_2_DISCARD_UNTAGGED BIT(8)
167#define PORT_CONTROL_2_MAP_DA BIT(7) 168#define PORT_CONTROL_2_MAP_DA BIT(7)
168#define PORT_CONTROL_2_DEFAULT_FORWARD BIT(6) 169#define PORT_CONTROL_2_DEFAULT_FORWARD BIT(6)
169#define PORT_CONTROL_2_FORWARD_UNKNOWN BIT(6)
170#define PORT_CONTROL_2_EGRESS_MONITOR BIT(5) 170#define PORT_CONTROL_2_EGRESS_MONITOR BIT(5)
171#define PORT_CONTROL_2_INGRESS_MONITOR BIT(4) 171#define PORT_CONTROL_2_INGRESS_MONITOR BIT(4)
172#define PORT_CONTROL_2_UPSTREAM_MASK 0x0f 172#define PORT_CONTROL_2_UPSTREAM_MASK 0x0f
@@ -181,6 +181,7 @@
181#define PORT_ATU_CONTROL 0x0c 181#define PORT_ATU_CONTROL 0x0c
182#define PORT_PRI_OVERRIDE 0x0d 182#define PORT_PRI_OVERRIDE 0x0d
183#define PORT_ETH_TYPE 0x0f 183#define PORT_ETH_TYPE 0x0f
184#define PORT_ETH_TYPE_DEFAULT 0x9100
184#define PORT_IN_DISCARD_LO 0x10 185#define PORT_IN_DISCARD_LO 0x10
185#define PORT_IN_DISCARD_HI 0x11 186#define PORT_IN_DISCARD_HI 0x11
186#define PORT_IN_FILTERED 0x12 187#define PORT_IN_FILTERED 0x12
@@ -551,7 +552,6 @@ enum mv88e6xxx_cap {
551 552
552#define MV88E6XXX_FLAG_SERDES BIT_ULL(MV88E6XXX_CAP_SERDES) 553#define MV88E6XXX_FLAG_SERDES BIT_ULL(MV88E6XXX_CAP_SERDES)
553 554
554#define MV88E6XXX_FLAG_G1_ATU_FID BIT_ULL(MV88E6XXX_CAP_G1_ATU_FID)
555#define MV88E6XXX_FLAG_G1_VTU_FID BIT_ULL(MV88E6XXX_CAP_G1_VTU_FID) 555#define MV88E6XXX_FLAG_G1_VTU_FID BIT_ULL(MV88E6XXX_CAP_G1_VTU_FID)
556 556
557#define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2) 557#define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2)
@@ -594,8 +594,7 @@ enum mv88e6xxx_cap {
594 MV88E6XXX_FLAGS_MULTI_CHIP) 594 MV88E6XXX_FLAGS_MULTI_CHIP)
595 595
596#define MV88E6XXX_FLAGS_FAMILY_6097 \ 596#define MV88E6XXX_FLAGS_FAMILY_6097 \
597 (MV88E6XXX_FLAG_G1_ATU_FID | \ 597 (MV88E6XXX_FLAG_G1_VTU_FID | \
598 MV88E6XXX_FLAG_G1_VTU_FID | \
599 MV88E6XXX_FLAG_GLOBAL2 | \ 598 MV88E6XXX_FLAG_GLOBAL2 | \
600 MV88E6XXX_FLAG_G2_INT | \ 599 MV88E6XXX_FLAG_G2_INT | \
601 MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ 600 MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
@@ -608,8 +607,7 @@ enum mv88e6xxx_cap {
608 MV88E6XXX_FLAGS_PVT) 607 MV88E6XXX_FLAGS_PVT)
609 608
610#define MV88E6XXX_FLAGS_FAMILY_6165 \ 609#define MV88E6XXX_FLAGS_FAMILY_6165 \
611 (MV88E6XXX_FLAG_G1_ATU_FID | \ 610 (MV88E6XXX_FLAG_G1_VTU_FID | \
612 MV88E6XXX_FLAG_G1_VTU_FID | \
613 MV88E6XXX_FLAG_GLOBAL2 | \ 611 MV88E6XXX_FLAG_GLOBAL2 | \
614 MV88E6XXX_FLAG_G2_INT | \ 612 MV88E6XXX_FLAG_G2_INT | \
615 MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ 613 MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
@@ -641,7 +639,6 @@ enum mv88e6xxx_cap {
641 639
642#define MV88E6XXX_FLAGS_FAMILY_6341 \ 640#define MV88E6XXX_FLAGS_FAMILY_6341 \
643 (MV88E6XXX_FLAG_EEE | \ 641 (MV88E6XXX_FLAG_EEE | \
644 MV88E6XXX_FLAG_G1_ATU_FID | \
645 MV88E6XXX_FLAG_G1_VTU_FID | \ 642 MV88E6XXX_FLAG_G1_VTU_FID | \
646 MV88E6XXX_FLAG_GLOBAL2 | \ 643 MV88E6XXX_FLAG_GLOBAL2 | \
647 MV88E6XXX_FLAG_G2_INT | \ 644 MV88E6XXX_FLAG_G2_INT | \
@@ -654,8 +651,7 @@ enum mv88e6xxx_cap {
654 MV88E6XXX_FLAGS_SERDES) 651 MV88E6XXX_FLAGS_SERDES)
655 652
656#define MV88E6XXX_FLAGS_FAMILY_6351 \ 653#define MV88E6XXX_FLAGS_FAMILY_6351 \
657 (MV88E6XXX_FLAG_G1_ATU_FID | \ 654 (MV88E6XXX_FLAG_G1_VTU_FID | \
658 MV88E6XXX_FLAG_G1_VTU_FID | \
659 MV88E6XXX_FLAG_GLOBAL2 | \ 655 MV88E6XXX_FLAG_GLOBAL2 | \
660 MV88E6XXX_FLAG_G2_INT | \ 656 MV88E6XXX_FLAG_G2_INT | \
661 MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ 657 MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
@@ -669,7 +665,6 @@ enum mv88e6xxx_cap {
669 665
670#define MV88E6XXX_FLAGS_FAMILY_6352 \ 666#define MV88E6XXX_FLAGS_FAMILY_6352 \
671 (MV88E6XXX_FLAG_EEE | \ 667 (MV88E6XXX_FLAG_EEE | \
672 MV88E6XXX_FLAG_G1_ATU_FID | \
673 MV88E6XXX_FLAG_G1_VTU_FID | \ 668 MV88E6XXX_FLAG_G1_VTU_FID | \
674 MV88E6XXX_FLAG_GLOBAL2 | \ 669 MV88E6XXX_FLAG_GLOBAL2 | \
675 MV88E6XXX_FLAG_G2_INT | \ 670 MV88E6XXX_FLAG_G2_INT | \
@@ -707,14 +702,18 @@ struct mv88e6xxx_info {
707 unsigned int g1_irqs; 702 unsigned int g1_irqs;
708 enum dsa_tag_protocol tag_protocol; 703 enum dsa_tag_protocol tag_protocol;
709 unsigned long long flags; 704 unsigned long long flags;
705
706 /* Mask for FromPort and ToPort value of PortVec used in ATU Move
707 * operation. 0 means that the ATU Move operation is not supported.
708 */
709 u8 atu_move_port_mask;
710 const struct mv88e6xxx_ops *ops; 710 const struct mv88e6xxx_ops *ops;
711}; 711};
712 712
713struct mv88e6xxx_atu_entry { 713struct mv88e6xxx_atu_entry {
714 u16 fid;
715 u8 state; 714 u8 state;
716 bool trunk; 715 bool trunk;
717 u16 portv_trunkid; 716 u16 portvec;
718 u8 mac[ETH_ALEN]; 717 u8 mac[ETH_ALEN];
719}; 718};
720 719
@@ -864,14 +863,16 @@ struct mv88e6xxx_ops {
864 863
865 int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port, 864 int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port,
866 enum mv88e6xxx_frame_mode mode); 865 enum mv88e6xxx_frame_mode mode);
867 int (*port_set_egress_unknowns)(struct mv88e6xxx_chip *chip, int port, 866 int (*port_set_egress_floods)(struct mv88e6xxx_chip *chip, int port,
868 bool on); 867 bool unicast, bool multicast);
869 int (*port_set_ether_type)(struct mv88e6xxx_chip *chip, int port, 868 int (*port_set_ether_type)(struct mv88e6xxx_chip *chip, int port,
870 u16 etype); 869 u16 etype);
871 int (*port_jumbo_config)(struct mv88e6xxx_chip *chip, int port); 870 int (*port_jumbo_config)(struct mv88e6xxx_chip *chip, int port);
872 871
873 int (*port_egress_rate_limiting)(struct mv88e6xxx_chip *chip, int port); 872 int (*port_egress_rate_limiting)(struct mv88e6xxx_chip *chip, int port);
874 int (*port_pause_config)(struct mv88e6xxx_chip *chip, int port); 873 int (*port_pause_config)(struct mv88e6xxx_chip *chip, int port);
874 int (*port_disable_learn_limit)(struct mv88e6xxx_chip *chip, int port);
875 int (*port_disable_pri_override)(struct mv88e6xxx_chip *chip, int port);
875 876
876 /* CMODE control what PHY mode the MAC will use, eg. SGMII, RGMII, etc. 877 /* CMODE control what PHY mode the MAC will use, eg. SGMII, RGMII, etc.
877 * Some chips allow this to be configured on specific ports. 878 * Some chips allow this to be configured on specific ports.
@@ -944,6 +945,11 @@ static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip)
944 return chip->info->num_ports; 945 return chip->info->num_ports;
945} 946}
946 947
948static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip)
949{
950 return GENMASK(mv88e6xxx_num_ports(chip) - 1, 0);
951}
952
947int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val); 953int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
948int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val); 954int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
949int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, 955int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg,
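
The new mv88e6xxx_port_mask() helper above is simply GENMASK(num_ports - 1, 0). A trivial standalone check of that identity (illustrative only; the macro below is a userspace stand-in for the kernel's GENMASK, and the port counts in the loop are arbitrary examples rather than values taken from the chip table):

#include <stdio.h>

/* Userspace stand-in for the kernel's GENMASK(h, l) */
#define GENMASK(h, l) (((1u << ((h) - (l) + 1)) - 1) << (l))

int main(void)
{
	for (unsigned int ports = 7; ports <= 11; ports += 4)
		printf("%2u ports -> port mask 0x%x\n",
		       ports, GENMASK(ports - 1, 0));
	return 0;
}
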
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 8875784c4718..d4868bb50ed5 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -497,8 +497,8 @@ int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
497 return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg); 497 return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
498} 498}
499 499
500int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port, 500static int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip,
501 bool on) 501 int port, bool unicast)
502{ 502{
503 int err; 503 int err;
504 u16 reg; 504 u16 reg;
@@ -507,7 +507,7 @@ int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
507 if (err) 507 if (err)
508 return err; 508 return err;
509 509
510 if (on) 510 if (unicast)
511 reg |= PORT_CONTROL_FORWARD_UNKNOWN; 511 reg |= PORT_CONTROL_FORWARD_UNKNOWN;
512 else 512 else
513 reg &= ~PORT_CONTROL_FORWARD_UNKNOWN; 513 reg &= ~PORT_CONTROL_FORWARD_UNKNOWN;
@@ -515,8 +515,8 @@ int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
515 return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg); 515 return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
516} 516}
517 517
518int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port, 518int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
519 bool on) 519 bool unicast, bool multicast)
520{ 520{
521 int err; 521 int err;
522 u16 reg; 522 u16 reg;
@@ -525,21 +525,45 @@ int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
525 if (err) 525 if (err)
526 return err; 526 return err;
527 527
528 if (on) 528 reg &= ~PORT_CONTROL_EGRESS_FLOODS_MASK;
529 reg |= PORT_CONTROL_EGRESS_ALL_UNKNOWN_DA; 529
530 if (unicast && multicast)
531 reg |= PORT_CONTROL_EGRESS_FLOODS_ALL_UNKNOWN_DA;
532 else if (unicast)
533 reg |= PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_MC_DA;
534 else if (multicast)
535 reg |= PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_UC_DA;
530 else 536 else
531 reg &= ~PORT_CONTROL_EGRESS_ALL_UNKNOWN_DA; 537 reg |= PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_DA;
532 538
533 return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg); 539 return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
534} 540}
535 541
536/* Offset 0x05: Port Control 1 */ 542/* Offset 0x05: Port Control 1 */
537 543
544int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
545 bool message_port)
546{
547 u16 val;
548 int err;
549
550 err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_1, &val);
551 if (err)
552 return err;
553
554 if (message_port)
555 val |= PORT_CONTROL_1_MESSAGE_PORT;
556 else
557 val &= ~PORT_CONTROL_1_MESSAGE_PORT;
558
559 return mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, val);
560}
561
538/* Offset 0x06: Port Based VLAN Map */ 562/* Offset 0x06: Port Based VLAN Map */
539 563
540int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map) 564int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map)
541{ 565{
542 const u16 mask = GENMASK(mv88e6xxx_num_ports(chip) - 1, 0); 566 const u16 mask = mv88e6xxx_port_mask(chip);
543 u16 reg; 567 u16 reg;
544 int err; 568 int err;
545 569
@@ -672,8 +696,8 @@ static const char * const mv88e6xxx_port_8021q_mode_names[] = {
672 [PORT_CONTROL_2_8021Q_SECURE] = "Secure", 696 [PORT_CONTROL_2_8021Q_SECURE] = "Secure",
673}; 697};
674 698
675int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port, 699static int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip,
676 bool on) 700 int port, bool multicast)
677{ 701{
678 int err; 702 int err;
679 u16 reg; 703 u16 reg;
@@ -682,14 +706,26 @@ int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
682 if (err) 706 if (err)
683 return err; 707 return err;
684 708
685 if (on) 709 if (multicast)
686 reg |= PORT_CONTROL_2_FORWARD_UNKNOWN; 710 reg |= PORT_CONTROL_2_DEFAULT_FORWARD;
687 else 711 else
688 reg &= ~PORT_CONTROL_2_FORWARD_UNKNOWN; 712 reg &= ~PORT_CONTROL_2_DEFAULT_FORWARD;
689 713
690 return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg); 714 return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
691} 715}
692 716
717int mv88e6185_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
718 bool unicast, bool multicast)
719{
720 int err;
721
722 err = mv88e6185_port_set_forward_unknown(chip, port, unicast);
723 if (err)
724 return err;
725
726 return mv88e6185_port_set_default_forward(chip, port, multicast);
727}
728
693int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, 729int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
694 int upstream_port) 730 int upstream_port)
695{ 731{
@@ -769,6 +805,20 @@ int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port)
769 return mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL, 0x0001); 805 return mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL, 0x0001);
770} 806}
771 807
808/* Offset 0x0C: Port ATU Control */
809
810int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port)
811{
812 return mv88e6xxx_port_write(chip, port, PORT_ATU_CONTROL, 0);
813}
814
815/* Offset 0x0D: (Priority) Override Register */
816
817int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port)
818{
819 return mv88e6xxx_port_write(chip, port, PORT_PRI_OVERRIDE, 0);
820}
821
772/* Offset 0x0f: Port Ether type */ 822/* Offset 0x0f: Port Ether type */
773 823
774int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port, 824int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index c83cbb3f4491..c2425ddab287 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -56,14 +56,14 @@ int mv88e6085_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
56 enum mv88e6xxx_frame_mode mode); 56 enum mv88e6xxx_frame_mode mode);
57int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port, 57int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
58 enum mv88e6xxx_frame_mode mode); 58 enum mv88e6xxx_frame_mode mode);
59int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port, 59int mv88e6185_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
60 bool on); 60 bool unicast, bool multicast);
61int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port, 61int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
62 bool on); 62 bool unicast, bool multicast);
63int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
64 bool on);
65int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port, 63int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
66 u16 etype); 64 u16 etype);
65int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
66 bool message_port);
67int mv88e6165_port_jumbo_config(struct mv88e6xxx_chip *chip, int port); 67int mv88e6165_port_jumbo_config(struct mv88e6xxx_chip *chip, int port);
68int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port); 68int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
69int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port); 69int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
@@ -75,4 +75,8 @@ int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
75int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port); 75int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
76int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, 76int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
77 int upstream_port); 77 int upstream_port);
78
79int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port);
80int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port);
81
78#endif /* _MV88E6XXX_PORT_H */ 82#endif /* _MV88E6XXX_PORT_H */
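
For reference, the two-bit EgressFloods field written by mv88e6352_port_set_egress_floods() in the port.c hunk above maps the (unicast, multicast) pair onto Port Control bits 3:2 using the PORT_CONTROL_EGRESS_FLOODS_* values defined in the mv88e6xxx.h hunk: 0x3 floods all unknown DAs, 0x1 blocks unknown multicast, 0x2 blocks unknown unicast, 0x0 blocks both. A minimal userspace sketch of that mapping (illustrative only, not part of the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the PORT_CONTROL_EGRESS_FLOODS_* encoding from mv88e6xxx.h */
static uint16_t egress_floods_bits(bool unicast, bool multicast)
{
	uint16_t field;

	if (unicast && multicast)
		field = 0x3;		/* flood all unknown DA */
	else if (unicast)
		field = 0x1;		/* no unknown multicast DA */
	else if (multicast)
		field = 0x2;		/* no unknown unicast DA */
	else
		field = 0x0;		/* no unknown DA at all */

	return field << 2;		/* bits 3:2 of Port Control */
}

int main(void)
{
	printf("uc+mc: 0x%x\n", egress_floods_bits(true, true));
	printf("uc:    0x%x\n", egress_floods_bits(true, false));
	printf("none:  0x%x\n", egress_floods_bits(false, false));
	return 0;
}
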
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 2c80611b94ae..149244aac20a 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -35,6 +35,7 @@
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/moduleparam.h> 36#include <linux/moduleparam.h>
37#include <linux/rtnetlink.h> 37#include <linux/rtnetlink.h>
38#include <linux/net_tstamp.h>
38#include <net/rtnetlink.h> 39#include <net/rtnetlink.h>
39#include <linux/u64_stats_sync.h> 40#include <linux/u64_stats_sync.h>
40 41
@@ -125,6 +126,7 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
125 dstats->tx_bytes += skb->len; 126 dstats->tx_bytes += skb->len;
126 u64_stats_update_end(&dstats->syncp); 127 u64_stats_update_end(&dstats->syncp);
127 128
129 skb_tx_timestamp(skb);
128 dev_kfree_skb(skb); 130 dev_kfree_skb(skb);
129 return NETDEV_TX_OK; 131 return NETDEV_TX_OK;
130} 132}
@@ -304,8 +306,21 @@ static void dummy_get_drvinfo(struct net_device *dev,
304 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 306 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
305} 307}
306 308
309static int dummy_get_ts_info(struct net_device *dev,
310 struct ethtool_ts_info *ts_info)
311{
312 ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
313 SOF_TIMESTAMPING_RX_SOFTWARE |
314 SOF_TIMESTAMPING_SOFTWARE;
315
316 ts_info->phc_index = -1;
317
318 return 0;
319};
320
307static const struct ethtool_ops dummy_ethtool_ops = { 321static const struct ethtool_ops dummy_ethtool_ops = {
308 .get_drvinfo = dummy_get_drvinfo, 322 .get_drvinfo = dummy_get_drvinfo,
323 .get_ts_info = dummy_get_ts_info,
309}; 324};
310 325
311static void dummy_free_netdev(struct net_device *dev) 326static void dummy_free_netdev(struct net_device *dev)
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 8c08f9deef92..edae15ac0e98 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -180,5 +180,6 @@ source "drivers/net/ethernet/via/Kconfig"
180source "drivers/net/ethernet/wiznet/Kconfig" 180source "drivers/net/ethernet/wiznet/Kconfig"
181source "drivers/net/ethernet/xilinx/Kconfig" 181source "drivers/net/ethernet/xilinx/Kconfig"
182source "drivers/net/ethernet/xircom/Kconfig" 182source "drivers/net/ethernet/xircom/Kconfig"
183source "drivers/net/ethernet/synopsys/Kconfig"
183 184
184endif # ETHERNET 185endif # ETHERNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 26dce5bf2c18..bf7f4502cabc 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -91,3 +91,4 @@ obj-$(CONFIG_NET_VENDOR_VIA) += via/
91obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/ 91obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
92obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/ 92obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
93obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/ 93obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
94obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index ffea9859f5a7..7ec2c9717cf1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1854,7 +1854,8 @@ static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
1854 if (tc_to_netdev->type != TC_SETUP_MQPRIO) 1854 if (tc_to_netdev->type != TC_SETUP_MQPRIO)
1855 return -EINVAL; 1855 return -EINVAL;
1856 1856
1857 tc = tc_to_netdev->tc; 1857 tc_to_netdev->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
1858 tc = tc_to_netdev->mqprio->num_tc;
1858 1859
1859 if (tc > pdata->hw_feat.tc_cnt) 1860 if (tc > pdata->hw_feat.tc_cnt)
1860 return -EINVAL; 1861 return -EINVAL;
diff --git a/drivers/net/ethernet/apm/Kconfig b/drivers/net/ethernet/apm/Kconfig
index ec63d706d464..59efe5b145dd 100644
--- a/drivers/net/ethernet/apm/Kconfig
+++ b/drivers/net/ethernet/apm/Kconfig
@@ -1 +1,2 @@
1source "drivers/net/ethernet/apm/xgene/Kconfig" 1source "drivers/net/ethernet/apm/xgene/Kconfig"
2source "drivers/net/ethernet/apm/xgene-v2/Kconfig"
diff --git a/drivers/net/ethernet/apm/Makefile b/drivers/net/ethernet/apm/Makefile
index 65ce32ad1b2c..946b2a4c882d 100644
--- a/drivers/net/ethernet/apm/Makefile
+++ b/drivers/net/ethernet/apm/Makefile
@@ -3,3 +3,4 @@
3# 3#
4 4
5obj-$(CONFIG_NET_XGENE) += xgene/ 5obj-$(CONFIG_NET_XGENE) += xgene/
6obj-$(CONFIG_NET_XGENE_V2) += xgene-v2/
diff --git a/drivers/net/ethernet/apm/xgene-v2/Kconfig b/drivers/net/ethernet/apm/xgene-v2/Kconfig
new file mode 100644
index 000000000000..1205861b6318
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/Kconfig
@@ -0,0 +1,11 @@
1config NET_XGENE_V2
2 tristate "APM X-Gene SoC Ethernet-v2 Driver"
3 depends on HAS_DMA
4 depends on ARCH_XGENE || COMPILE_TEST
5 help
6	  This is the Ethernet driver for the on-chip Ethernet interface
7	  of APM X-Gene SoCs, which uses a linked-list DMA descriptor
8	  architecture (v2).
9
10 To compile this driver as a module, choose M here. This module will
11 be called xgene-enet-v2.
diff --git a/drivers/net/ethernet/apm/xgene-v2/Makefile b/drivers/net/ethernet/apm/xgene-v2/Makefile
new file mode 100644
index 000000000000..735309c0b8b1
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for APM X-Gene Ethernet v2 driver
3#
4
5xgene-enet-v2-objs := main.o mac.o enet.o ring.o
6obj-$(CONFIG_NET_XGENE_V2) += xgene-enet-v2.o
diff --git a/drivers/net/ethernet/apm/xgene-v2/enet.c b/drivers/net/ethernet/apm/xgene-v2/enet.c
new file mode 100644
index 000000000000..b49edeeb6275
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/enet.c
@@ -0,0 +1,71 @@
1/*
2 * Applied Micro X-Gene SoC Ethernet v2 Driver
3 *
4 * Copyright (c) 2017, Applied Micro Circuits Corporation
5 * Author(s): Iyappan Subramanian <isubramanian@apm.com>
6 * Keyur Chudgar <kchudgar@apm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#include "main.h"
23
24void xge_wr_csr(struct xge_pdata *pdata, u32 offset, u32 val)
25{
26 void __iomem *addr = pdata->resources.base_addr + offset;
27
28 iowrite32(val, addr);
29}
30
31u32 xge_rd_csr(struct xge_pdata *pdata, u32 offset)
32{
33 void __iomem *addr = pdata->resources.base_addr + offset;
34
35 return ioread32(addr);
36}
37
38int xge_port_reset(struct net_device *ndev)
39{
40 struct xge_pdata *pdata = netdev_priv(ndev);
41
42 xge_wr_csr(pdata, ENET_SRST, 0x3);
43 xge_wr_csr(pdata, ENET_SRST, 0x2);
44 xge_wr_csr(pdata, ENET_SRST, 0x0);
45
46 xge_wr_csr(pdata, ENET_SHIM, DEVM_ARAUX_COH | DEVM_AWAUX_COH);
47
48 return 0;
49}
50
51static void xge_traffic_resume(struct net_device *ndev)
52{
53 struct xge_pdata *pdata = netdev_priv(ndev);
54
55 xge_wr_csr(pdata, CFG_FORCE_LINK_STATUS_EN, 1);
56 xge_wr_csr(pdata, FORCE_LINK_STATUS, 1);
57
58 xge_wr_csr(pdata, CFG_LINK_AGGR_RESUME, 1);
59 xge_wr_csr(pdata, RX_DV_GATE_REG, 1);
60}
61
62int xge_port_init(struct net_device *ndev)
63{
64 struct xge_pdata *pdata = netdev_priv(ndev);
65
66 pdata->phy_speed = SPEED_1000;
67 xge_mac_init(pdata);
68 xge_traffic_resume(ndev);
69
70 return 0;
71}
diff --git a/drivers/net/ethernet/apm/xgene-v2/enet.h b/drivers/net/ethernet/apm/xgene-v2/enet.h
new file mode 100644
index 000000000000..40371cfcfce4
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/enet.h
@@ -0,0 +1,43 @@
1/*
2 * Applied Micro X-Gene SoC Ethernet v2 Driver
3 *
4 * Copyright (c) 2017, Applied Micro Circuits Corporation
5 * Author(s): Iyappan Subramanian <isubramanian@apm.com>
6 * Keyur Chudgar <kchudgar@apm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#ifndef __XGENE_ENET_V2_ENET_H__
23#define __XGENE_ENET_V2_ENET_H__
24
25#define ENET_CLKEN 0xc008
26#define ENET_SRST 0xc000
27#define ENET_SHIM 0xc010
28#define CFG_MEM_RAM_SHUTDOWN 0xd070
29#define BLOCK_MEM_RDY 0xd074
30
31#define DEVM_ARAUX_COH BIT(19)
32#define DEVM_AWAUX_COH BIT(3)
33
34#define CFG_FORCE_LINK_STATUS_EN 0x229c
35#define FORCE_LINK_STATUS 0x22a0
36#define CFG_LINK_AGGR_RESUME 0x27c8
37#define RX_DV_GATE_REG 0x2dfc
38
39void xge_wr_csr(struct xge_pdata *pdata, u32 offset, u32 val);
40u32 xge_rd_csr(struct xge_pdata *pdata, u32 offset);
41int xge_port_reset(struct net_device *ndev);
42
43#endif /* __XGENE_ENET_V2_ENET_H__ */
diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.c b/drivers/net/ethernet/apm/xgene-v2/mac.c
new file mode 100644
index 000000000000..c3189de3df55
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/mac.c
@@ -0,0 +1,116 @@
1/*
2 * Applied Micro X-Gene SoC Ethernet v2 Driver
3 *
4 * Copyright (c) 2017, Applied Micro Circuits Corporation
5 * Author(s): Iyappan Subramanian <isubramanian@apm.com>
6 * Keyur Chudgar <kchudgar@apm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#include "main.h"
23
24void xge_mac_reset(struct xge_pdata *pdata)
25{
26 xge_wr_csr(pdata, MAC_CONFIG_1, SOFT_RESET);
27 xge_wr_csr(pdata, MAC_CONFIG_1, 0);
28}
29
30static void xge_mac_set_speed(struct xge_pdata *pdata)
31{
32 u32 icm0, icm2, ecm0, mc2;
33 u32 intf_ctrl, rgmii;
34
35 icm0 = xge_rd_csr(pdata, ICM_CONFIG0_REG_0);
36 icm2 = xge_rd_csr(pdata, ICM_CONFIG2_REG_0);
37 ecm0 = xge_rd_csr(pdata, ECM_CONFIG0_REG_0);
38 rgmii = xge_rd_csr(pdata, RGMII_REG_0);
39 mc2 = xge_rd_csr(pdata, MAC_CONFIG_2);
40 intf_ctrl = xge_rd_csr(pdata, INTERFACE_CONTROL);
41 icm2 |= CFG_WAITASYNCRD_EN;
42
43 switch (pdata->phy_speed) {
44 case SPEED_10:
45 SET_REG_BITS(&mc2, INTF_MODE, 1);
46 SET_REG_BITS(&intf_ctrl, HD_MODE, 0);
47 SET_REG_BITS(&icm0, CFG_MACMODE, 0);
48 SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 500);
49 SET_REG_BIT(&rgmii, CFG_SPEED_125, 0);
50 break;
51 case SPEED_100:
52 SET_REG_BITS(&mc2, INTF_MODE, 1);
53 SET_REG_BITS(&intf_ctrl, HD_MODE, 1);
54 SET_REG_BITS(&icm0, CFG_MACMODE, 1);
55 SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 80);
56 SET_REG_BIT(&rgmii, CFG_SPEED_125, 0);
57 break;
58 default:
59 SET_REG_BITS(&mc2, INTF_MODE, 2);
60 SET_REG_BITS(&intf_ctrl, HD_MODE, 2);
61 SET_REG_BITS(&icm0, CFG_MACMODE, 2);
62 SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 16);
63 SET_REG_BIT(&rgmii, CFG_SPEED_125, 1);
64 break;
65 }
66
67 mc2 |= FULL_DUPLEX | CRC_EN | PAD_CRC;
68 SET_REG_BITS(&ecm0, CFG_WFIFOFULLTHR, 0x32);
69
70 xge_wr_csr(pdata, MAC_CONFIG_2, mc2);
71 xge_wr_csr(pdata, INTERFACE_CONTROL, intf_ctrl);
72 xge_wr_csr(pdata, RGMII_REG_0, rgmii);
73 xge_wr_csr(pdata, ICM_CONFIG0_REG_0, icm0);
74 xge_wr_csr(pdata, ICM_CONFIG2_REG_0, icm2);
75 xge_wr_csr(pdata, ECM_CONFIG0_REG_0, ecm0);
76}
77
78void xge_mac_set_station_addr(struct xge_pdata *pdata)
79{
80 u8 *dev_addr = pdata->ndev->dev_addr;
81 u32 addr0, addr1;
82
83 addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
84 (dev_addr[1] << 8) | dev_addr[0];
85 addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);
86
87 xge_wr_csr(pdata, STATION_ADDR0, addr0);
88 xge_wr_csr(pdata, STATION_ADDR1, addr1);
89}
90
91void xge_mac_init(struct xge_pdata *pdata)
92{
93 xge_mac_reset(pdata);
94 xge_mac_set_speed(pdata);
95 xge_mac_set_station_addr(pdata);
96}
97
98void xge_mac_enable(struct xge_pdata *pdata)
99{
100 u32 data;
101
102 data = xge_rd_csr(pdata, MAC_CONFIG_1);
103 data |= TX_EN | RX_EN;
104 xge_wr_csr(pdata, MAC_CONFIG_1, data);
105
106 data = xge_rd_csr(pdata, MAC_CONFIG_1);
107}
108
109void xge_mac_disable(struct xge_pdata *pdata)
110{
111 u32 data;
112
113 data = xge_rd_csr(pdata, MAC_CONFIG_1);
114 data &= ~(TX_EN | RX_EN);
115 xge_wr_csr(pdata, MAC_CONFIG_1, data);
116}
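
xge_mac_set_station_addr above packs the six-byte MAC address into the two STATION_ADDR CSRs: octets 0-3 go into STATION_ADDR0 and octets 4-5 into the upper half of STATION_ADDR1. A standalone sketch of the same packing, with an arbitrary example address:

/* standalone illustration of splitting a MAC address across two 32-bit CSRs */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t dev_addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };	/* example */
	uint32_t addr0, addr1;

	addr0 = ((uint32_t)dev_addr[3] << 24) | ((uint32_t)dev_addr[2] << 16) |
		((uint32_t)dev_addr[1] << 8)  |   dev_addr[0];
	addr1 = ((uint32_t)dev_addr[5] << 24) | ((uint32_t)dev_addr[4] << 16);

	printf("STATION_ADDR0 = 0x%08x, STATION_ADDR1 = 0x%08x\n",
	       (unsigned)addr0, (unsigned)addr1);
	return 0;
}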
diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.h b/drivers/net/ethernet/apm/xgene-v2/mac.h
new file mode 100644
index 000000000000..0fce6ae15ce0
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/mac.h
@@ -0,0 +1,110 @@
1/*
2 * Applied Micro X-Gene SoC Ethernet v2 Driver
3 *
4 * Copyright (c) 2017, Applied Micro Circuits Corporation
5 * Author(s): Iyappan Subramanian <isubramanian@apm.com>
6 * Keyur Chudgar <kchudgar@apm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#ifndef __XGENE_ENET_V2_MAC_H__
23#define __XGENE_ENET_V2_MAC_H__
24
25/* Register offsets */
26#define MAC_CONFIG_1 0xa000
27#define MAC_CONFIG_2 0xa004
28#define MII_MGMT_CONFIG 0xa020
29#define MII_MGMT_COMMAND 0xa024
30#define MII_MGMT_ADDRESS 0xa028
31#define MII_MGMT_CONTROL 0xa02c
32#define MII_MGMT_STATUS 0xa030
33#define MII_MGMT_INDICATORS 0xa034
34#define INTERFACE_CONTROL 0xa038
35#define STATION_ADDR0 0xa040
36#define STATION_ADDR1 0xa044
37#define RBYT 0xa09c
38#define RPKT 0xa0a0
39#define RFCS 0xa0a4
40
41#define RGMII_REG_0 0x27e0
42#define ICM_CONFIG0_REG_0 0x2c00
43#define ICM_CONFIG2_REG_0 0x2c08
44#define ECM_CONFIG0_REG_0 0x2d00
45
46/* Register fields */
47#define SOFT_RESET BIT(31)
48#define TX_EN BIT(0)
49#define RX_EN BIT(2)
50#define PAD_CRC BIT(2)
51#define CRC_EN BIT(1)
52#define FULL_DUPLEX BIT(0)
53
54#define INTF_MODE_POS 8
55#define INTF_MODE_LEN 2
56#define HD_MODE_POS 25
57#define HD_MODE_LEN 2
58#define CFG_MACMODE_POS 18
59#define CFG_MACMODE_LEN 2
60#define CFG_WAITASYNCRD_POS 0
61#define CFG_WAITASYNCRD_LEN 16
62#define CFG_SPEED_125_POS 24
63#define CFG_WFIFOFULLTHR_POS 0
64#define CFG_WFIFOFULLTHR_LEN 7
65#define MGMT_CLOCK_SEL_POS 0
66#define MGMT_CLOCK_SEL_LEN 3
67#define PHY_ADDR_POS 8
68#define PHY_ADDR_LEN 5
69#define REG_ADDR_POS 0
70#define REG_ADDR_LEN 5
71#define MII_MGMT_BUSY BIT(0)
72#define MII_READ_CYCLE BIT(0)
73#define CFG_WAITASYNCRD_EN BIT(16)
74
75static inline void xgene_set_reg_bits(u32 *var, int pos, int len, u32 val)
76{
77 u32 mask = GENMASK(pos + len, pos);
78
79 *var &= ~mask;
80 *var |= ((val << pos) & mask);
81}
82
83static inline u32 xgene_get_reg_bits(u32 var, int pos, int len)
84{
85 u32 mask = GENMASK(pos + len, pos);
86
87 return (var & mask) >> pos;
88}
89
90#define SET_REG_BITS(var, field, val) \
91 xgene_set_reg_bits(var, field ## _POS, field ## _LEN, val)
92
93#define SET_REG_BIT(var, field, val) \
94 xgene_set_reg_bits(var, field ## _POS, 1, val)
95
96#define GET_REG_BITS(var, field) \
97 xgene_get_reg_bits(var, field ## _POS, field ## _LEN)
98
99#define GET_REG_BIT(var, field) ((var) & (field))
100
101struct xge_pdata;
102
103void xge_mac_reset(struct xge_pdata *pdata);
104void xge_mac_enable(struct xge_pdata *pdata);
105void xge_mac_disable(struct xge_pdata *pdata);
106void xge_mac_init(struct xge_pdata *pdata);
107int xge_port_init(struct net_device *ndev);
108void xge_mac_set_station_addr(struct xge_pdata *pdata);
109
110#endif /* __XGENE_ENET_V2_MAC_H__ */
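
The SET_REG_BITS()/GET_REG_BITS() macros above resolve a field name to its _POS/_LEN pair and mask the value into a 32-bit CSR image before it is written back with xge_wr_csr(). A self-contained userspace sketch of that accessor pattern, assuming a conventional len-bit wide mask; the INTF_MODE offsets mirror this header and the value written is only an example:

/* standalone illustration of the _POS/_LEN register-field accessor pattern */
#include <stdint.h>
#include <stdio.h>

#define INTF_MODE_POS	8
#define INTF_MODE_LEN	2

static void set_reg_bits(uint32_t *var, int pos, int len, uint32_t val)
{
	uint32_t mask = ((1u << len) - 1) << pos;	/* len bits starting at pos */

	*var &= ~mask;
	*var |= (val << pos) & mask;
}

static uint32_t get_reg_bits(uint32_t var, int pos, int len)
{
	uint32_t mask = ((1u << len) - 1) << pos;

	return (var & mask) >> pos;
}

#define SET_FIELD(var, field, val) \
	set_reg_bits(var, field ## _POS, field ## _LEN, val)
#define GET_FIELD(var, field) \
	get_reg_bits(var, field ## _POS, field ## _LEN)

int main(void)
{
	uint32_t mc2 = 0;	/* shadow of a MAC_CONFIG_2-style register */

	SET_FIELD(&mc2, INTF_MODE, 2);	/* example value, as used for gigabit speeds */
	printf("mc2 = 0x%08x, INTF_MODE = %u\n",
	       (unsigned)mc2, (unsigned)GET_FIELD(mc2, INTF_MODE));
	return 0;
}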
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
new file mode 100644
index 000000000000..ae76977d10b4
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -0,0 +1,756 @@
1/*
2 * Applied Micro X-Gene SoC Ethernet v2 Driver
3 *
4 * Copyright (c) 2017, Applied Micro Circuits Corporation
5 * Author(s): Iyappan Subramanian <isubramanian@apm.com>
6 * Keyur Chudgar <kchudgar@apm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#include "main.h"
23
24static const struct acpi_device_id xge_acpi_match[];
25
26static int xge_get_resources(struct xge_pdata *pdata)
27{
28 struct platform_device *pdev;
29 struct net_device *ndev;
30 int phy_mode, ret = 0;
31 struct resource *res;
32 struct device *dev;
33
34 pdev = pdata->pdev;
35 dev = &pdev->dev;
36 ndev = pdata->ndev;
37
38 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
39 if (!res) {
40 dev_err(dev, "Resource enet_csr not defined\n");
41 return -ENODEV;
42 }
43
44 pdata->resources.base_addr = devm_ioremap(dev, res->start,
45 resource_size(res));
46 if (!pdata->resources.base_addr) {
47 dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
48 return -ENOMEM;
49 }
50
51 if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
52 eth_hw_addr_random(ndev);
53
54 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
55
56 phy_mode = device_get_phy_mode(dev);
57 if (phy_mode < 0) {
58 dev_err(dev, "Unable to get phy-connection-type\n");
59 return phy_mode;
60 }
61 pdata->resources.phy_mode = phy_mode;
62
63 if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
64 dev_err(dev, "Incorrect phy-connection-type specified\n");
65 return -ENODEV;
66 }
67
68 ret = platform_get_irq(pdev, 0);
69 if (ret <= 0) {
70 dev_err(dev, "Unable to get ENET IRQ\n");
71 ret = ret ? : -ENXIO;
72 return ret;
73 }
74 pdata->resources.irq = ret;
75
76 return 0;
77}
78
79static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
80{
81 struct xge_pdata *pdata = netdev_priv(ndev);
82 struct xge_desc_ring *ring = pdata->rx_ring;
83 const u8 slots = XGENE_ENET_NUM_DESC - 1;
84 struct device *dev = &pdata->pdev->dev;
85 struct xge_raw_desc *raw_desc;
86 u64 addr_lo, addr_hi;
87 u8 tail = ring->tail;
88 struct sk_buff *skb;
89 dma_addr_t dma_addr;
90 u16 len;
91 int i;
92
93 for (i = 0; i < nbuf; i++) {
94 raw_desc = &ring->raw_desc[tail];
95
96 len = XGENE_ENET_STD_MTU;
97 skb = netdev_alloc_skb(ndev, len);
98 if (unlikely(!skb))
99 return -ENOMEM;
100
101 dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
102 if (dma_mapping_error(dev, dma_addr)) {
103 netdev_err(ndev, "DMA mapping error\n");
104 dev_kfree_skb_any(skb);
105 return -EINVAL;
106 }
107
108 ring->pkt_info[tail].skb = skb;
109 ring->pkt_info[tail].dma_addr = dma_addr;
110
111 addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
112 addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
113 raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
114 SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
115 SET_BITS(PKT_ADDRH,
116 upper_32_bits(dma_addr)));
117
118 dma_wmb();
119 raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
120 SET_BITS(E, 1));
121 tail = (tail + 1) & slots;
122 }
123
124 ring->tail = tail;
125
126 return 0;
127}
128
129static int xge_init_hw(struct net_device *ndev)
130{
131 struct xge_pdata *pdata = netdev_priv(ndev);
132 int ret;
133
134 ret = xge_port_reset(ndev);
135 if (ret)
136 return ret;
137
138 xge_port_init(ndev);
139 pdata->nbufs = NUM_BUFS;
140
141 return 0;
142}
143
144static irqreturn_t xge_irq(const int irq, void *data)
145{
146 struct xge_pdata *pdata = data;
147
148 if (napi_schedule_prep(&pdata->napi)) {
149 xge_intr_disable(pdata);
150 __napi_schedule(&pdata->napi);
151 }
152
153 return IRQ_HANDLED;
154}
155
156static int xge_request_irq(struct net_device *ndev)
157{
158 struct xge_pdata *pdata = netdev_priv(ndev);
159 struct device *dev = &pdata->pdev->dev;
160 int ret;
161
162 snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);
163
164 ret = devm_request_irq(dev, pdata->resources.irq, xge_irq,
165 0, pdata->irq_name, pdata);
166 if (ret)
167 netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);
168
169 return ret;
170}
171
172static void xge_free_irq(struct net_device *ndev)
173{
174 struct xge_pdata *pdata = netdev_priv(ndev);
175 struct device *dev = &pdata->pdev->dev;
176
177 devm_free_irq(dev, pdata->resources.irq, pdata);
178}
179
180static bool is_tx_slot_available(struct xge_raw_desc *raw_desc)
181{
182 if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
183 (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY))
184 return true;
185
186 return false;
187}
188
189static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
190{
191 struct xge_pdata *pdata = netdev_priv(ndev);
192 struct device *dev = &pdata->pdev->dev;
193 struct xge_desc_ring *tx_ring;
194 struct xge_raw_desc *raw_desc;
195 static dma_addr_t dma_addr;
196 u64 addr_lo, addr_hi;
197 void *pkt_buf;
198 u8 tail;
199 u16 len;
200
201 tx_ring = pdata->tx_ring;
202 tail = tx_ring->tail;
203 len = skb_headlen(skb);
204 raw_desc = &tx_ring->raw_desc[tail];
205
206 if (!is_tx_slot_available(raw_desc)) {
207 netif_stop_queue(ndev);
208 return NETDEV_TX_BUSY;
209 }
210
211 /* Packet buffers should be 64B aligned */
212 pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
213 GFP_ATOMIC);
214 if (unlikely(!pkt_buf)) {
215 dev_kfree_skb_any(skb);
216 return NETDEV_TX_OK;
217 }
218 memcpy(pkt_buf, skb->data, len);
219
220 addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
221 addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
222 raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
223 SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
224 SET_BITS(PKT_ADDRH,
225 upper_32_bits(dma_addr)));
226
227 tx_ring->pkt_info[tail].skb = skb;
228 tx_ring->pkt_info[tail].dma_addr = dma_addr;
229 tx_ring->pkt_info[tail].pkt_buf = pkt_buf;
230
231 dma_wmb();
232
233 raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
234 SET_BITS(PKT_SIZE, len) |
235 SET_BITS(E, 0));
236 skb_tx_timestamp(skb);
237 xge_wr_csr(pdata, DMATXCTRL, 1);
238
239 tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);
240
241 return NETDEV_TX_OK;
242}
243
244static bool is_tx_hw_done(struct xge_raw_desc *raw_desc)
245{
246 if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
247 !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
248 return true;
249
250 return false;
251}
252
253static void xge_txc_poll(struct net_device *ndev)
254{
255 struct xge_pdata *pdata = netdev_priv(ndev);
256 struct device *dev = &pdata->pdev->dev;
257 struct xge_desc_ring *tx_ring;
258 struct xge_raw_desc *raw_desc;
259 dma_addr_t dma_addr;
260 struct sk_buff *skb;
261 void *pkt_buf;
262 u32 data;
263 u8 head;
264
265 tx_ring = pdata->tx_ring;
266 head = tx_ring->head;
267
268 data = xge_rd_csr(pdata, DMATXSTATUS);
269 if (!GET_BITS(TXPKTCOUNT, data))
270 return;
271
272 while (1) {
273 raw_desc = &tx_ring->raw_desc[head];
274
275 if (!is_tx_hw_done(raw_desc))
276 break;
277
278 dma_rmb();
279
280 skb = tx_ring->pkt_info[head].skb;
281 dma_addr = tx_ring->pkt_info[head].dma_addr;
282 pkt_buf = tx_ring->pkt_info[head].pkt_buf;
283 pdata->stats.tx_packets++;
284 pdata->stats.tx_bytes += skb->len;
285 dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
286 dev_kfree_skb_any(skb);
287
288 /* clear pktstart address and pktsize */
289 raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
290 SET_BITS(PKT_SIZE, SLOT_EMPTY));
291 xge_wr_csr(pdata, DMATXSTATUS, 1);
292
293 head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
294 }
295
296 if (netif_queue_stopped(ndev))
297 netif_wake_queue(ndev);
298
299 tx_ring->head = head;
300}
301
302static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
303{
304 struct xge_pdata *pdata = netdev_priv(ndev);
305 struct device *dev = &pdata->pdev->dev;
306 struct xge_desc_ring *rx_ring;
307 struct xge_raw_desc *raw_desc;
308 struct sk_buff *skb;
309 dma_addr_t dma_addr;
310 int processed = 0;
311 u8 head, rx_error;
312 int i, ret;
313 u32 data;
314 u16 len;
315
316 rx_ring = pdata->rx_ring;
317 head = rx_ring->head;
318
319 data = xge_rd_csr(pdata, DMARXSTATUS);
320 if (!GET_BITS(RXPKTCOUNT, data))
321 return 0;
322
323 for (i = 0; i < budget; i++) {
324 raw_desc = &rx_ring->raw_desc[head];
325
326 if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
327 break;
328
329 dma_rmb();
330
331 skb = rx_ring->pkt_info[head].skb;
332 rx_ring->pkt_info[head].skb = NULL;
333 dma_addr = rx_ring->pkt_info[head].dma_addr;
334 len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
335 dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
336 DMA_FROM_DEVICE);
337
338 rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
339 if (unlikely(rx_error)) {
340 pdata->stats.rx_errors++;
341 dev_kfree_skb_any(skb);
342 goto out;
343 }
344
345 skb_put(skb, len);
346 skb->protocol = eth_type_trans(skb, ndev);
347
348 pdata->stats.rx_packets++;
349 pdata->stats.rx_bytes += len;
350 napi_gro_receive(&pdata->napi, skb);
351out:
352 ret = xge_refill_buffers(ndev, 1);
353 xge_wr_csr(pdata, DMARXSTATUS, 1);
354 xge_wr_csr(pdata, DMARXCTRL, 1);
355
356 if (ret)
357 break;
358
359 head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
360 processed++;
361 }
362
363 rx_ring->head = head;
364
365 return processed;
366}
367
368static void xge_delete_desc_ring(struct net_device *ndev,
369 struct xge_desc_ring *ring)
370{
371 struct xge_pdata *pdata = netdev_priv(ndev);
372 struct device *dev = &pdata->pdev->dev;
373 u16 size;
374
375 if (!ring)
376 return;
377
378 size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
379 if (ring->desc_addr)
380 dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);
381
382 kfree(ring->pkt_info);
383 kfree(ring);
384}
385
386static void xge_free_buffers(struct net_device *ndev)
387{
388 struct xge_pdata *pdata = netdev_priv(ndev);
389 struct xge_desc_ring *ring = pdata->rx_ring;
390 struct device *dev = &pdata->pdev->dev;
391 struct sk_buff *skb;
392 dma_addr_t dma_addr;
393 int i;
394
395 for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
396 skb = ring->pkt_info[i].skb;
397 dma_addr = ring->pkt_info[i].dma_addr;
398
399 if (!skb)
400 continue;
401
402 dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
403 DMA_FROM_DEVICE);
404 dev_kfree_skb_any(skb);
405 }
406}
407
408static void xge_delete_desc_rings(struct net_device *ndev)
409{
410 struct xge_pdata *pdata = netdev_priv(ndev);
411
412 xge_txc_poll(ndev);
413 xge_delete_desc_ring(ndev, pdata->tx_ring);
414
415 xge_rx_poll(ndev, 64);
416 xge_free_buffers(ndev);
417 xge_delete_desc_ring(ndev, pdata->rx_ring);
418}
419
420static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
421{
422 struct xge_pdata *pdata = netdev_priv(ndev);
423 struct device *dev = &pdata->pdev->dev;
424 struct xge_desc_ring *ring;
425 u16 size;
426
427 ring = kzalloc(sizeof(struct xge_desc_ring), GFP_KERNEL);
428 if (!ring)
429 return NULL;
430
431 ring->ndev = ndev;
432
433 size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
434 ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr,
435 GFP_KERNEL);
436 if (!ring->desc_addr)
437 goto err;
438
439 ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(struct pkt_info),
440 GFP_KERNEL);
441 if (!ring->pkt_info)
442 goto err;
443
444 xge_setup_desc(ring);
445
446 return ring;
447
448err:
449 xge_delete_desc_ring(ndev, ring);
450
451 return NULL;
452}
453
454static int xge_create_desc_rings(struct net_device *ndev)
455{
456 struct xge_pdata *pdata = netdev_priv(ndev);
457 struct xge_desc_ring *ring;
458 int ret;
459
460 /* create tx ring */
461 ring = xge_create_desc_ring(ndev);
462 if (!ring)
463 goto err;
464
465 pdata->tx_ring = ring;
466 xge_update_tx_desc_addr(pdata);
467
468 /* create rx ring */
469 ring = xge_create_desc_ring(ndev);
470 if (!ring)
471 goto err;
472
473 pdata->rx_ring = ring;
474 xge_update_rx_desc_addr(pdata);
475
476 ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);
477 if (ret)
478 goto err;
479
480 return 0;
481err:
482 xge_delete_desc_rings(ndev);
483
484 return -ENOMEM;
485}
486
487static int xge_open(struct net_device *ndev)
488{
489 struct xge_pdata *pdata = netdev_priv(ndev);
490 int ret;
491
492 ret = xge_create_desc_rings(ndev);
493 if (ret)
494 return ret;
495
496 napi_enable(&pdata->napi);
497 ret = xge_request_irq(ndev);
498 if (ret)
499 return ret;
500
501 xge_intr_enable(pdata);
502 xge_wr_csr(pdata, DMARXCTRL, 1);
503 xge_mac_enable(pdata);
504 netif_start_queue(ndev);
505 netif_carrier_on(ndev);
506
507 return 0;
508}
509
510static int xge_close(struct net_device *ndev)
511{
512 struct xge_pdata *pdata = netdev_priv(ndev);
513
514 netif_carrier_off(ndev);
515 netif_stop_queue(ndev);
516 xge_mac_disable(pdata);
517
518 xge_intr_disable(pdata);
519 xge_free_irq(ndev);
520 napi_disable(&pdata->napi);
521 xge_delete_desc_rings(ndev);
522
523 return 0;
524}
525
526static int xge_napi(struct napi_struct *napi, const int budget)
527{
528 struct net_device *ndev = napi->dev;
529 struct xge_pdata *pdata;
530 int processed;
531
532 pdata = netdev_priv(ndev);
533
534 xge_txc_poll(ndev);
535 processed = xge_rx_poll(ndev, budget);
536
537 if (processed < budget) {
538 napi_complete_done(napi, processed);
539 xge_intr_enable(pdata);
540 }
541
542 return processed;
543}
544
545static int xge_set_mac_addr(struct net_device *ndev, void *addr)
546{
547 struct xge_pdata *pdata = netdev_priv(ndev);
548 int ret;
549
550 ret = eth_mac_addr(ndev, addr);
551 if (ret)
552 return ret;
553
554 xge_mac_set_station_addr(pdata);
555
556 return 0;
557}
558
559static bool is_tx_pending(struct xge_raw_desc *raw_desc)
560{
561 if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
562 return true;
563
564 return false;
565}
566
567static void xge_free_pending_skb(struct net_device *ndev)
568{
569 struct xge_pdata *pdata = netdev_priv(ndev);
570 struct device *dev = &pdata->pdev->dev;
571 struct xge_desc_ring *tx_ring;
572 struct xge_raw_desc *raw_desc;
573 dma_addr_t dma_addr;
574 struct sk_buff *skb;
575 void *pkt_buf;
576 int i;
577
578 tx_ring = pdata->tx_ring;
579
580 for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
581 raw_desc = &tx_ring->raw_desc[i];
582
583 if (!is_tx_pending(raw_desc))
584 continue;
585
586 skb = tx_ring->pkt_info[i].skb;
587 dma_addr = tx_ring->pkt_info[i].dma_addr;
588 pkt_buf = tx_ring->pkt_info[i].pkt_buf;
589 dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
590 dev_kfree_skb_any(skb);
591 }
592}
593
594static void xge_timeout(struct net_device *ndev)
595{
596 struct xge_pdata *pdata = netdev_priv(ndev);
597
598 rtnl_lock();
599
600 if (netif_running(ndev)) {
601 netif_carrier_off(ndev);
602 netif_stop_queue(ndev);
603 xge_intr_disable(pdata);
604 napi_disable(&pdata->napi);
605
606 xge_wr_csr(pdata, DMATXCTRL, 0);
607 xge_txc_poll(ndev);
608 xge_free_pending_skb(ndev);
609 xge_wr_csr(pdata, DMATXSTATUS, ~0U);
610
611 xge_setup_desc(pdata->tx_ring);
612 xge_update_tx_desc_addr(pdata);
613 xge_mac_init(pdata);
614
615 napi_enable(&pdata->napi);
616 xge_intr_enable(pdata);
617 xge_mac_enable(pdata);
618 netif_start_queue(ndev);
619 netif_carrier_on(ndev);
620 }
621
622 rtnl_unlock();
623}
624
625static void xge_get_stats64(struct net_device *ndev,
626 struct rtnl_link_stats64 *storage)
627{
628 struct xge_pdata *pdata = netdev_priv(ndev);
629 struct xge_stats *stats = &pdata->stats;
630
631 storage->tx_packets += stats->tx_packets;
632 storage->tx_bytes += stats->tx_bytes;
633
634 storage->rx_packets += stats->rx_packets;
635 storage->rx_bytes += stats->rx_bytes;
636 storage->rx_errors += stats->rx_errors;
637}
638
639static const struct net_device_ops xgene_ndev_ops = {
640 .ndo_open = xge_open,
641 .ndo_stop = xge_close,
642 .ndo_start_xmit = xge_start_xmit,
643 .ndo_set_mac_address = xge_set_mac_addr,
644 .ndo_tx_timeout = xge_timeout,
645 .ndo_get_stats64 = xge_get_stats64,
646};
647
648static int xge_probe(struct platform_device *pdev)
649{
650 struct device *dev = &pdev->dev;
651 struct net_device *ndev;
652 struct xge_pdata *pdata;
653 int ret;
654
655 ndev = alloc_etherdev(sizeof(struct xge_pdata));
656 if (!ndev)
657 return -ENOMEM;
658
659 pdata = netdev_priv(ndev);
660
661 pdata->pdev = pdev;
662 pdata->ndev = ndev;
663 SET_NETDEV_DEV(ndev, dev);
664 platform_set_drvdata(pdev, pdata);
665 ndev->netdev_ops = &xgene_ndev_ops;
666
667 ndev->features |= NETIF_F_GSO |
668 NETIF_F_GRO;
669
670 ret = xge_get_resources(pdata);
671 if (ret)
672 goto err;
673
674 ndev->hw_features = ndev->features;
675
676 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
677 if (ret) {
678 netdev_err(ndev, "No usable DMA configuration\n");
679 goto err;
680 }
681
682 ret = xge_init_hw(ndev);
683 if (ret)
684 goto err;
685
686 netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);
687
688 netif_carrier_off(ndev);
689 ret = register_netdev(ndev);
690 if (ret) {
691 netdev_err(ndev, "Failed to register netdev\n");
692 goto err;
693 }
694
695 return 0;
696
697err:
698 free_netdev(ndev);
699
700 return ret;
701}
702
703static int xge_remove(struct platform_device *pdev)
704{
705 struct xge_pdata *pdata;
706 struct net_device *ndev;
707
708 pdata = platform_get_drvdata(pdev);
709 ndev = pdata->ndev;
710
711 rtnl_lock();
712 if (netif_running(ndev))
713 dev_close(ndev);
714 rtnl_unlock();
715
716 unregister_netdev(ndev);
717 free_netdev(ndev);
718
719 return 0;
720}
721
722static void xge_shutdown(struct platform_device *pdev)
723{
724 struct xge_pdata *pdata;
725
726 pdata = platform_get_drvdata(pdev);
727 if (!pdata)
728 return;
729
730 if (!pdata->ndev)
731 return;
732
733 xge_remove(pdev);
734}
735
736static const struct acpi_device_id xge_acpi_match[] = {
737 { "APMC0D80" },
738 { }
739};
740MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
741
742static struct platform_driver xge_driver = {
743 .driver = {
744 .name = "xgene-enet-v2",
745 .acpi_match_table = ACPI_PTR(xge_acpi_match),
746 },
747 .probe = xge_probe,
748 .remove = xge_remove,
749 .shutdown = xge_shutdown,
750};
751module_platform_driver(xge_driver);
752
753MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
754MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
755MODULE_VERSION(XGENE_ENET_V2_VERSION);
756MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.h b/drivers/net/ethernet/apm/xgene-v2/main.h
new file mode 100644
index 000000000000..ada7b0e82586
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/main.h
@@ -0,0 +1,75 @@
1/*
2 * Applied Micro X-Gene SoC Ethernet v2 Driver
3 *
4 * Copyright (c) 2017, Applied Micro Circuits Corporation
5 * Author(s): Iyappan Subramanian <isubramanian@apm.com>
6 * Keyur Chudgar <kchudgar@apm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#ifndef __XGENE_ENET_V2_MAIN_H__
23#define __XGENE_ENET_V2_MAIN_H__
24
25#include <linux/acpi.h>
26#include <linux/clk.h>
27#include <linux/efi.h>
28#include <linux/if_vlan.h>
29#include <linux/irq.h>
30#include <linux/io.h>
31#include <linux/module.h>
32#include <linux/of_platform.h>
33#include <linux/of_net.h>
34#include <linux/of_mdio.h>
35#include <linux/prefetch.h>
36#include <linux/phy.h>
37#include <net/ip.h>
38#include "mac.h"
39#include "enet.h"
40#include "ring.h"
41
42#define XGENE_ENET_V2_VERSION "v1.0"
43#define XGENE_ENET_STD_MTU 1536
44#define XGENE_ENET_MIN_FRAME 60
45#define IRQ_ID_SIZE 16
46
47struct xge_resource {
48 void __iomem *base_addr;
49 int phy_mode;
50 u32 irq;
51};
52
53struct xge_stats {
54 u64 tx_packets;
55 u64 tx_bytes;
56 u64 rx_packets;
57 u64 rx_bytes;
58 u64 rx_errors;
59};
60
61/* ethernet private data */
62struct xge_pdata {
63 struct xge_resource resources;
64 struct xge_desc_ring *tx_ring;
65 struct xge_desc_ring *rx_ring;
66 struct platform_device *pdev;
67 char irq_name[IRQ_ID_SIZE];
68 struct net_device *ndev;
69 struct napi_struct napi;
70 struct xge_stats stats;
71 int phy_speed;
72 u8 nbufs;
73};
74
75#endif /* __XGENE_ENET_V2_MAIN_H__ */
diff --git a/drivers/net/ethernet/apm/xgene-v2/ring.c b/drivers/net/ethernet/apm/xgene-v2/ring.c
new file mode 100644
index 000000000000..38810828f8f0
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/ring.c
@@ -0,0 +1,81 @@
1/*
2 * Applied Micro X-Gene SoC Ethernet v2 Driver
3 *
4 * Copyright (c) 2017, Applied Micro Circuits Corporation
5 * Author(s): Iyappan Subramanian <isubramanian@apm.com>
6 * Keyur Chudgar <kchudgar@apm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#include "main.h"
23
24/* create circular linked list of descriptors */
25void xge_setup_desc(struct xge_desc_ring *ring)
26{
27 struct xge_raw_desc *raw_desc;
28 dma_addr_t dma_h, next_dma;
29 u16 offset;
30 int i;
31
32 for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
33 raw_desc = &ring->raw_desc[i];
34
35 offset = (i + 1) & (XGENE_ENET_NUM_DESC - 1);
36 next_dma = ring->dma_addr + (offset * XGENE_ENET_DESC_SIZE);
37
38 raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
39 SET_BITS(PKT_SIZE, SLOT_EMPTY));
40 dma_h = upper_32_bits(next_dma);
41 raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, next_dma) |
42 SET_BITS(NEXT_DESC_ADDRH, dma_h));
43 }
44}
45
46void xge_update_tx_desc_addr(struct xge_pdata *pdata)
47{
48 struct xge_desc_ring *ring = pdata->tx_ring;
49 dma_addr_t dma_addr = ring->dma_addr;
50
51 xge_wr_csr(pdata, DMATXDESCL, dma_addr);
52 xge_wr_csr(pdata, DMATXDESCH, upper_32_bits(dma_addr));
53
54 ring->head = 0;
55 ring->tail = 0;
56}
57
58void xge_update_rx_desc_addr(struct xge_pdata *pdata)
59{
60 struct xge_desc_ring *ring = pdata->rx_ring;
61 dma_addr_t dma_addr = ring->dma_addr;
62
63 xge_wr_csr(pdata, DMARXDESCL, dma_addr);
64 xge_wr_csr(pdata, DMARXDESCH, upper_32_bits(dma_addr));
65
66 ring->head = 0;
67 ring->tail = 0;
68}
69
70void xge_intr_enable(struct xge_pdata *pdata)
71{
72 u32 data;
73
74 data = RX_PKT_RCVD | TX_PKT_SENT;
75 xge_wr_csr(pdata, DMAINTRMASK, data);
76}
77
78void xge_intr_disable(struct xge_pdata *pdata)
79{
80 xge_wr_csr(pdata, DMAINTRMASK, 0);
81}
diff --git a/drivers/net/ethernet/apm/xgene-v2/ring.h b/drivers/net/ethernet/apm/xgene-v2/ring.h
new file mode 100644
index 000000000000..abc8c9a84954
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/ring.h
@@ -0,0 +1,119 @@
1/*
2 * Applied Micro X-Gene SoC Ethernet v2 Driver
3 *
4 * Copyright (c) 2017, Applied Micro Circuits Corporation
5 * Author(s): Iyappan Subramanian <isubramanian@apm.com>
6 * Keyur Chudgar <kchudgar@apm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#ifndef __XGENE_ENET_V2_RING_H__
23#define __XGENE_ENET_V2_RING_H__
24
25#define XGENE_ENET_DESC_SIZE 64
26#define XGENE_ENET_NUM_DESC 256
27#define NUM_BUFS 8
28#define SLOT_EMPTY 0xfff
29
30#define DMATXCTRL 0xa180
31#define DMATXDESCL 0xa184
32#define DMATXDESCH 0xa1a0
33#define DMATXSTATUS 0xa188
34#define DMARXCTRL 0xa18c
35#define DMARXDESCL 0xa190
36#define DMARXDESCH 0xa1a4
37#define DMARXSTATUS 0xa194
38#define DMAINTRMASK 0xa198
39#define DMAINTERRUPT 0xa19c
40
41#define D_POS 62
42#define D_LEN 2
43#define E_POS 63
44#define E_LEN 1
45#define PKT_ADDRL_POS 0
46#define PKT_ADDRL_LEN 32
47#define PKT_ADDRH_POS 32
48#define PKT_ADDRH_LEN 10
49#define PKT_SIZE_POS 32
50#define PKT_SIZE_LEN 12
51#define NEXT_DESC_ADDRL_POS 0
52#define NEXT_DESC_ADDRL_LEN 32
53#define NEXT_DESC_ADDRH_POS 48
54#define NEXT_DESC_ADDRH_LEN 10
55
56#define TXPKTCOUNT_POS 16
57#define TXPKTCOUNT_LEN 8
58#define RXPKTCOUNT_POS 16
59#define RXPKTCOUNT_LEN 8
60
61#define TX_PKT_SENT BIT(0)
62#define TX_BUS_ERROR BIT(3)
63#define RX_PKT_RCVD BIT(4)
64#define RX_BUS_ERROR BIT(7)
65#define RXSTATUS_RXPKTRCVD BIT(0)
66
67struct xge_raw_desc {
68 __le64 m0;
69 __le64 m1;
70 __le64 m2;
71 __le64 m3;
72 __le64 m4;
73 __le64 m5;
74 __le64 m6;
75 __le64 m7;
76};
77
78struct pkt_info {
79 struct sk_buff *skb;
80 dma_addr_t dma_addr;
81 void *pkt_buf;
82};
83
84/* software context of a descriptor ring */
85struct xge_desc_ring {
86 struct net_device *ndev;
87 dma_addr_t dma_addr;
88 u8 head;
89 u8 tail;
90 union {
91 void *desc_addr;
92 struct xge_raw_desc *raw_desc;
93 };
94 struct pkt_info (*pkt_info);
95};
96
97static inline u64 xge_set_desc_bits(int pos, int len, u64 val)
98{
99 return (val & ((1ULL << len) - 1)) << pos;
100}
101
102static inline u64 xge_get_desc_bits(int pos, int len, u64 src)
103{
104 return (src >> pos) & ((1ULL << len) - 1);
105}
106
107#define SET_BITS(field, val) \
108 xge_set_desc_bits(field ## _POS, field ## _LEN, val)
109
110#define GET_BITS(field, src) \
111 xge_get_desc_bits(field ## _POS, field ## _LEN, src)
112
113void xge_setup_desc(struct xge_desc_ring *ring);
114void xge_update_tx_desc_addr(struct xge_pdata *pdata);
115void xge_update_rx_desc_addr(struct xge_pdata *pdata);
116void xge_intr_enable(struct xge_pdata *pdata);
117void xge_intr_disable(struct xge_pdata *pdata);
118
119#endif /* __XGENE_ENET_V2_RING_H__ */
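
SET_BITS()/GET_BITS() above play the same role for the 64-bit words of struct xge_raw_desc: the transmit path composes m0 from the low 32 bits of the buffer address, the packet size and the empty (E) flag. A standalone sketch of that packing, with made-up values:

/* standalone illustration of packing fields into a 64-bit descriptor word */
#include <stdint.h>
#include <stdio.h>

#define E_POS		63
#define E_LEN		1
#define PKT_ADDRL_POS	0
#define PKT_ADDRL_LEN	32
#define PKT_SIZE_POS	32
#define PKT_SIZE_LEN	12

static uint64_t set_desc_bits(int pos, int len, uint64_t val)
{
	return (val & ((1ULL << len) - 1)) << pos;
}

static uint64_t get_desc_bits(int pos, int len, uint64_t src)
{
	return (src >> pos) & ((1ULL << len) - 1);
}

#define SET_BITS(field, val)	set_desc_bits(field ## _POS, field ## _LEN, val)
#define GET_BITS(field, src)	get_desc_bits(field ## _POS, field ## _LEN, src)

int main(void)
{
	uint64_t dma_addr = 0x12345680;	/* example bus address */
	uint64_t m0;

	/* E=0 marks the slot as filled by software and ready for hardware */
	m0 = SET_BITS(PKT_ADDRL, dma_addr) |
	     SET_BITS(PKT_SIZE, 64) |
	     SET_BITS(E, 0);

	printf("m0 = 0x%016llx, size = %llu, empty = %llu\n",
	       (unsigned long long)m0,
	       (unsigned long long)GET_BITS(PKT_SIZE, m0),
	       (unsigned long long)GET_BITS(E, m0));
	return 0;
}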
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 06e681697c17..2a835e07adfb 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -494,7 +494,7 @@ static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
494 break; 494 break;
495 } 495 }
496 496
497 mc2 |= FULL_DUPLEX2 | PAD_CRC; 497 mc2 |= FULL_DUPLEX2 | PAD_CRC | LENGTH_CHK;
498 xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2); 498 xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
499 xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl); 499 xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
500 xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii); 500 xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
@@ -623,6 +623,7 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
623 xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb); 623 xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
624 cb |= CFG_CLE_BYPASS_EN0; 624 cb |= CFG_CLE_BYPASS_EN0;
625 CFG_CLE_IP_PROTOCOL0_SET(&cb, 3); 625 CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
626 CFG_CLE_IP_HDR_LEN_SET(&cb, 0);
626 xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb); 627 xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);
627 628
628 xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb); 629 xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 5f83037bb96b..d250bfe94d24 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -163,6 +163,7 @@ enum xgene_enet_rm {
163#define CFG_RXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 26, 3) 163#define CFG_RXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 26, 3)
164 164
165#define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2) 165#define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2)
166#define CFG_CLE_IP_HDR_LEN_SET(dst, val) xgene_set_bits(dst, val, 8, 5)
166#define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12) 167#define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12)
167#define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4) 168#define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4)
168#define CFG_CLE_NXTFPSEL0_SET(dst, val) xgene_set_bits(dst, val, 20, 4) 169#define CFG_CLE_NXTFPSEL0_SET(dst, val) xgene_set_bits(dst, val, 20, 4)
@@ -215,6 +216,7 @@ enum xgene_enet_rm {
215#define ENET_GHD_MODE BIT(26) 216#define ENET_GHD_MODE BIT(26)
216#define FULL_DUPLEX2 BIT(0) 217#define FULL_DUPLEX2 BIT(0)
217#define PAD_CRC BIT(2) 218#define PAD_CRC BIT(2)
219#define LENGTH_CHK BIT(4)
218#define SCAN_AUTO_INCR BIT(5) 220#define SCAN_AUTO_INCR BIT(5)
219#define TBYT_ADDR 0x38 221#define TBYT_ADDR 0x38
220#define TPKT_ADDR 0x39 222#define TPKT_ADDR 0x39
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index b3568c453b14..5f37ed3506d5 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -601,14 +601,24 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
601 return NETDEV_TX_OK; 601 return NETDEV_TX_OK;
602} 602}
603 603
604static void xgene_enet_skip_csum(struct sk_buff *skb) 604static void xgene_enet_rx_csum(struct sk_buff *skb)
605{ 605{
606 struct net_device *ndev = skb->dev;
606 struct iphdr *iph = ip_hdr(skb); 607 struct iphdr *iph = ip_hdr(skb);
607 608
608 if (!ip_is_fragment(iph) || 609 if (!(ndev->features & NETIF_F_RXCSUM))
609 (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) { 610 return;
610 skb->ip_summed = CHECKSUM_UNNECESSARY; 611
611 } 612 if (skb->protocol != htons(ETH_P_IP))
613 return;
614
615 if (ip_is_fragment(iph))
616 return;
617
618 if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
619 return;
620
621 skb->ip_summed = CHECKSUM_UNNECESSARY;
612} 622}
613 623
614static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool, 624static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
@@ -648,12 +658,24 @@ static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
648 buf_pool->head = head; 658 buf_pool->head = head;
649} 659}
650 660
661/* Errata 10GE_8 and ENET_11 - allow packet with length <=64B */
662static bool xgene_enet_errata_10GE_8(struct sk_buff *skb, u32 len, u8 status)
663{
664 if (status == INGRESS_PKT_LEN && len == ETHER_MIN_PACKET) {
665 if (ntohs(eth_hdr(skb)->h_proto) < 46)
666 return true;
667 }
668
669 return false;
670}
671
651static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, 672static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
652 struct xgene_enet_raw_desc *raw_desc, 673 struct xgene_enet_raw_desc *raw_desc,
653 struct xgene_enet_raw_desc *exp_desc) 674 struct xgene_enet_raw_desc *exp_desc)
654{ 675{
655 struct xgene_enet_desc_ring *buf_pool, *page_pool; 676 struct xgene_enet_desc_ring *buf_pool, *page_pool;
656 u32 datalen, frag_size, skb_index; 677 u32 datalen, frag_size, skb_index;
678 struct xgene_enet_pdata *pdata;
657 struct net_device *ndev; 679 struct net_device *ndev;
658 dma_addr_t dma_addr; 680 dma_addr_t dma_addr;
659 struct sk_buff *skb; 681 struct sk_buff *skb;
@@ -666,6 +688,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
666 bool nv; 688 bool nv;
667 689
668 ndev = rx_ring->ndev; 690 ndev = rx_ring->ndev;
691 pdata = netdev_priv(ndev);
669 dev = ndev_to_dev(rx_ring->ndev); 692 dev = ndev_to_dev(rx_ring->ndev);
670 buf_pool = rx_ring->buf_pool; 693 buf_pool = rx_ring->buf_pool;
671 page_pool = rx_ring->page_pool; 694 page_pool = rx_ring->page_pool;
@@ -676,30 +699,29 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
676 skb = buf_pool->rx_skb[skb_index]; 699 skb = buf_pool->rx_skb[skb_index];
677 buf_pool->rx_skb[skb_index] = NULL; 700 buf_pool->rx_skb[skb_index] = NULL;
678 701
702 datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
703 skb_put(skb, datalen);
704 prefetch(skb->data - NET_IP_ALIGN);
705 skb->protocol = eth_type_trans(skb, ndev);
706
679 /* checking for error */ 707 /* checking for error */
680 status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) || 708 status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
681 GET_VAL(LERR, le64_to_cpu(raw_desc->m0)); 709 GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
682 if (unlikely(status > 2)) { 710 if (unlikely(status)) {
683 dev_kfree_skb_any(skb); 711 if (!xgene_enet_errata_10GE_8(skb, datalen, status)) {
684 xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc); 712 dev_kfree_skb_any(skb);
685 xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev), 713 xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
686 status); 714 xgene_enet_parse_error(rx_ring, pdata, status);
687 ret = -EIO; 715 goto out;
688 goto out; 716 }
689 } 717 }
690 718
691 /* strip off CRC as HW isn't doing this */
692 datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
693
694 nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0)); 719 nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
695 if (!nv) 720 if (!nv) {
721 /* strip off CRC as HW isn't doing this */
696 datalen -= 4; 722 datalen -= 4;
697
698 skb_put(skb, datalen);
699 prefetch(skb->data - NET_IP_ALIGN);
700
701 if (!nv)
702 goto skip_jumbo; 723 goto skip_jumbo;
724 }
703 725
704 slots = page_pool->slots - 1; 726 slots = page_pool->slots - 1;
705 head = page_pool->head; 727 head = page_pool->head;
@@ -728,11 +750,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
728 750
729skip_jumbo: 751skip_jumbo:
730 skb_checksum_none_assert(skb); 752 skb_checksum_none_assert(skb);
731 skb->protocol = eth_type_trans(skb, ndev); 753 xgene_enet_rx_csum(skb);
732 if (likely((ndev->features & NETIF_F_IP_CSUM) &&
733 skb->protocol == htons(ETH_P_IP))) {
734 xgene_enet_skip_csum(skb);
735 }
736 754
737 rx_ring->rx_packets++; 755 rx_ring->rx_packets++;
738 rx_ring->rx_bytes += datalen; 756 rx_ring->rx_bytes += datalen;
@@ -2039,7 +2057,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
2039 xgene_enet_setup_ops(pdata); 2057 xgene_enet_setup_ops(pdata);
2040 2058
2041 if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { 2059 if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
2042 ndev->features |= NETIF_F_TSO; 2060 ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM;
2043 spin_lock_init(&pdata->mss_lock); 2061 spin_lock_init(&pdata->mss_lock);
2044 } 2062 }
2045 ndev->hw_features = ndev->features; 2063 ndev->hw_features = ndev->features;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 52571741da9f..0d4be2425ebc 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -41,6 +41,7 @@
41#include "../../../phy/mdio-xgene.h" 41#include "../../../phy/mdio-xgene.h"
42 42
43#define XGENE_DRV_VERSION "v1.0" 43#define XGENE_DRV_VERSION "v1.0"
44#define ETHER_MIN_PACKET 64
44#define XGENE_ENET_STD_MTU 1536 45#define XGENE_ENET_STD_MTU 1536
45#define XGENE_ENET_MAX_MTU 9600 46#define XGENE_ENET_MAX_MTU 9600
46#define SKB_BUFFER_SIZE (XGENE_ENET_STD_MTU - NET_IP_ALIGN) 47#define SKB_BUFFER_SIZE (XGENE_ENET_STD_MTU - NET_IP_ALIGN)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index ece19e6d68e3..423240c97d39 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -341,8 +341,15 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
341 341
342 xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data); 342 xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
343 data |= CFG_RSIF_FPBUFF_TIMEOUT_EN; 343 data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
344 /* Errata 10GE_1 - FIFO threshold default value incorrect */
345 RSIF_CLE_BUFF_THRESH_SET(&data, XG_RSIF_CLE_BUFF_THRESH);
344 xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data); 346 xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data);
345 347
348 /* Errata 10GE_1 - FIFO threshold default value incorrect */
349 xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, &data);
350 RSIF_PLC_CLE_BUFF_THRESH_SET(&data, XG_RSIF_PLC_CLE_BUFF_THRESH);
351 xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, data);
352
346 xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data); 353 xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data);
347 data |= BIT(12); 354 data |= BIT(12);
348 xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data); 355 xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index 03b847ad8937..e644a429ebf4 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -65,6 +65,11 @@
65#define XG_DEF_PAUSE_THRES 0x390 65#define XG_DEF_PAUSE_THRES 0x390
66#define XG_DEF_PAUSE_OFF_THRES 0x2c0 66#define XG_DEF_PAUSE_OFF_THRES 0x2c0
67#define XG_RSIF_CONFIG_REG_ADDR 0x00a0 67#define XG_RSIF_CONFIG_REG_ADDR 0x00a0
68#define XG_RSIF_CLE_BUFF_THRESH 0x3
69#define RSIF_CLE_BUFF_THRESH_SET(dst, val) xgene_set_bits(dst, val, 0, 3)
70#define XG_RSIF_CONFIG1_REG_ADDR 0x00b8
71#define XG_RSIF_PLC_CLE_BUFF_THRESH 0x1
72#define RSIF_PLC_CLE_BUFF_THRESH_SET(dst, val) xgene_set_bits(dst, val, 0, 2)
68#define XCLE_BYPASS_REG0_ADDR 0x0160 73#define XCLE_BYPASS_REG0_ADDR 0x0160
69#define XCLE_BYPASS_REG1_ADDR 0x0164 74#define XCLE_BYPASS_REG1_ADDR 0x0164
70#define XG_CFG_BYPASS_ADDR 0x0204 75#define XG_CFG_BYPASS_ADDR 0x0204
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 940fb24bba21..96413808c726 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -109,7 +109,6 @@ config TIGON3
109 tristate "Broadcom Tigon3 support" 109 tristate "Broadcom Tigon3 support"
110 depends on PCI 110 depends on PCI
111 select PHYLIB 111 select PHYLIB
112 select HWMON
113 imply PTP_1588_CLOCK 112 imply PTP_1588_CLOCK
114 ---help--- 113 ---help---
115 This driver supports Broadcom Tigon3 based gigabit Ethernet cards. 114 This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
@@ -117,6 +116,13 @@ config TIGON3
117 To compile this driver as a module, choose M here: the module 116 To compile this driver as a module, choose M here: the module
118 will be called tg3. This is recommended. 117 will be called tg3. This is recommended.
119 118
119config TIGON3_HWMON
120 bool "Broadcom Tigon3 HWMON support"
121 default y
122 depends on TIGON3 && HWMON && !(TIGON3=y && HWMON=m)
123 ---help---
124 Say Y if you want to expose the thermal sensor on Tigon3 devices.
125
120config BNX2X 126config BNX2X
121 tristate "Broadcom NetXtremeII 10Gb support" 127 tristate "Broadcom NetXtremeII 10Gb support"
122 depends on PCI 128 depends on PCI
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index d59cfcc4c4d5..6322594ab260 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -11,6 +11,7 @@
11#include <linux/bcma/bcma.h> 11#include <linux/bcma/bcma.h>
12#include <linux/brcmphy.h> 12#include <linux/brcmphy.h>
13#include <linux/etherdevice.h> 13#include <linux/etherdevice.h>
14#include <linux/of_net.h>
14#include "bgmac.h" 15#include "bgmac.h"
15 16
16static inline bool bgmac_is_bcm4707_family(struct bcma_device *core) 17static inline bool bgmac_is_bcm4707_family(struct bcma_device *core)
@@ -114,7 +115,7 @@ static int bgmac_probe(struct bcma_device *core)
114 struct ssb_sprom *sprom = &core->bus->sprom; 115 struct ssb_sprom *sprom = &core->bus->sprom;
115 struct mii_bus *mii_bus; 116 struct mii_bus *mii_bus;
116 struct bgmac *bgmac; 117 struct bgmac *bgmac;
117 u8 *mac; 118 const u8 *mac = NULL;
118 int err; 119 int err;
119 120
120 bgmac = bgmac_alloc(&core->dev); 121 bgmac = bgmac_alloc(&core->dev);
@@ -127,21 +128,27 @@ static int bgmac_probe(struct bcma_device *core)
127 128
128 bcma_set_drvdata(core, bgmac); 129 bcma_set_drvdata(core, bgmac);
129 130
130 switch (core->core_unit) { 131 if (bgmac->dev->of_node)
131 case 0: 132 mac = of_get_mac_address(bgmac->dev->of_node);
132 mac = sprom->et0mac; 133
133 break; 134 /* If no MAC address assigned via device tree, check SPROM */
134 case 1: 135 if (!mac) {
135 mac = sprom->et1mac; 136 switch (core->core_unit) {
136 break; 137 case 0:
137 case 2: 138 mac = sprom->et0mac;
138 mac = sprom->et2mac; 139 break;
139 break; 140 case 1:
140 default: 141 mac = sprom->et1mac;
141 dev_err(bgmac->dev, "Unsupported core_unit %d\n", 142 break;
142 core->core_unit); 143 case 2:
143 err = -ENOTSUPP; 144 mac = sprom->et2mac;
144 goto err; 145 break;
146 default:
147 dev_err(bgmac->dev, "Unsupported core_unit %d\n",
148 core->core_unit);
149 err = -ENOTSUPP;
150 goto err;
151 }
145 } 152 }
146 153
147 ether_addr_copy(bgmac->net_dev->dev_addr, mac); 154 ether_addr_copy(bgmac->net_dev->dev_addr, mac);
@@ -192,36 +199,50 @@ static int bgmac_probe(struct bcma_device *core)
192 goto err1; 199 goto err1;
193 } 200 }
194 201
195 bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo & 202 bgmac->has_robosw = !!(sprom->boardflags_lo & BGMAC_BFL_ENETROBO);
196 BGMAC_BFL_ENETROBO);
197 if (bgmac->has_robosw) 203 if (bgmac->has_robosw)
198 dev_warn(bgmac->dev, "Support for Roboswitch not implemented\n"); 204 dev_warn(bgmac->dev, "Support for Roboswitch not implemented\n");
199 205
200 if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM) 206 if (sprom->boardflags_lo & BGMAC_BFL_ENETADM)
201 dev_warn(bgmac->dev, "Support for ADMtek ethernet switch not implemented\n"); 207 dev_warn(bgmac->dev, "Support for ADMtek ethernet switch not implemented\n");
202 208
203 /* Feature Flags */ 209 /* Feature Flags */
204 switch (core->bus->chipinfo.id) { 210 switch (ci->id) {
211 /* BCM 471X/535X family */
212 case BCMA_CHIP_ID_BCM4716:
213 bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
214 /* fallthrough */
215 case BCMA_CHIP_ID_BCM47162:
216 bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2;
217 bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
218 break;
205 case BCMA_CHIP_ID_BCM5357: 219 case BCMA_CHIP_ID_BCM5357:
220 case BCMA_CHIP_ID_BCM53572:
206 bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; 221 bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
207 bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; 222 bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
208 bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1; 223 bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
209 bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY; 224 bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
210 if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47186) { 225 if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
211 bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED; 226 ci->pkg == BCMA_PKG_ID_BCM47186) {
212 bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII; 227 bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
228 bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
213 } 229 }
214 if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM5358) 230 if (ci->pkg == BCMA_PKG_ID_BCM5358)
215 bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII; 231 bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
216 break; 232 break;
217 case BCMA_CHIP_ID_BCM53572: 233 case BCMA_CHIP_ID_BCM53573:
218 bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
219 bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; 234 bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
220 bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1; 235 bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
221 bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY; 236 if (ci->pkg == BCMA_PKG_ID_BCM47189)
222 if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47188) {
223 bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
224 bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED; 237 bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
238 if (core->core_unit == 0) {
239 bgmac->feature_flags |= BGMAC_FEAT_CC4_IF_SW_TYPE;
240 if (ci->pkg == BCMA_PKG_ID_BCM47189)
241 bgmac->feature_flags |=
242 BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII;
243 } else if (core->core_unit == 1) {
244 bgmac->feature_flags |= BGMAC_FEAT_IRQ_ID_OOB_6;
245 bgmac->feature_flags |= BGMAC_FEAT_CC7_IF_TYPE_RGMII;
225 } 246 }
226 break; 247 break;
227 case BCMA_CHIP_ID_BCM4749: 248 case BCMA_CHIP_ID_BCM4749:
@@ -229,18 +250,11 @@ static int bgmac_probe(struct bcma_device *core)
229 bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; 250 bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
230 bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1; 251 bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
231 bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY; 252 bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
232 if (core->bus->chipinfo.pkg == 10) { 253 if (ci->pkg == 10) {
233 bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII; 254 bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
234 bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED; 255 bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
235 } 256 }
236 break; 257 break;
237 case BCMA_CHIP_ID_BCM4716:
238 bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
239 /* fallthrough */
240 case BCMA_CHIP_ID_BCM47162:
241 bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2;
242 bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
243 break;
244 /* bcm4707_family */ 258 /* bcm4707_family */
245 case BCMA_CHIP_ID_BCM4707: 259 case BCMA_CHIP_ID_BCM4707:
246 case BCMA_CHIP_ID_BCM47094: 260 case BCMA_CHIP_ID_BCM47094:
@@ -249,21 +263,6 @@ static int bgmac_probe(struct bcma_device *core)
249 bgmac->feature_flags |= BGMAC_FEAT_NO_RESET; 263 bgmac->feature_flags |= BGMAC_FEAT_NO_RESET;
250 bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500; 264 bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500;
251 break; 265 break;
252 case BCMA_CHIP_ID_BCM53573:
253 bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
254 bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
255 if (ci->pkg == BCMA_PKG_ID_BCM47189)
256 bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
257 if (core->core_unit == 0) {
258 bgmac->feature_flags |= BGMAC_FEAT_CC4_IF_SW_TYPE;
259 if (ci->pkg == BCMA_PKG_ID_BCM47189)
260 bgmac->feature_flags |=
261 BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII;
262 } else if (core->core_unit == 1) {
263 bgmac->feature_flags |= BGMAC_FEAT_IRQ_ID_OOB_6;
264 bgmac->feature_flags |= BGMAC_FEAT_CC7_IF_TYPE_RGMII;
265 }
266 break;
267 default: 266 default:
268 bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; 267 bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
269 bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; 268 bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index da1b8b225eb9..73aca97a96bc 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -21,8 +21,12 @@
21#include <linux/of_net.h> 21#include <linux/of_net.h>
22#include "bgmac.h" 22#include "bgmac.h"
23 23
24#define NICPM_PADRING_CFG 0x00000004
24#define NICPM_IOMUX_CTRL 0x00000008 25#define NICPM_IOMUX_CTRL 0x00000008
25 26
27#define NICPM_PADRING_CFG_INIT_VAL 0x74000000
28#define NICPM_IOMUX_CTRL_INIT_VAL_AX 0x21880000
29
26#define NICPM_IOMUX_CTRL_INIT_VAL 0x3196e000 30#define NICPM_IOMUX_CTRL_INIT_VAL 0x3196e000
27#define NICPM_IOMUX_CTRL_SPD_SHIFT 10 31#define NICPM_IOMUX_CTRL_SPD_SHIFT 10
28#define NICPM_IOMUX_CTRL_SPD_10M 0 32#define NICPM_IOMUX_CTRL_SPD_10M 0
@@ -113,6 +117,10 @@ static void bgmac_nicpm_speed_set(struct net_device *net_dev)
113 if (!bgmac->plat.nicpm_base) 117 if (!bgmac->plat.nicpm_base)
114 return; 118 return;
115 119
120 /* SET RGMII IO CONFIG */
121 writel(NICPM_PADRING_CFG_INIT_VAL,
122 bgmac->plat.nicpm_base + NICPM_PADRING_CFG);
123
116 val = NICPM_IOMUX_CTRL_INIT_VAL; 124 val = NICPM_IOMUX_CTRL_INIT_VAL;
117 switch (bgmac->net_dev->phydev->speed) { 125 switch (bgmac->net_dev->phydev->speed) {
118 default: 126 default:
@@ -244,6 +252,31 @@ static int bgmac_remove(struct platform_device *pdev)
244 return 0; 252 return 0;
245} 253}
246 254
255#ifdef CONFIG_PM
256static int bgmac_suspend(struct device *dev)
257{
258 struct bgmac *bgmac = dev_get_drvdata(dev);
259
260 return bgmac_enet_suspend(bgmac);
261}
262
263static int bgmac_resume(struct device *dev)
264{
265 struct bgmac *bgmac = dev_get_drvdata(dev);
266
267 return bgmac_enet_resume(bgmac);
268}
269
270static const struct dev_pm_ops bgmac_pm_ops = {
271 .suspend = bgmac_suspend,
272 .resume = bgmac_resume
273};
274
275#define BGMAC_PM_OPS (&bgmac_pm_ops)
276#else
277#define BGMAC_PM_OPS NULL
278#endif /* CONFIG_PM */
279
247static const struct of_device_id bgmac_of_enet_match[] = { 280static const struct of_device_id bgmac_of_enet_match[] = {
248 {.compatible = "brcm,amac",}, 281 {.compatible = "brcm,amac",},
249 {.compatible = "brcm,nsp-amac",}, 282 {.compatible = "brcm,nsp-amac",},
@@ -257,6 +290,7 @@ static struct platform_driver bgmac_enet_driver = {
257 .driver = { 290 .driver = {
258 .name = "bgmac-enet", 291 .name = "bgmac-enet",
259 .of_match_table = bgmac_of_enet_match, 292 .of_match_table = bgmac_of_enet_match,
293 .pm = BGMAC_PM_OPS
260 }, 294 },
261 .probe = bgmac_probe, 295 .probe = bgmac_probe,
262 .remove = bgmac_remove, 296 .remove = bgmac_remove,
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index fd66fca00e01..e1a24ee6ab8b 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1480,6 +1480,7 @@ int bgmac_enet_probe(struct bgmac *bgmac)
1480 1480
1481 net_dev->irq = bgmac->irq; 1481 net_dev->irq = bgmac->irq;
1482 SET_NETDEV_DEV(net_dev, bgmac->dev); 1482 SET_NETDEV_DEV(net_dev, bgmac->dev);
1483 dev_set_drvdata(bgmac->dev, bgmac);
1483 1484
1484 if (!is_valid_ether_addr(net_dev->dev_addr)) { 1485 if (!is_valid_ether_addr(net_dev->dev_addr)) {
1485 dev_err(bgmac->dev, "Invalid MAC addr: %pM\n", 1486 dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
@@ -1552,5 +1553,55 @@ void bgmac_enet_remove(struct bgmac *bgmac)
1552} 1553}
1553EXPORT_SYMBOL_GPL(bgmac_enet_remove); 1554EXPORT_SYMBOL_GPL(bgmac_enet_remove);
1554 1555
1556int bgmac_enet_suspend(struct bgmac *bgmac)
1557{
1558 if (!netif_running(bgmac->net_dev))
1559 return 0;
1560
1561 phy_stop(bgmac->net_dev->phydev);
1562
1563 netif_stop_queue(bgmac->net_dev);
1564
1565 napi_disable(&bgmac->napi);
1566
1567 netif_tx_lock(bgmac->net_dev);
1568 netif_device_detach(bgmac->net_dev);
1569 netif_tx_unlock(bgmac->net_dev);
1570
1571 bgmac_chip_intrs_off(bgmac);
1572 bgmac_chip_reset(bgmac);
1573 bgmac_dma_cleanup(bgmac);
1574
1575 return 0;
1576}
1577EXPORT_SYMBOL_GPL(bgmac_enet_suspend);
1578
1579int bgmac_enet_resume(struct bgmac *bgmac)
1580{
1581 int rc;
1582
1583 if (!netif_running(bgmac->net_dev))
1584 return 0;
1585
1586 rc = bgmac_dma_init(bgmac);
1587 if (rc)
1588 return rc;
1589
1590 bgmac_chip_init(bgmac);
1591
1592 napi_enable(&bgmac->napi);
1593
1594 netif_tx_lock(bgmac->net_dev);
1595 netif_device_attach(bgmac->net_dev);
1596 netif_tx_unlock(bgmac->net_dev);
1597
1598 netif_start_queue(bgmac->net_dev);
1599
1600 phy_start(bgmac->net_dev->phydev);
1601
1602 return 0;
1603}
1604EXPORT_SYMBOL_GPL(bgmac_enet_resume);
1605
1555MODULE_AUTHOR("Rafał Miłecki"); 1606MODULE_AUTHOR("Rafał Miłecki");
1556MODULE_LICENSE("GPL"); 1607MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 6d1c6ff1ed96..c1818766c501 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -402,7 +402,7 @@
402 402
403#define BGMAC_WEIGHT 64 403#define BGMAC_WEIGHT 64
404 404
405#define ETHER_MAX_LEN 1518 405#define ETHER_MAX_LEN (ETH_FRAME_LEN + ETH_FCS_LEN)
406 406
407/* Feature Flags */ 407/* Feature Flags */
408#define BGMAC_FEAT_TX_MASK_SETUP BIT(0) 408#define BGMAC_FEAT_TX_MASK_SETUP BIT(0)
@@ -537,6 +537,8 @@ int bgmac_enet_probe(struct bgmac *bgmac);
537void bgmac_enet_remove(struct bgmac *bgmac); 537void bgmac_enet_remove(struct bgmac *bgmac);
538void bgmac_adjust_link(struct net_device *net_dev); 538void bgmac_adjust_link(struct net_device *net_dev);
539int bgmac_phy_connect_direct(struct bgmac *bgmac); 539int bgmac_phy_connect_direct(struct bgmac *bgmac);
540int bgmac_enet_suspend(struct bgmac *bgmac);
541int bgmac_enet_resume(struct bgmac *bgmac);
540 542
541struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac); 543struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac);
542void bcma_mdio_mii_unregister(struct mii_bus *mii_bus); 544void bcma_mdio_mii_unregister(struct mii_bus *mii_bus);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 9e8c06130c09..ad3e0631877e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4277,7 +4277,10 @@ int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
4277{ 4277{
4278 if (tc->type != TC_SETUP_MQPRIO) 4278 if (tc->type != TC_SETUP_MQPRIO)
4279 return -EINVAL; 4279 return -EINVAL;
4280 return bnx2x_setup_tc(dev, tc->tc); 4280
4281 tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4282
4283 return bnx2x_setup_tc(dev, tc->mqprio->num_tc);
4281} 4284}
4282 4285
4283/* called with rtnl_lock */ 4286/* called with rtnl_lock */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 32de4589d16a..174ec8f84637 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6905,7 +6905,9 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
6905 if (ntc->type != TC_SETUP_MQPRIO) 6905 if (ntc->type != TC_SETUP_MQPRIO)
6906 return -EINVAL; 6906 return -EINVAL;
6907 6907
6908 return bnxt_setup_mq_tc(dev, ntc->tc); 6908 ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
6909
6910 return bnxt_setup_mq_tc(dev, ntc->mqprio->num_tc);
6909} 6911}
6910 6912
6911#ifdef CONFIG_RFS_ACCEL 6913#ifdef CONFIG_RFS_ACCEL
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 69015fa50f20..f493276d432a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -621,7 +621,7 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
621 621
622 /* GENET TDMA hardware does not support a configurable timeout, but will 622 /* GENET TDMA hardware does not support a configurable timeout, but will
623 * always generate an interrupt either after MBDONE packets have been 623 * always generate an interrupt either after MBDONE packets have been
624 * transmitted, or when the ring is emtpy. 624 * transmitted, or when the ring is empty.
625 */ 625 */
626 if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high || 626 if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
627 ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low) 627 ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
@@ -707,6 +707,19 @@ struct bcmgenet_stats {
707 .reg_offset = offset, \ 707 .reg_offset = offset, \
708} 708}
709 709
710#define STAT_GENET_Q(num) \
711 STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
712 tx_rings[num].packets), \
713 STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
714 tx_rings[num].bytes), \
715 STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
716 rx_rings[num].bytes), \
717 STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
718 rx_rings[num].packets), \
719 STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
720 rx_rings[num].errors), \
721 STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
722 rx_rings[num].dropped)
710 723
711/* There is a 0xC gap between the end of RX and beginning of TX stats and then 724/* There is a 0xC gap between the end of RX and beginning of TX stats and then
712 * between the end of TX stats and the beginning of the RX RUNT 725 * between the end of TX stats and the beginning of the RX RUNT
@@ -801,6 +814,12 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
801 STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), 814 STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
802 STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed), 815 STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
803 STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed), 816 STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
817 /* Per TX queues */
818 STAT_GENET_Q(0),
819 STAT_GENET_Q(1),
820 STAT_GENET_Q(2),
821 STAT_GENET_Q(3),
822 STAT_GENET_Q(16),
804}; 823};
805 824
806#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) 825#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -1078,8 +1097,17 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
1078 /* Power down LED */ 1097 /* Power down LED */
1079 if (priv->hw_params->flags & GENET_HAS_EXT) { 1098 if (priv->hw_params->flags & GENET_HAS_EXT) {
1080 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); 1099 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
1081 reg |= (EXT_PWR_DOWN_PHY | 1100 if (GENET_IS_V5(priv))
1082 EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); 1101 reg |= EXT_PWR_DOWN_PHY_EN |
1102 EXT_PWR_DOWN_PHY_RD |
1103 EXT_PWR_DOWN_PHY_SD |
1104 EXT_PWR_DOWN_PHY_RX |
1105 EXT_PWR_DOWN_PHY_TX |
1106 EXT_IDDQ_GLBL_PWR;
1107 else
1108 reg |= EXT_PWR_DOWN_PHY;
1109
1110 reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
1083 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); 1111 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1084 1112
1085 bcmgenet_phy_power_set(priv->dev, false); 1113 bcmgenet_phy_power_set(priv->dev, false);
@@ -1104,12 +1132,34 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
1104 1132
1105 switch (mode) { 1133 switch (mode) {
1106 case GENET_POWER_PASSIVE: 1134 case GENET_POWER_PASSIVE:
1107 reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY | 1135 reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
1108 EXT_PWR_DOWN_BIAS); 1136 if (GENET_IS_V5(priv)) {
1109 /* fallthrough */ 1137 reg &= ~(EXT_PWR_DOWN_PHY_EN |
1138 EXT_PWR_DOWN_PHY_RD |
1139 EXT_PWR_DOWN_PHY_SD |
1140 EXT_PWR_DOWN_PHY_RX |
1141 EXT_PWR_DOWN_PHY_TX |
1142 EXT_IDDQ_GLBL_PWR);
1143 reg |= EXT_PHY_RESET;
1144 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1145 mdelay(1);
1146
1147 reg &= ~EXT_PHY_RESET;
1148 } else {
1149 reg &= ~EXT_PWR_DOWN_PHY;
1150 reg |= EXT_PWR_DN_EN_LD;
1151 }
1152 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1153 bcmgenet_phy_power_set(priv->dev, true);
1154 bcmgenet_mii_reset(priv->dev);
1155 break;
1156
1110 case GENET_POWER_CABLE_SENSE: 1157 case GENET_POWER_CABLE_SENSE:
1111 /* enable APD */ 1158 /* enable APD */
1112 reg |= EXT_PWR_DN_EN_LD; 1159 if (!GENET_IS_V5(priv)) {
1160 reg |= EXT_PWR_DN_EN_LD;
1161 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1162 }
1113 break; 1163 break;
1114 case GENET_POWER_WOL_MAGIC: 1164 case GENET_POWER_WOL_MAGIC:
1115 bcmgenet_wol_power_up_cfg(priv, mode); 1165 bcmgenet_wol_power_up_cfg(priv, mode);
@@ -1117,39 +1167,20 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
1117 default: 1167 default:
1118 break; 1168 break;
1119 } 1169 }
1120
1121 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1122 if (mode == GENET_POWER_PASSIVE) {
1123 bcmgenet_phy_power_set(priv->dev, true);
1124 bcmgenet_mii_reset(priv->dev);
1125 }
1126} 1170}
1127 1171
1128/* ioctl handle special commands that are not present in ethtool. */ 1172/* ioctl handle special commands that are not present in ethtool. */
1129static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1173static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1130{ 1174{
1131 struct bcmgenet_priv *priv = netdev_priv(dev); 1175 struct bcmgenet_priv *priv = netdev_priv(dev);
1132 int val = 0;
1133 1176
1134 if (!netif_running(dev)) 1177 if (!netif_running(dev))
1135 return -EINVAL; 1178 return -EINVAL;
1136 1179
1137 switch (cmd) { 1180 if (!priv->phydev)
1138 case SIOCGMIIPHY: 1181 return -ENODEV;
1139 case SIOCGMIIREG:
1140 case SIOCSMIIREG:
1141 if (!priv->phydev)
1142 val = -ENODEV;
1143 else
1144 val = phy_mii_ioctl(priv->phydev, rq, cmd);
1145 break;
1146
1147 default:
1148 val = -EINVAL;
1149 break;
1150 }
1151 1182
1152 return val; 1183 return phy_mii_ioctl(priv->phydev, rq, cmd);
1153} 1184}
1154 1185
1155static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, 1186static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
@@ -1240,14 +1271,18 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1240 unsigned int txbds_ready; 1271 unsigned int txbds_ready;
1241 unsigned int txbds_processed = 0; 1272 unsigned int txbds_processed = 0;
1242 1273
1243 /* Compute how many buffers are transmitted since last xmit call */ 1274 /* Clear status before servicing to reduce spurious interrupts */
1244 c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); 1275 if (ring->index == DESC_INDEX)
1245 c_index &= DMA_C_INDEX_MASK; 1276 bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
1246 1277 INTRL2_CPU_CLEAR);
1247 if (likely(c_index >= ring->c_index))
1248 txbds_ready = c_index - ring->c_index;
1249 else 1278 else
1250 txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index; 1279 bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
1280 INTRL2_CPU_CLEAR);
1281
1282 /* Compute how many buffers are transmitted since last xmit call */
1283 c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
1284 & DMA_C_INDEX_MASK;
1285 txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;
1251 1286
1252 netif_dbg(priv, tx_done, dev, 1287 netif_dbg(priv, tx_done, dev,
1253 "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", 1288 "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
@@ -1280,15 +1315,15 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1280 } 1315 }
1281 1316
1282 ring->free_bds += txbds_processed; 1317 ring->free_bds += txbds_processed;
1283 ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK; 1318 ring->c_index = c_index;
1284 1319
1285 dev->stats.tx_packets += pkts_compl; 1320 ring->packets += pkts_compl;
1286 dev->stats.tx_bytes += bytes_compl; 1321 ring->bytes += bytes_compl;
1287 1322
1288 netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue), 1323 netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
1289 pkts_compl, bytes_compl); 1324 pkts_compl, bytes_compl);
1290 1325
1291 return pkts_compl; 1326 return txbds_processed;
1292} 1327}
1293 1328
1294static unsigned int bcmgenet_tx_reclaim(struct net_device *dev, 1329static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
@@ -1657,18 +1692,28 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
1657 unsigned long dma_flag; 1692 unsigned long dma_flag;
1658 int len; 1693 int len;
1659 unsigned int rxpktprocessed = 0, rxpkttoprocess; 1694 unsigned int rxpktprocessed = 0, rxpkttoprocess;
1660 unsigned int p_index; 1695 unsigned int p_index, mask;
1661 unsigned int discards; 1696 unsigned int discards;
1662 unsigned int chksum_ok = 0; 1697 unsigned int chksum_ok = 0;
1663 1698
1699 /* Clear status before servicing to reduce spurious interrupts */
1700 if (ring->index == DESC_INDEX) {
1701 bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
1702 INTRL2_CPU_CLEAR);
1703 } else {
1704 mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
1705 bcmgenet_intrl2_1_writel(priv,
1706 mask,
1707 INTRL2_CPU_CLEAR);
1708 }
1709
1664 p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX); 1710 p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
1665 1711
1666 discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) & 1712 discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
1667 DMA_P_INDEX_DISCARD_CNT_MASK; 1713 DMA_P_INDEX_DISCARD_CNT_MASK;
1668 if (discards > ring->old_discards) { 1714 if (discards > ring->old_discards) {
1669 discards = discards - ring->old_discards; 1715 discards = discards - ring->old_discards;
1670 dev->stats.rx_missed_errors += discards; 1716 ring->errors += discards;
1671 dev->stats.rx_errors += discards;
1672 ring->old_discards += discards; 1717 ring->old_discards += discards;
1673 1718
1674 /* Clear HW register when we reach 75% of maximum 0xFFFF */ 1719 /* Clear HW register when we reach 75% of maximum 0xFFFF */
@@ -1680,12 +1725,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
1680 } 1725 }
1681 1726
1682 p_index &= DMA_P_INDEX_MASK; 1727 p_index &= DMA_P_INDEX_MASK;
1683 1728 rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
1684 if (likely(p_index >= ring->c_index))
1685 rxpkttoprocess = p_index - ring->c_index;
1686 else
1687 rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
1688 p_index;
1689 1729
1690 netif_dbg(priv, rx_status, dev, 1730 netif_dbg(priv, rx_status, dev,
1691 "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); 1731 "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
@@ -1696,7 +1736,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
1696 skb = bcmgenet_rx_refill(priv, cb); 1736 skb = bcmgenet_rx_refill(priv, cb);
1697 1737
1698 if (unlikely(!skb)) { 1738 if (unlikely(!skb)) {
1699 dev->stats.rx_dropped++; 1739 ring->dropped++;
1700 goto next; 1740 goto next;
1701 } 1741 }
1702 1742
@@ -1724,7 +1764,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
1724 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { 1764 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
1725 netif_err(priv, rx_status, dev, 1765 netif_err(priv, rx_status, dev,
1726 "dropping fragmented packet!\n"); 1766 "dropping fragmented packet!\n");
1727 dev->stats.rx_errors++; 1767 ring->errors++;
1728 dev_kfree_skb_any(skb); 1768 dev_kfree_skb_any(skb);
1729 goto next; 1769 goto next;
1730 } 1770 }
@@ -1773,8 +1813,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
1773 1813
1774 /*Finish setting up the received SKB and send it to the kernel*/ 1814 /*Finish setting up the received SKB and send it to the kernel*/
1775 skb->protocol = eth_type_trans(skb, priv->dev); 1815 skb->protocol = eth_type_trans(skb, priv->dev);
1776 dev->stats.rx_packets++; 1816 ring->packets++;
1777 dev->stats.rx_bytes += len; 1817 ring->bytes += len;
1778 if (dma_flag & DMA_RX_MULT) 1818 if (dma_flag & DMA_RX_MULT)
1779 dev->stats.multicast++; 1819 dev->stats.multicast++;
1780 1820
@@ -1912,10 +1952,8 @@ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
1912 /* Mask all interrupts.*/ 1952 /* Mask all interrupts.*/
1913 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); 1953 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
1914 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); 1954 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
1915 bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1916 bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); 1955 bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
1917 bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); 1956 bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
1918 bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1919} 1957}
1920 1958
1921static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) 1959static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
@@ -1942,8 +1980,6 @@ static int init_umac(struct bcmgenet_priv *priv)
1942 int ret; 1980 int ret;
1943 u32 reg; 1981 u32 reg;
1944 u32 int0_enable = 0; 1982 u32 int0_enable = 0;
1945 u32 int1_enable = 0;
1946 int i;
1947 1983
1948 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); 1984 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
1949 1985
@@ -1970,12 +2006,6 @@ static int init_umac(struct bcmgenet_priv *priv)
1970 2006
1971 bcmgenet_intr_disable(priv); 2007 bcmgenet_intr_disable(priv);
1972 2008
1973 /* Enable Rx default queue 16 interrupts */
1974 int0_enable |= UMAC_IRQ_RXDMA_DONE;
1975
1976 /* Enable Tx default queue 16 interrupts */
1977 int0_enable |= UMAC_IRQ_TXDMA_DONE;
1978
1979 /* Configure backpressure vectors for MoCA */ 2009 /* Configure backpressure vectors for MoCA */
1980 if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { 2010 if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
1981 reg = bcmgenet_bp_mc_get(priv); 2011 reg = bcmgenet_bp_mc_get(priv);
@@ -1993,18 +2023,8 @@ static int init_umac(struct bcmgenet_priv *priv)
1993 if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) 2023 if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
1994 int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); 2024 int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
1995 2025
1996 /* Enable Rx priority queue interrupts */
1997 for (i = 0; i < priv->hw_params->rx_queues; ++i)
1998 int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
1999
2000 /* Enable Tx priority queue interrupts */
2001 for (i = 0; i < priv->hw_params->tx_queues; ++i)
2002 int1_enable |= (1 << i);
2003
2004 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); 2026 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
2005 bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
2006 2027
2007 /* Enable rx/tx engine.*/
2008 dev_dbg(kdev, "done init umac\n"); 2028 dev_dbg(kdev, "done init umac\n");
2009 2029
2010 return 0; 2030 return 0;
@@ -2136,22 +2156,33 @@ static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
2136static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) 2156static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
2137{ 2157{
2138 unsigned int i; 2158 unsigned int i;
2159 u32 int0_enable = UMAC_IRQ_TXDMA_DONE;
2160 u32 int1_enable = 0;
2139 struct bcmgenet_tx_ring *ring; 2161 struct bcmgenet_tx_ring *ring;
2140 2162
2141 for (i = 0; i < priv->hw_params->tx_queues; ++i) { 2163 for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2142 ring = &priv->tx_rings[i]; 2164 ring = &priv->tx_rings[i];
2143 napi_enable(&ring->napi); 2165 napi_enable(&ring->napi);
2166 int1_enable |= (1 << i);
2144 } 2167 }
2145 2168
2146 ring = &priv->tx_rings[DESC_INDEX]; 2169 ring = &priv->tx_rings[DESC_INDEX];
2147 napi_enable(&ring->napi); 2170 napi_enable(&ring->napi);
2171
2172 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
2173 bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
2148} 2174}
2149 2175
2150static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) 2176static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
2151{ 2177{
2152 unsigned int i; 2178 unsigned int i;
2179 u32 int0_disable = UMAC_IRQ_TXDMA_DONE;
2180 u32 int1_disable = 0xffff;
2153 struct bcmgenet_tx_ring *ring; 2181 struct bcmgenet_tx_ring *ring;
2154 2182
2183 bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
2184 bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
2185
2155 for (i = 0; i < priv->hw_params->tx_queues; ++i) { 2186 for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2156 ring = &priv->tx_rings[i]; 2187 ring = &priv->tx_rings[i];
2157 napi_disable(&ring->napi); 2188 napi_disable(&ring->napi);
@@ -2264,22 +2295,33 @@ static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
2264static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) 2295static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
2265{ 2296{
2266 unsigned int i; 2297 unsigned int i;
2298 u32 int0_enable = UMAC_IRQ_RXDMA_DONE;
2299 u32 int1_enable = 0;
2267 struct bcmgenet_rx_ring *ring; 2300 struct bcmgenet_rx_ring *ring;
2268 2301
2269 for (i = 0; i < priv->hw_params->rx_queues; ++i) { 2302 for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2270 ring = &priv->rx_rings[i]; 2303 ring = &priv->rx_rings[i];
2271 napi_enable(&ring->napi); 2304 napi_enable(&ring->napi);
2305 int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
2272 } 2306 }
2273 2307
2274 ring = &priv->rx_rings[DESC_INDEX]; 2308 ring = &priv->rx_rings[DESC_INDEX];
2275 napi_enable(&ring->napi); 2309 napi_enable(&ring->napi);
2310
2311 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
2312 bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
2276} 2313}
2277 2314
2278static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) 2315static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
2279{ 2316{
2280 unsigned int i; 2317 unsigned int i;
2318 u32 int0_disable = UMAC_IRQ_RXDMA_DONE;
2319 u32 int1_disable = 0xffff << UMAC_IRQ1_RX_INTR_SHIFT;
2281 struct bcmgenet_rx_ring *ring; 2320 struct bcmgenet_rx_ring *ring;
2282 2321
2322 bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
2323 bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
2324
2283 for (i = 0; i < priv->hw_params->rx_queues; ++i) { 2325 for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2284 ring = &priv->rx_rings[i]; 2326 ring = &priv->rx_rings[i];
2285 napi_disable(&ring->napi); 2327 napi_disable(&ring->napi);
@@ -2634,6 +2676,15 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
2634 } 2676 }
2635 } 2677 }
2636 2678
2679 if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
2680 UMAC_IRQ_PHY_DET_F |
2681 UMAC_IRQ_LINK_EVENT |
2682 UMAC_IRQ_HFB_SM |
2683 UMAC_IRQ_HFB_MM)) {
2684 /* all other interested interrupts handled in bottom half */
2685 schedule_work(&priv->bcmgenet_irq_work);
2686 }
2687
2637 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && 2688 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
2638 status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { 2689 status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
2639 wake_up(&priv->wq); 2690 wake_up(&priv->wq);
@@ -2921,7 +2972,7 @@ static int bcmgenet_close(struct net_device *dev)
2921 if (ret) 2972 if (ret)
2922 return ret; 2973 return ret;
2923 2974
2924 /* Disable MAC transmit. TX DMA disabled have to done before this */ 2975 /* Disable MAC transmit. TX DMA disabled must be done before this */
2925 umac_enable_set(priv, CMD_TX_EN, false); 2976 umac_enable_set(priv, CMD_TX_EN, false);
2926 2977
2927 /* tx reclaim */ 2978 /* tx reclaim */
@@ -3101,6 +3152,48 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
3101 return 0; 3152 return 0;
3102} 3153}
3103 3154
3155static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
3156{
3157 struct bcmgenet_priv *priv = netdev_priv(dev);
3158 unsigned long tx_bytes = 0, tx_packets = 0;
3159 unsigned long rx_bytes = 0, rx_packets = 0;
3160 unsigned long rx_errors = 0, rx_dropped = 0;
3161 struct bcmgenet_tx_ring *tx_ring;
3162 struct bcmgenet_rx_ring *rx_ring;
3163 unsigned int q;
3164
3165 for (q = 0; q < priv->hw_params->tx_queues; q++) {
3166 tx_ring = &priv->tx_rings[q];
3167 tx_bytes += tx_ring->bytes;
3168 tx_packets += tx_ring->packets;
3169 }
3170 tx_ring = &priv->tx_rings[DESC_INDEX];
3171 tx_bytes += tx_ring->bytes;
3172 tx_packets += tx_ring->packets;
3173
3174 for (q = 0; q < priv->hw_params->rx_queues; q++) {
3175 rx_ring = &priv->rx_rings[q];
3176
3177 rx_bytes += rx_ring->bytes;
3178 rx_packets += rx_ring->packets;
3179 rx_errors += rx_ring->errors;
3180 rx_dropped += rx_ring->dropped;
3181 }
3182 rx_ring = &priv->rx_rings[DESC_INDEX];
3183 rx_bytes += rx_ring->bytes;
3184 rx_packets += rx_ring->packets;
3185 rx_errors += rx_ring->errors;
3186 rx_dropped += rx_ring->dropped;
3187
3188 dev->stats.tx_bytes = tx_bytes;
3189 dev->stats.tx_packets = tx_packets;
3190 dev->stats.rx_bytes = rx_bytes;
3191 dev->stats.rx_packets = rx_packets;
3192 dev->stats.rx_errors = rx_errors;
3193 dev->stats.rx_missed_errors = rx_errors;
3194 return &dev->stats;
3195}
3196
3104static const struct net_device_ops bcmgenet_netdev_ops = { 3197static const struct net_device_ops bcmgenet_netdev_ops = {
3105 .ndo_open = bcmgenet_open, 3198 .ndo_open = bcmgenet_open,
3106 .ndo_stop = bcmgenet_close, 3199 .ndo_stop = bcmgenet_close,
@@ -3113,6 +3206,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
3113#ifdef CONFIG_NET_POLL_CONTROLLER 3206#ifdef CONFIG_NET_POLL_CONTROLLER
3114 .ndo_poll_controller = bcmgenet_poll_controller, 3207 .ndo_poll_controller = bcmgenet_poll_controller,
3115#endif 3208#endif
3209 .ndo_get_stats = bcmgenet_get_stats,
3116}; 3210};
3117 3211
3118/* Array of GENET hardware parameters/characteristics */ 3212/* Array of GENET hardware parameters/characteristics */
@@ -3186,6 +3280,25 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
3186 .flags = GENET_HAS_40BITS | GENET_HAS_EXT | 3280 .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
3187 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, 3281 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
3188 }, 3282 },
3283 [GENET_V5] = {
3284 .tx_queues = 4,
3285 .tx_bds_per_q = 32,
3286 .rx_queues = 0,
3287 .rx_bds_per_q = 0,
3288 .bp_in_en_shift = 17,
3289 .bp_in_mask = 0x1ffff,
3290 .hfb_filter_cnt = 48,
3291 .hfb_filter_size = 128,
3292 .qtag_mask = 0x3F,
3293 .tbuf_offset = 0x0600,
3294 .hfb_offset = 0x8000,
3295 .hfb_reg_offset = 0xfc00,
3296 .rdma_offset = 0x2000,
3297 .tdma_offset = 0x4000,
3298 .words_per_bd = 3,
3299 .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
3300 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
3301 },
3189}; 3302};
3190 3303
3191/* Infer hardware parameters from the detected GENET version */ 3304/* Infer hardware parameters from the detected GENET version */
@@ -3196,26 +3309,22 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
3196 u8 major; 3309 u8 major;
3197 u16 gphy_rev; 3310 u16 gphy_rev;
3198 3311
3199 if (GENET_IS_V4(priv)) { 3312 if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
3200 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; 3313 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3201 genet_dma_ring_regs = genet_dma_ring_regs_v4; 3314 genet_dma_ring_regs = genet_dma_ring_regs_v4;
3202 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; 3315 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
3203 priv->version = GENET_V4;
3204 } else if (GENET_IS_V3(priv)) { 3316 } else if (GENET_IS_V3(priv)) {
3205 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; 3317 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3206 genet_dma_ring_regs = genet_dma_ring_regs_v123; 3318 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3207 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; 3319 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
3208 priv->version = GENET_V3;
3209 } else if (GENET_IS_V2(priv)) { 3320 } else if (GENET_IS_V2(priv)) {
3210 bcmgenet_dma_regs = bcmgenet_dma_regs_v2; 3321 bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
3211 genet_dma_ring_regs = genet_dma_ring_regs_v123; 3322 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3212 priv->dma_rx_chk_bit = DMA_RX_CHK_V12; 3323 priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
3213 priv->version = GENET_V2;
3214 } else if (GENET_IS_V1(priv)) { 3324 } else if (GENET_IS_V1(priv)) {
3215 bcmgenet_dma_regs = bcmgenet_dma_regs_v1; 3325 bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
3216 genet_dma_ring_regs = genet_dma_ring_regs_v123; 3326 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3217 priv->dma_rx_chk_bit = DMA_RX_CHK_V12; 3327 priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
3218 priv->version = GENET_V1;
3219 } 3328 }
3220 3329
3221 /* enum genet_version starts at 1 */ 3330 /* enum genet_version starts at 1 */
@@ -3225,7 +3334,9 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
3225 /* Read GENET HW version */ 3334 /* Read GENET HW version */
3226 reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); 3335 reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
3227 major = (reg >> 24 & 0x0f); 3336 major = (reg >> 24 & 0x0f);
3228 if (major == 5) 3337 if (major == 6)
3338 major = 5;
3339 else if (major == 5)
3229 major = 4; 3340 major = 4;
3230 else if (major == 0) 3341 else if (major == 0)
3231 major = 1; 3342 major = 1;
@@ -3253,19 +3364,25 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
3253 */ 3364 */
3254 gphy_rev = reg & 0xffff; 3365 gphy_rev = reg & 0xffff;
3255 3366
3367 if (GENET_IS_V5(priv)) {
3368 /* The EPHY revision should come from the MDIO registers of
3369 * the PHY not from GENET.
3370 */
3371 if (gphy_rev != 0) {
3372 pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
3373 gphy_rev);
3374 }
3256 /* This is reserved so should require special treatment */ 3375 /* This is reserved so should require special treatment */
3257 if (gphy_rev == 0 || gphy_rev == 0x01ff) { 3376 } else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
3258 pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev); 3377 pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
3259 return; 3378 return;
3260 }
3261
3262 /* This is the good old scheme, just GPHY major, no minor nor patch */ 3379 /* This is the good old scheme, just GPHY major, no minor nor patch */
3263 if ((gphy_rev & 0xf0) != 0) 3380 } else if ((gphy_rev & 0xf0) != 0) {
3264 priv->gphy_rev = gphy_rev << 8; 3381 priv->gphy_rev = gphy_rev << 8;
3265
3266 /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */ 3382 /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
3267 else if ((gphy_rev & 0xff00) != 0) 3383 } else if ((gphy_rev & 0xff00) != 0) {
3268 priv->gphy_rev = gphy_rev; 3384 priv->gphy_rev = gphy_rev;
3385 }
3269 3386
3270#ifdef CONFIG_PHYS_ADDR_T_64BIT 3387#ifdef CONFIG_PHYS_ADDR_T_64BIT
3271 if (!(params->flags & GENET_HAS_40BITS)) 3388 if (!(params->flags & GENET_HAS_40BITS))
@@ -3295,6 +3412,7 @@ static const struct of_device_id bcmgenet_match[] = {
3295 { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 }, 3412 { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
3296 { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 }, 3413 { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
3297 { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 }, 3414 { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
3415 { .compatible = "brcm,genet-v5", .data = (void *)GENET_V5 },
3298 { }, 3416 { },
3299}; 3417};
3300MODULE_DEVICE_TABLE(of, bcmgenet_match); 3418MODULE_DEVICE_TABLE(of, bcmgenet_match);
@@ -3492,7 +3610,7 @@ static int bcmgenet_suspend(struct device *d)
3492 if (ret) 3610 if (ret)
3493 return ret; 3611 return ret;
3494 3612
3495 /* Disable MAC transmit. TX DMA disabled have to done before this */ 3613 /* Disable MAC transmit. TX DMA disabled must be done before this */
3496 umac_enable_set(priv, CMD_TX_EN, false); 3614 umac_enable_set(priv, CMD_TX_EN, false);
3497 3615
3498 /* tx reclaim */ 3616 /* tx reclaim */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index db7f289d65ae..efd07020b89f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -355,8 +355,14 @@ struct bcmgenet_mib_counters {
355#define EXT_PWR_DN_EN_LD (1 << 3) 355#define EXT_PWR_DN_EN_LD (1 << 3)
356#define EXT_ENERGY_DET (1 << 4) 356#define EXT_ENERGY_DET (1 << 4)
357#define EXT_IDDQ_FROM_PHY (1 << 5) 357#define EXT_IDDQ_FROM_PHY (1 << 5)
358#define EXT_IDDQ_GLBL_PWR (1 << 7)
358#define EXT_PHY_RESET (1 << 8) 359#define EXT_PHY_RESET (1 << 8)
359#define EXT_ENERGY_DET_MASK (1 << 12) 360#define EXT_ENERGY_DET_MASK (1 << 12)
361#define EXT_PWR_DOWN_PHY_TX (1 << 16)
362#define EXT_PWR_DOWN_PHY_RX (1 << 17)
363#define EXT_PWR_DOWN_PHY_SD (1 << 18)
364#define EXT_PWR_DOWN_PHY_RD (1 << 19)
365#define EXT_PWR_DOWN_PHY_EN (1 << 20)
360 366
361#define EXT_RGMII_OOB_CTRL 0x0C 367#define EXT_RGMII_OOB_CTRL 0x0C
362#define RGMII_LINK (1 << 4) 368#define RGMII_LINK (1 << 4)
@@ -499,13 +505,15 @@ enum bcmgenet_version {
499 GENET_V1 = 1, 505 GENET_V1 = 1,
500 GENET_V2, 506 GENET_V2,
501 GENET_V3, 507 GENET_V3,
502 GENET_V4 508 GENET_V4,
509 GENET_V5
503}; 510};
504 511
505#define GENET_IS_V1(p) ((p)->version == GENET_V1) 512#define GENET_IS_V1(p) ((p)->version == GENET_V1)
506#define GENET_IS_V2(p) ((p)->version == GENET_V2) 513#define GENET_IS_V2(p) ((p)->version == GENET_V2)
507#define GENET_IS_V3(p) ((p)->version == GENET_V3) 514#define GENET_IS_V3(p) ((p)->version == GENET_V3)
508#define GENET_IS_V4(p) ((p)->version == GENET_V4) 515#define GENET_IS_V4(p) ((p)->version == GENET_V4)
516#define GENET_IS_V5(p) ((p)->version == GENET_V5)
509 517
510/* Hardware flags */ 518/* Hardware flags */
511#define GENET_HAS_40BITS (1 << 0) 519#define GENET_HAS_40BITS (1 << 0)
@@ -544,6 +552,8 @@ struct bcmgenet_skb_cb {
544struct bcmgenet_tx_ring { 552struct bcmgenet_tx_ring {
545 spinlock_t lock; /* ring lock */ 553 spinlock_t lock; /* ring lock */
546 struct napi_struct napi; /* NAPI per tx queue */ 554 struct napi_struct napi; /* NAPI per tx queue */
555 unsigned long packets;
556 unsigned long bytes;
547 unsigned int index; /* ring index */ 557 unsigned int index; /* ring index */
548 unsigned int queue; /* queue index */ 558 unsigned int queue; /* queue index */
549 struct enet_cb *cbs; /* tx ring buffer control block*/ 559 struct enet_cb *cbs; /* tx ring buffer control block*/
@@ -562,6 +572,10 @@ struct bcmgenet_tx_ring {
562 572
563struct bcmgenet_rx_ring { 573struct bcmgenet_rx_ring {
564 struct napi_struct napi; /* Rx NAPI struct */ 574 struct napi_struct napi; /* Rx NAPI struct */
575 unsigned long bytes;
576 unsigned long packets;
577 unsigned long errors;
578 unsigned long dropped;
565 unsigned int index; /* Rx ring index */ 579 unsigned int index; /* Rx ring index */
566 struct enet_cb *cbs; /* Rx ring buffer control block */ 580 struct enet_cb *cbs; /* Rx ring buffer control block */
567 unsigned int size; /* Rx ring size */ 581 unsigned int size; /* Rx ring size */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index b97122926d3a..2fbd027f0148 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support 2 * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
3 * 3 *
4 * Copyright (c) 2014 Broadcom Corporation 4 * Copyright (c) 2014-2017 Broadcom
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
@@ -127,7 +127,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
127 enum bcmgenet_power_mode mode) 127 enum bcmgenet_power_mode mode)
128{ 128{
129 struct net_device *dev = priv->dev; 129 struct net_device *dev = priv->dev;
130 u32 cpu_mask_clear;
131 int retries = 0; 130 int retries = 0;
132 u32 reg; 131 u32 reg;
133 132
@@ -173,18 +172,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
173 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); 172 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
174 } 173 }
175 174
176 /* Enable the MPD interrupt */
177 cpu_mask_clear = UMAC_IRQ_MPD_R;
178
179 bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
180
181 return 0; 175 return 0;
182} 176}
183 177
184void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, 178void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
185 enum bcmgenet_power_mode mode) 179 enum bcmgenet_power_mode mode)
186{ 180{
187 u32 cpu_mask_set;
188 u32 reg; 181 u32 reg;
189 182
190 if (mode != GENET_POWER_WOL_MAGIC) { 183 if (mode != GENET_POWER_WOL_MAGIC) {
@@ -201,10 +194,4 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
201 reg &= ~CMD_CRC_FWD; 194 reg &= ~CMD_CRC_FWD;
202 bcmgenet_umac_writel(priv, reg, UMAC_CMD); 195 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
203 priv->crc_fwd_en = 0; 196 priv->crc_fwd_en = 0;
204
205 /* Stop monitoring magic packet IRQ */
206 cpu_mask_set = UMAC_IRQ_MPD_R;
207
208 /* Stop monitoring magic packet IRQ */
209 bcmgenet_intrl2_0_writel(priv, cpu_mask_set, INTRL2_CPU_MASK_SET);
210} 197}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index e87607621e62..8df47c90cfc5 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Broadcom GENET MDIO routines 2 * Broadcom GENET MDIO routines
3 * 3 *
4 * Copyright (c) 2014 Broadcom Corporation 4 * Copyright (c) 2014-2017 Broadcom
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
@@ -195,29 +195,31 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
195 u32 reg = 0; 195 u32 reg = 0;
196 196
197 /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */ 197 /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
198 if (!GENET_IS_V4(priv)) 198 if (GENET_IS_V4(priv)) {
199 return; 199 reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
200 200 if (enable) {
201 reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL); 201 reg &= ~EXT_CK25_DIS;
202 if (enable) { 202 bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
203 reg &= ~EXT_CK25_DIS; 203 mdelay(1);
204 bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); 204
205 mdelay(1); 205 reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
206 206 reg |= EXT_GPHY_RESET;
207 reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN); 207 bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
208 reg |= EXT_GPHY_RESET; 208 mdelay(1);
209
210 reg &= ~EXT_GPHY_RESET;
211 } else {
212 reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN |
213 EXT_GPHY_RESET;
214 bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
215 mdelay(1);
216 reg |= EXT_CK25_DIS;
217 }
209 bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); 218 bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
210 mdelay(1); 219 udelay(60);
211
212 reg &= ~EXT_GPHY_RESET;
213 } else { 220 } else {
214 reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | EXT_GPHY_RESET;
215 bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
216 mdelay(1); 221 mdelay(1);
217 reg |= EXT_CK25_DIS;
218 } 222 }
219 bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
220 udelay(60);
221} 223}
222 224
223static void bcmgenet_internal_phy_setup(struct net_device *dev) 225static void bcmgenet_internal_phy_setup(struct net_device *dev)
@@ -227,10 +229,12 @@ static void bcmgenet_internal_phy_setup(struct net_device *dev)
227 229
228 /* Power up PHY */ 230 /* Power up PHY */
229 bcmgenet_phy_power_set(dev, true); 231 bcmgenet_phy_power_set(dev, true);
230 /* enable APD */ 232 if (!GENET_IS_V5(priv)) {
231 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); 233 /* enable APD */
232 reg |= EXT_PWR_DN_EN_LD; 234 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
233 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); 235 reg |= EXT_PWR_DN_EN_LD;
236 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
237 }
234 bcmgenet_mii_reset(dev); 238 bcmgenet_mii_reset(dev);
235} 239}
236 240
@@ -238,10 +242,12 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
238{ 242{
239 u32 reg; 243 u32 reg;
240 244
241 /* Speed settings are set in bcmgenet_mii_setup() */ 245 if (!GENET_IS_V5(priv)) {
242 reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL); 246 /* Speed settings are set in bcmgenet_mii_setup() */
243 reg |= LED_ACT_SOURCE_MAC; 247 reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
244 bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); 248 reg |= LED_ACT_SOURCE_MAC;
249 bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
250 }
245 251
246 if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) 252 if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
247 fixed_phy_set_link_update(priv->phydev, 253 fixed_phy_set_link_update(priv->phydev,
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 30d1eb9ebec9..f395b951f5e7 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -825,6 +825,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
825 return timeout_us ? 0 : -EBUSY; 825 return timeout_us ? 0 : -EBUSY;
826} 826}
827 827
828#ifdef CONFIG_TIGON3_HWMON
828static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us) 829static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
829{ 830{
830 u32 i, apedata; 831 u32 i, apedata;
@@ -904,6 +905,7 @@ static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
904 905
905 return 0; 906 return 0;
906} 907}
908#endif
907 909
908static int tg3_ape_send_event(struct tg3 *tp, u32 event) 910static int tg3_ape_send_event(struct tg3 *tp, u32 event)
909{ 911{
@@ -10744,6 +10746,7 @@ static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10744 return tg3_reset_hw(tp, reset_phy); 10746 return tg3_reset_hw(tp, reset_phy);
10745} 10747}
10746 10748
10749#ifdef CONFIG_TIGON3_HWMON
10747static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) 10750static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10748{ 10751{
10749 int i; 10752 int i;
@@ -10826,6 +10829,10 @@ static void tg3_hwmon_open(struct tg3 *tp)
10826 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); 10829 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10827 } 10830 }
10828} 10831}
10832#else
10833static inline void tg3_hwmon_close(struct tg3 *tp) { }
10834static inline void tg3_hwmon_open(struct tg3 *tp) { }
10835#endif /* CONFIG_TIGON3_HWMON */
10829 10836
10830 10837
10831#define TG3_STAT_ADD32(PSTAT, REG) \ 10838#define TG3_STAT_ADD32(PSTAT, REG) \
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index f629c2fe04a4..65a1a9e7a159 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -131,11 +131,20 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
131 131
132 case OCTNET_CMD_CHANGE_MACADDR: 132 case OCTNET_CMD_CHANGE_MACADDR:
133 mac = ((u8 *)&nctrl->udd[0]) + 2; 133 mac = ((u8 *)&nctrl->udd[0]) + 2;
134 netif_info(lio, probe, lio->netdev, 134 if (nctrl->ncmd.s.param1) {
135 "MACAddr changed to %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", 135 /* vfidx is 0 based, but vf_num (param1) is 1 based */
136 mac[0], mac[1], 136 int vfidx = nctrl->ncmd.s.param1 - 1;
137 mac[2], mac[3], 137 bool mac_is_admin_assigned = nctrl->ncmd.s.param2;
138 mac[4], mac[5]); 138
139 if (mac_is_admin_assigned)
140 netif_info(lio, probe, lio->netdev,
141 "MAC Address %pM is configured for VF %d\n",
142 mac, vfidx);
143 } else {
144 netif_info(lio, probe, lio->netdev,
145 " MACAddr changed to %pM\n",
146 mac);
147 }
139 break; 148 break;
140 149
141 case OCTNET_CMD_CHANGE_MTU: 150 case OCTNET_CMD_CHANGE_MTU:
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 50384cede8be..6eef3b999130 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -213,17 +213,23 @@ static int lio_get_link_ksettings(struct net_device *netdev,
213 struct lio *lio = GET_LIO(netdev); 213 struct lio *lio = GET_LIO(netdev);
214 struct octeon_device *oct = lio->oct_dev; 214 struct octeon_device *oct = lio->oct_dev;
215 struct oct_link_info *linfo; 215 struct oct_link_info *linfo;
216 u32 supported, advertising; 216 u32 supported = 0, advertising = 0;
217 217
218 linfo = &lio->linfo; 218 linfo = &lio->linfo;
219 219
220 if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || 220 if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
221 linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || 221 linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
222 linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
222 linfo->link.s.if_mode == INTERFACE_MODE_XFI) { 223 linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
223 ecmd->base.port = PORT_FIBRE; 224 ecmd->base.port = PORT_FIBRE;
224 supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE | 225
225 SUPPORTED_Pause); 226 if (linfo->link.s.speed == SPEED_10000) {
226 advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause); 227 supported = SUPPORTED_10000baseT_Full;
228 advertising = ADVERTISED_10000baseT_Full;
229 }
230
231 supported |= SUPPORTED_FIBRE | SUPPORTED_Pause;
232 advertising |= ADVERTISED_Pause;
227 ethtool_convert_legacy_u32_to_link_mode( 233 ethtool_convert_legacy_u32_to_link_mode(
228 ecmd->link_modes.supported, supported); 234 ecmd->link_modes.supported, supported);
229 ethtool_convert_legacy_u32_to_link_mode( 235 ethtool_convert_legacy_u32_to_link_mode(
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 92f46b1375c3..b23485c3af13 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -60,12 +60,6 @@ MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");
60 60
61static int ptp_enable = 1; 61static int ptp_enable = 1;
62 62
63/* Bit mask values for lio->ifstate */
64#define LIO_IFSTATE_DROQ_OPS 0x01
65#define LIO_IFSTATE_REGISTERED 0x02
66#define LIO_IFSTATE_RUNNING 0x04
67#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
68
69/* Polling interval for determining when NIC application is alive */ 63/* Polling interval for determining when NIC application is alive */
70#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100 64#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
71 65
@@ -531,36 +525,6 @@ static void liquidio_deinit_pci(void)
531} 525}
532 526
533/** 527/**
534 * \brief check interface state
535 * @param lio per-network private data
536 * @param state_flag flag state to check
537 */
538static inline int ifstate_check(struct lio *lio, int state_flag)
539{
540 return atomic_read(&lio->ifstate) & state_flag;
541}
542
543/**
544 * \brief set interface state
545 * @param lio per-network private data
546 * @param state_flag flag state to set
547 */
548static inline void ifstate_set(struct lio *lio, int state_flag)
549{
550 atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
551}
552
553/**
554 * \brief clear interface state
555 * @param lio per-network private data
556 * @param state_flag flag state to clear
557 */
558static inline void ifstate_reset(struct lio *lio, int state_flag)
559{
560 atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
561}
562
563/**
564 * \brief Stop Tx queues 528 * \brief Stop Tx queues
565 * @param netdev network device 529 * @param netdev network device
566 */ 530 */
@@ -805,7 +769,7 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
805 } 769 }
806 770
807 for (i = 0; i < num_iqs; i++) { 771 for (i = 0; i < num_iqs; i++) {
808 int numa_node = cpu_to_node(i % num_online_cpus()); 772 int numa_node = dev_to_node(&oct->pci_dev->dev);
809 773
810 spin_lock_init(&lio->glist_lock[i]); 774 spin_lock_init(&lio->glist_lock[i]);
811 775
@@ -1084,16 +1048,35 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
1084 int i; 1048 int i;
1085 int num_ioq_vectors; 1049 int num_ioq_vectors;
1086 int num_alloc_ioq_vectors; 1050 int num_alloc_ioq_vectors;
1051 char *queue_irq_names = NULL;
1052 char *aux_irq_name = NULL;
1087 1053
1088 if (OCTEON_CN23XX_PF(oct) && oct->msix_on) { 1054 if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
1089 oct->num_msix_irqs = oct->sriov_info.num_pf_rings; 1055 oct->num_msix_irqs = oct->sriov_info.num_pf_rings;
1090 /* one non ioq interrupt for handling sli_mac_pf_int_sum */ 1056 /* one non ioq interrupt for handling sli_mac_pf_int_sum */
1091 oct->num_msix_irqs += 1; 1057 oct->num_msix_irqs += 1;
1092 1058
1059 /* allocate storage for the names assigned to each irq */
1060 oct->irq_name_storage =
1061 kcalloc((MAX_IOQ_INTERRUPTS_PER_PF + 1), INTRNAMSIZ,
1062 GFP_KERNEL);
1063 if (!oct->irq_name_storage) {
1064 dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
1065 return -ENOMEM;
1066 }
1067
1068 queue_irq_names = oct->irq_name_storage;
1069 aux_irq_name = &queue_irq_names
1070 [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];
1071
1093 oct->msix_entries = kcalloc( 1072 oct->msix_entries = kcalloc(
1094 oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL); 1073 oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
1095 if (!oct->msix_entries) 1074 if (!oct->msix_entries) {
1096 return 1; 1075 dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
1076 kfree(oct->irq_name_storage);
1077 oct->irq_name_storage = NULL;
1078 return -ENOMEM;
1079 }
1097 1080
1098 msix_entries = (struct msix_entry *)oct->msix_entries; 1081 msix_entries = (struct msix_entry *)oct->msix_entries;
1099 /*Assumption is that pf msix vectors start from pf srn to pf to 1082 /*Assumption is that pf msix vectors start from pf srn to pf to
@@ -1111,7 +1094,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
1111 dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n"); 1094 dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
1112 kfree(oct->msix_entries); 1095 kfree(oct->msix_entries);
1113 oct->msix_entries = NULL; 1096 oct->msix_entries = NULL;
1114 return 1; 1097 kfree(oct->irq_name_storage);
1098 oct->irq_name_storage = NULL;
1099 return num_alloc_ioq_vectors;
1115 } 1100 }
1116 dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n"); 1101 dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
1117 1102
@@ -1119,9 +1104,12 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
1119 1104
1120 /** For PF, there is one non-ioq interrupt handler */ 1105 /** For PF, there is one non-ioq interrupt handler */
1121 num_ioq_vectors -= 1; 1106 num_ioq_vectors -= 1;
1107
1108 snprintf(aux_irq_name, INTRNAMSIZ,
1109 "LiquidIO%u-pf%u-aux", oct->octeon_id, oct->pf_num);
1122 irqret = request_irq(msix_entries[num_ioq_vectors].vector, 1110 irqret = request_irq(msix_entries[num_ioq_vectors].vector,
1123 liquidio_legacy_intr_handler, 0, "octeon", 1111 liquidio_legacy_intr_handler, 0,
1124 oct); 1112 aux_irq_name, oct);
1125 if (irqret) { 1113 if (irqret) {
1126 dev_err(&oct->pci_dev->dev, 1114 dev_err(&oct->pci_dev->dev,
1127 "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n", 1115 "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
@@ -1129,13 +1117,20 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
1129 pci_disable_msix(oct->pci_dev); 1117 pci_disable_msix(oct->pci_dev);
1130 kfree(oct->msix_entries); 1118 kfree(oct->msix_entries);
1131 oct->msix_entries = NULL; 1119 oct->msix_entries = NULL;
1132 return 1; 1120 kfree(oct->irq_name_storage);
1121 oct->irq_name_storage = NULL;
1122 return irqret;
1133 } 1123 }
1134 1124
1135 for (i = 0; i < num_ioq_vectors; i++) { 1125 for (i = 0; i < num_ioq_vectors; i++) {
1126 snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ,
1127 "LiquidIO%u-pf%u-rxtx-%u",
1128 oct->octeon_id, oct->pf_num, i);
1129
1136 irqret = request_irq(msix_entries[i].vector, 1130 irqret = request_irq(msix_entries[i].vector,
1137 liquidio_msix_intr_handler, 0, 1131 liquidio_msix_intr_handler, 0,
1138 "octeon", &oct->ioq_vector[i]); 1132 &queue_irq_names[IRQ_NAME_OFF(i)],
1133 &oct->ioq_vector[i]);
1139 if (irqret) { 1134 if (irqret) {
1140 dev_err(&oct->pci_dev->dev, 1135 dev_err(&oct->pci_dev->dev,
1141 "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n", 1136 "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
@@ -1155,7 +1150,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
1155 pci_disable_msix(oct->pci_dev); 1150 pci_disable_msix(oct->pci_dev);
1156 kfree(oct->msix_entries); 1151 kfree(oct->msix_entries);
1157 oct->msix_entries = NULL; 1152 oct->msix_entries = NULL;
1158 return 1; 1153 kfree(oct->irq_name_storage);
1154 oct->irq_name_storage = NULL;
1155 return irqret;
1159 } 1156 }
1160 oct->ioq_vector[i].vector = msix_entries[i].vector; 1157 oct->ioq_vector[i].vector = msix_entries[i].vector;
1161 /* assign the cpu mask for this msix interrupt vector */ 1158 /* assign the cpu mask for this msix interrupt vector */
@@ -1173,15 +1170,29 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
1173 else 1170 else
1174 oct->flags |= LIO_FLAG_MSI_ENABLED; 1171 oct->flags |= LIO_FLAG_MSI_ENABLED;
1175 1172
1173 /* allocate storage for the names assigned to the irq */
1174 oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
1175 if (!oct->irq_name_storage)
1176 return -ENOMEM;
1177
1178 queue_irq_names = oct->irq_name_storage;
1179
1180 snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
1181 "LiquidIO%u-pf%u-rxtx-%u",
1182 oct->octeon_id, oct->pf_num, 0);
1183
1176 irqret = request_irq(oct->pci_dev->irq, 1184 irqret = request_irq(oct->pci_dev->irq,
1177 liquidio_legacy_intr_handler, IRQF_SHARED, 1185 liquidio_legacy_intr_handler,
1178 "octeon", oct); 1186 IRQF_SHARED,
1187 &queue_irq_names[IRQ_NAME_OFF(0)], oct);
1179 if (irqret) { 1188 if (irqret) {
1180 if (oct->flags & LIO_FLAG_MSI_ENABLED) 1189 if (oct->flags & LIO_FLAG_MSI_ENABLED)
1181 pci_disable_msi(oct->pci_dev); 1190 pci_disable_msi(oct->pci_dev);
1182 dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n", 1191 dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
1183 irqret); 1192 irqret);
1184 return 1; 1193 kfree(oct->irq_name_storage);
1194 oct->irq_name_storage = NULL;
1195 return irqret;
1185 } 1196 }
1186 } 1197 }
1187 return 0; 1198 return 0;
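The per-vector IRQ naming introduced above boils down to one flat buffer of fixed-width slots: kcalloc(MAX_IOQ_INTERRUPTS_PER_PF + 1, INTRNAMSIZ, ...) reserves a 32-byte slot per possible vector (plus one for the auxiliary interrupt), IRQ_NAME_OFF(i) turns a vector index into a byte offset, and each slot is filled with snprintf() before being handed to request_irq(), so /proc/interrupts shows names like "LiquidIO0-pf0-rxtx-3" instead of a generic "octeon". A self-contained user-space sketch of the same bookkeeping (counts hard-coded purely for illustration; the macros match the liquidio_common.h hunk further down):

    #include <stdio.h>
    #include <stdlib.h>

    #define INTRNAMSIZ      32
    #define IRQ_NAME_OFF(i) ((i) * INTRNAMSIZ)

    int main(void)
    {
        unsigned int octeon_id = 0, pf_num = 0;  /* illustrative only */
        unsigned int num_ioq_vectors = 4;        /* illustrative only */
        char *storage = calloc(num_ioq_vectors + 1, INTRNAMSIZ);
        unsigned int i;

        if (!storage)
            return 1;

        /* One name per queue vector, then one for the auxiliary interrupt. */
        for (i = 0; i < num_ioq_vectors; i++)
            snprintf(&storage[IRQ_NAME_OFF(i)], INTRNAMSIZ,
                     "LiquidIO%u-pf%u-rxtx-%u", octeon_id, pf_num, i);
        snprintf(&storage[IRQ_NAME_OFF(num_ioq_vectors)], INTRNAMSIZ,
                 "LiquidIO%u-pf%u-aux", octeon_id, pf_num);

        for (i = 0; i <= num_ioq_vectors; i++)
            printf("%s\n", &storage[IRQ_NAME_OFF(i)]);

        free(storage);
        return 0;
    }

request_irq() only stores the name pointer, so the strings have to stay alive for the lifetime of the IRQs; keeping them in a single allocation is what lets octeon_destroy_resources() and the error paths release everything with one kfree().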
@@ -1449,6 +1460,9 @@ static void octeon_destroy_resources(struct octeon_device *oct)
1449 pci_disable_msi(oct->pci_dev); 1460 pci_disable_msi(oct->pci_dev);
1450 } 1461 }
1451 1462
1463 kfree(oct->irq_name_storage);
1464 oct->irq_name_storage = NULL;
1465
1452 /* fallthrough */ 1466 /* fallthrough */
1453 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: 1467 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
1454 if (OCTEON_CN23XX_PF(oct)) 1468 if (OCTEON_CN23XX_PF(oct))
@@ -2211,8 +2225,8 @@ static void if_cfg_callback(struct octeon_device *oct,
2211 2225
2212 oct = lio_get_device(ctx->octeon_id); 2226 oct = lio_get_device(ctx->octeon_id);
2213 if (resp->status) 2227 if (resp->status)
2214 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n", 2228 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n",
2215 CVM_CAST64(resp->status)); 2229 CVM_CAST64(resp->status), status);
2216 WRITE_ONCE(ctx->cond, 1); 2230 WRITE_ONCE(ctx->cond, 1);
2217 2231
2218 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", 2232 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
@@ -2555,6 +2569,15 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
2555 __func__); 2569 __func__);
2556 return 1; 2570 return 1;
2557 } 2571 }
2572
2573 if (octeon_dev->ioq_vector) {
2574 struct octeon_ioq_vector *ioq_vector;
2575
2576 ioq_vector = &octeon_dev->ioq_vector[q];
2577 netif_set_xps_queue(netdev,
2578 &ioq_vector->affinity_mask,
2579 ioq_vector->iq_index);
2580 }
2558 } 2581 }
2559 2582
2560 return 0; 2583 return 0;
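The netif_set_xps_queue() call added above ties transmit-side queue selection (XPS) to the same CPU set that services the queue's MSI-X vector: affinity_mask is the mask the driver already uses for IRQ affinity, and iq_index names the corresponding TX queue. A hedged sketch of the shape of the call, with the structure fields taken from the hunk (netif_set_xps_queue() is the stock netdev helper; its int return value is ignored here, as in the hunk):

    /* Only meaningful when MSI-X vectors, and hence per-queue affinity
     * masks, exist. */
    if (octeon_dev->ioq_vector) {
        struct octeon_ioq_vector *iv = &octeon_dev->ioq_vector[q];

        /* Packets generated on a CPU in iv->affinity_mask prefer TX queue
         * iv->iq_index, whose completion interrupt lands on that same CPU
         * set. */
        netif_set_xps_queue(netdev, &iv->affinity_mask, iv->iq_index);
    }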
@@ -3596,7 +3619,8 @@ static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
3596 nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0); 3619 nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
3597 nctrl.ncmd.s.more = 1; 3620 nctrl.ncmd.s.more = 1;
3598 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 3621 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3599 nctrl.cb_fn = 0; 3622 nctrl.netpndev = (u64)netdev;
3623 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3600 nctrl.wait_time = LIO_CMD_WAIT_TM; 3624 nctrl.wait_time = LIO_CMD_WAIT_TM;
3601 3625
3602 nctrl.udd[0] = 0; 3626 nctrl.udd[0] = 0;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 7b83be4ce1fe..f72db33fcd3a 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -39,12 +39,6 @@ MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
39 39
40#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 40#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
41 41
42/* Bit mask values for lio->ifstate */
43#define LIO_IFSTATE_DROQ_OPS 0x01
44#define LIO_IFSTATE_REGISTERED 0x02
45#define LIO_IFSTATE_RUNNING 0x04
46#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
47
48struct liquidio_if_cfg_context { 42struct liquidio_if_cfg_context {
49 int octeon_id; 43 int octeon_id;
50 44
@@ -336,36 +330,6 @@ static struct pci_driver liquidio_vf_pci_driver = {
336}; 330};
337 331
338/** 332/**
339 * \brief check interface state
340 * @param lio per-network private data
341 * @param state_flag flag state to check
342 */
343static int ifstate_check(struct lio *lio, int state_flag)
344{
345 return atomic_read(&lio->ifstate) & state_flag;
346}
347
348/**
349 * \brief set interface state
350 * @param lio per-network private data
351 * @param state_flag flag state to set
352 */
353static void ifstate_set(struct lio *lio, int state_flag)
354{
355 atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
356}
357
358/**
359 * \brief clear interface state
360 * @param lio per-network private data
361 * @param state_flag flag state to clear
362 */
363static void ifstate_reset(struct lio *lio, int state_flag)
364{
365 atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
366}
367
368/**
369 * \brief Stop Tx queues 333 * \brief Stop Tx queues
370 * @param netdev network device 334 * @param netdev network device
371 */ 335 */
@@ -780,6 +744,7 @@ liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
780static int octeon_setup_interrupt(struct octeon_device *oct) 744static int octeon_setup_interrupt(struct octeon_device *oct)
781{ 745{
782 struct msix_entry *msix_entries; 746 struct msix_entry *msix_entries;
747 char *queue_irq_names = NULL;
783 int num_alloc_ioq_vectors; 748 int num_alloc_ioq_vectors;
784 int num_ioq_vectors; 749 int num_ioq_vectors;
785 int irqret; 750 int irqret;
@@ -788,10 +753,25 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
788 if (oct->msix_on) { 753 if (oct->msix_on) {
789 oct->num_msix_irqs = oct->sriov_info.rings_per_vf; 754 oct->num_msix_irqs = oct->sriov_info.rings_per_vf;
790 755
756 /* allocate storage for the names assigned to each irq */
757 oct->irq_name_storage =
758 kcalloc(MAX_IOQ_INTERRUPTS_PER_VF, INTRNAMSIZ,
759 GFP_KERNEL);
760 if (!oct->irq_name_storage) {
761 dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
762 return -ENOMEM;
763 }
764
765 queue_irq_names = oct->irq_name_storage;
766
791 oct->msix_entries = kcalloc( 767 oct->msix_entries = kcalloc(
792 oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL); 768 oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
793 if (!oct->msix_entries) 769 if (!oct->msix_entries) {
794 return 1; 770 dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
771 kfree(oct->irq_name_storage);
772 oct->irq_name_storage = NULL;
773 return -ENOMEM;
774 }
795 775
796 msix_entries = (struct msix_entry *)oct->msix_entries; 776 msix_entries = (struct msix_entry *)oct->msix_entries;
797 777
@@ -805,16 +785,23 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
805 dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n"); 785 dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
806 kfree(oct->msix_entries); 786 kfree(oct->msix_entries);
807 oct->msix_entries = NULL; 787 oct->msix_entries = NULL;
808 return 1; 788 kfree(oct->irq_name_storage);
789 oct->irq_name_storage = NULL;
790 return num_alloc_ioq_vectors;
809 } 791 }
810 dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n"); 792 dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
811 793
812 num_ioq_vectors = oct->num_msix_irqs; 794 num_ioq_vectors = oct->num_msix_irqs;
813 795
814 for (i = 0; i < num_ioq_vectors; i++) { 796 for (i = 0; i < num_ioq_vectors; i++) {
797 snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ,
798 "LiquidIO%u-vf%u-rxtx-%u",
799 oct->octeon_id, oct->vf_num, i);
800
815 irqret = request_irq(msix_entries[i].vector, 801 irqret = request_irq(msix_entries[i].vector,
816 liquidio_msix_intr_handler, 0, 802 liquidio_msix_intr_handler, 0,
817 "octeon", &oct->ioq_vector[i]); 803 &queue_irq_names[IRQ_NAME_OFF(i)],
804 &oct->ioq_vector[i]);
818 if (irqret) { 805 if (irqret) {
819 dev_err(&oct->pci_dev->dev, 806 dev_err(&oct->pci_dev->dev,
820 "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n", 807 "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
@@ -830,7 +817,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
830 pci_disable_msix(oct->pci_dev); 817 pci_disable_msix(oct->pci_dev);
831 kfree(oct->msix_entries); 818 kfree(oct->msix_entries);
832 oct->msix_entries = NULL; 819 oct->msix_entries = NULL;
833 return 1; 820 kfree(oct->irq_name_storage);
821 oct->irq_name_storage = NULL;
822 return irqret;
834 } 823 }
835 oct->ioq_vector[i].vector = msix_entries[i].vector; 824 oct->ioq_vector[i].vector = msix_entries[i].vector;
836 /* assign the cpu mask for this msix interrupt vector */ 825 /* assign the cpu mask for this msix interrupt vector */
@@ -975,6 +964,8 @@ static void octeon_destroy_resources(struct octeon_device *oct)
975 pci_disable_msix(oct->pci_dev); 964 pci_disable_msix(oct->pci_dev);
976 kfree(oct->msix_entries); 965 kfree(oct->msix_entries);
977 oct->msix_entries = NULL; 966 oct->msix_entries = NULL;
967 kfree(oct->irq_name_storage);
968 oct->irq_name_storage = NULL;
978 } 969 }
979 /* Soft reset the octeon device before exiting */ 970 /* Soft reset the octeon device before exiting */
980 if (oct->pci_dev->reset_fn) 971 if (oct->pci_dev->reset_fn)
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 294c6f3c6b48..4a07c0ac9fab 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -100,6 +100,11 @@ enum octeon_tag_type {
100 100
101#define BYTES_PER_DHLEN_UNIT 8 101#define BYTES_PER_DHLEN_UNIT 8
102#define MAX_REG_CNT 2000000U 102#define MAX_REG_CNT 2000000U
103#define INTRNAMSIZ 32
104#define IRQ_NAME_OFF(i) ((i) * INTRNAMSIZ)
105#define MAX_IOQ_INTERRUPTS_PER_PF (64 * 2)
106#define MAX_IOQ_INTERRUPTS_PER_VF (8 * 2)
107
103 108
104static inline u32 incr_index(u32 index, u32 count, u32 max) 109static inline u32 incr_index(u32 index, u32 count, u32 max)
105{ 110{
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 9675ffbf25e6..e21b477d0159 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -793,7 +793,7 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
793 u32 num_descs = 0; 793 u32 num_descs = 0;
794 u32 iq_no = 0; 794 u32 iq_no = 0;
795 union oct_txpciq txpciq; 795 union oct_txpciq txpciq;
796 int numa_node = cpu_to_node(iq_no % num_online_cpus()); 796 int numa_node = dev_to_node(&oct->pci_dev->dev);
797 797
798 if (OCTEON_CN6XXX(oct)) 798 if (OCTEON_CN6XXX(oct))
799 num_descs = 799 num_descs =
@@ -837,7 +837,7 @@ int octeon_setup_output_queues(struct octeon_device *oct)
837 u32 num_descs = 0; 837 u32 num_descs = 0;
838 u32 desc_size = 0; 838 u32 desc_size = 0;
839 u32 oq_no = 0; 839 u32 oq_no = 0;
840 int numa_node = cpu_to_node(oq_no % num_online_cpus()); 840 int numa_node = dev_to_node(&oct->pci_dev->dev);
841 841
842 if (OCTEON_CN6XXX(oct)) { 842 if (OCTEON_CN6XXX(oct)) {
843 num_descs = 843 num_descs =
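Throughout these hunks the NUMA hint changes from cpu_to_node(queue_index % num_online_cpus()), which names whatever node the N-th online CPU happens to sit on, to dev_to_node(&oct->pci_dev->dev), the node the PCI adapter itself is attached to. A hedged sketch of how such a hint is typically consumed when allocating queue state (alloc_queue_near_device() is a hypothetical helper, not part of the driver; vzalloc_node()/vzalloc() are standard kernel allocators):

    #include <linux/pci.h>
    #include <linux/vmalloc.h>

    /* Hypothetical helper: place a queue structure on the adapter's own
     * NUMA node, falling back to any node rather than failing outright. */
    static void *alloc_queue_near_device(struct pci_dev *pdev, size_t size)
    {
        void *buf = vzalloc_node(size, dev_to_node(&pdev->dev));

        return buf ? buf : vzalloc(size);
    }

That is also consistent with the later hunks dropping the set_dev_node()/retry sequence around lio_dma_alloc(): once the hint equals the device's own node, overriding the device node before the DMA allocation no longer buys anything.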
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index c301a3852482..8c5d33e53cfa 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -517,6 +517,9 @@ struct octeon_device {
517 517
518 void *msix_entries; 518 void *msix_entries;
519 519
520 /* when requesting IRQs, the names are stored here */
521 void *irq_name_storage;
522
520 struct octeon_sriov_info sriov_info; 523 struct octeon_sriov_info sriov_info;
521 524
522 struct octeon_pf_vf_hs_word pfvf_hsword; 525 struct octeon_pf_vf_hs_word pfvf_hsword;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 79f809479af6..00970597ada8 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -226,8 +226,7 @@ int octeon_init_droq(struct octeon_device *oct,
226 struct octeon_droq *droq; 226 struct octeon_droq *droq;
227 u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0; 227 u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
228 u32 c_pkts_per_intr = 0, c_refill_threshold = 0; 228 u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
229 int orig_node = dev_to_node(&oct->pci_dev->dev); 229 int numa_node = dev_to_node(&oct->pci_dev->dev);
230 int numa_node = cpu_to_node(q_no % num_online_cpus());
231 230
232 dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); 231 dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
233 232
@@ -267,13 +266,8 @@ int octeon_init_droq(struct octeon_device *oct,
267 droq->buffer_size = c_buf_size; 266 droq->buffer_size = c_buf_size;
268 267
269 desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE; 268 desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
270 set_dev_node(&oct->pci_dev->dev, numa_node);
271 droq->desc_ring = lio_dma_alloc(oct, desc_ring_size, 269 droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
272 (dma_addr_t *)&droq->desc_ring_dma); 270 (dma_addr_t *)&droq->desc_ring_dma);
273 set_dev_node(&oct->pci_dev->dev, orig_node);
274 if (!droq->desc_ring)
275 droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
276 (dma_addr_t *)&droq->desc_ring_dma);
277 271
278 if (!droq->desc_ring) { 272 if (!droq->desc_ring) {
279 dev_err(&oct->pci_dev->dev, 273 dev_err(&oct->pci_dev->dev,
@@ -970,7 +964,7 @@ int octeon_create_droq(struct octeon_device *oct,
970 u32 desc_size, void *app_ctx) 964 u32 desc_size, void *app_ctx)
971{ 965{
972 struct octeon_droq *droq; 966 struct octeon_droq *droq;
973 int numa_node = cpu_to_node(q_no % num_online_cpus()); 967 int numa_node = dev_to_node(&oct->pci_dev->dev);
974 968
975 if (oct->droq[q_no]) { 969 if (oct->droq[q_no]) {
976 dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n", 970 dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 4608a5af35a3..5063a12613e5 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -152,7 +152,7 @@ struct octeon_instr_queue {
152 struct oct_iq_stats stats; 152 struct oct_iq_stats stats;
153 153
154 /** DMA mapped base address of the input descriptor ring. */ 154 /** DMA mapped base address of the input descriptor ring. */
155 u64 base_addr_dma; 155 dma_addr_t base_addr_dma;
156 156
157 /** Application context */ 157 /** Application context */
158 void *app_ctx; 158 void *app_ctx;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index eef2a1e8a7e3..ddb61bf25775 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -28,6 +28,12 @@
28#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) 28#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
29#define LIO_MIN_MTU_SIZE ETH_MIN_MTU 29#define LIO_MIN_MTU_SIZE ETH_MIN_MTU
30 30
31/* Bit mask values for lio->ifstate */
32#define LIO_IFSTATE_DROQ_OPS 0x01
33#define LIO_IFSTATE_REGISTERED 0x02
34#define LIO_IFSTATE_RUNNING 0x04
35#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
36
31struct oct_nic_stats_resp { 37struct oct_nic_stats_resp {
32 u64 rh; 38 u64 rh;
33 struct oct_link_stats stats; 39 struct oct_link_stats stats;
@@ -438,4 +444,34 @@ static inline void octeon_fast_packet_next(struct octeon_droq *droq,
438 get_rbd(droq->recv_buf_list[idx].buffer), copy_len); 444 get_rbd(droq->recv_buf_list[idx].buffer), copy_len);
439} 445}
440 446
447/**
448 * \brief check interface state
449 * @param lio per-network private data
450 * @param state_flag flag state to check
451 */
452static inline int ifstate_check(struct lio *lio, int state_flag)
453{
454 return atomic_read(&lio->ifstate) & state_flag;
455}
456
457/**
458 * \brief set interface state
459 * @param lio per-network private data
460 * @param state_flag flag state to set
461 */
462static inline void ifstate_set(struct lio *lio, int state_flag)
463{
464 atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
465}
466
467/**
468 * \brief clear interface state
469 * @param lio per-network private data
470 * @param state_flag flag state to clear
471 */
472static inline void ifstate_reset(struct lio *lio, int state_flag)
473{
474 atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
475}
476
441#endif 477#endif
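Moving ifstate_check()/ifstate_set()/ifstate_reset() and the LIO_IFSTATE_* masks into octeon_network.h gives the PF (lio_main.c) and VF (lio_vf_main.c) drivers one shared copy instead of the two duplicates deleted earlier in this patch. A hedged usage sketch (lio is the per-interface private data, as above):

    ifstate_set(lio, LIO_IFSTATE_RUNNING);

    if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
        /* interface is up; queues and timers may be started */
    }

    ifstate_reset(lio, LIO_IFSTATE_RUNNING);

Worth noting: the set and reset helpers are built from atomic_read() followed by atomic_set(), which is not an atomic read-modify-write, so concurrent ifstate updates still rely on the callers being serialised elsewhere.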
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 707bc15adec6..261f448f9de2 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -62,8 +62,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
62 u32 iq_no = (u32)txpciq.s.q_no; 62 u32 iq_no = (u32)txpciq.s.q_no;
63 u32 q_size; 63 u32 q_size;
64 struct cavium_wq *db_wq; 64 struct cavium_wq *db_wq;
65 int orig_node = dev_to_node(&oct->pci_dev->dev); 65 int numa_node = dev_to_node(&oct->pci_dev->dev);
66 int numa_node = cpu_to_node(iq_no % num_online_cpus());
67 66
68 if (OCTEON_CN6XXX(oct)) 67 if (OCTEON_CN6XXX(oct))
69 conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx))); 68 conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx)));
@@ -91,13 +90,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
91 90
92 iq->oct_dev = oct; 91 iq->oct_dev = oct;
93 92
94 set_dev_node(&oct->pci_dev->dev, numa_node); 93 iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma);
95 iq->base_addr = lio_dma_alloc(oct, q_size,
96 (dma_addr_t *)&iq->base_addr_dma);
97 set_dev_node(&oct->pci_dev->dev, orig_node);
98 if (!iq->base_addr)
99 iq->base_addr = lio_dma_alloc(oct, q_size,
100 (dma_addr_t *)&iq->base_addr_dma);
101 if (!iq->base_addr) { 94 if (!iq->base_addr) {
102 dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n", 95 dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
103 iq_no); 96 iq_no);
@@ -211,7 +204,7 @@ int octeon_setup_iq(struct octeon_device *oct,
211 void *app_ctx) 204 void *app_ctx)
212{ 205{
213 u32 iq_no = (u32)txpciq.s.q_no; 206 u32 iq_no = (u32)txpciq.s.q_no;
214 int numa_node = cpu_to_node(iq_no % num_online_cpus()); 207 int numa_node = dev_to_node(&oct->pci_dev->dev);
215 208
216 if (oct->instr_queue[iq_no]) { 209 if (oct->instr_queue[iq_no]) {
217 dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n", 210 dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index 2fbaae96b505..3d691c69f74d 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -69,50 +69,53 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
69 int resp_to_process = MAX_ORD_REQS_TO_PROCESS; 69 int resp_to_process = MAX_ORD_REQS_TO_PROCESS;
70 u32 status; 70 u32 status;
71 u64 status64; 71 u64 status64;
72 struct octeon_instr_rdp *rdp;
73 u64 rptr;
74 72
75 ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST]; 73 ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];
76 74
77 do { 75 do {
78 spin_lock_bh(&ordered_sc_list->lock); 76 spin_lock_bh(&ordered_sc_list->lock);
79 77
80 if (ordered_sc_list->head.next == &ordered_sc_list->head) { 78 if (list_empty(&ordered_sc_list->head)) {
81 spin_unlock_bh(&ordered_sc_list->lock); 79 spin_unlock_bh(&ordered_sc_list->lock);
82 return 1; 80 return 1;
83 } 81 }
84 82
85 sc = (struct octeon_soft_command *)ordered_sc_list-> 83 sc = list_first_entry(&ordered_sc_list->head,
86 head.next; 84 struct octeon_soft_command, node);
87 if (OCTEON_CN23XX_PF(octeon_dev) ||
88 OCTEON_CN23XX_VF(octeon_dev)) {
89 rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
90 rptr = sc->cmd.cmd3.rptr;
91 } else {
92 rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
93 rptr = sc->cmd.cmd2.rptr;
94 }
95 85
96 status = OCTEON_REQUEST_PENDING; 86 status = OCTEON_REQUEST_PENDING;
97 87
98 /* check if octeon has finished DMA'ing a response 88 /* check if octeon has finished DMA'ing a response
99 * to where rptr is pointing to 89 * to where rptr is pointing to
100 */ 90 */
101 dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev,
102 rptr, rdp->rlen,
103 DMA_FROM_DEVICE);
104 status64 = *sc->status_word; 91 status64 = *sc->status_word;
105 92
106 if (status64 != COMPLETION_WORD_INIT) { 93 if (status64 != COMPLETION_WORD_INIT) {
94 /* This logic ensures that all 64b have been written.
95 * 1. check byte 0 for non-FF
96 * 2. if non-FF, then swap result from BE to host order
97 * 3. check byte 7 (swapped to 0) for non-FF
98 * 4. if non-FF, use the low 32-bit status code
99 * 5. if either byte 0 or byte 7 is FF, don't use status
100 */
107 if ((status64 & 0xff) != 0xff) { 101 if ((status64 & 0xff) != 0xff) {
108 octeon_swap_8B_data(&status64, 1); 102 octeon_swap_8B_data(&status64, 1);
109 if (((status64 & 0xff) != 0xff)) { 103 if (((status64 & 0xff) != 0xff)) {
110 status = (u32)(status64 & 104 /* retrieve 16-bit firmware status */
111 0xffffffffULL); 105 status = (u32)(status64 & 0xffffULL);
106 if (status) {
107 status =
108 FIRMWARE_STATUS_CODE(status);
109 } else {
110 /* i.e. no error */
111 status = OCTEON_REQUEST_DONE;
112 }
112 } 113 }
113 } 114 }
114 } else if (force_quit || (sc->timeout && 115 } else if (force_quit || (sc->timeout &&
115 time_after(jiffies, (unsigned long)sc->timeout))) { 116 time_after(jiffies, (unsigned long)sc->timeout))) {
117 dev_err(&octeon_dev->pci_dev->dev, "%s: cmd failed, timeout (%ld, %ld)\n",
118 __func__, (long)jiffies, (long)sc->timeout);
116 status = OCTEON_REQUEST_TIMEOUT; 119 status = OCTEON_REQUEST_TIMEOUT;
117 } 120 }
118 121
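The rewritten completion check above no longer syncs the response buffer for CPU access or digs rptr out of cmd2/cmd3; it simply inspects *sc->status_word and uses the byte-0 / byte-7 test described in the new comment to decide whether the firmware has finished writing all 64 bits. A self-contained sketch of that decode (the status-code macros mirror the response_manager.h hunk below; COMPLETION_WORD_INIT stands in for the driver's all-ones initialiser, and __builtin_bswap64() stands in for octeon_swap_8B_data()):

    #include <stdint.h>
    #include <stdio.h>

    #define COMPLETION_WORD_INIT        0xFFFFFFFFFFFFFFFFULL
    #define FIRMWARE_MAJOR_ERROR_CODE   0x0001
    #define FIRMWARE_STATUS_CODE(status) \
        ((FIRMWARE_MAJOR_ERROR_CODE << 16) | (status))

    int main(void)
    {
        /* Example: the firmware wrote a big-endian word whose low 16 bits
         * carry status 0x0005; as first read by the host it looks like
         * this. */
        uint64_t status64 = 0x0500000000000000ULL;
        uint32_t status = 0;    /* "pending" until proven otherwise */

        if (status64 != COMPLETION_WORD_INIT &&
            (status64 & 0xff) != 0xff) {                /* byte 0 written */
            status64 = __builtin_bswap64(status64);     /* BE -> host order */
            if ((status64 & 0xff) != 0xff) {            /* byte 7 written */
                status = (uint32_t)(status64 & 0xffffULL);
                if (status)
                    status = FIRMWARE_STATUS_CODE(status);
                /* else: request completed with no error */
            }
        }

        printf("host-side status: 0x%08x\n", (unsigned int)status); /* 0x00010005 */
        return 0;
    }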
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.h b/drivers/net/ethernet/cavium/liquidio/response_manager.h
index cbb2d84e8932..9169c2815dba 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.h
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.h
@@ -78,6 +78,8 @@ enum {
78 78
79/*------------ Error codes used by host driver -----------------*/ 79/*------------ Error codes used by host driver -----------------*/
80#define DRIVER_MAJOR_ERROR_CODE 0x0000 80#define DRIVER_MAJOR_ERROR_CODE 0x0000
81/*------ Error codes used by firmware (bits 15..0 set by firmware */
82#define FIRMWARE_MAJOR_ERROR_CODE 0x0001
81 83
82/** A value of 0x00000000 indicates no error i.e. success */ 84/** A value of 0x00000000 indicates no error i.e. success */
83#define DRIVER_ERROR_NONE 0x00000000 85#define DRIVER_ERROR_NONE 0x00000000
@@ -116,6 +118,9 @@ enum {
116 118
117}; 119};
118 120
121#define FIRMWARE_STATUS_CODE(status) \
122 ((FIRMWARE_MAJOR_ERROR_CODE << 16) | (status))
123
119/** Initialize the response lists. The number of response lists to create is 124/** Initialize the response lists. The number of response lists to create is
120 * given by count. 125 * given by count.
121 * @param octeon_dev - the octeon device structure. 126 * @param octeon_dev - the octeon device structure.
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 23d82748f52b..e863ba74d005 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1148,14 +1148,14 @@ static int ethoc_probe(struct platform_device *pdev)
1148 1148
1149 /* Allow the platform setup code to pass in a MAC address. */ 1149 /* Allow the platform setup code to pass in a MAC address. */
1150 if (pdata) { 1150 if (pdata) {
1151 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN); 1151 ether_addr_copy(netdev->dev_addr, pdata->hwaddr);
1152 priv->phy_id = pdata->phy_id; 1152 priv->phy_id = pdata->phy_id;
1153 } else { 1153 } else {
1154 const void *mac; 1154 const void *mac;
1155 1155
1156 mac = of_get_mac_address(pdev->dev.of_node); 1156 mac = of_get_mac_address(pdev->dev.of_node);
1157 if (mac) 1157 if (mac)
1158 memcpy(netdev->dev_addr, mac, IFHWADDRLEN); 1158 ether_addr_copy(netdev->dev_addr, mac);
1159 priv->phy_id = -1; 1159 priv->phy_id = -1;
1160 } 1160 }
1161 1161
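The ethoc change swaps open-coded memcpy(..., IFHWADDRLEN) for ether_addr_copy(), the <linux/etherdevice.h> helper that copies exactly ETH_ALEN (6) bytes and, per its kerneldoc, expects both pointers to be 16-bit aligned. A minimal sketch with a hypothetical wrapper (not from the driver):

    #include <linux/etherdevice.h>

    /* Hypothetical wrapper: copy a 6-byte MAC into the netdev. */
    static void set_hwaddr(struct net_device *netdev, const u8 *hwaddr)
    {
        ether_addr_copy(netdev->dev_addr, hwaddr);  /* ETH_ALEN bytes, u16-aligned */
    }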
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index e2ca107f9d94..d4bb8bf86a45 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -137,6 +137,13 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
137/* L4 Type field: TCP */ 137/* L4 Type field: TCP */
138#define FM_L4_PARSE_RESULT_TCP 0x20 138#define FM_L4_PARSE_RESULT_TCP 0x20
139 139
140/* FD status field indicating whether the FM Parser has attempted to validate
141 * the L4 csum of the frame.
142 * Note that having this bit set doesn't necessarily imply that the checksum
143 * is valid. One would have to check the parse results to find that out.
144 */
145#define FM_FD_STAT_L4CV 0x00000004
146
140#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */ 147#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
141#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */ 148#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
142 149
@@ -235,6 +242,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
235 * For conformity, we'll still declare GSO explicitly. 242 * For conformity, we'll still declare GSO explicitly.
236 */ 243 */
237 net_dev->features |= NETIF_F_GSO; 244 net_dev->features |= NETIF_F_GSO;
245 net_dev->features |= NETIF_F_RXCSUM;
238 246
239 net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 247 net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
240 /* we do not want shared skbs on TX */ 248 /* we do not want shared skbs on TX */
@@ -334,6 +342,45 @@ static void dpaa_get_stats64(struct net_device *net_dev,
334 } 342 }
335} 343}
336 344
345static int dpaa_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
346 struct tc_to_netdev *tc)
347{
348 struct dpaa_priv *priv = netdev_priv(net_dev);
349 u8 num_tc;
350 int i;
351
352 if (tc->type != TC_SETUP_MQPRIO)
353 return -EINVAL;
354
355 tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
356 num_tc = tc->mqprio->num_tc;
357
358 if (num_tc == priv->num_tc)
359 return 0;
360
361 if (!num_tc) {
362 netdev_reset_tc(net_dev);
363 goto out;
364 }
365
366 if (num_tc > DPAA_TC_NUM) {
367 netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
368 DPAA_TC_NUM);
369 return -EINVAL;
370 }
371
372 netdev_set_num_tc(net_dev, num_tc);
373
374 for (i = 0; i < num_tc; i++)
375 netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
376 i * DPAA_TC_TXQ_NUM);
377
378out:
379 priv->num_tc = num_tc ? : 1;
380 netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
381 return 0;
382}
383
 337static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev) 384static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
 338{ 385{
339 struct platform_device *of_dev; 386 struct platform_device *of_dev;
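dpaa_setup_tc() above accepts an mqprio request, validates num_tc against DPAA_TC_NUM, and then carves the TX queue space into equal per-class chunks via netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM, i * DPAA_TC_TXQ_NUM). A self-contained illustration of the resulting class-to-queue layout (DPAA_TC_TXQ_NUM is NR_CPUS in the driver; 8 is used here purely to print something concrete):

    #include <stdio.h>

    #define DPAA_TC_TXQ_NUM 8   /* NR_CPUS in the driver */

    int main(void)
    {
        int num_tc = 4, i;      /* DPAA_TC_NUM classes requested */

        /* Mirrors the netdev_set_tc_queue() loop: class i owns
         * DPAA_TC_TXQ_NUM consecutive queues starting at
         * i * DPAA_TC_TXQ_NUM. */
        for (i = 0; i < num_tc; i++)
            printf("tc %d -> txq %2d..%2d\n", i,
                   i * DPAA_TC_TXQ_NUM,
                   (i + 1) * DPAA_TC_TXQ_NUM - 1);

        /* Matches the final netif_set_real_num_tx_queues() call. */
        printf("real_num_tx_queues = %d\n", num_tc * DPAA_TC_TXQ_NUM);
        return 0;
    }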
@@ -557,16 +604,18 @@ static void dpaa_bps_free(struct dpaa_priv *priv)
557 604
558/* Use multiple WQs for FQ assignment: 605/* Use multiple WQs for FQ assignment:
559 * - Tx Confirmation queues go to WQ1. 606 * - Tx Confirmation queues go to WQ1.
560 * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance 607 * - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
561 * to be scheduled, in case there are many more FQs in WQ3). 608 * to be scheduled, in case there are many more FQs in WQ6).
562 * - Rx Default and Tx queues go to WQ3 (no differentiation between 609 * - Rx Default goes to WQ6.
563 * Rx and Tx traffic). 610 * - Tx queues go to different WQs depending on their priority. Equal
611 * chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
612 * WQ0 (highest priority).
564 * This ensures that Tx-confirmed buffers are timely released. In particular, 613 * This ensures that Tx-confirmed buffers are timely released. In particular,
565 * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they 614 * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
566 * are greatly outnumbered by other FQs in the system, while 615 * are greatly outnumbered by other FQs in the system, while
567 * dequeue scheduling is round-robin. 616 * dequeue scheduling is round-robin.
568 */ 617 */
569static inline void dpaa_assign_wq(struct dpaa_fq *fq) 618static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
570{ 619{
571 switch (fq->fq_type) { 620 switch (fq->fq_type) {
572 case FQ_TYPE_TX_CONFIRM: 621 case FQ_TYPE_TX_CONFIRM:
@@ -575,11 +624,33 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq)
575 break; 624 break;
576 case FQ_TYPE_RX_ERROR: 625 case FQ_TYPE_RX_ERROR:
577 case FQ_TYPE_TX_ERROR: 626 case FQ_TYPE_TX_ERROR:
578 fq->wq = 2; 627 fq->wq = 5;
579 break; 628 break;
580 case FQ_TYPE_RX_DEFAULT: 629 case FQ_TYPE_RX_DEFAULT:
630 fq->wq = 6;
631 break;
581 case FQ_TYPE_TX: 632 case FQ_TYPE_TX:
582 fq->wq = 3; 633 switch (idx / DPAA_TC_TXQ_NUM) {
634 case 0:
635 /* Low priority (best effort) */
636 fq->wq = 6;
637 break;
638 case 1:
639 /* Medium priority */
640 fq->wq = 2;
641 break;
642 case 2:
643 /* High priority */
644 fq->wq = 1;
645 break;
646 case 3:
647 /* Very high priority */
648 fq->wq = 0;
649 break;
650 default:
651 WARN(1, "Too many TX FQs: more than %d!\n",
652 DPAA_ETH_TXQ_NUM);
653 }
583 break; 654 break;
584 default: 655 default:
585 WARN(1, "Invalid FQ type %d for FQID %d!\n", 656 WARN(1, "Invalid FQ type %d for FQID %d!\n",
@@ -607,7 +678,7 @@ static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
607 } 678 }
608 679
609 for (i = 0; i < count; i++) 680 for (i = 0; i < count; i++)
610 dpaa_assign_wq(dpaa_fq + i); 681 dpaa_assign_wq(dpaa_fq + i, i);
611 682
612 return dpaa_fq; 683 return dpaa_fq;
613} 684}
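The TX branch of dpaa_assign_wq() above maps each chunk of DPAA_TC_TXQ_NUM frame queues to a QMan work queue of increasing priority: the first chunk lands on WQ6 (best effort), the next on WQ2, then WQ1, then WQ0. A small stand-alone mirror of that mapping (again with DPAA_TC_TXQ_NUM shrunk to 8 for illustration):

    #include <stdio.h>

    #define DPAA_TC_TXQ_NUM 8   /* NR_CPUS in the driver */

    /* Mirrors the switch (idx / DPAA_TC_TXQ_NUM) in the hunk above:
     * work queues 6, 2, 1, 0, from lowest to highest priority. */
    static int tx_fq_to_wq(int idx)
    {
        static const int wq_for_tc[] = { 6, 2, 1, 0 };

        return wq_for_tc[idx / DPAA_TC_TXQ_NUM];
    }

    int main(void)
    {
        int idx;

        for (idx = 0; idx < 4 * DPAA_TC_TXQ_NUM; idx += DPAA_TC_TXQ_NUM)
            printf("tx fq %2d..%2d -> wq %d\n", idx,
                   idx + DPAA_TC_TXQ_NUM - 1, tx_fq_to_wq(idx));
        return 0;
    }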
@@ -985,7 +1056,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
985 /* Initialization common to all ingress queues */ 1056 /* Initialization common to all ingress queues */
986 if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) { 1057 if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
987 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA); 1058 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
988 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE); 1059 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
1060 QM_FQCTRL_CTXASTASHING);
989 initfq.fqd.context_a.stashing.exclusive = 1061 initfq.fqd.context_a.stashing.exclusive =
990 QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX | 1062 QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
991 QM_STASHING_EXCL_ANNOTATION; 1063 QM_STASHING_EXCL_ANNOTATION;
@@ -1055,9 +1127,9 @@ static int dpaa_fq_free(struct device *dev, struct list_head *list)
1055 return err; 1127 return err;
1056} 1128}
1057 1129
1058static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq, 1130static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
1059 struct dpaa_fq *defq, 1131 struct dpaa_fq *defq,
1060 struct dpaa_buffer_layout *buf_layout) 1132 struct dpaa_buffer_layout *buf_layout)
1061{ 1133{
1062 struct fman_buffer_prefix_content buf_prefix_content; 1134 struct fman_buffer_prefix_content buf_prefix_content;
1063 struct fman_port_params params; 1135 struct fman_port_params params;
@@ -1076,23 +1148,29 @@ static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
1076 params.specific_params.non_rx_params.dflt_fqid = defq->fqid; 1148 params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
1077 1149
1078 err = fman_port_config(port, &params); 1150 err = fman_port_config(port, &params);
1079 if (err) 1151 if (err) {
1080 pr_err("%s: fman_port_config failed\n", __func__); 1152 pr_err("%s: fman_port_config failed\n", __func__);
1153 return err;
1154 }
1081 1155
1082 err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content); 1156 err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1083 if (err) 1157 if (err) {
1084 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n", 1158 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1085 __func__); 1159 __func__);
1160 return err;
1161 }
1086 1162
1087 err = fman_port_init(port); 1163 err = fman_port_init(port);
1088 if (err) 1164 if (err)
1089 pr_err("%s: fm_port_init failed\n", __func__); 1165 pr_err("%s: fm_port_init failed\n", __func__);
1166
1167 return err;
1090} 1168}
1091 1169
1092static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps, 1170static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
1093 size_t count, struct dpaa_fq *errq, 1171 size_t count, struct dpaa_fq *errq,
1094 struct dpaa_fq *defq, 1172 struct dpaa_fq *defq,
1095 struct dpaa_buffer_layout *buf_layout) 1173 struct dpaa_buffer_layout *buf_layout)
1096{ 1174{
1097 struct fman_buffer_prefix_content buf_prefix_content; 1175 struct fman_buffer_prefix_content buf_prefix_content;
1098 struct fman_port_rx_params *rx_p; 1176 struct fman_port_rx_params *rx_p;
@@ -1120,32 +1198,44 @@ static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
1120 } 1198 }
1121 1199
1122 err = fman_port_config(port, &params); 1200 err = fman_port_config(port, &params);
1123 if (err) 1201 if (err) {
1124 pr_err("%s: fman_port_config failed\n", __func__); 1202 pr_err("%s: fman_port_config failed\n", __func__);
1203 return err;
1204 }
1125 1205
1126 err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content); 1206 err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1127 if (err) 1207 if (err) {
1128 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n", 1208 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1129 __func__); 1209 __func__);
1210 return err;
1211 }
1130 1212
1131 err = fman_port_init(port); 1213 err = fman_port_init(port);
1132 if (err) 1214 if (err)
1133 pr_err("%s: fm_port_init failed\n", __func__); 1215 pr_err("%s: fm_port_init failed\n", __func__);
1216
1217 return err;
1134} 1218}
1135 1219
1136static void dpaa_eth_init_ports(struct mac_device *mac_dev, 1220static int dpaa_eth_init_ports(struct mac_device *mac_dev,
1137 struct dpaa_bp **bps, size_t count, 1221 struct dpaa_bp **bps, size_t count,
1138 struct fm_port_fqs *port_fqs, 1222 struct fm_port_fqs *port_fqs,
1139 struct dpaa_buffer_layout *buf_layout, 1223 struct dpaa_buffer_layout *buf_layout,
1140 struct device *dev) 1224 struct device *dev)
1141{ 1225{
1142 struct fman_port *rxport = mac_dev->port[RX]; 1226 struct fman_port *rxport = mac_dev->port[RX];
1143 struct fman_port *txport = mac_dev->port[TX]; 1227 struct fman_port *txport = mac_dev->port[TX];
1228 int err;
1229
1230 err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
1231 port_fqs->tx_defq, &buf_layout[TX]);
1232 if (err)
1233 return err;
1234
1235 err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
1236 port_fqs->rx_defq, &buf_layout[RX]);
1144 1237
1145 dpaa_eth_init_tx_port(txport, port_fqs->tx_errq, 1238 return err;
1146 port_fqs->tx_defq, &buf_layout[TX]);
1147 dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
1148 port_fqs->rx_defq, &buf_layout[RX]);
1149} 1239}
1150 1240
1151static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp, 1241static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
@@ -1526,6 +1616,23 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
1526 return skb; 1616 return skb;
1527} 1617}
1528 1618
1619static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
1620{
1621 /* The parser has run and performed L4 checksum validation.
1622 * We know there were no parser errors (and implicitly no
1623 * L4 csum error), otherwise we wouldn't be here.
1624 */
1625 if ((priv->net_dev->features & NETIF_F_RXCSUM) &&
1626 (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV))
1627 return CHECKSUM_UNNECESSARY;
1628
1629 /* We're here because either the parser didn't run or the L4 checksum
1630 * was not verified. This may include the case of a UDP frame with
1631 * checksum zero or an L4 proto other than TCP/UDP
1632 */
1633 return CHECKSUM_NONE;
1634}
1635
1529/* Build a linear skb around the received buffer. 1636/* Build a linear skb around the received buffer.
1530 * We are guaranteed there is enough room at the end of the data buffer to 1637 * We are guaranteed there is enough room at the end of the data buffer to
1531 * accommodate the shared info area of the skb. 1638 * accommodate the shared info area of the skb.
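rx_csum_offload() above is the whole RX checksum offload policy: a frame is marked CHECKSUM_UNNECESSARY only when the interface has NETIF_F_RXCSUM enabled and the frame descriptor's status word carries FM_FD_STAT_L4CV, i.e. the FMan parser actually covered the L4 checksum; everything else falls back to CHECKSUM_NONE and the stack verifies the checksum itself. A stand-alone restatement of that decision (the CHECKSUM_* values are stand-ins for the kernel's constants):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FM_FD_STAT_L4CV 0x00000004  /* from the hunk above */

    enum { CHECKSUM_NONE = 0, CHECKSUM_UNNECESSARY = 1 };  /* stand-ins */

    /* Trust the frame only if the feature is enabled and the parser says
     * it validated the L4 checksum. */
    static int rx_csum(bool rxcsum_enabled, uint32_t fd_status)
    {
        if (rxcsum_enabled && (fd_status & FM_FD_STAT_L4CV))
            return CHECKSUM_UNNECESSARY;
        return CHECKSUM_NONE;
    }

    int main(void)
    {
        printf("%d\n", rx_csum(true, FM_FD_STAT_L4CV));   /* 1 */
        printf("%d\n", rx_csum(true, 0));                 /* 0 */
        printf("%d\n", rx_csum(false, FM_FD_STAT_L4CV));  /* 0 */
        return 0;
    }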
@@ -1556,7 +1663,7 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
1556 skb_reserve(skb, fd_off); 1663 skb_reserve(skb, fd_off);
1557 skb_put(skb, qm_fd_get_length(fd)); 1664 skb_put(skb, qm_fd_get_length(fd));
1558 1665
1559 skb->ip_summed = CHECKSUM_NONE; 1666 skb->ip_summed = rx_csum_offload(priv, fd);
1560 1667
1561 return skb; 1668 return skb;
1562 1669
@@ -1616,7 +1723,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
1616 if (WARN_ON(unlikely(!skb))) 1723 if (WARN_ON(unlikely(!skb)))
1617 goto free_buffers; 1724 goto free_buffers;
1618 1725
1619 skb->ip_summed = CHECKSUM_NONE; 1726 skb->ip_summed = rx_csum_offload(priv, fd);
1620 1727
1621 /* Make sure forwarded skbs will have enough space 1728 /* Make sure forwarded skbs will have enough space
1622 * on Tx, if extra headers are added. 1729 * on Tx, if extra headers are added.
@@ -2093,7 +2200,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
2093 dma_addr_t addr = qm_fd_addr(fd); 2200 dma_addr_t addr = qm_fd_addr(fd);
2094 enum qm_fd_format fd_format; 2201 enum qm_fd_format fd_format;
2095 struct net_device *net_dev; 2202 struct net_device *net_dev;
2096 u32 fd_status = fd->status; 2203 u32 fd_status;
2097 struct dpaa_bp *dpaa_bp; 2204 struct dpaa_bp *dpaa_bp;
2098 struct dpaa_priv *priv; 2205 struct dpaa_priv *priv;
2099 unsigned int skb_len; 2206 unsigned int skb_len;
@@ -2350,6 +2457,7 @@ static const struct net_device_ops dpaa_ops = {
2350 .ndo_validate_addr = eth_validate_addr, 2457 .ndo_validate_addr = eth_validate_addr,
2351 .ndo_set_rx_mode = dpaa_set_rx_mode, 2458 .ndo_set_rx_mode = dpaa_set_rx_mode,
2352 .ndo_do_ioctl = dpaa_ioctl, 2459 .ndo_do_ioctl = dpaa_ioctl,
2460 .ndo_setup_tc = dpaa_setup_tc,
2353}; 2461};
2354 2462
2355static int dpaa_napi_add(struct net_device *net_dev) 2463static int dpaa_napi_add(struct net_device *net_dev)
@@ -2624,8 +2732,10 @@ static int dpaa_eth_probe(struct platform_device *pdev)
2624 priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]); 2732 priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
2625 2733
2626 /* All real interfaces need their ports initialized */ 2734 /* All real interfaces need their ports initialized */
2627 dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs, 2735 err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
2628 &priv->buf_layout[0], dev); 2736 &priv->buf_layout[0], dev);
2737 if (err)
2738 goto init_ports_failed;
2629 2739
2630 priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv); 2740 priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
2631 if (!priv->percpu_priv) { 2741 if (!priv->percpu_priv) {
@@ -2638,6 +2748,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
2638 memset(percpu_priv, 0, sizeof(*percpu_priv)); 2748 memset(percpu_priv, 0, sizeof(*percpu_priv));
2639 } 2749 }
2640 2750
2751 priv->num_tc = 1;
2752 netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
2753
2641 /* Initialize NAPI */ 2754 /* Initialize NAPI */
2642 err = dpaa_napi_add(net_dev); 2755 err = dpaa_napi_add(net_dev);
2643 if (err < 0) 2756 if (err < 0)
@@ -2658,6 +2771,7 @@ netdev_init_failed:
2658napi_add_failed: 2771napi_add_failed:
2659 dpaa_napi_del(net_dev); 2772 dpaa_napi_del(net_dev);
2660alloc_percpu_failed: 2773alloc_percpu_failed:
2774init_ports_failed:
2661 dpaa_fq_free(dev, &priv->dpaa_fq_list); 2775 dpaa_fq_free(dev, &priv->dpaa_fq_list);
2662fq_alloc_failed: 2776fq_alloc_failed:
2663 qman_delete_cgr_safe(&priv->ingress_cgr); 2777 qman_delete_cgr_safe(&priv->ingress_cgr);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index 1f9aebf3f3c5..9941a7866ebe 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -39,7 +39,12 @@
39#include "mac.h" 39#include "mac.h"
40#include "dpaa_eth_trace.h" 40#include "dpaa_eth_trace.h"
41 41
42#define DPAA_ETH_TXQ_NUM NR_CPUS 42/* Number of prioritised traffic classes */
43#define DPAA_TC_NUM 4
44/* Number of Tx queues per traffic class */
45#define DPAA_TC_TXQ_NUM NR_CPUS
46/* Total number of Tx queues */
47#define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
43 48
44#define DPAA_BPS_NUM 3 /* number of bpools per interface */ 49#define DPAA_BPS_NUM 3 /* number of bpools per interface */
45 50
@@ -152,6 +157,7 @@ struct dpaa_priv {
152 u16 channel; 157 u16 channel;
153 struct list_head dpaa_fq_list; 158 struct list_head dpaa_fq_list;
154 159
160 u8 num_tc;
155 u32 msg_enable; /* net_device message level */ 161 u32 msg_enable; /* net_device message level */
156 162
157 struct { 163 struct {
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index f60845f0c6ca..4aefe2438969 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -59,6 +59,7 @@
59#define DMA_OFFSET 0x000C2000 59#define DMA_OFFSET 0x000C2000
60#define FPM_OFFSET 0x000C3000 60#define FPM_OFFSET 0x000C3000
61#define IMEM_OFFSET 0x000C4000 61#define IMEM_OFFSET 0x000C4000
62#define HWP_OFFSET 0x000C7000
62#define CGP_OFFSET 0x000DB000 63#define CGP_OFFSET 0x000DB000
63 64
64/* Exceptions bit map */ 65/* Exceptions bit map */
@@ -218,6 +219,9 @@
218 219
219#define QMI_GS_HALT_NOT_BUSY 0x00000002 220#define QMI_GS_HALT_NOT_BUSY 0x00000002
220 221
222/* HWP defines */
223#define HWP_RPIMAC_PEN 0x00000001
224
221/* IRAM defines */ 225/* IRAM defines */
222#define IRAM_IADD_AIE 0x80000000 226#define IRAM_IADD_AIE 0x80000000
223#define IRAM_READY 0x80000000 227#define IRAM_READY 0x80000000
@@ -475,6 +479,12 @@ struct fman_dma_regs {
475 u32 res00e0[0x400 - 56]; 479 u32 res00e0[0x400 - 56];
476}; 480};
477 481
482struct fman_hwp_regs {
483 u32 res0000[0x844 / 4]; /* 0x000..0x843 */
484 u32 fmprrpimac; /* FM Parser Internal memory access control */
485 u32 res[(0x1000 - 0x848) / 4]; /* 0x848..0xFFF */
486};
487
478/* Structure that holds current FMan state. 488/* Structure that holds current FMan state.
479 * Used for saving run time information. 489 * Used for saving run time information.
480 */ 490 */
@@ -606,6 +616,7 @@ struct fman {
606 struct fman_bmi_regs __iomem *bmi_regs; 616 struct fman_bmi_regs __iomem *bmi_regs;
607 struct fman_qmi_regs __iomem *qmi_regs; 617 struct fman_qmi_regs __iomem *qmi_regs;
608 struct fman_dma_regs __iomem *dma_regs; 618 struct fman_dma_regs __iomem *dma_regs;
619 struct fman_hwp_regs __iomem *hwp_regs;
609 fman_exceptions_cb *exception_cb; 620 fman_exceptions_cb *exception_cb;
610 fman_bus_error_cb *bus_error_cb; 621 fman_bus_error_cb *bus_error_cb;
611 /* Spinlock for FMan use */ 622 /* Spinlock for FMan use */
@@ -999,6 +1010,12 @@ static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
999 iowrite32be(tmp_reg, &qmi_rg->fmqm_ien); 1010 iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
1000} 1011}
1001 1012
1013static void hwp_init(struct fman_hwp_regs __iomem *hwp_rg)
1014{
1015 /* enable HW Parser */
1016 iowrite32be(HWP_RPIMAC_PEN, &hwp_rg->fmprrpimac);
1017}
1018
1002static int enable(struct fman *fman, struct fman_cfg *cfg) 1019static int enable(struct fman *fman, struct fman_cfg *cfg)
1003{ 1020{
1004 u32 cfg_reg = 0; 1021 u32 cfg_reg = 0;
@@ -1195,7 +1212,7 @@ static int fill_soc_specific_params(struct fman_state_struct *state)
1195 state->max_num_of_open_dmas = 32; 1212 state->max_num_of_open_dmas = 32;
1196 state->fm_port_num_of_cg = 256; 1213 state->fm_port_num_of_cg = 256;
1197 state->num_of_rx_ports = 6; 1214 state->num_of_rx_ports = 6;
1198 state->total_fifo_size = 122 * 1024; 1215 state->total_fifo_size = 136 * 1024;
1199 break; 1216 break;
1200 1217
1201 case 2: 1218 case 2:
@@ -1793,6 +1810,7 @@ static int fman_config(struct fman *fman)
1793 fman->bmi_regs = base_addr + BMI_OFFSET; 1810 fman->bmi_regs = base_addr + BMI_OFFSET;
1794 fman->qmi_regs = base_addr + QMI_OFFSET; 1811 fman->qmi_regs = base_addr + QMI_OFFSET;
1795 fman->dma_regs = base_addr + DMA_OFFSET; 1812 fman->dma_regs = base_addr + DMA_OFFSET;
1813 fman->hwp_regs = base_addr + HWP_OFFSET;
1796 fman->base_addr = base_addr; 1814 fman->base_addr = base_addr;
1797 1815
1798 spin_lock_init(&fman->spinlock); 1816 spin_lock_init(&fman->spinlock);
@@ -2062,6 +2080,9 @@ static int fman_init(struct fman *fman)
2062 /* Init QMI Registers */ 2080 /* Init QMI Registers */
2063 qmi_init(fman->qmi_regs, fman->cfg); 2081 qmi_init(fman->qmi_regs, fman->cfg);
2064 2082
2083 /* Init HW Parser */
2084 hwp_init(fman->hwp_regs);
2085
2065 err = enable(fman, cfg); 2086 err = enable(fman, cfg);
2066 if (err != 0) 2087 if (err != 0)
2067 return err; 2088 return err;
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index 57aae8d17d77..f53e1473dbcc 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -134,14 +134,14 @@ enum fman_exceptions {
134struct fman_prs_result { 134struct fman_prs_result {
135 u8 lpid; /* Logical port id */ 135 u8 lpid; /* Logical port id */
136 u8 shimr; /* Shim header result */ 136 u8 shimr; /* Shim header result */
137 u16 l2r; /* Layer 2 result */ 137 __be16 l2r; /* Layer 2 result */
138 u16 l3r; /* Layer 3 result */ 138 __be16 l3r; /* Layer 3 result */
139 u8 l4r; /* Layer 4 result */ 139 u8 l4r; /* Layer 4 result */
140 u8 cplan; /* Classification plan id */ 140 u8 cplan; /* Classification plan id */
141 u16 nxthdr; /* Next Header */ 141 __be16 nxthdr; /* Next Header */
142 u16 cksum; /* Running-sum */ 142 __be16 cksum; /* Running-sum */
143 /* Flags&fragment-offset field of the last IP-header */ 143 /* Flags&fragment-offset field of the last IP-header */
144 u16 flags_frag_off; 144 __be16 flags_frag_off;
145 /* Routing type field of a IPV6 routing extension header */ 145 /* Routing type field of a IPV6 routing extension header */
146 u8 route_type; 146 u8 route_type;
147 /* Routing Extension Header Present; last bit is IP valid */ 147 /* Routing Extension Header Present; last bit is IP valid */
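Annotating l2r, l3r, nxthdr, cksum and flags_frag_off as __be16 documents that the FMan writes the parse-result area big-endian; sparse can then flag any direct use, and readers go through byte-order conversion. A hedged sketch of an accessor (hypothetical helper, not from the driver):

    /* Hypothetical helper: read the Next Header field in CPU byte order. */
    static u16 prs_next_header(const struct fman_prs_result *prs)
    {
        return be16_to_cpu(prs->nxthdr);
    }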
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 9f3bb50a2365..57bf44fa16a1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -62,6 +62,7 @@
62 62
63#define BMI_PORT_REGS_OFFSET 0 63#define BMI_PORT_REGS_OFFSET 0
64#define QMI_PORT_REGS_OFFSET 0x400 64#define QMI_PORT_REGS_OFFSET 0x400
65#define HWP_PORT_REGS_OFFSET 0x800
65 66
66/* Default values */ 67/* Default values */
67#define DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN \ 68#define DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN \
@@ -182,7 +183,7 @@
182#define NIA_ENG_BMI 0x00500000 183#define NIA_ENG_BMI 0x00500000
183#define NIA_ENG_QMI_ENQ 0x00540000 184#define NIA_ENG_QMI_ENQ 0x00540000
184#define NIA_ENG_QMI_DEQ 0x00580000 185#define NIA_ENG_QMI_DEQ 0x00580000
185 186#define NIA_ENG_HWP 0x00440000
186#define NIA_BMI_AC_ENQ_FRAME 0x00000002 187#define NIA_BMI_AC_ENQ_FRAME 0x00000002
187#define NIA_BMI_AC_TX_RELEASE 0x000002C0 188#define NIA_BMI_AC_TX_RELEASE 0x000002C0
188#define NIA_BMI_AC_RELEASE 0x000000C0 189#define NIA_BMI_AC_RELEASE 0x000000C0
@@ -317,6 +318,19 @@ struct fman_port_qmi_regs {
317 u32 fmqm_pndcc; /* PortID n Dequeue Confirm Counter */ 318 u32 fmqm_pndcc; /* PortID n Dequeue Confirm Counter */
318}; 319};
319 320
321#define HWP_HXS_COUNT 16
322#define HWP_HXS_PHE_REPORT 0x00000800
323#define HWP_HXS_PCAC_PSTAT 0x00000100
324#define HWP_HXS_PCAC_PSTOP 0x00000001
325struct fman_port_hwp_regs {
326 struct {
327 u32 ssa; /* Soft Sequence Attachment */
328 u32 lcv; /* Line-up Enable Confirmation Mask */
329 } pmda[HWP_HXS_COUNT]; /* Parse Memory Direct Access Registers */
330 u32 reserved080[(0x3f8 - 0x080) / 4]; /* (0x080-0x3f7) */
331 u32 fmpr_pcac; /* Configuration Access Control */
332};
333
320/* QMI dequeue prefetch modes */ 334/* QMI dequeue prefetch modes */
321enum fman_port_deq_prefetch { 335enum fman_port_deq_prefetch {
322 FMAN_PORT_DEQ_NO_PREFETCH, /* No prefetch mode */ 336 FMAN_PORT_DEQ_NO_PREFETCH, /* No prefetch mode */
@@ -436,6 +450,7 @@ struct fman_port {
436 450
437 union fman_port_bmi_regs __iomem *bmi_regs; 451 union fman_port_bmi_regs __iomem *bmi_regs;
438 struct fman_port_qmi_regs __iomem *qmi_regs; 452 struct fman_port_qmi_regs __iomem *qmi_regs;
453 struct fman_port_hwp_regs __iomem *hwp_regs;
439 454
440 struct fman_sp_buffer_offsets buffer_offsets; 455 struct fman_sp_buffer_offsets buffer_offsets;
441 456
@@ -521,9 +536,12 @@ static int init_bmi_rx(struct fman_port *port)
521 /* NIA */ 536 /* NIA */
522 tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT; 537 tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
523 538
524 tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME; 539 tmp |= NIA_ENG_HWP;
525 iowrite32be(tmp, &regs->fmbm_rfne); 540 iowrite32be(tmp, &regs->fmbm_rfne);
526 541
542 /* Parser Next Engine NIA */
543 iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME, &regs->fmbm_rfpne);
544
527 /* Enqueue NIA */ 545 /* Enqueue NIA */
528 iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_rfene); 546 iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_rfene);
529 547
@@ -665,6 +683,50 @@ static int init_qmi(struct fman_port *port)
665 return 0; 683 return 0;
666} 684}
667 685
686static void stop_port_hwp(struct fman_port *port)
687{
688 struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
689 int cnt = 100;
690
691 iowrite32be(HWP_HXS_PCAC_PSTOP, &regs->fmpr_pcac);
692
693 while (cnt-- > 0 &&
694 (ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT))
695 udelay(10);
696 if (!cnt)
697 pr_err("Timeout stopping HW Parser\n");
698}
699
700static void start_port_hwp(struct fman_port *port)
701{
702 struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
703 int cnt = 100;
704
705 iowrite32be(0, &regs->fmpr_pcac);
706
707 while (cnt-- > 0 &&
708 !(ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT))
709 udelay(10);
710 if (!cnt)
711 pr_err("Timeout starting HW Parser\n");
712}
713
714static void init_hwp(struct fman_port *port)
715{
716 struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
717 int i;
718
719 stop_port_hwp(port);
720
721 for (i = 0; i < HWP_HXS_COUNT; i++) {
722 /* enable HXS error reporting into FD[STATUS] PHE */
723 iowrite32be(0x00000000, &regs->pmda[i].ssa);
724 iowrite32be(0xffffffff, &regs->pmda[i].lcv);
725 }
726
727 start_port_hwp(port);
728}
729
668static int init(struct fman_port *port) 730static int init(struct fman_port *port)
669{ 731{
670 int err; 732 int err;
@@ -673,6 +735,8 @@ static int init(struct fman_port *port)
673 switch (port->port_type) { 735 switch (port->port_type) {
674 case FMAN_PORT_TYPE_RX: 736 case FMAN_PORT_TYPE_RX:
675 err = init_bmi_rx(port); 737 err = init_bmi_rx(port);
738 if (!err)
739 init_hwp(port);
676 break; 740 break;
677 case FMAN_PORT_TYPE_TX: 741 case FMAN_PORT_TYPE_TX:
678 err = init_bmi_tx(port); 742 err = init_bmi_tx(port);
@@ -686,7 +750,8 @@ static int init(struct fman_port *port)
686 750
687 /* Init QMI registers */ 751 /* Init QMI registers */
688 err = init_qmi(port); 752 err = init_qmi(port);
689 return err; 753 if (err)
754 return err;
690 755
691 return 0; 756 return 0;
692} 757}
@@ -1247,7 +1312,7 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
1247 /* Allocate the FM driver's parameters structure */ 1312 /* Allocate the FM driver's parameters structure */
1248 port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL); 1313 port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL);
1249 if (!port->cfg) 1314 if (!port->cfg)
1250 goto err_params; 1315 return -EINVAL;
1251 1316
1252 /* Initialize FM port parameters which will be kept by the driver */ 1317 /* Initialize FM port parameters which will be kept by the driver */
1253 port->port_type = port->dts_params.type; 1318 port->port_type = port->dts_params.type;
@@ -1276,6 +1341,7 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
1276 /* set memory map pointers */ 1341 /* set memory map pointers */
1277 port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET; 1342 port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET;
1278 port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET; 1343 port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET;
1344 port->hwp_regs = base_addr + HWP_PORT_REGS_OFFSET;
1279 1345
1280 port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH; 1346 port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH;
1281 /* resource distribution. */ 1347 /* resource distribution. */
@@ -1327,8 +1393,6 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
1327 1393
1328err_port_cfg: 1394err_port_cfg:
1329 kfree(port->cfg); 1395 kfree(port->cfg);
1330err_params:
1331 kfree(port);
1332 return -EINVAL; 1396 return -EINVAL;
1333} 1397}
1334EXPORT_SYMBOL(fman_port_config); 1398EXPORT_SYMBOL(fman_port_config);
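The Rx hunks above rewire the frame path from "BMI straight to enqueue" into BMI -> hardware parser -> BMI enqueue: fmbm_rfne now targets NIA_ENG_HWP and the parser's next-engine register fmbm_rfpne sends completed frames back to NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME, while init_hwp() brackets the per-HXS setup with stop_port_hwp()/start_port_hwp(), each of which polls the PSTAT bit of fmpr_pcac with a bounded retry budget. A stand-alone sketch of that bounded-poll pattern, with a hypothetical read_pcac()/write_pcac() pair standing in for the driver's ioread32be()/iowrite32be() accesses:

#include <stdbool.h>
#include <stdio.h>

#define HWP_HXS_PCAC_PSTAT 0x00000100
#define HWP_HXS_PCAC_PSTOP 0x00000001

/* hypothetical MMIO accessors for the fmpr_pcac register */
extern unsigned int read_pcac(void);
extern void write_pcac(unsigned int val);

/* Poll until (fmpr_pcac & mask) matches want_set or the retry budget is
 * exhausted; returns true on success, false on timeout. */
static bool poll_pcac(unsigned int mask, bool want_set, int retries)
{
        while (retries-- > 0) {
                if (((read_pcac() & mask) != 0) == want_set)
                        return true;
                /* the driver waits udelay(10) between polls */
        }
        return false;
}

static void parser_stop(void)
{
        write_pcac(HWP_HXS_PCAC_PSTOP);
        if (!poll_pcac(HWP_HXS_PCAC_PSTAT, false, 100))
                fprintf(stderr, "Timeout stopping HW Parser\n");
}

static void parser_start(void)
{
        write_pcac(0);
        if (!poll_pcac(HWP_HXS_PCAC_PSTAT, true, 100))
                fprintf(stderr, "Timeout starting HW Parser\n");
}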
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index db9c0bcf54cd..1fc27c97e3b2 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -38,12 +38,6 @@
38#include <asm/irq.h> 38#include <asm/irq.h>
39#include <linux/uaccess.h> 39#include <linux/uaccess.h>
40 40
41#ifdef CONFIG_8xx
42#include <asm/8xx_immap.h>
43#include <asm/pgtable.h>
44#include <asm/cpm1.h>
45#endif
46
47#include "fs_enet.h" 41#include "fs_enet.h"
48#include "fec.h" 42#include "fec.h"
49 43
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index 96d44cf44fe0..64300ac13e02 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -37,12 +37,6 @@
37#include <asm/irq.h> 37#include <asm/irq.h>
38#include <linux/uaccess.h> 38#include <linux/uaccess.h>
39 39
40#ifdef CONFIG_8xx
41#include <asm/8xx_immap.h>
42#include <asm/pgtable.h>
43#include <asm/cpm1.h>
44#endif
45
46#include "fs_enet.h" 40#include "fs_enet.h"
47 41
48/*************************************************/ 42/*************************************************/
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5f11b4dc95d2..30e1699649b8 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -110,6 +110,11 @@ static int ibmvnic_poll(struct napi_struct *napi, int data);
110static void send_map_query(struct ibmvnic_adapter *adapter); 110static void send_map_query(struct ibmvnic_adapter *adapter);
111static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8); 111static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
112static void send_request_unmap(struct ibmvnic_adapter *, u8); 112static void send_request_unmap(struct ibmvnic_adapter *, u8);
113static void send_login(struct ibmvnic_adapter *adapter);
114static void send_cap_queries(struct ibmvnic_adapter *adapter);
115static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
116static int ibmvnic_init(struct ibmvnic_adapter *);
117static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *);
113 118
114struct ibmvnic_stat { 119struct ibmvnic_stat {
115 char name[ETH_GSTRING_LEN]; 120 char name[ETH_GSTRING_LEN];
@@ -368,6 +373,38 @@ static void free_rx_pool(struct ibmvnic_adapter *adapter,
368 pool->rx_buff = NULL; 373 pool->rx_buff = NULL;
369} 374}
370 375
376static int ibmvnic_login(struct net_device *netdev)
377{
378 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
379 unsigned long timeout = msecs_to_jiffies(30000);
380 struct device *dev = &adapter->vdev->dev;
381
382 do {
383 if (adapter->renegotiate) {
384 adapter->renegotiate = false;
385 release_sub_crqs_no_irqs(adapter);
386
387 reinit_completion(&adapter->init_done);
388 send_cap_queries(adapter);
389 if (!wait_for_completion_timeout(&adapter->init_done,
390 timeout)) {
391 dev_err(dev, "Capabilities query timeout\n");
392 return -1;
393 }
394 }
395
396 reinit_completion(&adapter->init_done);
397 send_login(adapter);
398 if (!wait_for_completion_timeout(&adapter->init_done,
399 timeout)) {
400 dev_err(dev, "Login timeout\n");
401 return -1;
402 }
403 } while (adapter->renegotiate);
404
405 return 0;
406}
407
371static int ibmvnic_open(struct net_device *netdev) 408static int ibmvnic_open(struct net_device *netdev)
372{ 409{
373 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 410 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
@@ -377,8 +414,31 @@ static int ibmvnic_open(struct net_device *netdev)
377 int rxadd_subcrqs; 414 int rxadd_subcrqs;
378 u64 *size_array; 415 u64 *size_array;
379 int tx_subcrqs; 416 int tx_subcrqs;
417 int rc = 0;
380 int i, j; 418 int i, j;
381 419
420 if (adapter->is_closed) {
421 rc = ibmvnic_init(adapter);
422 if (rc)
423 return rc;
424 }
425
426 rc = ibmvnic_login(netdev);
427 if (rc)
428 return rc;
429
430 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
431 if (rc) {
432 dev_err(dev, "failed to set the number of tx queues\n");
433 return -1;
434 }
435
436 rc = init_sub_crq_irqs(adapter);
437 if (rc) {
438 dev_err(dev, "failed to initialize sub crq irqs\n");
439 return -1;
440 }
441
382 rxadd_subcrqs = 442 rxadd_subcrqs =
383 be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 443 be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
384 tx_subcrqs = 444 tx_subcrqs =
@@ -473,6 +533,7 @@ static int ibmvnic_open(struct net_device *netdev)
473 ibmvnic_send_crq(adapter, &crq); 533 ibmvnic_send_crq(adapter, &crq);
474 534
475 netif_tx_start_all_queues(netdev); 535 netif_tx_start_all_queues(netdev);
536 adapter->is_closed = false;
476 537
477 return 0; 538 return 0;
478 539
@@ -508,24 +569,16 @@ rx_pool_arr_alloc_failed:
508 for (i = 0; i < adapter->req_rx_queues; i++) 569 for (i = 0; i < adapter->req_rx_queues; i++)
509 napi_disable(&adapter->napi[i]); 570 napi_disable(&adapter->napi[i]);
510alloc_napi_failed: 571alloc_napi_failed:
572 release_sub_crqs(adapter);
511 return -ENOMEM; 573 return -ENOMEM;
512} 574}
513 575
514static int ibmvnic_close(struct net_device *netdev) 576static void ibmvnic_release_resources(struct ibmvnic_adapter *adapter)
515{ 577{
516 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
517 struct device *dev = &adapter->vdev->dev; 578 struct device *dev = &adapter->vdev->dev;
518 union ibmvnic_crq crq; 579 int tx_scrqs, rx_scrqs;
519 int i; 580 int i;
520 581
521 adapter->closing = true;
522
523 for (i = 0; i < adapter->req_rx_queues; i++)
524 napi_disable(&adapter->napi[i]);
525
526 if (!adapter->failover)
527 netif_tx_stop_all_queues(netdev);
528
529 if (adapter->bounce_buffer) { 582 if (adapter->bounce_buffer) {
530 if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) { 583 if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
531 dma_unmap_single(&adapter->vdev->dev, 584 dma_unmap_single(&adapter->vdev->dev,
@@ -538,33 +591,70 @@ static int ibmvnic_close(struct net_device *netdev)
538 adapter->bounce_buffer = NULL; 591 adapter->bounce_buffer = NULL;
539 } 592 }
540 593
541 memset(&crq, 0, sizeof(crq)); 594 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
542 crq.logical_link_state.first = IBMVNIC_CRQ_CMD; 595 for (i = 0; i < tx_scrqs; i++) {
543 crq.logical_link_state.cmd = LOGICAL_LINK_STATE; 596 struct ibmvnic_tx_pool *tx_pool = &adapter->tx_pool[i];
544 crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
545 ibmvnic_send_crq(adapter, &crq);
546 597
547 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); 598 kfree(tx_pool->tx_buff);
548 i++) { 599 free_long_term_buff(adapter, &tx_pool->long_term_buff);
549 kfree(adapter->tx_pool[i].tx_buff); 600 kfree(tx_pool->free_map);
550 free_long_term_buff(adapter,
551 &adapter->tx_pool[i].long_term_buff);
552 kfree(adapter->tx_pool[i].free_map);
553 } 601 }
554 kfree(adapter->tx_pool); 602 kfree(adapter->tx_pool);
555 adapter->tx_pool = NULL; 603 adapter->tx_pool = NULL;
556 604
557 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 605 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
558 i++) { 606 for (i = 0; i < rx_scrqs; i++) {
559 free_rx_pool(adapter, &adapter->rx_pool[i]); 607 struct ibmvnic_rx_pool *rx_pool = &adapter->rx_pool[i];
560 free_long_term_buff(adapter, 608
561 &adapter->rx_pool[i].long_term_buff); 609 free_rx_pool(adapter, rx_pool);
610 free_long_term_buff(adapter, &rx_pool->long_term_buff);
562 } 611 }
563 kfree(adapter->rx_pool); 612 kfree(adapter->rx_pool);
564 adapter->rx_pool = NULL; 613 adapter->rx_pool = NULL;
565 614
566 adapter->closing = false; 615 release_sub_crqs(adapter);
616 ibmvnic_release_crq_queue(adapter);
567 617
618 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
619 debugfs_remove_recursive(adapter->debugfs_dir);
620
621 if (adapter->stats_token)
622 dma_unmap_single(dev, adapter->stats_token,
623 sizeof(struct ibmvnic_statistics),
624 DMA_FROM_DEVICE);
625
626 if (adapter->ras_comps)
627 dma_free_coherent(dev, adapter->ras_comp_num *
628 sizeof(struct ibmvnic_fw_component),
629 adapter->ras_comps, adapter->ras_comps_tok);
630
631 kfree(adapter->ras_comp_int);
632}
633
634static int ibmvnic_close(struct net_device *netdev)
635{
636 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
637 union ibmvnic_crq crq;
638 int i;
639
640 adapter->closing = true;
641
642 for (i = 0; i < adapter->req_rx_queues; i++)
643 napi_disable(&adapter->napi[i]);
644
645 if (!adapter->failover)
646 netif_tx_stop_all_queues(netdev);
647
648 memset(&crq, 0, sizeof(crq));
649 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
650 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
651 crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
652 ibmvnic_send_crq(adapter, &crq);
653
654 ibmvnic_release_resources(adapter);
655
656 adapter->is_closed = true;
657 adapter->closing = false;
568 return 0; 658 return 0;
569} 659}
570 660
@@ -3419,8 +3509,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
3419 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok, 3509 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
3420 sizeof(adapter->ip_offload_ctrl), 3510 sizeof(adapter->ip_offload_ctrl),
3421 DMA_TO_DEVICE); 3511 DMA_TO_DEVICE);
3422 /* We're done with the queries, perform the login */ 3512 complete(&adapter->init_done);
3423 send_login(adapter);
3424 break; 3513 break;
3425 case REQUEST_RAS_COMP_NUM_RSP: 3514 case REQUEST_RAS_COMP_NUM_RSP:
3426 netdev_dbg(netdev, "Got Request RAS Comp Num Response\n"); 3515 netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
@@ -3700,26 +3789,6 @@ static void handle_crq_init_rsp(struct work_struct *work)
3700 goto task_failed; 3789 goto task_failed;
3701 } 3790 }
3702 3791
3703 do {
3704 if (adapter->renegotiate) {
3705 adapter->renegotiate = false;
3706 release_sub_crqs_no_irqs(adapter);
3707
3708 reinit_completion(&adapter->init_done);
3709 send_cap_queries(adapter);
3710 if (!wait_for_completion_timeout(&adapter->init_done,
3711 timeout)) {
3712 dev_err(dev, "Passive init timeout\n");
3713 goto task_failed;
3714 }
3715 }
3716 } while (adapter->renegotiate);
3717 rc = init_sub_crq_irqs(adapter);
3718
3719 if (rc)
3720 goto task_failed;
3721
3722 netdev->real_num_tx_queues = adapter->req_tx_queues;
3723 netdev->mtu = adapter->req_mtu - ETH_HLEN; 3792 netdev->mtu = adapter->req_mtu - ETH_HLEN;
3724 3793
3725 if (adapter->failover) { 3794 if (adapter->failover) {
@@ -3751,14 +3820,65 @@ task_failed:
3751 dev_err(dev, "Passive initialization was not successful\n"); 3820 dev_err(dev, "Passive initialization was not successful\n");
3752} 3821}
3753 3822
3754static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) 3823static int ibmvnic_init(struct ibmvnic_adapter *adapter)
3755{ 3824{
3825 struct device *dev = &adapter->vdev->dev;
3756 unsigned long timeout = msecs_to_jiffies(30000); 3826 unsigned long timeout = msecs_to_jiffies(30000);
3827 struct dentry *ent;
3828 char buf[17]; /* debugfs name buf */
3829 int rc;
3830
3831 rc = ibmvnic_init_crq_queue(adapter);
3832 if (rc) {
3833 dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
3834 return rc;
3835 }
3836
3837 adapter->stats_token = dma_map_single(dev, &adapter->stats,
3838 sizeof(struct ibmvnic_statistics),
3839 DMA_FROM_DEVICE);
3840 if (dma_mapping_error(dev, adapter->stats_token)) {
3841 ibmvnic_release_crq_queue(adapter);
3842 dev_err(dev, "Couldn't map stats buffer\n");
3843 return -ENOMEM;
3844 }
3845
3846 snprintf(buf, sizeof(buf), "ibmvnic_%x", adapter->vdev->unit_address);
3847 ent = debugfs_create_dir(buf, NULL);
3848 if (!ent || IS_ERR(ent)) {
3849 dev_info(dev, "debugfs create directory failed\n");
3850 adapter->debugfs_dir = NULL;
3851 } else {
3852 adapter->debugfs_dir = ent;
3853 ent = debugfs_create_file("dump", S_IRUGO,
3854 adapter->debugfs_dir,
3855 adapter->netdev, &ibmvnic_dump_ops);
3856 if (!ent || IS_ERR(ent)) {
3857 dev_info(dev, "debugfs create dump file failed\n");
3858 adapter->debugfs_dump = NULL;
3859 } else {
3860 adapter->debugfs_dump = ent;
3861 }
3862 }
3863
3864 init_completion(&adapter->init_done);
3865 ibmvnic_send_crq_init(adapter);
3866 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3867 dev_err(dev, "Initialization sequence timed out\n");
3868 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3869 debugfs_remove_recursive(adapter->debugfs_dir);
3870 ibmvnic_release_crq_queue(adapter);
3871 return -1;
3872 }
3873
3874 return 0;
3875}
3876
3877static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3878{
3757 struct ibmvnic_adapter *adapter; 3879 struct ibmvnic_adapter *adapter;
3758 struct net_device *netdev; 3880 struct net_device *netdev;
3759 unsigned char *mac_addr_p; 3881 unsigned char *mac_addr_p;
3760 struct dentry *ent;
3761 char buf[17]; /* debugfs name buf */
3762 int rc; 3882 int rc;
3763 3883
3764 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", 3884 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
@@ -3796,118 +3916,36 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3796 3916
3797 spin_lock_init(&adapter->stats_lock); 3917 spin_lock_init(&adapter->stats_lock);
3798 3918
3799 rc = ibmvnic_init_crq_queue(adapter);
3800 if (rc) {
3801 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
3802 goto free_netdev;
3803 }
3804
3805 INIT_LIST_HEAD(&adapter->errors); 3919 INIT_LIST_HEAD(&adapter->errors);
3806 INIT_LIST_HEAD(&adapter->inflight); 3920 INIT_LIST_HEAD(&adapter->inflight);
3807 spin_lock_init(&adapter->error_list_lock); 3921 spin_lock_init(&adapter->error_list_lock);
3808 spin_lock_init(&adapter->inflight_lock); 3922 spin_lock_init(&adapter->inflight_lock);
3809 3923
3810 adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats, 3924 rc = ibmvnic_init(adapter);
3811 sizeof(struct ibmvnic_statistics),
3812 DMA_FROM_DEVICE);
3813 if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
3814 if (!firmware_has_feature(FW_FEATURE_CMO))
3815 dev_err(&dev->dev, "Couldn't map stats buffer\n");
3816 rc = -ENOMEM;
3817 goto free_crq;
3818 }
3819
3820 snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
3821 ent = debugfs_create_dir(buf, NULL);
3822 if (!ent || IS_ERR(ent)) {
3823 dev_info(&dev->dev, "debugfs create directory failed\n");
3824 adapter->debugfs_dir = NULL;
3825 } else {
3826 adapter->debugfs_dir = ent;
3827 ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir,
3828 netdev, &ibmvnic_dump_ops);
3829 if (!ent || IS_ERR(ent)) {
3830 dev_info(&dev->dev,
3831 "debugfs create dump file failed\n");
3832 adapter->debugfs_dump = NULL;
3833 } else {
3834 adapter->debugfs_dump = ent;
3835 }
3836 }
3837
3838 init_completion(&adapter->init_done);
3839 ibmvnic_send_crq_init(adapter);
3840 if (!wait_for_completion_timeout(&adapter->init_done, timeout))
3841 return 0;
3842
3843 do {
3844 if (adapter->renegotiate) {
3845 adapter->renegotiate = false;
3846 release_sub_crqs_no_irqs(adapter);
3847
3848 reinit_completion(&adapter->init_done);
3849 send_cap_queries(adapter);
3850 if (!wait_for_completion_timeout(&adapter->init_done,
3851 timeout))
3852 return 0;
3853 }
3854 } while (adapter->renegotiate);
3855
3856 rc = init_sub_crq_irqs(adapter);
3857 if (rc) { 3925 if (rc) {
3858 dev_err(&dev->dev, "failed to initialize sub crq irqs\n"); 3926 free_netdev(netdev);
3859 goto free_debugfs; 3927 return rc;
3860 } 3928 }
3861 3929
3862 netdev->real_num_tx_queues = adapter->req_tx_queues;
3863 netdev->mtu = adapter->req_mtu - ETH_HLEN; 3930 netdev->mtu = adapter->req_mtu - ETH_HLEN;
3931 adapter->is_closed = false;
3864 3932
3865 rc = register_netdev(netdev); 3933 rc = register_netdev(netdev);
3866 if (rc) { 3934 if (rc) {
3867 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); 3935 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
3868 goto free_sub_crqs; 3936 free_netdev(netdev);
3937 return rc;
3869 } 3938 }
3870 dev_info(&dev->dev, "ibmvnic registered\n"); 3939 dev_info(&dev->dev, "ibmvnic registered\n");
3871 3940
3872 return 0; 3941 return 0;
3873
3874free_sub_crqs:
3875 release_sub_crqs(adapter);
3876free_debugfs:
3877 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3878 debugfs_remove_recursive(adapter->debugfs_dir);
3879free_crq:
3880 ibmvnic_release_crq_queue(adapter);
3881free_netdev:
3882 free_netdev(netdev);
3883 return rc;
3884} 3942}
3885 3943
3886static int ibmvnic_remove(struct vio_dev *dev) 3944static int ibmvnic_remove(struct vio_dev *dev)
3887{ 3945{
3888 struct net_device *netdev = dev_get_drvdata(&dev->dev); 3946 struct net_device *netdev = dev_get_drvdata(&dev->dev);
3889 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3890 3947
3891 unregister_netdev(netdev); 3948 unregister_netdev(netdev);
3892
3893 release_sub_crqs(adapter);
3894
3895 ibmvnic_release_crq_queue(adapter);
3896
3897 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3898 debugfs_remove_recursive(adapter->debugfs_dir);
3899
3900 dma_unmap_single(&dev->dev, adapter->stats_token,
3901 sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);
3902
3903 if (adapter->ras_comps)
3904 dma_free_coherent(&dev->dev,
3905 adapter->ras_comp_num *
3906 sizeof(struct ibmvnic_fw_component),
3907 adapter->ras_comps, adapter->ras_comps_tok);
3908
3909 kfree(adapter->ras_comp_int);
3910
3911 free_netdev(netdev); 3949 free_netdev(netdev);
3912 dev_set_drvdata(&dev->dev, NULL); 3950 dev_set_drvdata(&dev->dev, NULL);
3913 3951
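The ibmvnic hunks above move the capability/login negotiation out of the one-shot probe path and into ibmvnic_login(), which ibmvnic_open() calls every time (re-running ibmvnic_init() first when the device had been closed). Each step waits on the init_done completion with a 30-second timeout, and the sequence repeats for as long as firmware keeps setting the renegotiate flag. A stand-alone model of that loop, with hypothetical send_*()/wait_done() helpers in place of the CRQ and completion plumbing:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-ins for the CRQ commands and the init_done completion */
extern bool renegotiate_requested(void);
extern void send_capability_queries(void);
extern void send_login_request(void);
extern bool wait_done(unsigned int timeout_ms);   /* false on timeout */

static int do_login(void)
{
        do {
                if (renegotiate_requested()) {
                        /* firmware asked for new parameters: re-query caps */
                        send_capability_queries();
                        if (!wait_done(30000)) {
                                fprintf(stderr, "Capabilities query timeout\n");
                                return -1;
                        }
                }

                send_login_request();
                if (!wait_done(30000)) {
                        fprintf(stderr, "Login timeout\n");
                        return -1;
                }
                /* the login response handler may set the renegotiate flag
                 * again, in which case the whole sequence repeats */
        } while (renegotiate_requested());

        return 0;
}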
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 1993b42666f7..10ad259208cb 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1052,4 +1052,5 @@ struct ibmvnic_adapter {
1052 struct work_struct ibmvnic_xport; 1052 struct work_struct ibmvnic_xport;
1053 struct tasklet_struct tasklet; 1053 struct tasklet_struct tasklet;
1054 bool failover; 1054 bool failover;
1055 bool is_closed;
1055}; 1056};
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 01db688cf539..72481670478c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1226,7 +1226,9 @@ static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
1226 if (tc->type != TC_SETUP_MQPRIO) 1226 if (tc->type != TC_SETUP_MQPRIO)
1227 return -EINVAL; 1227 return -EINVAL;
1228 1228
1229 return fm10k_setup_tc(dev, tc->tc); 1229 tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
1230
1231 return fm10k_setup_tc(dev, tc->mqprio->num_tc);
1230} 1232}
1231 1233
1232static void fm10k_assign_l2_accel(struct fm10k_intfc *interface, 1234static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 82d8040fa418..c0f2286c2b72 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -208,8 +208,8 @@ struct i40e_fdir_filter {
208 u8 flow_type; 208 u8 flow_type;
209 u8 ip4_proto; 209 u8 ip4_proto;
210 /* TX packet view of src and dst */ 210 /* TX packet view of src and dst */
211 __be32 dst_ip[4]; 211 __be32 dst_ip;
212 __be32 src_ip[4]; 212 __be32 src_ip;
213 __be16 src_port; 213 __be16 src_port;
214 __be16 dst_port; 214 __be16 dst_port;
215 __be32 sctp_v_tag; 215 __be32 sctp_v_tag;
@@ -244,7 +244,8 @@ struct i40e_tc_configuration {
244}; 244};
245 245
246struct i40e_udp_port_config { 246struct i40e_udp_port_config {
247 __be16 index; 247 /* AdminQ command interface expects port number in Host byte order */
248 u16 index;
248 u8 type; 249 u8 type;
249}; 250};
250 251
@@ -285,7 +286,14 @@ struct i40e_pf {
285 u32 fd_flush_cnt; 286 u32 fd_flush_cnt;
286 u32 fd_add_err; 287 u32 fd_add_err;
287 u32 fd_atr_cnt; 288 u32 fd_atr_cnt;
288 u32 fd_tcp_rule; 289
290 /* Book-keeping of side-band filter count per flow-type.
291 * This is used to detect and handle input set changes for
292 * respective flow-type.
293 */
294 u16 fd_tcp4_filter_cnt;
295 u16 fd_udp4_filter_cnt;
296 u16 fd_ip4_filter_cnt;
289 297
290 struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; 298 struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
291 u16 pending_udp_bitmap; 299 u16 pending_udp_bitmap;
@@ -348,16 +356,23 @@ struct i40e_pf {
348#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51) 356#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51)
349#define I40E_FLAG_HAVE_CRT_RETIMER BIT_ULL(52) 357#define I40E_FLAG_HAVE_CRT_RETIMER BIT_ULL(52)
350#define I40E_FLAG_PTP_L4_CAPABLE BIT_ULL(53) 358#define I40E_FLAG_PTP_L4_CAPABLE BIT_ULL(53)
351#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(54) 359#define I40E_FLAG_CLIENT_RESET BIT_ULL(54)
352#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(55) 360#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(55)
361#define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(56)
362#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(57)
353 363
354 /* tracks features that get auto disabled by errors */ 364 /* Tracks features that are disabled due to hw limitations.
355 u64 auto_disable_flags; 365 * If a bit is set here, it means that the corresponding
366 * bit in the 'flags' field is cleared i.e that feature
367 * is disabled
368 */
369 u64 hw_disabled_flags;
356 370
357#ifdef I40E_FCOE 371#ifdef I40E_FCOE
358 struct i40e_fcoe fcoe; 372 struct i40e_fcoe fcoe;
359 373
360#endif /* I40E_FCOE */ 374#endif /* I40E_FCOE */
375 struct i40e_client_instance *cinst;
361 bool stat_offsets_loaded; 376 bool stat_offsets_loaded;
362 struct i40e_hw_port_stats stats; 377 struct i40e_hw_port_stats stats;
363 struct i40e_hw_port_stats stats_offsets; 378 struct i40e_hw_port_stats stats_offsets;
@@ -813,8 +828,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
813void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset); 828void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
814void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs); 829void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
815void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id); 830void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
816int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id, 831int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id);
817 enum i40e_client_type type);
818/** 832/**
819 * i40e_irq_dynamic_enable - Enable default interrupt generation settings 833 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
820 * @vsi: pointer to a vsi 834 * @vsi: pointer to a vsi
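The auto_disable_flags rename above comes with an invariant spelled out in the new comment: a bit set in hw_disabled_flags means the matching bit in flags has been cleared. A minimal illustrative helper that keeps the two words consistent (the helper itself is not part of the driver):

#include <stdint.h>

struct pf_feature_flags {
        uint64_t flags;              /* features currently enabled */
        uint64_t hw_disabled_flags;  /* features turned off by hw limits */
};

/* Disable a feature because of a hardware limitation: record it in
 * hw_disabled_flags and clear it from flags so the two views agree. */
static void hw_disable_feature(struct pf_feature_flags *pf, uint64_t feature)
{
        pf->hw_disabled_flags |= feature;
        pf->flags &= ~feature;
}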
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 451f48b7540a..251074c677c4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -132,6 +132,10 @@ enum i40e_admin_queue_opc {
132 i40e_aqc_opc_list_func_capabilities = 0x000A, 132 i40e_aqc_opc_list_func_capabilities = 0x000A,
133 i40e_aqc_opc_list_dev_capabilities = 0x000B, 133 i40e_aqc_opc_list_dev_capabilities = 0x000B,
134 134
135 /* Proxy commands */
136 i40e_aqc_opc_set_proxy_config = 0x0104,
137 i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
138
135 /* LAA */ 139 /* LAA */
136 i40e_aqc_opc_mac_address_read = 0x0107, 140 i40e_aqc_opc_mac_address_read = 0x0107,
137 i40e_aqc_opc_mac_address_write = 0x0108, 141 i40e_aqc_opc_mac_address_write = 0x0108,
@@ -139,6 +143,10 @@ enum i40e_admin_queue_opc {
139 /* PXE */ 143 /* PXE */
140 i40e_aqc_opc_clear_pxe_mode = 0x0110, 144 i40e_aqc_opc_clear_pxe_mode = 0x0110,
141 145
146 /* WoL commands */
147 i40e_aqc_opc_set_wol_filter = 0x0120,
148 i40e_aqc_opc_get_wake_reason = 0x0121,
149
142 /* internal switch commands */ 150 /* internal switch commands */
143 i40e_aqc_opc_get_switch_config = 0x0200, 151 i40e_aqc_opc_get_switch_config = 0x0200,
144 i40e_aqc_opc_add_statistics = 0x0201, 152 i40e_aqc_opc_add_statistics = 0x0201,
@@ -177,6 +185,7 @@ enum i40e_admin_queue_opc {
177 i40e_aqc_opc_remove_control_packet_filter = 0x025B, 185 i40e_aqc_opc_remove_control_packet_filter = 0x025B,
178 i40e_aqc_opc_add_cloud_filters = 0x025C, 186 i40e_aqc_opc_add_cloud_filters = 0x025C,
179 i40e_aqc_opc_remove_cloud_filters = 0x025D, 187 i40e_aqc_opc_remove_cloud_filters = 0x025D,
188 i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
180 189
181 i40e_aqc_opc_add_mirror_rule = 0x0260, 190 i40e_aqc_opc_add_mirror_rule = 0x0260,
182 i40e_aqc_opc_delete_mirror_rule = 0x0261, 191 i40e_aqc_opc_delete_mirror_rule = 0x0261,
@@ -563,6 +572,56 @@ struct i40e_aqc_clear_pxe {
563 572
564I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); 573I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
565 574
575/* Set WoL Filter (0x0120) */
576
577struct i40e_aqc_set_wol_filter {
578 __le16 filter_index;
579#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
580#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
581#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
582 I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
583
584#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
585#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
586 I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
587 __le16 cmd_flags;
588#define I40E_AQC_SET_WOL_FILTER 0x8000
589#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
590#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
591#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
592 __le16 valid_flags;
593#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
594#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
595 u8 reserved[2];
596 __le32 address_high;
597 __le32 address_low;
598};
599
600I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
601
602struct i40e_aqc_set_wol_filter_data {
603 u8 filter[128];
604 u8 mask[16];
605};
606
607I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
608
609/* Get Wake Reason (0x0121) */
610
611struct i40e_aqc_get_wake_reason_completion {
612 u8 reserved_1[2];
613 __le16 wake_reason;
614#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
615#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
616 I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
617#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
618#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
619 I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
620 u8 reserved_2[12];
621};
622
623I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
624
566/* Switch configuration commands (0x02xx) */ 625/* Switch configuration commands (0x02xx) */
567 626
568/* Used by many indirect commands that only pass an seid and a buffer in the 627/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -645,6 +704,8 @@ struct i40e_aqc_set_port_parameters {
645#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ 704#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
646#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 705#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
647 __le16 bad_frame_vsi; 706 __le16 bad_frame_vsi;
707#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
708#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
648 __le16 default_seid; /* reserved for command */ 709 __le16 default_seid; /* reserved for command */
649 u8 reserved[10]; 710 u8 reserved[10];
650}; 711};
@@ -696,6 +757,7 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
696/* Set Switch Configuration (direct 0x0205) */ 757/* Set Switch Configuration (direct 0x0205) */
697struct i40e_aqc_set_switch_config { 758struct i40e_aqc_set_switch_config {
698 __le16 flags; 759 __le16 flags;
760/* flags used for both fields below */
699#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 761#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
700#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 762#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
701 __le16 valid_flags; 763 __le16 valid_flags;
@@ -1844,11 +1906,12 @@ struct i40e_aqc_get_link_status {
1844#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02 1906#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
1845#define I40E_AQ_CONFIG_CRC_ENA 0x04 1907#define I40E_AQ_CONFIG_CRC_ENA 0x04
1846#define I40E_AQ_CONFIG_PACING_MASK 0x78 1908#define I40E_AQ_CONFIG_PACING_MASK 0x78
1847 u8 external_power_ability; 1909 u8 power_desc;
1848#define I40E_AQ_LINK_POWER_CLASS_1 0x00 1910#define I40E_AQ_LINK_POWER_CLASS_1 0x00
1849#define I40E_AQ_LINK_POWER_CLASS_2 0x01 1911#define I40E_AQ_LINK_POWER_CLASS_2 0x01
1850#define I40E_AQ_LINK_POWER_CLASS_3 0x02 1912#define I40E_AQ_LINK_POWER_CLASS_3 0x02
1851#define I40E_AQ_LINK_POWER_CLASS_4 0x03 1913#define I40E_AQ_LINK_POWER_CLASS_4 0x03
1914#define I40E_AQ_PWR_CLASS_MASK 0x03
1852 u8 reserved[4]; 1915 u8 reserved[4];
1853}; 1916};
1854 1917
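The new Set WoL Filter opcode (0x0120) above is driven through struct i40e_aqc_set_wol_filter. A hedged fragment showing how a caller might fill that descriptor — the field and flag names come from the hunk, while the slot number, the magic-packet choice and the buffer handling are illustrative only:

        struct i40e_aqc_set_wol_filter cmd = { 0 };
        u16 flags;

        /* pick filter slot 0 and mark it as a magic-packet filter */
        cmd.filter_index =
                cpu_to_le16(1 << I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT);

        /* enable the filter and declare which action bits are valid */
        flags = I40E_AQC_SET_WOL_FILTER | I40E_AQC_SET_WOL_FILTER_ACTION_SET;
        cmd.cmd_flags = cpu_to_le16(flags);
        cmd.valid_flags = cpu_to_le16(I40E_AQC_SET_WOL_FILTER_ACTION_VALID);

        /* address_high/address_low would then point at a DMA-mapped
         * struct i40e_aqc_set_wol_filter_data carrying filter[] and mask[] */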
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index d570219efd9f..a9f0d22a7cf4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -32,16 +32,10 @@
32#include "i40e_client.h" 32#include "i40e_client.h"
33 33
34static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR; 34static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR;
35 35static struct i40e_client *registered_client;
36static LIST_HEAD(i40e_devices); 36static LIST_HEAD(i40e_devices);
37static DEFINE_MUTEX(i40e_device_mutex); 37static DEFINE_MUTEX(i40e_device_mutex);
38 38
39static LIST_HEAD(i40e_clients);
40static DEFINE_MUTEX(i40e_client_mutex);
41
42static LIST_HEAD(i40e_client_instances);
43static DEFINE_MUTEX(i40e_client_instance_mutex);
44
45static int i40e_client_virtchnl_send(struct i40e_info *ldev, 39static int i40e_client_virtchnl_send(struct i40e_info *ldev,
46 struct i40e_client *client, 40 struct i40e_client *client,
47 u32 vf_id, u8 *msg, u16 len); 41 u32 vf_id, u8 *msg, u16 len);
@@ -67,28 +61,6 @@ static struct i40e_ops i40e_lan_ops = {
67}; 61};
68 62
69/** 63/**
70 * i40e_client_type_to_vsi_type - convert client type to vsi type
71 * @client_type: the i40e_client type
72 *
73 * returns the related vsi type value
74 **/
75static
76enum i40e_vsi_type i40e_client_type_to_vsi_type(enum i40e_client_type type)
77{
78 switch (type) {
79 case I40E_CLIENT_IWARP:
80 return I40E_VSI_IWARP;
81
82 case I40E_CLIENT_VMDQ2:
83 return I40E_VSI_VMDQ2;
84
85 default:
86 pr_err("i40e: Client type unknown\n");
87 return I40E_VSI_TYPE_UNKNOWN;
88 }
89}
90
91/**
92 * i40e_client_get_params - Get the params that can change at runtime 64 * i40e_client_get_params - Get the params that can change at runtime
93 * @vsi: the VSI with the message 65 * @vsi: the VSI with the message
94 * @param: clinet param struct 66 * @param: clinet param struct
@@ -134,31 +106,22 @@ int i40e_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
134void 106void
135i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len) 107i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
136{ 108{
137 struct i40e_client_instance *cdev; 109 struct i40e_pf *pf = vsi->back;
110 struct i40e_client_instance *cdev = pf->cinst;
138 111
139 if (!vsi) 112 if (!cdev || !cdev->client)
113 return;
114 if (!cdev->client->ops || !cdev->client->ops->virtchnl_receive) {
115 dev_dbg(&pf->pdev->dev,
116 "Cannot locate client instance virtual channel receive routine\n");
117 return;
118 }
119 if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
120 dev_dbg(&pf->pdev->dev, "Client is not open, abort virtchnl_receive\n");
140 return; 121 return;
141 mutex_lock(&i40e_client_instance_mutex);
142 list_for_each_entry(cdev, &i40e_client_instances, list) {
143 if (cdev->lan_info.pf == vsi->back) {
144 if (!cdev->client ||
145 !cdev->client->ops ||
146 !cdev->client->ops->virtchnl_receive) {
147 dev_dbg(&vsi->back->pdev->dev,
148 "Cannot locate client instance virtual channel receive routine\n");
149 continue;
150 }
151 if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
152 &cdev->state)) {
153 dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort virtchnl_receive\n");
154 continue;
155 }
156 cdev->client->ops->virtchnl_receive(&cdev->lan_info,
157 cdev->client,
158 vf_id, msg, len);
159 }
160 } 122 }
161 mutex_unlock(&i40e_client_instance_mutex); 123 cdev->client->ops->virtchnl_receive(&cdev->lan_info, cdev->client,
124 vf_id, msg, len);
162} 125}
163 126
164/** 127/**
@@ -169,39 +132,28 @@ i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
169 **/ 132 **/
170void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) 133void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
171{ 134{
172 struct i40e_client_instance *cdev; 135 struct i40e_pf *pf = vsi->back;
136 struct i40e_client_instance *cdev = pf->cinst;
173 struct i40e_params params; 137 struct i40e_params params;
174 138
175 if (!vsi) 139 if (!cdev || !cdev->client)
140 return;
141 if (!cdev->client->ops || !cdev->client->ops->l2_param_change) {
142 dev_dbg(&vsi->back->pdev->dev,
143 "Cannot locate client instance l2_param_change routine\n");
144 return;
145 }
146 if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
147 dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
176 return; 148 return;
177 mutex_lock(&i40e_client_instance_mutex);
178 list_for_each_entry(cdev, &i40e_client_instances, list) {
179 if (cdev->lan_info.pf == vsi->back) {
180 if (!cdev->client ||
181 !cdev->client->ops ||
182 !cdev->client->ops->l2_param_change) {
183 dev_dbg(&vsi->back->pdev->dev,
184 "Cannot locate client instance l2_param_change routine\n");
185 continue;
186 }
187 memset(&params, 0, sizeof(params));
188 i40e_client_get_params(vsi, &params);
189 if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
190 &cdev->state)) {
191 dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
192 continue;
193 }
194 cdev->lan_info.params = params;
195 cdev->client->ops->l2_param_change(&cdev->lan_info,
196 cdev->client,
197 &params);
198 }
199 } 149 }
200 mutex_unlock(&i40e_client_instance_mutex); 150 memcpy(&cdev->lan_info.params, &params, sizeof(struct i40e_params));
151 cdev->client->ops->l2_param_change(&cdev->lan_info, cdev->client,
152 &params);
201} 153}
202 154
203/** 155/**
204 * i40e_client_release_qvlist 156 * i40e_client_release_qvlist - release MSI-X vector mapping for client
205 * @ldev: pointer to L2 context. 157 * @ldev: pointer to L2 context.
206 * 158 *
207 **/ 159 **/
@@ -237,26 +189,19 @@ static void i40e_client_release_qvlist(struct i40e_info *ldev)
237 **/ 189 **/
238void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset) 190void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
239{ 191{
240 struct i40e_client_instance *cdev; 192 struct i40e_pf *pf = vsi->back;
193 struct i40e_client_instance *cdev = pf->cinst;
241 194
242 if (!vsi) 195 if (!cdev || !cdev->client)
196 return;
197 if (!cdev->client->ops || !cdev->client->ops->close) {
198 dev_dbg(&vsi->back->pdev->dev,
199 "Cannot locate client instance close routine\n");
243 return; 200 return;
244 mutex_lock(&i40e_client_instance_mutex);
245 list_for_each_entry(cdev, &i40e_client_instances, list) {
246 if (cdev->lan_info.netdev == vsi->netdev) {
247 if (!cdev->client ||
248 !cdev->client->ops || !cdev->client->ops->close) {
249 dev_dbg(&vsi->back->pdev->dev,
250 "Cannot locate client instance close routine\n");
251 continue;
252 }
253 cdev->client->ops->close(&cdev->lan_info, cdev->client,
254 reset);
255 clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
256 i40e_client_release_qvlist(&cdev->lan_info);
257 }
258 } 201 }
259 mutex_unlock(&i40e_client_instance_mutex); 202 cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
203 clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
204 i40e_client_release_qvlist(&cdev->lan_info);
260} 205}
261 206
262/** 207/**
@@ -268,30 +213,20 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
268 **/ 213 **/
269void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id) 214void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id)
270{ 215{
271 struct i40e_client_instance *cdev; 216 struct i40e_client_instance *cdev = pf->cinst;
272 217
273 if (!pf) 218 if (!cdev || !cdev->client)
219 return;
220 if (!cdev->client->ops || !cdev->client->ops->vf_reset) {
221 dev_dbg(&pf->pdev->dev,
222 "Cannot locate client instance VF reset routine\n");
223 return;
224 }
225 if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
226 dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-reset\n");
274 return; 227 return;
275 mutex_lock(&i40e_client_instance_mutex);
276 list_for_each_entry(cdev, &i40e_client_instances, list) {
277 if (cdev->lan_info.pf == pf) {
278 if (!cdev->client ||
279 !cdev->client->ops ||
280 !cdev->client->ops->vf_reset) {
281 dev_dbg(&pf->pdev->dev,
282 "Cannot locate client instance VF reset routine\n");
283 continue;
284 }
285 if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
286 &cdev->state)) {
287 dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-reset\n");
288 continue;
289 }
290 cdev->client->ops->vf_reset(&cdev->lan_info,
291 cdev->client, vf_id);
292 }
293 } 228 }
294 mutex_unlock(&i40e_client_instance_mutex); 229 cdev->client->ops->vf_reset(&cdev->lan_info, cdev->client, vf_id);
295} 230}
296 231
297/** 232/**
@@ -303,30 +238,21 @@ void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id)
303 **/ 238 **/
304void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs) 239void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs)
305{ 240{
306 struct i40e_client_instance *cdev; 241 struct i40e_client_instance *cdev = pf->cinst;
307 242
308 if (!pf) 243 if (!cdev || !cdev->client)
244 return;
245 if (!cdev->client->ops || !cdev->client->ops->vf_enable) {
246 dev_dbg(&pf->pdev->dev,
247 "Cannot locate client instance VF enable routine\n");
248 return;
249 }
250 if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
251 &cdev->state)) {
252 dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-enable\n");
309 return; 253 return;
310 mutex_lock(&i40e_client_instance_mutex);
311 list_for_each_entry(cdev, &i40e_client_instances, list) {
312 if (cdev->lan_info.pf == pf) {
313 if (!cdev->client ||
314 !cdev->client->ops ||
315 !cdev->client->ops->vf_enable) {
316 dev_dbg(&pf->pdev->dev,
317 "Cannot locate client instance VF enable routine\n");
318 continue;
319 }
320 if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
321 &cdev->state)) {
322 dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-enable\n");
323 continue;
324 }
325 cdev->client->ops->vf_enable(&cdev->lan_info,
326 cdev->client, num_vfs);
327 }
328 } 254 }
329 mutex_unlock(&i40e_client_instance_mutex); 255 cdev->client->ops->vf_enable(&cdev->lan_info, cdev->client, num_vfs);
330} 256}
331 257
332/** 258/**
@@ -337,37 +263,25 @@ void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs)
337 * If there is a client of the specified type attached to this PF, call 263 * If there is a client of the specified type attached to this PF, call
338 * its vf_capable routine 264 * its vf_capable routine
339 **/ 265 **/
340int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id, 266int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id)
341 enum i40e_client_type type)
342{ 267{
343 struct i40e_client_instance *cdev; 268 struct i40e_client_instance *cdev = pf->cinst;
344 int capable = false; 269 int capable = false;
345 270
346 if (!pf) 271 if (!cdev || !cdev->client)
347 return false; 272 goto out;
348 mutex_lock(&i40e_client_instance_mutex); 273 if (!cdev->client->ops || !cdev->client->ops->vf_capable) {
349 list_for_each_entry(cdev, &i40e_client_instances, list) { 274 dev_info(&pf->pdev->dev,
350 if (cdev->lan_info.pf == pf) { 275 "Cannot locate client instance VF capability routine\n");
351 if (!cdev->client || 276 goto out;
352 !cdev->client->ops ||
353 !cdev->client->ops->vf_capable ||
354 !(cdev->client->type == type)) {
355 dev_dbg(&pf->pdev->dev,
356 "Cannot locate client instance VF capability routine\n");
357 continue;
358 }
359 if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
360 &cdev->state)) {
361 dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-capable\n");
362 continue;
363 }
364 capable = cdev->client->ops->vf_capable(&cdev->lan_info,
365 cdev->client,
366 vf_id);
367 break;
368 }
369 } 277 }
370 mutex_unlock(&i40e_client_instance_mutex); 278 if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state))
279 goto out;
280
281 capable = cdev->client->ops->vf_capable(&cdev->lan_info,
282 cdev->client,
283 vf_id);
284out:
371 return capable; 285 return capable;
372} 286}
373 287
@@ -377,27 +291,19 @@ int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
377 * @client: pointer to a client struct in the client list. 291 * @client: pointer to a client struct in the client list.
378 * @existing: if there was already an existing instance 292 * @existing: if there was already an existing instance
379 * 293 *
380 * Returns cdev ptr on success or if already exists, NULL on failure
381 **/ 294 **/
382static 295static void i40e_client_add_instance(struct i40e_pf *pf)
383struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
384 struct i40e_client *client,
385 bool *existing)
386{ 296{
387 struct i40e_client_instance *cdev; 297 struct i40e_client_instance *cdev = NULL;
388 struct netdev_hw_addr *mac = NULL; 298 struct netdev_hw_addr *mac = NULL;
389 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 299 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
390 300
391 mutex_lock(&i40e_client_instance_mutex); 301 if (!registered_client || pf->cinst)
392 list_for_each_entry(cdev, &i40e_client_instances, list) { 302 return;
393 if ((cdev->lan_info.pf == pf) && (cdev->client == client)) { 303
394 *existing = true;
395 goto out;
396 }
397 }
398 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); 304 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
399 if (!cdev) 305 if (!cdev)
400 goto out; 306 return;
401 307
402 cdev->lan_info.pf = (void *)pf; 308 cdev->lan_info.pf = (void *)pf;
403 cdev->lan_info.netdev = vsi->netdev; 309 cdev->lan_info.netdev = vsi->netdev;
@@ -417,7 +323,7 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
417 if (i40e_client_get_params(vsi, &cdev->lan_info.params)) { 323 if (i40e_client_get_params(vsi, &cdev->lan_info.params)) {
418 kfree(cdev); 324 kfree(cdev);
419 cdev = NULL; 325 cdev = NULL;
420 goto out; 326 return;
421 } 327 }
422 328
423 cdev->lan_info.msix_count = pf->num_iwarp_msix; 329 cdev->lan_info.msix_count = pf->num_iwarp_msix;
@@ -430,41 +336,20 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
430 else 336 else
431 dev_err(&pf->pdev->dev, "MAC address list is empty!\n"); 337 dev_err(&pf->pdev->dev, "MAC address list is empty!\n");
432 338
433 cdev->client = client; 339 cdev->client = registered_client;
434 INIT_LIST_HEAD(&cdev->list); 340 pf->cinst = cdev;
435 list_add(&cdev->list, &i40e_client_instances);
436out:
437 mutex_unlock(&i40e_client_instance_mutex);
438 return cdev;
439} 341}
440 342
441/** 343/**
442 * i40e_client_del_instance - removes a client instance from the list 344 * i40e_client_del_instance - removes a client instance from the list
443 * @pf: pointer to the board struct 345 * @pf: pointer to the board struct
444 * 346 *
445 * Returns 0 on success or non-0 on error
446 **/ 347 **/
447static 348static
448int i40e_client_del_instance(struct i40e_pf *pf, struct i40e_client *client) 349void i40e_client_del_instance(struct i40e_pf *pf)
449{ 350{
450 struct i40e_client_instance *cdev, *tmp; 351 kfree(pf->cinst);
451 int ret = -ENODEV; 352 pf->cinst = NULL;
452
453 mutex_lock(&i40e_client_instance_mutex);
454 list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) {
455 if ((cdev->lan_info.pf != pf) || (cdev->client != client))
456 continue;
457
458 dev_info(&pf->pdev->dev, "Deleted instance of Client %s, of dev %d bus=0x%02x func=0x%02x)\n",
459 client->name, pf->hw.pf_id,
460 pf->hw.bus.device, pf->hw.bus.func);
461 list_del(&cdev->list);
462 kfree(cdev);
463 ret = 0;
464 break;
465 }
466 mutex_unlock(&i40e_client_instance_mutex);
467 return ret;
468} 353}
469 354
470/** 355/**
@@ -473,67 +358,50 @@ int i40e_client_del_instance(struct i40e_pf *pf, struct i40e_client *client)
473 **/ 358 **/
474void i40e_client_subtask(struct i40e_pf *pf) 359void i40e_client_subtask(struct i40e_pf *pf)
475{ 360{
361 struct i40e_client *client = registered_client;
476 struct i40e_client_instance *cdev; 362 struct i40e_client_instance *cdev;
477 struct i40e_client *client; 363 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
478 bool existing = false;
479 int ret = 0; 364 int ret = 0;
480 365
481 if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED)) 366 if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
482 return; 367 return;
483 pf->flags &= ~I40E_FLAG_SERVICE_CLIENT_REQUESTED; 368 pf->flags &= ~I40E_FLAG_SERVICE_CLIENT_REQUESTED;
369 cdev = pf->cinst;
484 370
485 /* If we're down or resetting, just bail */ 371 /* If we're down or resetting, just bail */
486 if (test_bit(__I40E_DOWN, &pf->state) || 372 if (test_bit(__I40E_DOWN, &pf->state) ||
487 test_bit(__I40E_CONFIG_BUSY, &pf->state)) 373 test_bit(__I40E_CONFIG_BUSY, &pf->state))
488 return; 374 return;
489 375
490 /* Check client state and instantiate client if client registered */ 376 if (!client || !cdev)
491 mutex_lock(&i40e_client_mutex); 377 return;
492 list_for_each_entry(client, &i40e_clients, list) {
493 /* first check client is registered */
494 if (!test_bit(__I40E_CLIENT_REGISTERED, &client->state))
495 continue;
496
497 /* Do we also need the LAN VSI to be up, to create instance */
498 if (!(client->flags & I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE)) {
499 /* check if L2 VSI is up, if not we are not ready */
500 if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
501 continue;
502 } else {
503 dev_warn(&pf->pdev->dev, "This client %s is being instantiated at probe\n",
504 client->name);
505 }
506
507 /* Add the client instance to the instance list */
508 cdev = i40e_client_add_instance(pf, client, &existing);
509 if (!cdev)
510 continue;
511
512 if (!existing) {
513 dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x dev=0x%02x func=0x%02x\n",
514 client->name, pf->hw.pf_id,
515 pf->hw.bus.bus_id, pf->hw.bus.device,
516 pf->hw.bus.func);
517 }
518 378
519 mutex_lock(&i40e_client_instance_mutex); 379 /* Here we handle client opens. If the client is down, but
520 if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, 380 * the netdev is up, then open the client.
521 &cdev->state)) { 381 */
522 /* Send an Open request to the client */ 382 if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
523 if (client->ops && client->ops->open) 383 if (!test_bit(__I40E_DOWN, &vsi->state) &&
524 ret = client->ops->open(&cdev->lan_info, 384 client->ops && client->ops->open) {
525 client); 385 set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
526 if (!ret) { 386 ret = client->ops->open(&cdev->lan_info, client);
527 set_bit(__I40E_CLIENT_INSTANCE_OPENED, 387 if (ret) {
528 &cdev->state); 388 /* Remove failed client instance */
529 } else { 389 clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
530 /* remove client instance */ 390 &cdev->state);
531 i40e_client_del_instance(pf, client); 391 i40e_client_del_instance(pf);
532 } 392 }
533 } 393 }
534 mutex_unlock(&i40e_client_instance_mutex); 394 } else {
395 /* Likewise for client close. If the client is up, but the netdev
396 * is down, then close the client.
397 */
398 if (test_bit(__I40E_DOWN, &vsi->state) &&
399 client->ops && client->ops->close) {
400 clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
401 client->ops->close(&cdev->lan_info, client, false);
402 i40e_client_release_qvlist(&cdev->lan_info);
403 }
535 } 404 }
536 mutex_unlock(&i40e_client_mutex);
537} 405}
538 406
539/** 407/**
@@ -601,7 +469,6 @@ int i40e_lan_del_device(struct i40e_pf *pf)
601 break; 469 break;
602 } 470 }
603 } 471 }
604
605 mutex_unlock(&i40e_device_mutex); 472 mutex_unlock(&i40e_device_mutex);
606 return ret; 473 return ret;
607} 474}
@@ -610,22 +477,24 @@ int i40e_lan_del_device(struct i40e_pf *pf)
610 * i40e_client_release - release client specific resources 477 * i40e_client_release - release client specific resources
611 * @client: pointer to the registered client 478 * @client: pointer to the registered client
612 * 479 *
613 * Return 0 on success or < 0 on error
614 **/ 480 **/
615static int i40e_client_release(struct i40e_client *client) 481static void i40e_client_release(struct i40e_client *client)
616{ 482{
617 struct i40e_client_instance *cdev, *tmp; 483 struct i40e_client_instance *cdev;
484 struct i40e_device *ldev;
618 struct i40e_pf *pf; 485 struct i40e_pf *pf;
619 int ret = 0;
620 486
621 LIST_HEAD(cdevs_tmp); 487 mutex_lock(&i40e_device_mutex);
622 488 list_for_each_entry(ldev, &i40e_devices, list) {
623 mutex_lock(&i40e_client_instance_mutex); 489 pf = ldev->pf;
624 list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) { 490 cdev = pf->cinst;
625 if (strncmp(cdev->client->name, client->name, 491 if (!cdev)
626 I40E_CLIENT_STR_LENGTH))
627 continue; 492 continue;
628 pf = (struct i40e_pf *)cdev->lan_info.pf; 493
494 while (test_and_set_bit(__I40E_SERVICE_SCHED,
495 &pf->state))
496 usleep_range(500, 1000);
497
629 if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { 498 if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
630 if (client->ops && client->ops->close) 499 if (client->ops && client->ops->close)
631 client->ops->close(&cdev->lan_info, client, 500 client->ops->close(&cdev->lan_info, client,
@@ -637,18 +506,13 @@ static int i40e_client_release(struct i40e_client *client)
637 "Client %s instance for PF id %d closed\n", 506 "Client %s instance for PF id %d closed\n",
638 client->name, pf->hw.pf_id); 507 client->name, pf->hw.pf_id);
639 } 508 }
640 /* delete the client instance from the list */ 509 /* delete the client instance */
641 list_move(&cdev->list, &cdevs_tmp); 510 i40e_client_del_instance(pf);
642 dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n", 511 dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n",
643 client->name); 512 client->name);
513 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
644 } 514 }
645 mutex_unlock(&i40e_client_instance_mutex); 515 mutex_unlock(&i40e_device_mutex);
646
647 /* free the client device and release its vsi */
648 list_for_each_entry_safe(cdev, tmp, &cdevs_tmp, list) {
649 kfree(cdev);
650 }
651 return ret;
652} 516}
653 517
654/** 518/**
@@ -664,6 +528,7 @@ static void i40e_client_prepare(struct i40e_client *client)
664 mutex_lock(&i40e_device_mutex); 528 mutex_lock(&i40e_device_mutex);
665 list_for_each_entry(ldev, &i40e_devices, list) { 529 list_for_each_entry(ldev, &i40e_devices, list) {
666 pf = ldev->pf; 530 pf = ldev->pf;
531 i40e_client_add_instance(pf);
667 /* Start the client subtask */ 532 /* Start the client subtask */
668 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; 533 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
669 i40e_service_event_schedule(pf); 534 i40e_service_event_schedule(pf);
@@ -792,8 +657,8 @@ static void i40e_client_request_reset(struct i40e_info *ldev,
792 break; 657 break;
793 default: 658 default:
794 dev_warn(&pf->pdev->dev, 659 dev_warn(&pf->pdev->dev,
795 "Client %s instance for PF id %d request an unsupported reset: %d.\n", 660 "Client for PF id %d requested an unsupported reset: %d.\n",
796 client->name, pf->hw.pf_id, reset_level); 661 pf->hw.pf_id, reset_level);
797 break; 662 break;
798 } 663 }
799 664
@@ -852,8 +717,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
852 } else { 717 } else {
853 update = false; 718 update = false;
854 dev_warn(&pf->pdev->dev, 719 dev_warn(&pf->pdev->dev,
855 "Client %s instance for PF id %d request an unsupported Config: %x.\n", 720 "Client for PF id %d request an unsupported Config: %x.\n",
856 client->name, pf->hw.pf_id, flag); 721 pf->hw.pf_id, flag);
857 } 722 }
858 723
859 if (update) { 724 if (update) {
@@ -878,7 +743,6 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
878int i40e_register_client(struct i40e_client *client) 743int i40e_register_client(struct i40e_client *client)
879{ 744{
880 int ret = 0; 745 int ret = 0;
881 enum i40e_vsi_type vsi_type;
882 746
883 if (!client) { 747 if (!client) {
884 ret = -EIO; 748 ret = -EIO;
@@ -891,11 +755,9 @@ int i40e_register_client(struct i40e_client *client)
891 goto out; 755 goto out;
892 } 756 }
893 757
894 mutex_lock(&i40e_client_mutex); 758 if (registered_client) {
895 if (i40e_client_is_registered(client)) {
896 pr_info("i40e: Client %s has already been registered!\n", 759 pr_info("i40e: Client %s has already been registered!\n",
897 client->name); 760 client->name);
898 mutex_unlock(&i40e_client_mutex);
899 ret = -EEXIST; 761 ret = -EEXIST;
900 goto out; 762 goto out;
901 } 763 }
@@ -908,22 +770,11 @@ int i40e_register_client(struct i40e_client *client)
908 client->version.major, client->version.minor, 770 client->version.major, client->version.minor,
909 client->version.build, 771 client->version.build,
910 i40e_client_interface_version_str); 772 i40e_client_interface_version_str);
911 mutex_unlock(&i40e_client_mutex);
912 ret = -EIO; 773 ret = -EIO;
913 goto out; 774 goto out;
914 } 775 }
915 776
916 vsi_type = i40e_client_type_to_vsi_type(client->type); 777 registered_client = client;
917 if (vsi_type == I40E_VSI_TYPE_UNKNOWN) {
918 pr_info("i40e: Failed to register client %s due to unknown client type %d\n",
919 client->name, client->type);
920 mutex_unlock(&i40e_client_mutex);
921 ret = -EIO;
922 goto out;
923 }
924 list_add(&client->list, &i40e_clients);
925 set_bit(__I40E_CLIENT_REGISTERED, &client->state);
926 mutex_unlock(&i40e_client_mutex);
927 778
928 i40e_client_prepare(client); 779 i40e_client_prepare(client);
929 780
@@ -943,29 +794,21 @@ int i40e_unregister_client(struct i40e_client *client)
943{ 794{
944 int ret = 0; 795 int ret = 0;
945 796
946 /* When a unregister request comes through we would have to send 797 if (registered_client != client) {
947 * a close for each of the client instances that were opened.
948 * client_release function is called to handle this.
949 */
950 mutex_lock(&i40e_client_mutex);
951 if (!client || i40e_client_release(client)) {
952 ret = -EIO;
953 goto out;
954 }
955
956 /* TODO: check if device is in reset, or if that matters? */
957 if (!i40e_client_is_registered(client)) {
958 pr_info("i40e: Client %s has not been registered\n", 798 pr_info("i40e: Client %s has not been registered\n",
959 client->name); 799 client->name);
960 ret = -ENODEV; 800 ret = -ENODEV;
961 goto out; 801 goto out;
962 } 802 }
963 clear_bit(__I40E_CLIENT_REGISTERED, &client->state); 803 registered_client = NULL;
 964 list_del(&client->list); 804 /* When an unregister request comes through we would have to send
965 pr_info("i40e: Unregistered client %s with return code %d\n", 805 * a close for each of the client instances that were opened.
966 client->name, ret); 806 * client_release function is called to handle this.
807 */
808 i40e_client_release(client);
809
810 pr_info("i40e: Unregistered client %s\n", client->name);
967out: 811out:
968 mutex_unlock(&i40e_client_mutex);
969 return ret; 812 return ret;
970} 813}
971EXPORT_SYMBOL(i40e_unregister_client); 814EXPORT_SYMBOL(i40e_unregister_client);
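The i40e_client.c changes above replace the client list, the per-client registered-state bit and i40e_client_mutex with a single registered_client pointer, which is enough because the LAN driver only ever serves one client (iWARP). A minimal standalone sketch of that single-slot pattern follows; the type and function names are illustrative, not the driver's, and the locking the kernel otherwise relies on is deliberately omitted.

#include <stdio.h>

struct demo_client { const char *name; };

static struct demo_client *registered_client;

static int demo_register(struct demo_client *client)
{
	if (!client)
		return -1;
	if (registered_client)
		return -2;		/* the driver returns -EEXIST here */
	registered_client = client;
	return 0;
}

static int demo_unregister(struct demo_client *client)
{
	if (registered_client != client)
		return -1;		/* the driver returns -ENODEV here */
	registered_client = NULL;
	return 0;
}

int main(void)
{
	struct demo_client iwarp = { .name = "i40iw" };

	printf("register: %d\n", demo_register(&iwarp));	/* 0 */
	printf("again:    %d\n", demo_register(&iwarp));	/* -2, already registered */
	printf("remove:   %d\n", demo_unregister(&iwarp));	/* 0 */
	return 0;
}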
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index 528bd79b05fe..15b21a5315b5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -57,11 +57,6 @@ enum i40e_client_instance_state {
57 __I40E_CLIENT_INSTANCE_OPENED, 57 __I40E_CLIENT_INSTANCE_OPENED,
58}; 58};
59 59
60enum i40e_client_type {
61 I40E_CLIENT_IWARP,
62 I40E_CLIENT_VMDQ2
63};
64
65struct i40e_ops; 60struct i40e_ops;
66struct i40e_client; 61struct i40e_client;
67 62
@@ -214,7 +209,8 @@ struct i40e_client {
214 u32 flags; 209 u32 flags;
215#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) 210#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
216#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) 211#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
217 enum i40e_client_type type; 212 u8 type;
213#define I40E_CLIENT_IWARP 0
218 const struct i40e_client_ops *ops; /* client ops provided by the client */ 214 const struct i40e_client_ops *ops; /* client ops provided by the client */
219}; 215};
220 216
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index a22e26200bcc..1c3805b4fcf3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -387,7 +387,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
387 * 387 *
388 **/ 388 **/
389static void i40e_get_settings_link_up(struct i40e_hw *hw, 389static void i40e_get_settings_link_up(struct i40e_hw *hw,
390 struct ethtool_cmd *ecmd, 390 struct ethtool_link_ksettings *cmd,
391 struct net_device *netdev, 391 struct net_device *netdev,
392 struct i40e_pf *pf) 392 struct i40e_pf *pf)
393{ 393{
@@ -395,90 +395,96 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
395 u32 link_speed = hw_link_info->link_speed; 395 u32 link_speed = hw_link_info->link_speed;
396 u32 e_advertising = 0x0; 396 u32 e_advertising = 0x0;
397 u32 e_supported = 0x0; 397 u32 e_supported = 0x0;
398 u32 supported, advertising;
399
400 ethtool_convert_link_mode_to_legacy_u32(&supported,
401 cmd->link_modes.supported);
402 ethtool_convert_link_mode_to_legacy_u32(&advertising,
403 cmd->link_modes.advertising);
398 404
399 /* Initialize supported and advertised settings based on phy settings */ 405 /* Initialize supported and advertised settings based on phy settings */
400 switch (hw_link_info->phy_type) { 406 switch (hw_link_info->phy_type) {
401 case I40E_PHY_TYPE_40GBASE_CR4: 407 case I40E_PHY_TYPE_40GBASE_CR4:
402 case I40E_PHY_TYPE_40GBASE_CR4_CU: 408 case I40E_PHY_TYPE_40GBASE_CR4_CU:
403 ecmd->supported = SUPPORTED_Autoneg | 409 supported = SUPPORTED_Autoneg |
404 SUPPORTED_40000baseCR4_Full; 410 SUPPORTED_40000baseCR4_Full;
405 ecmd->advertising = ADVERTISED_Autoneg | 411 advertising = ADVERTISED_Autoneg |
406 ADVERTISED_40000baseCR4_Full; 412 ADVERTISED_40000baseCR4_Full;
407 break; 413 break;
408 case I40E_PHY_TYPE_XLAUI: 414 case I40E_PHY_TYPE_XLAUI:
409 case I40E_PHY_TYPE_XLPPI: 415 case I40E_PHY_TYPE_XLPPI:
410 case I40E_PHY_TYPE_40GBASE_AOC: 416 case I40E_PHY_TYPE_40GBASE_AOC:
411 ecmd->supported = SUPPORTED_40000baseCR4_Full; 417 supported = SUPPORTED_40000baseCR4_Full;
412 break; 418 break;
413 case I40E_PHY_TYPE_40GBASE_SR4: 419 case I40E_PHY_TYPE_40GBASE_SR4:
414 ecmd->supported = SUPPORTED_40000baseSR4_Full; 420 supported = SUPPORTED_40000baseSR4_Full;
415 break; 421 break;
416 case I40E_PHY_TYPE_40GBASE_LR4: 422 case I40E_PHY_TYPE_40GBASE_LR4:
417 ecmd->supported = SUPPORTED_40000baseLR4_Full; 423 supported = SUPPORTED_40000baseLR4_Full;
418 break; 424 break;
419 case I40E_PHY_TYPE_10GBASE_SR: 425 case I40E_PHY_TYPE_10GBASE_SR:
420 case I40E_PHY_TYPE_10GBASE_LR: 426 case I40E_PHY_TYPE_10GBASE_LR:
421 case I40E_PHY_TYPE_1000BASE_SX: 427 case I40E_PHY_TYPE_1000BASE_SX:
422 case I40E_PHY_TYPE_1000BASE_LX: 428 case I40E_PHY_TYPE_1000BASE_LX:
423 ecmd->supported = SUPPORTED_10000baseT_Full; 429 supported = SUPPORTED_10000baseT_Full;
424 if (hw_link_info->module_type[2] & 430 if (hw_link_info->module_type[2] &
425 I40E_MODULE_TYPE_1000BASE_SX || 431 I40E_MODULE_TYPE_1000BASE_SX ||
426 hw_link_info->module_type[2] & 432 hw_link_info->module_type[2] &
427 I40E_MODULE_TYPE_1000BASE_LX) { 433 I40E_MODULE_TYPE_1000BASE_LX) {
428 ecmd->supported |= SUPPORTED_1000baseT_Full; 434 supported |= SUPPORTED_1000baseT_Full;
429 if (hw_link_info->requested_speeds & 435 if (hw_link_info->requested_speeds &
430 I40E_LINK_SPEED_1GB) 436 I40E_LINK_SPEED_1GB)
431 ecmd->advertising |= ADVERTISED_1000baseT_Full; 437 advertising |= ADVERTISED_1000baseT_Full;
432 } 438 }
433 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) 439 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
434 ecmd->advertising |= ADVERTISED_10000baseT_Full; 440 advertising |= ADVERTISED_10000baseT_Full;
435 break; 441 break;
436 case I40E_PHY_TYPE_10GBASE_T: 442 case I40E_PHY_TYPE_10GBASE_T:
437 case I40E_PHY_TYPE_1000BASE_T: 443 case I40E_PHY_TYPE_1000BASE_T:
438 case I40E_PHY_TYPE_100BASE_TX: 444 case I40E_PHY_TYPE_100BASE_TX:
439 ecmd->supported = SUPPORTED_Autoneg | 445 supported = SUPPORTED_Autoneg |
440 SUPPORTED_10000baseT_Full | 446 SUPPORTED_10000baseT_Full |
441 SUPPORTED_1000baseT_Full | 447 SUPPORTED_1000baseT_Full |
442 SUPPORTED_100baseT_Full; 448 SUPPORTED_100baseT_Full;
443 ecmd->advertising = ADVERTISED_Autoneg; 449 advertising = ADVERTISED_Autoneg;
444 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) 450 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
445 ecmd->advertising |= ADVERTISED_10000baseT_Full; 451 advertising |= ADVERTISED_10000baseT_Full;
446 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) 452 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
447 ecmd->advertising |= ADVERTISED_1000baseT_Full; 453 advertising |= ADVERTISED_1000baseT_Full;
448 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) 454 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
449 ecmd->advertising |= ADVERTISED_100baseT_Full; 455 advertising |= ADVERTISED_100baseT_Full;
450 break; 456 break;
451 case I40E_PHY_TYPE_1000BASE_T_OPTICAL: 457 case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
452 ecmd->supported = SUPPORTED_Autoneg | 458 supported = SUPPORTED_Autoneg |
453 SUPPORTED_1000baseT_Full; 459 SUPPORTED_1000baseT_Full;
454 ecmd->advertising = ADVERTISED_Autoneg | 460 advertising = ADVERTISED_Autoneg |
455 ADVERTISED_1000baseT_Full; 461 ADVERTISED_1000baseT_Full;
456 break; 462 break;
457 case I40E_PHY_TYPE_10GBASE_CR1_CU: 463 case I40E_PHY_TYPE_10GBASE_CR1_CU:
458 case I40E_PHY_TYPE_10GBASE_CR1: 464 case I40E_PHY_TYPE_10GBASE_CR1:
459 ecmd->supported = SUPPORTED_Autoneg | 465 supported = SUPPORTED_Autoneg |
460 SUPPORTED_10000baseT_Full; 466 SUPPORTED_10000baseT_Full;
461 ecmd->advertising = ADVERTISED_Autoneg | 467 advertising = ADVERTISED_Autoneg |
462 ADVERTISED_10000baseT_Full; 468 ADVERTISED_10000baseT_Full;
463 break; 469 break;
464 case I40E_PHY_TYPE_XAUI: 470 case I40E_PHY_TYPE_XAUI:
465 case I40E_PHY_TYPE_XFI: 471 case I40E_PHY_TYPE_XFI:
466 case I40E_PHY_TYPE_SFI: 472 case I40E_PHY_TYPE_SFI:
467 case I40E_PHY_TYPE_10GBASE_SFPP_CU: 473 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
468 case I40E_PHY_TYPE_10GBASE_AOC: 474 case I40E_PHY_TYPE_10GBASE_AOC:
469 ecmd->supported = SUPPORTED_10000baseT_Full; 475 supported = SUPPORTED_10000baseT_Full;
470 ecmd->advertising = SUPPORTED_10000baseT_Full; 476 advertising = SUPPORTED_10000baseT_Full;
471 break; 477 break;
472 case I40E_PHY_TYPE_SGMII: 478 case I40E_PHY_TYPE_SGMII:
473 ecmd->supported = SUPPORTED_Autoneg | 479 supported = SUPPORTED_Autoneg |
474 SUPPORTED_1000baseT_Full; 480 SUPPORTED_1000baseT_Full;
475 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) 481 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
476 ecmd->advertising |= ADVERTISED_1000baseT_Full; 482 advertising |= ADVERTISED_1000baseT_Full;
477 if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) { 483 if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
478 ecmd->supported |= SUPPORTED_100baseT_Full; 484 supported |= SUPPORTED_100baseT_Full;
479 if (hw_link_info->requested_speeds & 485 if (hw_link_info->requested_speeds &
480 I40E_LINK_SPEED_100MB) 486 I40E_LINK_SPEED_100MB)
481 ecmd->advertising |= ADVERTISED_100baseT_Full; 487 advertising |= ADVERTISED_100baseT_Full;
482 } 488 }
483 break; 489 break;
484 case I40E_PHY_TYPE_40GBASE_KR4: 490 case I40E_PHY_TYPE_40GBASE_KR4:
@@ -486,25 +492,25 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
486 case I40E_PHY_TYPE_10GBASE_KR: 492 case I40E_PHY_TYPE_10GBASE_KR:
487 case I40E_PHY_TYPE_10GBASE_KX4: 493 case I40E_PHY_TYPE_10GBASE_KX4:
488 case I40E_PHY_TYPE_1000BASE_KX: 494 case I40E_PHY_TYPE_1000BASE_KX:
489 ecmd->supported |= SUPPORTED_40000baseKR4_Full | 495 supported |= SUPPORTED_40000baseKR4_Full |
490 SUPPORTED_20000baseKR2_Full | 496 SUPPORTED_20000baseKR2_Full |
491 SUPPORTED_10000baseKR_Full | 497 SUPPORTED_10000baseKR_Full |
492 SUPPORTED_10000baseKX4_Full | 498 SUPPORTED_10000baseKX4_Full |
493 SUPPORTED_1000baseKX_Full | 499 SUPPORTED_1000baseKX_Full |
494 SUPPORTED_Autoneg; 500 SUPPORTED_Autoneg;
495 ecmd->advertising |= ADVERTISED_40000baseKR4_Full | 501 advertising |= ADVERTISED_40000baseKR4_Full |
496 ADVERTISED_20000baseKR2_Full | 502 ADVERTISED_20000baseKR2_Full |
497 ADVERTISED_10000baseKR_Full | 503 ADVERTISED_10000baseKR_Full |
498 ADVERTISED_10000baseKX4_Full | 504 ADVERTISED_10000baseKX4_Full |
499 ADVERTISED_1000baseKX_Full | 505 ADVERTISED_1000baseKX_Full |
500 ADVERTISED_Autoneg; 506 ADVERTISED_Autoneg;
501 break; 507 break;
502 case I40E_PHY_TYPE_25GBASE_KR: 508 case I40E_PHY_TYPE_25GBASE_KR:
503 case I40E_PHY_TYPE_25GBASE_CR: 509 case I40E_PHY_TYPE_25GBASE_CR:
504 case I40E_PHY_TYPE_25GBASE_SR: 510 case I40E_PHY_TYPE_25GBASE_SR:
505 case I40E_PHY_TYPE_25GBASE_LR: 511 case I40E_PHY_TYPE_25GBASE_LR:
506 ecmd->supported = SUPPORTED_Autoneg; 512 supported = SUPPORTED_Autoneg;
507 ecmd->advertising = ADVERTISED_Autoneg; 513 advertising = ADVERTISED_Autoneg;
508 /* TODO: add speeds when ethtool is ready to support*/ 514 /* TODO: add speeds when ethtool is ready to support*/
509 break; 515 break;
510 default: 516 default:
@@ -520,38 +526,43 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
520 i40e_phy_type_to_ethtool(pf, &e_supported, 526 i40e_phy_type_to_ethtool(pf, &e_supported,
521 &e_advertising); 527 &e_advertising);
522 528
523 ecmd->supported = ecmd->supported & e_supported; 529 supported = supported & e_supported;
524 ecmd->advertising = ecmd->advertising & e_advertising; 530 advertising = advertising & e_advertising;
525 531
526 /* Set speed and duplex */ 532 /* Set speed and duplex */
527 switch (link_speed) { 533 switch (link_speed) {
528 case I40E_LINK_SPEED_40GB: 534 case I40E_LINK_SPEED_40GB:
529 ethtool_cmd_speed_set(ecmd, SPEED_40000); 535 cmd->base.speed = SPEED_40000;
530 break; 536 break;
531 case I40E_LINK_SPEED_25GB: 537 case I40E_LINK_SPEED_25GB:
532#ifdef SPEED_25000 538#ifdef SPEED_25000
533 ethtool_cmd_speed_set(ecmd, SPEED_25000); 539 cmd->base.speed = SPEED_25000;
534#else 540#else
535 netdev_info(netdev, 541 netdev_info(netdev,
536 "Speed is 25G, display not supported by this version of ethtool.\n"); 542 "Speed is 25G, display not supported by this version of ethtool.\n");
537#endif 543#endif
538 break; 544 break;
539 case I40E_LINK_SPEED_20GB: 545 case I40E_LINK_SPEED_20GB:
540 ethtool_cmd_speed_set(ecmd, SPEED_20000); 546 cmd->base.speed = SPEED_20000;
541 break; 547 break;
542 case I40E_LINK_SPEED_10GB: 548 case I40E_LINK_SPEED_10GB:
543 ethtool_cmd_speed_set(ecmd, SPEED_10000); 549 cmd->base.speed = SPEED_10000;
544 break; 550 break;
545 case I40E_LINK_SPEED_1GB: 551 case I40E_LINK_SPEED_1GB:
546 ethtool_cmd_speed_set(ecmd, SPEED_1000); 552 cmd->base.speed = SPEED_1000;
547 break; 553 break;
548 case I40E_LINK_SPEED_100MB: 554 case I40E_LINK_SPEED_100MB:
549 ethtool_cmd_speed_set(ecmd, SPEED_100); 555 cmd->base.speed = SPEED_100;
550 break; 556 break;
551 default: 557 default:
552 break; 558 break;
553 } 559 }
554 ecmd->duplex = DUPLEX_FULL; 560 cmd->base.duplex = DUPLEX_FULL;
561
562 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
563 supported);
564 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
565 advertising);
555} 566}
556 567
557/** 568/**
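Note that the converted i40e_get_settings_link_up() still assembles the supported/advertising masks as legacy 32-bit SUPPORTED_*/ADVERTISED_* values and only translates them into the new link-mode bitmaps at the end via ethtool_convert_legacy_u32_to_link_mode(). A rough userspace model of that helper pair is sketched below; the real helpers live in the ethtool core, and the bitmap width and bit values here are only illustrative.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MODE_WORDS 3			/* the link-mode bitmap is wider than 32 bits */

/* models ethtool_convert_legacy_u32_to_link_mode(): zero the bitmap, copy low word */
static void legacy_to_mode(uint32_t mode[MODE_WORDS], uint32_t legacy)
{
	memset(mode, 0, MODE_WORDS * sizeof(mode[0]));
	mode[0] = legacy;
}

/* models ethtool_convert_link_mode_to_legacy_u32(): lossy above bit 31 */
static int mode_to_legacy(uint32_t *legacy, const uint32_t mode[MODE_WORDS])
{
	*legacy = mode[0];
	for (int i = 1; i < MODE_WORDS; i++)
		if (mode[i])
			return 0;	/* bits above 31 cannot be represented */
	return 1;
}

int main(void)
{
	uint32_t supported[MODE_WORDS], back;
	uint32_t legacy = 0x60;		/* e.g. two low link-mode bits set */

	legacy_to_mode(supported, legacy);
	printf("lossless: %d, mask 0x%x\n", mode_to_legacy(&back, supported), back);
	return 0;
}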
@@ -562,18 +573,24 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
562 * Reports link settings that can be determined when link is down 573 * Reports link settings that can be determined when link is down
563 **/ 574 **/
564static void i40e_get_settings_link_down(struct i40e_hw *hw, 575static void i40e_get_settings_link_down(struct i40e_hw *hw,
565 struct ethtool_cmd *ecmd, 576 struct ethtool_link_ksettings *cmd,
566 struct i40e_pf *pf) 577 struct i40e_pf *pf)
567{ 578{
579 u32 supported, advertising;
580
568 /* link is down and the driver needs to fall back on 581 /* link is down and the driver needs to fall back on
569 * supported phy types to figure out what info to display 582 * supported phy types to figure out what info to display
570 */ 583 */
571 i40e_phy_type_to_ethtool(pf, &ecmd->supported, 584 i40e_phy_type_to_ethtool(pf, &supported, &advertising);
572 &ecmd->advertising); 585
586 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
587 supported);
588 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
589 advertising);
573 590
 574 /* With no link, speed and duplex are unknown */ 591 /* With no link, speed and duplex are unknown */
575 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); 592 cmd->base.speed = SPEED_UNKNOWN;
576 ecmd->duplex = DUPLEX_UNKNOWN; 593 cmd->base.duplex = DUPLEX_UNKNOWN;
577} 594}
578 595
579/** 596/**
@@ -583,74 +600,85 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw,
583 * 600 *
584 * Reports speed/duplex settings based on media_type 601 * Reports speed/duplex settings based on media_type
585 **/ 602 **/
586static int i40e_get_settings(struct net_device *netdev, 603static int i40e_get_link_ksettings(struct net_device *netdev,
587 struct ethtool_cmd *ecmd) 604 struct ethtool_link_ksettings *cmd)
588{ 605{
589 struct i40e_netdev_priv *np = netdev_priv(netdev); 606 struct i40e_netdev_priv *np = netdev_priv(netdev);
590 struct i40e_pf *pf = np->vsi->back; 607 struct i40e_pf *pf = np->vsi->back;
591 struct i40e_hw *hw = &pf->hw; 608 struct i40e_hw *hw = &pf->hw;
592 struct i40e_link_status *hw_link_info = &hw->phy.link_info; 609 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
593 bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; 610 bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
611 u32 advertising;
594 612
595 if (link_up) 613 if (link_up)
596 i40e_get_settings_link_up(hw, ecmd, netdev, pf); 614 i40e_get_settings_link_up(hw, cmd, netdev, pf);
597 else 615 else
598 i40e_get_settings_link_down(hw, ecmd, pf); 616 i40e_get_settings_link_down(hw, cmd, pf);
599 617
600 /* Now set the settings that don't rely on link being up/down */ 618 /* Now set the settings that don't rely on link being up/down */
601 /* Set autoneg settings */ 619 /* Set autoneg settings */
602 ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? 620 cmd->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
603 AUTONEG_ENABLE : AUTONEG_DISABLE); 621 AUTONEG_ENABLE : AUTONEG_DISABLE);
604 622
605 switch (hw->phy.media_type) { 623 switch (hw->phy.media_type) {
606 case I40E_MEDIA_TYPE_BACKPLANE: 624 case I40E_MEDIA_TYPE_BACKPLANE:
607 ecmd->supported |= SUPPORTED_Autoneg | 625 ethtool_link_ksettings_add_link_mode(cmd, supported,
608 SUPPORTED_Backplane; 626 Autoneg);
609 ecmd->advertising |= ADVERTISED_Autoneg | 627 ethtool_link_ksettings_add_link_mode(cmd, supported,
610 ADVERTISED_Backplane; 628 Backplane);
611 ecmd->port = PORT_NONE; 629 ethtool_link_ksettings_add_link_mode(cmd, advertising,
630 Autoneg);
631 ethtool_link_ksettings_add_link_mode(cmd, advertising,
632 Backplane);
633 cmd->base.port = PORT_NONE;
612 break; 634 break;
613 case I40E_MEDIA_TYPE_BASET: 635 case I40E_MEDIA_TYPE_BASET:
614 ecmd->supported |= SUPPORTED_TP; 636 ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
615 ecmd->advertising |= ADVERTISED_TP; 637 ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
616 ecmd->port = PORT_TP; 638 cmd->base.port = PORT_TP;
617 break; 639 break;
618 case I40E_MEDIA_TYPE_DA: 640 case I40E_MEDIA_TYPE_DA:
619 case I40E_MEDIA_TYPE_CX4: 641 case I40E_MEDIA_TYPE_CX4:
620 ecmd->supported |= SUPPORTED_FIBRE; 642 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
621 ecmd->advertising |= ADVERTISED_FIBRE; 643 ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
622 ecmd->port = PORT_DA; 644 cmd->base.port = PORT_DA;
623 break; 645 break;
624 case I40E_MEDIA_TYPE_FIBER: 646 case I40E_MEDIA_TYPE_FIBER:
625 ecmd->supported |= SUPPORTED_FIBRE; 647 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
626 ecmd->port = PORT_FIBRE; 648 cmd->base.port = PORT_FIBRE;
627 break; 649 break;
628 case I40E_MEDIA_TYPE_UNKNOWN: 650 case I40E_MEDIA_TYPE_UNKNOWN:
629 default: 651 default:
630 ecmd->port = PORT_OTHER; 652 cmd->base.port = PORT_OTHER;
631 break; 653 break;
632 } 654 }
633 655
634 /* Set transceiver */
635 ecmd->transceiver = XCVR_EXTERNAL;
636
637 /* Set flow control settings */ 656 /* Set flow control settings */
638 ecmd->supported |= SUPPORTED_Pause; 657 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
639 658
640 switch (hw->fc.requested_mode) { 659 switch (hw->fc.requested_mode) {
641 case I40E_FC_FULL: 660 case I40E_FC_FULL:
642 ecmd->advertising |= ADVERTISED_Pause; 661 ethtool_link_ksettings_add_link_mode(cmd, advertising,
662 Pause);
643 break; 663 break;
644 case I40E_FC_TX_PAUSE: 664 case I40E_FC_TX_PAUSE:
645 ecmd->advertising |= ADVERTISED_Asym_Pause; 665 ethtool_link_ksettings_add_link_mode(cmd, advertising,
666 Asym_Pause);
646 break; 667 break;
647 case I40E_FC_RX_PAUSE: 668 case I40E_FC_RX_PAUSE:
648 ecmd->advertising |= (ADVERTISED_Pause | 669 ethtool_link_ksettings_add_link_mode(cmd, advertising,
649 ADVERTISED_Asym_Pause); 670 Pause);
671 ethtool_link_ksettings_add_link_mode(cmd, advertising,
672 Asym_Pause);
650 break; 673 break;
651 default: 674 default:
652 ecmd->advertising &= ~(ADVERTISED_Pause | 675 ethtool_convert_link_mode_to_legacy_u32(
653 ADVERTISED_Asym_Pause); 676 &advertising, cmd->link_modes.advertising);
677
678 advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
679
680 ethtool_convert_legacy_u32_to_link_mode(
681 cmd->link_modes.advertising, advertising);
654 break; 682 break;
655 } 683 }
656 684
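The flow-control portion of the new i40e_get_link_ksettings() keeps the same mapping as the old callback: full flow control advertises Pause, Tx-only advertises Asym_Pause, Rx-only advertises both, and anything else clears both bits. That mapping is captured in the small standalone table below; the enum and bit names are illustrative stand-ins rather than the kernel's.

#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

#define ADV_PAUSE	(1u << 0)
#define ADV_ASYM_PAUSE	(1u << 1)

/* mirrors the switch on hw->fc.requested_mode in the hunk above */
static unsigned int fc_to_advertised(enum fc_mode mode)
{
	switch (mode) {
	case FC_FULL:
		return ADV_PAUSE;
	case FC_TX_PAUSE:
		return ADV_ASYM_PAUSE;
	case FC_RX_PAUSE:
		return ADV_PAUSE | ADV_ASYM_PAUSE;
	default:
		return 0;
	}
}

int main(void)
{
	const char *names[] = { "none", "rx", "tx", "full" };

	for (int m = FC_NONE; m <= FC_FULL; m++)
		printf("%-4s -> pause=%d asym=%d\n", names[m],
		       !!(fc_to_advertised(m) & ADV_PAUSE),
		       !!(fc_to_advertised(m) & ADV_ASYM_PAUSE));
	return 0;
}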
@@ -664,8 +692,8 @@ static int i40e_get_settings(struct net_device *netdev,
664 * 692 *
665 * Set speed/duplex per media_types advertised/forced 693 * Set speed/duplex per media_types advertised/forced
666 **/ 694 **/
667static int i40e_set_settings(struct net_device *netdev, 695static int i40e_set_link_ksettings(struct net_device *netdev,
668 struct ethtool_cmd *ecmd) 696 const struct ethtool_link_ksettings *cmd)
669{ 697{
670 struct i40e_netdev_priv *np = netdev_priv(netdev); 698 struct i40e_netdev_priv *np = netdev_priv(netdev);
671 struct i40e_aq_get_phy_abilities_resp abilities; 699 struct i40e_aq_get_phy_abilities_resp abilities;
@@ -673,12 +701,14 @@ static int i40e_set_settings(struct net_device *netdev,
673 struct i40e_pf *pf = np->vsi->back; 701 struct i40e_pf *pf = np->vsi->back;
674 struct i40e_vsi *vsi = np->vsi; 702 struct i40e_vsi *vsi = np->vsi;
675 struct i40e_hw *hw = &pf->hw; 703 struct i40e_hw *hw = &pf->hw;
676 struct ethtool_cmd safe_ecmd; 704 struct ethtool_link_ksettings safe_cmd;
705 struct ethtool_link_ksettings copy_cmd;
677 i40e_status status = 0; 706 i40e_status status = 0;
678 bool change = false; 707 bool change = false;
679 int err = 0; 708 int err = 0;
680 u8 autoneg; 709 u32 autoneg;
681 u32 advertise; 710 u32 advertise;
711 u32 tmp;
682 712
683 /* Changing port settings is not supported if this isn't the 713 /* Changing port settings is not supported if this isn't the
684 * port's controlling PF 714 * port's controlling PF
@@ -706,23 +736,31 @@ static int i40e_set_settings(struct net_device *netdev,
706 return -EOPNOTSUPP; 736 return -EOPNOTSUPP;
707 } 737 }
708 738
739 /* copy the cmd to copy_cmd to avoid modifying the origin */
740 memcpy(&copy_cmd, cmd, sizeof(struct ethtool_link_ksettings));
741
709 /* get our own copy of the bits to check against */ 742 /* get our own copy of the bits to check against */
710 memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd)); 743 memset(&safe_cmd, 0, sizeof(struct ethtool_link_ksettings));
711 i40e_get_settings(netdev, &safe_ecmd); 744 i40e_get_link_ksettings(netdev, &safe_cmd);
712 745
713 /* save autoneg and speed out of ecmd */ 746 /* save autoneg and speed out of cmd */
714 autoneg = ecmd->autoneg; 747 autoneg = cmd->base.autoneg;
715 advertise = ecmd->advertising; 748 ethtool_convert_link_mode_to_legacy_u32(&advertise,
749 cmd->link_modes.advertising);
716 750
717 /* set autoneg and speed back to what they currently are */ 751 /* set autoneg and speed back to what they currently are */
718 ecmd->autoneg = safe_ecmd.autoneg; 752 copy_cmd.base.autoneg = safe_cmd.base.autoneg;
719 ecmd->advertising = safe_ecmd.advertising; 753 ethtool_convert_link_mode_to_legacy_u32(
754 &tmp, safe_cmd.link_modes.advertising);
755 ethtool_convert_legacy_u32_to_link_mode(
756 copy_cmd.link_modes.advertising, tmp);
757
758 copy_cmd.base.cmd = safe_cmd.base.cmd;
720 759
721 ecmd->cmd = safe_ecmd.cmd; 760 /* If copy_cmd and safe_cmd are not the same now, then they are
722 /* If ecmd and safe_ecmd are not the same now, then they are
723 * trying to set something that we do not support 761 * trying to set something that we do not support
724 */ 762 */
725 if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd))) 763 if (memcmp(&copy_cmd, &safe_cmd, sizeof(struct ethtool_link_ksettings)))
726 return -EOPNOTSUPP; 764 return -EOPNOTSUPP;
727 765
728 while (test_bit(__I40E_CONFIG_BUSY, &vsi->state)) 766 while (test_bit(__I40E_CONFIG_BUSY, &vsi->state))
@@ -745,7 +783,8 @@ static int i40e_set_settings(struct net_device *netdev,
745 /* If autoneg was not already enabled */ 783 /* If autoneg was not already enabled */
746 if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) { 784 if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
747 /* If autoneg is not supported, return error */ 785 /* If autoneg is not supported, return error */
748 if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) { 786 if (!ethtool_link_ksettings_test_link_mode(
787 &safe_cmd, supported, Autoneg)) {
749 netdev_info(netdev, "Autoneg not supported on this phy\n"); 788 netdev_info(netdev, "Autoneg not supported on this phy\n");
750 return -EINVAL; 789 return -EINVAL;
751 } 790 }
@@ -760,7 +799,8 @@ static int i40e_set_settings(struct net_device *netdev,
760 /* If autoneg is supported 10GBASE_T is the only PHY 799 /* If autoneg is supported 10GBASE_T is the only PHY
761 * that can disable it, so otherwise return error 800 * that can disable it, so otherwise return error
762 */ 801 */
763 if (safe_ecmd.supported & SUPPORTED_Autoneg && 802 if (ethtool_link_ksettings_test_link_mode(
803 &safe_cmd, supported, Autoneg) &&
764 hw->phy.link_info.phy_type != 804 hw->phy.link_info.phy_type !=
765 I40E_PHY_TYPE_10GBASE_T) { 805 I40E_PHY_TYPE_10GBASE_T) {
766 netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); 806 netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
@@ -773,7 +813,9 @@ static int i40e_set_settings(struct net_device *netdev,
773 } 813 }
774 } 814 }
775 815
776 if (advertise & ~safe_ecmd.supported) 816 ethtool_convert_link_mode_to_legacy_u32(&tmp,
817 safe_cmd.link_modes.supported);
818 if (advertise & ~tmp)
777 return -EINVAL; 819 return -EINVAL;
778 820
779 if (advertise & ADVERTISED_100baseT_Full) 821 if (advertise & ADVERTISED_100baseT_Full)
@@ -1165,6 +1207,11 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
1165 struct i40e_hw *hw = &np->vsi->back->hw; 1207 struct i40e_hw *hw = &np->vsi->back->hw;
1166 u32 val; 1208 u32 val;
1167 1209
1210#define X722_EEPROM_SCOPE_LIMIT 0x5B9FFF
1211 if (hw->mac.type == I40E_MAC_X722) {
1212 val = X722_EEPROM_SCOPE_LIMIT + 1;
1213 return val;
1214 }
1168 val = (rd32(hw, I40E_GLPCI_LBARCTRL) 1215 val = (rd32(hw, I40E_GLPCI_LBARCTRL)
1169 & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK) 1216 & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
1170 >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT; 1217 >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
@@ -2359,8 +2406,8 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
2359 */ 2406 */
2360 fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port; 2407 fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
2361 fsp->h_u.tcp_ip4_spec.pdst = rule->src_port; 2408 fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
2362 fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0]; 2409 fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip;
2363 fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0]; 2410 fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip;
2364 2411
2365 if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET) 2412 if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
2366 fsp->ring_cookie = RX_CLS_FLOW_DISC; 2413 fsp->ring_cookie = RX_CLS_FLOW_DISC;
@@ -2574,24 +2621,6 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
2574} 2621}
2575 2622
2576/** 2623/**
2577 * i40e_match_fdir_input_set - Match a new filter against an existing one
2578 * @rule: The filter already added
 2579 * @input: The new filter to compare against
2580 *
2581 * Returns true if the two input set match
2582 **/
2583static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
2584 struct i40e_fdir_filter *input)
2585{
2586 if ((rule->dst_ip[0] != input->dst_ip[0]) ||
2587 (rule->src_ip[0] != input->src_ip[0]) ||
2588 (rule->dst_port != input->dst_port) ||
2589 (rule->src_port != input->src_port))
2590 return false;
2591 return true;
2592}
2593
2594/**
2595 * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry 2624 * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
2596 * @vsi: Pointer to the targeted VSI 2625 * @vsi: Pointer to the targeted VSI
2597 * @input: The filter to update or NULL to indicate deletion 2626 * @input: The filter to update or NULL to indicate deletion
@@ -2626,22 +2655,22 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
2626 2655
2627 /* if there is an old rule occupying our place remove it */ 2656 /* if there is an old rule occupying our place remove it */
2628 if (rule && (rule->fd_id == sw_idx)) { 2657 if (rule && (rule->fd_id == sw_idx)) {
2629 if (input && !i40e_match_fdir_input_set(rule, input)) 2658 /* Remove this rule, since we're either deleting it, or
2630 err = i40e_add_del_fdir(vsi, rule, false); 2659 * replacing it.
2631 else if (!input) 2660 */
2632 err = i40e_add_del_fdir(vsi, rule, false); 2661 err = i40e_add_del_fdir(vsi, rule, false);
2633 hlist_del(&rule->fdir_node); 2662 hlist_del(&rule->fdir_node);
2634 kfree(rule); 2663 kfree(rule);
2635 pf->fdir_pf_active_filters--; 2664 pf->fdir_pf_active_filters--;
2636 } 2665 }
2637 2666
2638 /* If no input this was a delete, err should be 0 if a rule was 2667 /* If we weren't given an input, this is a delete, so just return the
2639 * successfully found and removed from the list else -EINVAL 2668 * error code indicating if there was an entry at the requested slot
2640 */ 2669 */
2641 if (!input) 2670 if (!input)
2642 return err; 2671 return err;
2643 2672
2644 /* initialize node and set software index */ 2673 /* Otherwise, install the new rule as requested */
2645 INIT_HLIST_NODE(&input->fdir_node); 2674 INIT_HLIST_NODE(&input->fdir_node);
2646 2675
2647 /* add filter to the list */ 2676 /* add filter to the list */
@@ -2712,7 +2741,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
2712 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 2741 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2713 return -EOPNOTSUPP; 2742 return -EOPNOTSUPP;
2714 2743
2715 if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED) 2744 if (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)
2716 return -ENOSPC; 2745 return -ENOSPC;
2717 2746
2718 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || 2747 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
@@ -2724,6 +2753,10 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
2724 2753
2725 fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; 2754 fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
2726 2755
2756 /* Extended MAC field is not supported */
2757 if (fsp->flow_type & FLOW_MAC_EXT)
2758 return -EINVAL;
2759
2727 if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort + 2760 if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
2728 pf->hw.func_caps.fd_filters_guaranteed)) { 2761 pf->hw.func_caps.fd_filters_guaranteed)) {
2729 return -EINVAL; 2762 return -EINVAL;
@@ -2760,8 +2793,8 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
2760 */ 2793 */
2761 input->dst_port = fsp->h_u.tcp_ip4_spec.psrc; 2794 input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
2762 input->src_port = fsp->h_u.tcp_ip4_spec.pdst; 2795 input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
2763 input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; 2796 input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
2764 input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; 2797 input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
2765 2798
2766 if (ntohl(fsp->m_ext.data[1])) { 2799 if (ntohl(fsp->m_ext.data[1])) {
2767 vf_id = ntohl(fsp->h_ext.data[1]); 2800 vf_id = ntohl(fsp->h_ext.data[1]);
@@ -2781,12 +2814,19 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
2781 } 2814 }
2782 2815
2783 ret = i40e_add_del_fdir(vsi, input, true); 2816 ret = i40e_add_del_fdir(vsi, input, true);
2784free_input:
2785 if (ret) 2817 if (ret)
2786 kfree(input); 2818 goto free_input;
2787 else 2819
2788 i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL); 2820 /* Add the input filter to the fdir_input_list, possibly replacing
2821 * a previous filter. Do not free the input structure after adding it
2822 * to the list as this would cause a use-after-free bug.
2823 */
2824 i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
2789 2825
2826 return 0;
2827
2828free_input:
2829 kfree(input);
2790 return ret; 2830 return ret;
2791} 2831}
2792 2832
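The restructured tail of i40e_add_fdir_ethtool() is worth calling out: on failure the input filter is freed at the free_input label, while on success ownership passes to i40e_update_ethtool_fdir_entry() and, as the new comment notes, freeing it afterwards would be a use-after-free. The general shape of that ownership-transfer pattern is sketched below in an illustrative standalone form, not the driver's own code.

#include <stdio.h>
#include <stdlib.h>

struct filter { int id; struct filter *next; };

static struct filter *filter_list;

/* takes ownership of f on success; the caller frees only on error */
static int install_filter(struct filter *f, int fail)
{
	if (fail)
		return -1;		/* add failed: caller still owns f */
	f->next = filter_list;		/* success: the list owns f from here on */
	filter_list = f;
	return 0;
}

static int add_filter(int id, int fail)
{
	struct filter *f = calloc(1, sizeof(*f));

	if (!f)
		return -1;
	f->id = id;
	if (install_filter(f, fail)) {
		free(f);		/* safe only because f was never linked */
		return -1;
	}
	return 0;			/* never free f here: it is on the list */
}

int main(void)
{
	printf("ok add:   %d\n", add_filter(1, 0));
	printf("fail add: %d\n", add_filter(2, 1));
	return 0;
}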
@@ -3054,7 +3094,7 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
3054 I40E_PRIV_FLAGS_FD_ATR : 0; 3094 I40E_PRIV_FLAGS_FD_ATR : 0;
3055 ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ? 3095 ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
3056 I40E_PRIV_FLAGS_VEB_STATS : 0; 3096 I40E_PRIV_FLAGS_VEB_STATS : 0;
3057 ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ? 3097 ret_flags |= pf->hw_disabled_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
3058 0 : I40E_PRIV_FLAGS_HW_ATR_EVICT; 3098 0 : I40E_PRIV_FLAGS_HW_ATR_EVICT;
3059 if (pf->hw.pf_id == 0) { 3099 if (pf->hw.pf_id == 0) {
3060 ret_flags |= pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT ? 3100 ret_flags |= pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT ?
@@ -3094,7 +3134,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
3094 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 3134 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
3095 } else { 3135 } else {
3096 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; 3136 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
3097 pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; 3137 pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
3098 3138
3099 /* flush current ATR settings */ 3139 /* flush current ATR settings */
3100 set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); 3140 set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
@@ -3139,9 +3179,9 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
3139 3179
3140 if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) && 3180 if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) &&
3141 (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)) 3181 (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))
3142 pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE; 3182 pf->hw_disabled_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
3143 else 3183 else
3144 pf->auto_disable_flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE; 3184 pf->hw_disabled_flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE;
3145 3185
3146 /* if needed, issue reset to cause things to take effect */ 3186 /* if needed, issue reset to cause things to take effect */
3147 if (reset_required) 3187 if (reset_required)
@@ -3151,8 +3191,6 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
3151} 3191}
3152 3192
3153static const struct ethtool_ops i40e_ethtool_ops = { 3193static const struct ethtool_ops i40e_ethtool_ops = {
3154 .get_settings = i40e_get_settings,
3155 .set_settings = i40e_set_settings,
3156 .get_drvinfo = i40e_get_drvinfo, 3194 .get_drvinfo = i40e_get_drvinfo,
3157 .get_regs_len = i40e_get_regs_len, 3195 .get_regs_len = i40e_get_regs_len,
3158 .get_regs = i40e_get_regs, 3196 .get_regs = i40e_get_regs,
@@ -3189,6 +3227,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
3189 .set_priv_flags = i40e_set_priv_flags, 3227 .set_priv_flags = i40e_set_priv_flags,
3190 .get_per_queue_coalesce = i40e_get_per_queue_coalesce, 3228 .get_per_queue_coalesce = i40e_get_per_queue_coalesce,
3191 .set_per_queue_coalesce = i40e_set_per_queue_coalesce, 3229 .set_per_queue_coalesce = i40e_set_per_queue_coalesce,
3230 .get_link_ksettings = i40e_get_link_ksettings,
3231 .set_link_ksettings = i40e_set_link_ksettings,
3192}; 3232};
3193 3233
3194void i40e_set_ethtool_ops(struct net_device *netdev) 3234void i40e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index e8a8351c8ea9..caccb8e97f1b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,9 +39,9 @@ static const char i40e_driver_string[] =
39 39
40#define DRV_KERN "-k" 40#define DRV_KERN "-k"
41 41
42#define DRV_VERSION_MAJOR 1 42#define DRV_VERSION_MAJOR 2
43#define DRV_VERSION_MINOR 6 43#define DRV_VERSION_MINOR 1
44#define DRV_VERSION_BUILD 27 44#define DRV_VERSION_BUILD 7
45#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 45#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
46 __stringify(DRV_VERSION_MINOR) "." \ 46 __stringify(DRV_VERSION_MINOR) "." \
47 __stringify(DRV_VERSION_BUILD) DRV_KERN 47 __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -1101,13 +1101,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
1101 &osd->rx_lpi_count, &nsd->rx_lpi_count); 1101 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1102 1102
1103 if (pf->flags & I40E_FLAG_FD_SB_ENABLED && 1103 if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1104 !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) 1104 !(pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED))
1105 nsd->fd_sb_status = true; 1105 nsd->fd_sb_status = true;
1106 else 1106 else
1107 nsd->fd_sb_status = false; 1107 nsd->fd_sb_status = false;
1108 1108
1109 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED && 1109 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1110 !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) 1110 !(pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
1111 nsd->fd_atr_status = true; 1111 nsd->fd_atr_status = true;
1112 else 1112 else
1113 nsd->fd_atr_status = false; 1113 nsd->fd_atr_status = false;
@@ -2487,13 +2487,15 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2487{ 2487{
2488 struct i40e_netdev_priv *np = netdev_priv(netdev); 2488 struct i40e_netdev_priv *np = netdev_priv(netdev);
2489 struct i40e_vsi *vsi = np->vsi; 2489 struct i40e_vsi *vsi = np->vsi;
2490 struct i40e_pf *pf = vsi->back;
2490 2491
2491 netdev_info(netdev, "changing MTU from %d to %d\n", 2492 netdev_info(netdev, "changing MTU from %d to %d\n",
2492 netdev->mtu, new_mtu); 2493 netdev->mtu, new_mtu);
2493 netdev->mtu = new_mtu; 2494 netdev->mtu = new_mtu;
2494 if (netif_running(netdev)) 2495 if (netif_running(netdev))
2495 i40e_vsi_reinit_locked(vsi); 2496 i40e_vsi_reinit_locked(vsi);
2496 i40e_notify_client_of_l2_param_changes(vsi); 2497 pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
2498 I40E_FLAG_CLIENT_L2_CHANGE);
2497 return 0; 2499 return 0;
2498} 2500}
2499 2501
@@ -3281,6 +3283,11 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3281 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 3283 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3282 return; 3284 return;
3283 3285
3286 /* Reset FDir counters as we're replaying all existing filters */
3287 pf->fd_tcp4_filter_cnt = 0;
3288 pf->fd_udp4_filter_cnt = 0;
3289 pf->fd_ip4_filter_cnt = 0;
3290
3284 hlist_for_each_entry_safe(filter, node, 3291 hlist_for_each_entry_safe(filter, node,
3285 &pf->fdir_filter_list, fdir_node) { 3292 &pf->fdir_filter_list, fdir_node) {
3286 i40e_add_del_fdir(vsi, filter, true); 3293 i40e_add_del_fdir(vsi, filter, true);
@@ -4463,17 +4470,16 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4463 **/ 4470 **/
4464static void i40e_vsi_close(struct i40e_vsi *vsi) 4471static void i40e_vsi_close(struct i40e_vsi *vsi)
4465{ 4472{
4466 bool reset = false; 4473 struct i40e_pf *pf = vsi->back;
4467
4468 if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) 4474 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
4469 i40e_down(vsi); 4475 i40e_down(vsi);
4470 i40e_vsi_free_irq(vsi); 4476 i40e_vsi_free_irq(vsi);
4471 i40e_vsi_free_tx_resources(vsi); 4477 i40e_vsi_free_tx_resources(vsi);
4472 i40e_vsi_free_rx_resources(vsi); 4478 i40e_vsi_free_rx_resources(vsi);
4473 vsi->current_netdev_flags = 0; 4479 vsi->current_netdev_flags = 0;
4474 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) 4480 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
4475 reset = true; 4481 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
4476 i40e_notify_client_of_netdev_close(vsi, reset); 4482 pf->flags |= I40E_FLAG_CLIENT_RESET;
4477} 4483}
4478 4484
4479/** 4485/**
@@ -5464,13 +5470,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
5464 /* replay FDIR SB filters */ 5470 /* replay FDIR SB filters */
5465 if (vsi->type == I40E_VSI_FDIR) { 5471 if (vsi->type == I40E_VSI_FDIR) {
5466 /* reset fd counters */ 5472 /* reset fd counters */
5467 pf->fd_add_err = pf->fd_atr_cnt = 0; 5473 pf->fd_add_err = 0;
5468 if (pf->fd_tcp_rule > 0) { 5474 pf->fd_atr_cnt = 0;
5469 pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
5470 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5471 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
5472 pf->fd_tcp_rule = 0;
5473 }
5474 i40e_fdir_filter_restore(vsi); 5475 i40e_fdir_filter_restore(vsi);
5475 } 5476 }
5476 5477
@@ -5542,8 +5543,6 @@ void i40e_down(struct i40e_vsi *vsi)
5542 i40e_clean_rx_ring(vsi->rx_rings[i]); 5543 i40e_clean_rx_ring(vsi->rx_rings[i]);
5543 } 5544 }
5544 5545
5545 i40e_notify_client_of_netdev_close(vsi, false);
5546
5547} 5546}
5548 5547
5549/** 5548/**
@@ -5612,9 +5611,12 @@ static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
5612 struct tc_to_netdev *tc) 5611 struct tc_to_netdev *tc)
5613#endif 5612#endif
5614{ 5613{
5615 if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) 5614 if (tc->type != TC_SETUP_MQPRIO)
5616 return -EINVAL; 5615 return -EINVAL;
5617 return i40e_setup_tc(netdev, tc->tc); 5616
5617 tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
5618
5619 return i40e_setup_tc(netdev, tc->mqprio->num_tc);
5618} 5620}
5619 5621
5620/** 5622/**
@@ -5752,7 +5754,11 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5752 hlist_del(&filter->fdir_node); 5754 hlist_del(&filter->fdir_node);
5753 kfree(filter); 5755 kfree(filter);
5754 } 5756 }
5757
5755 pf->fdir_pf_active_filters = 0; 5758 pf->fdir_pf_active_filters = 0;
5759 pf->fd_tcp4_filter_cnt = 0;
5760 pf->fd_udp4_filter_cnt = 0;
5761 pf->fd_ip4_filter_cnt = 0;
5756} 5762}
5757 5763
5758/** 5764/**
@@ -6021,8 +6027,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
6021 i40e_service_event_schedule(pf); 6027 i40e_service_event_schedule(pf);
6022 } else { 6028 } else {
6023 i40e_pf_unquiesce_all_vsi(pf); 6029 i40e_pf_unquiesce_all_vsi(pf);
6024 /* Notify the client for the DCB changes */ 6030 pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
6025 i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]); 6031 I40E_FLAG_CLIENT_L2_CHANGE);
6026 } 6032 }
6027 6033
6028exit: 6034exit:
@@ -6144,8 +6150,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
6144 (pf->fd_add_err == 0) || 6150 (pf->fd_add_err == 0) ||
6145 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) { 6151 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
6146 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && 6152 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
6147 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { 6153 (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) {
6148 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; 6154 pf->hw_disabled_flags &= ~I40E_FLAG_FD_SB_ENABLED;
6149 if (I40E_DEBUG_FD & pf->hw.debug_mask) 6155 if (I40E_DEBUG_FD & pf->hw.debug_mask)
6150 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); 6156 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
6151 } 6157 }
@@ -6156,9 +6162,9 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
6156 */ 6162 */
6157 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) { 6163 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
6158 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && 6164 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
6159 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) && 6165 (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED) &&
6160 (pf->fd_tcp_rule == 0)) { 6166 (pf->fd_tcp4_filter_cnt == 0)) {
6161 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; 6167 pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
6162 if (I40E_DEBUG_FD & pf->hw.debug_mask) 6168 if (I40E_DEBUG_FD & pf->hw.debug_mask)
6163 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n"); 6169 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
6164 } 6170 }
@@ -6210,7 +6216,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
6210 } 6216 }
6211 6217
6212 pf->fd_flush_timestamp = jiffies; 6218 pf->fd_flush_timestamp = jiffies;
6213 pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; 6219 pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
6214 /* flush all filters */ 6220 /* flush all filters */
6215 wr32(&pf->hw, I40E_PFQF_CTL_1, 6221 wr32(&pf->hw, I40E_PFQF_CTL_1,
6216 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); 6222 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
@@ -6229,8 +6235,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
6229 } else { 6235 } else {
6230 /* replay sideband filters */ 6236 /* replay sideband filters */
6231 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); 6237 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
6232 if (!disable_atr) 6238 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
6233 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; 6239 pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
6234 clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); 6240 clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
6235 if (I40E_DEBUG_FD & pf->hw.debug_mask) 6241 if (I40E_DEBUG_FD & pf->hw.debug_mask)
6236 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); 6242 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
@@ -7351,7 +7357,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
7351{ 7357{
7352 struct i40e_hw *hw = &pf->hw; 7358 struct i40e_hw *hw = &pf->hw;
7353 i40e_status ret; 7359 i40e_status ret;
7354 __be16 port; 7360 u16 port;
7355 int i; 7361 int i;
7356 7362
7357 if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC)) 7363 if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
@@ -7375,7 +7381,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
7375 "%s %s port %d, index %d failed, err %s aq_err %s\n", 7381 "%s %s port %d, index %d failed, err %s aq_err %s\n",
7376 pf->udp_ports[i].type ? "vxlan" : "geneve", 7382 pf->udp_ports[i].type ? "vxlan" : "geneve",
7377 port ? "add" : "delete", 7383 port ? "add" : "delete",
7378 ntohs(port), i, 7384 port, i,
7379 i40e_stat_str(&pf->hw, ret), 7385 i40e_stat_str(&pf->hw, ret),
7380 i40e_aq_str(&pf->hw, 7386 i40e_aq_str(&pf->hw,
7381 pf->hw.aq.asq_last_status)); 7387 pf->hw.aq.asq_last_status));
@@ -7411,7 +7417,18 @@ static void i40e_service_task(struct work_struct *work)
7411 i40e_vc_process_vflr_event(pf); 7417 i40e_vc_process_vflr_event(pf);
7412 i40e_watchdog_subtask(pf); 7418 i40e_watchdog_subtask(pf);
7413 i40e_fdir_reinit_subtask(pf); 7419 i40e_fdir_reinit_subtask(pf);
7414 i40e_client_subtask(pf); 7420 if (pf->flags & I40E_FLAG_CLIENT_RESET) {
7421 /* Client subtask will reopen next time through. */
7422 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
7423 pf->flags &= ~I40E_FLAG_CLIENT_RESET;
7424 } else {
7425 i40e_client_subtask(pf);
7426 if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
7427 i40e_notify_client_of_l2_param_changes(
7428 pf->vsi[pf->lan_vsi]);
7429 pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
7430 }
7431 }
7415 i40e_sync_filters_subtask(pf); 7432 i40e_sync_filters_subtask(pf);
7416 i40e_sync_udp_filters_subtask(pf); 7433 i40e_sync_udp_filters_subtask(pf);
7417 i40e_clean_adminq_subtask(pf); 7434 i40e_clean_adminq_subtask(pf);
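The service-task hunk above completes a theme running through this patch: rather than calling i40e_notify_client_of_netdev_close() or i40e_notify_client_of_l2_param_changes() synchronously from i40e_vsi_close(), i40e_down() and the MTU/DCB paths, those sites now only set I40E_FLAG_CLIENT_RESET or I40E_FLAG_CLIENT_L2_CHANGE and the periodic service task delivers the notification. A sketch of that defer-to-worker flag pattern, with illustrative names, is shown below.

#include <stdio.h>

#define FLAG_CLIENT_RESET	(1u << 0)
#define FLAG_CLIENT_L2_CHANGE	(1u << 1)

static unsigned int pf_flags;

/* fast paths: only record that the client needs to hear about it */
static void note_l2_change(void) { pf_flags |= FLAG_CLIENT_L2_CHANGE; }
static void note_reset(void)     { pf_flags |= FLAG_CLIENT_RESET; }

/* periodic worker: reset notification takes precedence in a given pass */
static void service_task(void)
{
	if (pf_flags & FLAG_CLIENT_RESET) {
		printf("notify client: netdev close/reset\n");
		pf_flags &= ~FLAG_CLIENT_RESET;
	} else if (pf_flags & FLAG_CLIENT_L2_CHANGE) {
		printf("notify client: L2 parameters changed\n");
		pf_flags &= ~FLAG_CLIENT_L2_CHANGE;
	}
}

int main(void)
{
	note_l2_change();
	note_reset();
	service_task();		/* reset wins this pass */
	service_task();		/* L2 change delivered on the next pass */
	return 0;
}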
@@ -7809,6 +7826,7 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7809static int i40e_init_msix(struct i40e_pf *pf) 7826static int i40e_init_msix(struct i40e_pf *pf)
7810{ 7827{
7811 struct i40e_hw *hw = &pf->hw; 7828 struct i40e_hw *hw = &pf->hw;
7829 int cpus, extra_vectors;
7812 int vectors_left; 7830 int vectors_left;
7813 int v_budget, i; 7831 int v_budget, i;
7814 int v_actual; 7832 int v_actual;
@@ -7844,10 +7862,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
7844 vectors_left--; 7862 vectors_left--;
7845 } 7863 }
7846 7864
7847 /* reserve vectors for the main PF traffic queues */ 7865 /* reserve some vectors for the main PF traffic queues. Initially we
7848 pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left); 7866 * only reserve at most 50% of the available vectors, in the case that
7867 * the number of online CPUs is large. This ensures that we can enable
7868 * extra features as well. Once we've enabled the other features, we
7869 * will use any remaining vectors to reach as close as we can to the
7870 * number of online CPUs.
7871 */
7872 cpus = num_online_cpus();
7873 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
7849 vectors_left -= pf->num_lan_msix; 7874 vectors_left -= pf->num_lan_msix;
7850 v_budget += pf->num_lan_msix;
7851 7875
7852 /* reserve one vector for sideband flow director */ 7876 /* reserve one vector for sideband flow director */
7853 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 7877 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
@@ -7910,6 +7934,23 @@ static int i40e_init_msix(struct i40e_pf *pf)
7910 } 7934 }
7911 } 7935 }
7912 7936
7937 /* On systems with a large number of SMP cores, we previously limited
7938 * the number of vectors for num_lan_msix to be at most 50% of the
7939 * available vectors, to allow for other features. Now, we add back
7940 * the remaining vectors. However, we ensure that the total
7941 * num_lan_msix will not exceed num_online_cpus(). To do this, we
7942 * calculate the number of vectors we can add without going over the
7943 * cap of CPUs. For systems with a small number of CPUs this will be
7944 * zero.
7945 */
7946 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
7947 pf->num_lan_msix += extra_vectors;
7948 vectors_left -= extra_vectors;
7949
7950 WARN(vectors_left < 0,
7951 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
7952
7953 v_budget += pf->num_lan_msix;
7913 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), 7954 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
7914 GFP_KERNEL); 7955 GFP_KERNEL);
7915 if (!pf->msix_entries) 7956 if (!pf->msix_entries)
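The MSI-X sizing change above is essentially a two-pass budget: the LAN queues initially claim at most half of the remaining vectors (and never more than the number of online CPUs), the other features take what they need, and whatever is left is handed back to the LAN queues up to the CPU count. The arithmetic is extracted below into a standalone form with made-up feature costs; only the shape of the calculation is taken from the patch.

#include <stdio.h>

/* two-pass split of vectors_left between LAN queues and other features */
static int plan_lan_vectors(int vectors_left, int cpus, int other_features)
{
	int lan = (cpus < vectors_left / 2) ? cpus : vectors_left / 2;
	int extra;

	vectors_left -= lan;
	vectors_left -= other_features;		/* FDIR, VMDq, iWARP, ... */
	if (vectors_left < 0)
		vectors_left = 0;

	/* second pass: top the LAN queues back up, capped at the CPU count */
	extra = cpus - lan;
	if (extra > vectors_left)
		extra = vectors_left;
	lan += extra;

	return lan;
}

int main(void)
{
	/* e.g. 64 usable vectors, 48 CPUs, 10 vectors wanted by other features */
	printf("lan vectors: %d\n", plan_lan_vectors(64, 48, 10));
	/* small box: 16 vectors, 4 CPUs, 3 vectors for other features */
	printf("lan vectors: %d\n", plan_lan_vectors(16, 4, 3));
	return 0;
}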
@@ -8360,13 +8401,10 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
8360 8401
8361 if (vsi->type == I40E_VSI_MAIN) { 8402 if (vsi->type == I40E_VSI_MAIN) {
8362 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) 8403 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8363 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), 8404 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
8364 seed_dw[i]);
8365 } else if (vsi->type == I40E_VSI_SRIOV) { 8405 } else if (vsi->type == I40E_VSI_SRIOV) {
8366 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++) 8406 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
8367 i40e_write_rx_ctl(hw, 8407 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
8368 I40E_VFQF_HKEY1(i, vf_id),
8369 seed_dw[i]);
8370 } else { 8408 } else {
8371 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n"); 8409 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
8372 } 8410 }
@@ -8384,9 +8422,7 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
8384 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE) 8422 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
8385 return -EINVAL; 8423 return -EINVAL;
8386 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) 8424 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
8387 i40e_write_rx_ctl(hw, 8425 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
8388 I40E_VFQF_HLUT1(i, vf_id),
8389 lut_dw[i]);
8390 } else { 8426 } else {
8391 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); 8427 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
8392 } 8428 }
@@ -8843,9 +8879,9 @@ static int i40e_sw_init(struct i40e_pf *pf)
8843 (pf->hw.aq.api_min_ver > 4))) { 8879 (pf->hw.aq.api_min_ver > 4))) {
8844 /* Supported in FW API version higher than 1.4 */ 8880 /* Supported in FW API version higher than 1.4 */
8845 pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; 8881 pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
8846 pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; 8882 pf->hw_disabled_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
8847 } else { 8883 } else {
8848 pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; 8884 pf->hw_disabled_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
8849 } 8885 }
8850 8886
8851 pf->eeprom_version = 0xDEAD; 8887 pf->eeprom_version = 0xDEAD;
@@ -8906,14 +8942,14 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
8906 i40e_fdir_filter_exit(pf); 8942 i40e_fdir_filter_exit(pf);
8907 } 8943 }
8908 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 8944 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8909 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; 8945 pf->hw_disabled_flags &= ~I40E_FLAG_FD_SB_ENABLED;
8910 /* reset fd counters */ 8946 /* reset fd counters */
8911 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0; 8947 pf->fd_add_err = 0;
8912 pf->fdir_pf_active_filters = 0; 8948 pf->fd_atr_cnt = 0;
8913 /* if ATR was auto disabled it can be re-enabled. */ 8949 /* if ATR was auto disabled it can be re-enabled. */
8914 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && 8950 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8915 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) { 8951 (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED)) {
8916 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; 8952 pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
8917 if (I40E_DEBUG_FD & pf->hw.debug_mask) 8953 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8918 dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); 8954 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
8919 } 8955 }
@@ -8982,7 +9018,7 @@ static int i40e_set_features(struct net_device *netdev,
8982 * 9018 *
8983 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found 9019 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
8984 **/ 9020 **/
8985static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port) 9021static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
8986{ 9022{
8987 u8 i; 9023 u8 i;
8988 9024
@@ -9005,7 +9041,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
9005 struct i40e_netdev_priv *np = netdev_priv(netdev); 9041 struct i40e_netdev_priv *np = netdev_priv(netdev);
9006 struct i40e_vsi *vsi = np->vsi; 9042 struct i40e_vsi *vsi = np->vsi;
9007 struct i40e_pf *pf = vsi->back; 9043 struct i40e_pf *pf = vsi->back;
9008 __be16 port = ti->port; 9044 u16 port = ntohs(ti->port);
9009 u8 next_idx; 9045 u8 next_idx;
9010 u8 idx; 9046 u8 idx;
9011 9047
@@ -9013,8 +9049,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
9013 9049
9014 /* Check if port already exists */ 9050 /* Check if port already exists */
9015 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 9051 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
9016 netdev_info(netdev, "port %d already offloaded\n", 9052 netdev_info(netdev, "port %d already offloaded\n", port);
9017 ntohs(port));
9018 return; 9053 return;
9019 } 9054 }
9020 9055
@@ -9023,7 +9058,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
9023 9058
9024 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 9059 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
9025 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n", 9060 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
9026 ntohs(port)); 9061 port);
9027 return; 9062 return;
9028 } 9063 }
9029 9064
@@ -9057,7 +9092,7 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
9057 struct i40e_netdev_priv *np = netdev_priv(netdev); 9092 struct i40e_netdev_priv *np = netdev_priv(netdev);
9058 struct i40e_vsi *vsi = np->vsi; 9093 struct i40e_vsi *vsi = np->vsi;
9059 struct i40e_pf *pf = vsi->back; 9094 struct i40e_pf *pf = vsi->back;
9060 __be16 port = ti->port; 9095 u16 port = ntohs(ti->port);
9061 u8 idx; 9096 u8 idx;
9062 9097
9063 idx = i40e_get_udp_port_idx(pf, port); 9098 idx = i40e_get_udp_port_idx(pf, port);
@@ -9089,7 +9124,7 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
9089 return; 9124 return;
9090not_found: 9125not_found:
9091 netdev_warn(netdev, "UDP port %d was not found, not deleting\n", 9126 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
9092 ntohs(port)); 9127 port);
9093} 9128}
9094 9129
9095static int i40e_get_phys_port_id(struct net_device *netdev, 9130static int i40e_get_phys_port_id(struct net_device *netdev,
@@ -9432,10 +9467,10 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
9432 if (vsi->type == I40E_VSI_MAIN) { 9467 if (vsi->type == I40E_VSI_MAIN) {
9433 SET_NETDEV_DEV(netdev, &pf->pdev->dev); 9468 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
9434 ether_addr_copy(mac_addr, hw->mac.perm_addr); 9469 ether_addr_copy(mac_addr, hw->mac.perm_addr);
9435 /* The following steps are necessary to prevent reception 9470 /* The following steps are necessary to properly keep track of
9436 * of tagged packets - some older NVM configurations load a 9471 * MAC-VLAN filters loaded into firmware - first we remove
9437 * default a MAC-VLAN filter that accepts any tagged packet 9472 * filter that is automatically generated by firmware and then
9438 * which must be replaced by a normal filter. 9473 * add new filter both to the driver hash table and firmware.
9439 */ 9474 */
9440 i40e_rm_default_mac_filter(vsi, mac_addr); 9475 i40e_rm_default_mac_filter(vsi, mac_addr);
9441 spin_lock_bh(&vsi->mac_filter_hash_lock); 9476 spin_lock_bh(&vsi->mac_filter_hash_lock);
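
The udp_tunnel add/del hunks above switch the local port variable from __be16 to u16 and apply ntohs() once at function entry, so the later log messages and lookups work in host order directly. Below is a minimal, hypothetical userspace sketch of that convention; the struct and function names are illustrative stand-ins, not the driver's.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the stack's udp_tunnel_info: port is big-endian. */
struct tunnel_info {
	uint16_t port_be;
};

/* Convert to host order once at the boundary; everything after uses plain u16. */
static void tunnel_add(const struct tunnel_info *ti)
{
	uint16_t port = ntohs(ti->port_be);

	printf("adding UDP tunnel port %u\n", port);	/* no ntohs() at each use */
}

int main(void)
{
	struct tunnel_info ti = { .port_be = htons(4789) };	/* VXLAN default */

	tunnel_add(&ti);
	return 0;
}

Converting at the boundary keeps byte-order handling in one place and removes the repeated ntohs() calls that the old netdev_info() lines carried.
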
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 38ee18f11124..800bd55d0159 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -292,14 +292,14 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
292{ 292{
293 enum i40e_status_code ret_code = 0; 293 enum i40e_status_code ret_code = 0;
294 294
295 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { 295 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
296 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); 296 if (!ret_code) {
297 if (!ret_code) { 297 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
298 ret_code = i40e_read_nvm_word_aq(hw, offset, data); 298 ret_code = i40e_read_nvm_word_aq(hw, offset, data);
299 i40e_release_nvm(hw); 299 } else {
300 ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
300 } 301 }
301 } else { 302 i40e_release_nvm(hw);
302 ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
303 } 303 }
304 return ret_code; 304 return ret_code;
305} 305}
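
The i40e_read_nvm_word() rework above takes the NVM semaphore once, then chooses between the AdminQ and SRCTL read paths, and releases the semaphore on a single exit path. The following standalone sketch shows that acquire-once/branch/release shape with stand-in lock and read functions; none of these stubs are driver APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the driver's NVM semaphore and its two access paths. */
static int acquire_nvm(void) { return 0; }		/* 0 == acquired */
static void release_nvm(void) { }
static int read_word_aq(uint16_t off, uint16_t *d)    { *d = off; return 0; }
static int read_word_srctl(uint16_t off, uint16_t *d) { *d = off; return 0; }

/* Acquire once, choose the access method, release on the single exit path. */
static int read_nvm_word(bool aq_capable, uint16_t offset, uint16_t *data)
{
	int ret = acquire_nvm();

	if (ret)
		return ret;		/* never took the lock, nothing to undo */

	if (aq_capable)
		ret = read_word_aq(offset, data);
	else
		ret = read_word_srctl(offset, data);

	release_nvm();			/* both branches funnel through here */
	return ret;
}

int main(void)
{
	uint16_t word = 0;
	int ret = read_nvm_word(true, 0x2A, &word);

	printf("ret=%d word=0x%04x\n", ret, word);
	return ret;
}
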
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 97d46058d71d..3880e417f167 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -203,7 +203,6 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
203 struct i40e_pf *pf = vsi->back; 203 struct i40e_pf *pf = vsi->back;
204 struct udphdr *udp; 204 struct udphdr *udp;
205 struct iphdr *ip; 205 struct iphdr *ip;
206 bool err = false;
207 u8 *raw_packet; 206 u8 *raw_packet;
208 int ret; 207 int ret;
209 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, 208 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
@@ -219,9 +218,9 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
219 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET 218 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
220 + sizeof(struct iphdr)); 219 + sizeof(struct iphdr));
221 220
222 ip->daddr = fd_data->dst_ip[0]; 221 ip->daddr = fd_data->dst_ip;
223 udp->dest = fd_data->dst_port; 222 udp->dest = fd_data->dst_port;
224 ip->saddr = fd_data->src_ip[0]; 223 ip->saddr = fd_data->src_ip;
225 udp->source = fd_data->src_port; 224 udp->source = fd_data->src_port;
226 225
227 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; 226 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
@@ -230,7 +229,9 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
230 dev_info(&pf->pdev->dev, 229 dev_info(&pf->pdev->dev,
231 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", 230 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
232 fd_data->pctype, fd_data->fd_id, ret); 231 fd_data->pctype, fd_data->fd_id, ret);
233 err = true; 232 /* Free the packet buffer since it wasn't added to the ring */
233 kfree(raw_packet);
234 return -EOPNOTSUPP;
234 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { 235 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
235 if (add) 236 if (add)
236 dev_info(&pf->pdev->dev, 237 dev_info(&pf->pdev->dev,
@@ -241,10 +242,13 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
241 "Filter deleted for PCTYPE %d loc = %d\n", 242 "Filter deleted for PCTYPE %d loc = %d\n",
242 fd_data->pctype, fd_data->fd_id); 243 fd_data->pctype, fd_data->fd_id);
243 } 244 }
244 if (err)
245 kfree(raw_packet);
246 245
247 return err ? -EOPNOTSUPP : 0; 246 if (add)
247 pf->fd_udp4_filter_cnt++;
248 else
249 pf->fd_udp4_filter_cnt--;
250
251 return 0;
248} 252}
249 253
250#define I40E_TCPIP_DUMMY_PACKET_LEN 54 254#define I40E_TCPIP_DUMMY_PACKET_LEN 54
@@ -263,7 +267,6 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
263 struct i40e_pf *pf = vsi->back; 267 struct i40e_pf *pf = vsi->back;
264 struct tcphdr *tcp; 268 struct tcphdr *tcp;
265 struct iphdr *ip; 269 struct iphdr *ip;
266 bool err = false;
267 u8 *raw_packet; 270 u8 *raw_packet;
268 int ret; 271 int ret;
269 /* Dummy packet */ 272 /* Dummy packet */
@@ -281,36 +284,20 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
281 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET 284 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
282 + sizeof(struct iphdr)); 285 + sizeof(struct iphdr));
283 286
284 ip->daddr = fd_data->dst_ip[0]; 287 ip->daddr = fd_data->dst_ip;
285 tcp->dest = fd_data->dst_port; 288 tcp->dest = fd_data->dst_port;
286 ip->saddr = fd_data->src_ip[0]; 289 ip->saddr = fd_data->src_ip;
287 tcp->source = fd_data->src_port; 290 tcp->source = fd_data->src_port;
288 291
289 if (add) {
290 pf->fd_tcp_rule++;
291 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
292 I40E_DEBUG_FD & pf->hw.debug_mask)
293 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
294 pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
295 } else {
296 pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
297 (pf->fd_tcp_rule - 1) : 0;
298 if (pf->fd_tcp_rule == 0) {
299 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
300 I40E_DEBUG_FD & pf->hw.debug_mask)
301 dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
302 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
303 }
304 }
305
306 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; 292 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
307 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); 293 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
308
309 if (ret) { 294 if (ret) {
310 dev_info(&pf->pdev->dev, 295 dev_info(&pf->pdev->dev,
311 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", 296 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
312 fd_data->pctype, fd_data->fd_id, ret); 297 fd_data->pctype, fd_data->fd_id, ret);
313 err = true; 298 /* Free the packet buffer since it wasn't added to the ring */
299 kfree(raw_packet);
300 return -EOPNOTSUPP;
314 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { 301 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
315 if (add) 302 if (add)
316 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n", 303 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
@@ -321,10 +308,23 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
321 fd_data->pctype, fd_data->fd_id); 308 fd_data->pctype, fd_data->fd_id);
322 } 309 }
323 310
324 if (err) 311 if (add) {
325 kfree(raw_packet); 312 pf->fd_tcp4_filter_cnt++;
313 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
314 I40E_DEBUG_FD & pf->hw.debug_mask)
315 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
316 pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
317 } else {
318 pf->fd_tcp4_filter_cnt--;
319 if (pf->fd_tcp4_filter_cnt == 0) {
320 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
321 I40E_DEBUG_FD & pf->hw.debug_mask)
322 dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
323 pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
324 }
325 }
326 326
327 return err ? -EOPNOTSUPP : 0; 327 return 0;
328} 328}
329 329
330#define I40E_IP_DUMMY_PACKET_LEN 34 330#define I40E_IP_DUMMY_PACKET_LEN 34
@@ -343,7 +343,6 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
343{ 343{
344 struct i40e_pf *pf = vsi->back; 344 struct i40e_pf *pf = vsi->back;
345 struct iphdr *ip; 345 struct iphdr *ip;
346 bool err = false;
347 u8 *raw_packet; 346 u8 *raw_packet;
348 int ret; 347 int ret;
349 int i; 348 int i;
@@ -359,18 +358,21 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
359 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN); 358 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
360 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET); 359 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
361 360
362 ip->saddr = fd_data->src_ip[0]; 361 ip->saddr = fd_data->src_ip;
363 ip->daddr = fd_data->dst_ip[0]; 362 ip->daddr = fd_data->dst_ip;
364 ip->protocol = 0; 363 ip->protocol = 0;
365 364
366 fd_data->pctype = i; 365 fd_data->pctype = i;
367 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); 366 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
368
369 if (ret) { 367 if (ret) {
370 dev_info(&pf->pdev->dev, 368 dev_info(&pf->pdev->dev,
371 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", 369 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
372 fd_data->pctype, fd_data->fd_id, ret); 370 fd_data->pctype, fd_data->fd_id, ret);
373 err = true; 371 /* The packet buffer wasn't added to the ring so we
372 * need to free it now.
373 */
374 kfree(raw_packet);
375 return -EOPNOTSUPP;
374 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { 376 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
375 if (add) 377 if (add)
376 dev_info(&pf->pdev->dev, 378 dev_info(&pf->pdev->dev,
@@ -383,10 +385,12 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
383 } 385 }
384 } 386 }
385 387
386 if (err) 388 if (add)
387 kfree(raw_packet); 389 pf->fd_ip4_filter_cnt++;
390 else
391 pf->fd_ip4_filter_cnt--;
388 392
389 return err ? -EOPNOTSUPP : 0; 393 return 0;
390} 394}
391 395
392/** 396/**
@@ -484,8 +488,8 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
484 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); 488 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
485 489
486 if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) && 490 if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
487 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { 491 (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) {
488 pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; 492 pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
489 set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); 493 set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
490 } 494 }
491 495
@@ -498,11 +502,11 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
498 */ 502 */
499 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { 503 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
500 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && 504 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
501 !(pf->auto_disable_flags & 505 !(pf->hw_disabled_flags &
502 I40E_FLAG_FD_SB_ENABLED)) { 506 I40E_FLAG_FD_SB_ENABLED)) {
503 if (I40E_DEBUG_FD & pf->hw.debug_mask) 507 if (I40E_DEBUG_FD & pf->hw.debug_mask)
504 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); 508 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
505 pf->auto_disable_flags |= 509 pf->hw_disabled_flags |=
506 I40E_FLAG_FD_SB_ENABLED; 510 I40E_FLAG_FD_SB_ENABLED;
507 } 511 }
508 } 512 }
@@ -1010,7 +1014,6 @@ err:
1010 **/ 1014 **/
1011void i40e_clean_rx_ring(struct i40e_ring *rx_ring) 1015void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1012{ 1016{
1013 struct device *dev = rx_ring->dev;
1014 unsigned long bi_size; 1017 unsigned long bi_size;
1015 u16 i; 1018 u16 i;
1016 1019
@@ -1030,7 +1033,20 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1030 if (!rx_bi->page) 1033 if (!rx_bi->page)
1031 continue; 1034 continue;
1032 1035
1033 dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE); 1036 /* Invalidate cache lines that may have been written to by
1037 * device so that we avoid corrupting memory.
1038 */
1039 dma_sync_single_range_for_cpu(rx_ring->dev,
1040 rx_bi->dma,
1041 rx_bi->page_offset,
1042 I40E_RXBUFFER_2048,
1043 DMA_FROM_DEVICE);
1044
1045 /* free resources associated with mapping */
1046 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
1047 PAGE_SIZE,
1048 DMA_FROM_DEVICE,
1049 I40E_RX_DMA_ATTR);
1034 __free_pages(rx_bi->page, 0); 1050 __free_pages(rx_bi->page, 0);
1035 1051
1036 rx_bi->page = NULL; 1052 rx_bi->page = NULL;
@@ -1159,7 +1175,10 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1159 } 1175 }
1160 1176
1161 /* map page for use */ 1177 /* map page for use */
1162 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); 1178 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1179 PAGE_SIZE,
1180 DMA_FROM_DEVICE,
1181 I40E_RX_DMA_ATTR);
1163 1182
1164 /* if mapping failed free memory back to system since 1183 /* if mapping failed free memory back to system since
1165 * there isn't much point in holding memory we can't use 1184 * there isn't much point in holding memory we can't use
@@ -1219,6 +1238,12 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1219 if (!i40e_alloc_mapped_page(rx_ring, bi)) 1238 if (!i40e_alloc_mapped_page(rx_ring, bi))
1220 goto no_buffers; 1239 goto no_buffers;
1221 1240
1241 /* sync the buffer for use by the device */
1242 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1243 bi->page_offset,
1244 I40E_RXBUFFER_2048,
1245 DMA_FROM_DEVICE);
1246
1222 /* Refresh the desc even if buffer_addrs didn't change 1247 /* Refresh the desc even if buffer_addrs didn't change
1223 * because each write-back erases this info. 1248 * because each write-back erases this info.
1224 */ 1249 */
@@ -1685,8 +1710,8 @@ struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
1685 rx_ring->rx_stats.page_reuse_count++; 1710 rx_ring->rx_stats.page_reuse_count++;
1686 } else { 1711 } else {
1687 /* we are not reusing the buffer so unmap it */ 1712 /* we are not reusing the buffer so unmap it */
1688 dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE, 1713 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
1689 DMA_FROM_DEVICE); 1714 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
1690 } 1715 }
1691 1716
1692 /* clear contents of buffer_info */ 1717 /* clear contents of buffer_info */
@@ -2079,7 +2104,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2079 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) 2104 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2080 return; 2105 return;
2081 2106
2082 if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) 2107 if ((pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
2083 return; 2108 return;
2084 2109
2085 /* if sampling is disabled do nothing */ 2110 /* if sampling is disabled do nothing */
@@ -2113,10 +2138,10 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2113 th = (struct tcphdr *)(hdr.network + hlen); 2138 th = (struct tcphdr *)(hdr.network + hlen);
2114 2139
2115 /* Due to lack of space, no more new filters can be programmed */ 2140 /* Due to lack of space, no more new filters can be programmed */
2116 if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) 2141 if (th->syn && (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
2117 return; 2142 return;
2118 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) && 2143 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
2119 (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) { 2144 (!(pf->hw_disabled_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
2120 /* HW ATR eviction will take care of removing filters on FIN 2145 /* HW ATR eviction will take care of removing filters on FIN
2121 * and RST packets. 2146 * and RST packets.
2122 */ 2147 */
@@ -2179,7 +2204,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2179 I40E_TXD_FLTR_QW1_CNTINDEX_MASK; 2204 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2180 2205
2181 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) && 2206 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
2182 (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) 2207 (!(pf->hw_disabled_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
2183 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; 2208 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2184 2209
2185 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); 2210 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
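
The Rx buffer changes above map pages with DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING (I40E_RX_DMA_ATTR) and take over ownership transfers with explicit dma_sync_single_range_for_device()/_for_cpu() calls on just the 2 KB region the hardware writes. A condensed kernel-style sketch of that buffer lifecycle follows; the helper names and simplified arguments are illustrative, only the DMA API calls themselves are real.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define RX_DMA_ATTR	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
#define RX_BUF_LEN	2048		/* stand-in for I40E_RXBUFFER_2048 */

/* Map a whole page once; with SKIP_CPU_SYNC the driver owns all syncing. */
static dma_addr_t rx_map_page(struct device *dev, struct page *page)
{
	return dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
				  DMA_FROM_DEVICE, RX_DMA_ATTR);
}

/* Before handing a fragment to hardware, sync only the region it may write. */
static void rx_give_to_hw(struct device *dev, dma_addr_t dma, unsigned int off)
{
	dma_sync_single_range_for_device(dev, dma, off, RX_BUF_LEN,
					 DMA_FROM_DEVICE);
}

/* On teardown: claim the region back for the CPU, then drop the mapping. */
static void rx_release_page(struct device *dev, dma_addr_t dma,
			    unsigned int off, struct page *page)
{
	dma_sync_single_range_for_cpu(dev, dma, off, RX_BUF_LEN,
				      DMA_FROM_DEVICE);
	dma_unmap_page_attrs(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE,
			     RX_DMA_ATTR);
	__free_pages(page, 0);
}

Skipping the implicit full-page sync at map/unmap time and syncing only the active half-page is what makes page reuse cheap on non-coherent platforms.
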
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index f80979025c01..49c7b2089d8e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -133,6 +133,9 @@ enum i40e_dyn_idx_t {
133#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256 133#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
134#define i40e_rx_desc i40e_32byte_rx_desc 134#define i40e_rx_desc i40e_32byte_rx_desc
135 135
136#define I40E_RX_DMA_ATTR \
137 (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
138
136/** 139/**
137 * i40e_test_staterr - tests bits in Rx descriptor status and error fields 140 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
138 * @rx_desc: pointer to receive descriptor (in le64 format) 141 * @rx_desc: pointer to receive descriptor (in le64 format)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 78460c52b7c4..cfe8b78dac0e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -702,10 +702,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
702 dev_info(&pf->pdev->dev, 702 dev_info(&pf->pdev->dev,
703 "Could not allocate VF broadcast filter\n"); 703 "Could not allocate VF broadcast filter\n");
704 spin_unlock_bh(&vsi->mac_filter_hash_lock); 704 spin_unlock_bh(&vsi->mac_filter_hash_lock);
705 i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), 705 wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
706 (u32)hena); 706 wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
707 i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
708 (u32)(hena >> 32));
709 } 707 }
710 708
711 /* program mac filter */ 709 /* program mac filter */
@@ -1359,7 +1357,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
1359 if (!vsi->info.pvid) 1357 if (!vsi->info.pvid)
1360 vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN; 1358 vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
1361 1359
1362 if (i40e_vf_client_capable(pf, vf->vf_id, I40E_CLIENT_IWARP) && 1360 if (i40e_vf_client_capable(pf, vf->vf_id) &&
1363 (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) { 1361 (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) {
1364 vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP; 1362 vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP;
1365 set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states); 1363 set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states);
@@ -1853,7 +1851,7 @@ error_param:
1853} 1851}
1854 1852
1855/* If the VF is not trusted restrict the number of MAC/VLAN it can program */ 1853/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
1856#define I40E_VC_MAX_MAC_ADDR_PER_VF 8 1854#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
1857#define I40E_VC_MAX_VLAN_PER_VF 8 1855#define I40E_VC_MAX_VLAN_PER_VF 8
1858 1856
1859/** 1857/**
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
index 3a423836a565..827c7a6ed0ba 100644
--- a/drivers/net/ethernet/intel/i40evf/Makefile
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -32,5 +32,5 @@
32obj-$(CONFIG_I40EVF) += i40evf.o 32obj-$(CONFIG_I40EVF) += i40evf.o
33 33
34i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \ 34i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \
35 i40e_txrx.o i40e_common.o i40e_adminq.o 35 i40e_txrx.o i40e_common.o i40e_adminq.o i40evf_client.o
36 36
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index eeb9864bc5b1..c28cb8f27243 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -132,6 +132,10 @@ enum i40e_admin_queue_opc {
132 i40e_aqc_opc_list_func_capabilities = 0x000A, 132 i40e_aqc_opc_list_func_capabilities = 0x000A,
133 i40e_aqc_opc_list_dev_capabilities = 0x000B, 133 i40e_aqc_opc_list_dev_capabilities = 0x000B,
134 134
135 /* Proxy commands */
136 i40e_aqc_opc_set_proxy_config = 0x0104,
137 i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
138
135 /* LAA */ 139 /* LAA */
136 i40e_aqc_opc_mac_address_read = 0x0107, 140 i40e_aqc_opc_mac_address_read = 0x0107,
137 i40e_aqc_opc_mac_address_write = 0x0108, 141 i40e_aqc_opc_mac_address_write = 0x0108,
@@ -139,6 +143,10 @@ enum i40e_admin_queue_opc {
139 /* PXE */ 143 /* PXE */
140 i40e_aqc_opc_clear_pxe_mode = 0x0110, 144 i40e_aqc_opc_clear_pxe_mode = 0x0110,
141 145
146 /* WoL commands */
147 i40e_aqc_opc_set_wol_filter = 0x0120,
148 i40e_aqc_opc_get_wake_reason = 0x0121,
149
142 /* internal switch commands */ 150 /* internal switch commands */
143 i40e_aqc_opc_get_switch_config = 0x0200, 151 i40e_aqc_opc_get_switch_config = 0x0200,
144 i40e_aqc_opc_add_statistics = 0x0201, 152 i40e_aqc_opc_add_statistics = 0x0201,
@@ -177,6 +185,7 @@ enum i40e_admin_queue_opc {
177 i40e_aqc_opc_remove_control_packet_filter = 0x025B, 185 i40e_aqc_opc_remove_control_packet_filter = 0x025B,
178 i40e_aqc_opc_add_cloud_filters = 0x025C, 186 i40e_aqc_opc_add_cloud_filters = 0x025C,
179 i40e_aqc_opc_remove_cloud_filters = 0x025D, 187 i40e_aqc_opc_remove_cloud_filters = 0x025D,
188 i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
180 189
181 i40e_aqc_opc_add_mirror_rule = 0x0260, 190 i40e_aqc_opc_add_mirror_rule = 0x0260,
182 i40e_aqc_opc_delete_mirror_rule = 0x0261, 191 i40e_aqc_opc_delete_mirror_rule = 0x0261,
@@ -558,6 +567,56 @@ struct i40e_aqc_clear_pxe {
558 567
559I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); 568I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
560 569
570/* Set WoL Filter (0x0120) */
571
572struct i40e_aqc_set_wol_filter {
573 __le16 filter_index;
574#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
575#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
576#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
577 I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
578
579#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
580#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
581 I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
582 __le16 cmd_flags;
583#define I40E_AQC_SET_WOL_FILTER 0x8000
584#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
585#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
586#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
587 __le16 valid_flags;
588#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
589#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
590 u8 reserved[2];
591 __le32 address_high;
592 __le32 address_low;
593};
594
595I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
596
597struct i40e_aqc_set_wol_filter_data {
598 u8 filter[128];
599 u8 mask[16];
600};
601
602I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
603
604/* Get Wake Reason (0x0121) */
605
606struct i40e_aqc_get_wake_reason_completion {
607 u8 reserved_1[2];
608 __le16 wake_reason;
609#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
610#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
611 I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
612#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
613#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
614 I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
615 u8 reserved_2[12];
616};
617
618I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
619
561/* Switch configuration commands (0x02xx) */ 620/* Switch configuration commands (0x02xx) */
562 621
563/* Used by many indirect commands that only pass an seid and a buffer in the 622/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -640,6 +699,8 @@ struct i40e_aqc_set_port_parameters {
640#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ 699#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
641#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 700#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
642 __le16 bad_frame_vsi; 701 __le16 bad_frame_vsi;
702#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
703#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
643 __le16 default_seid; /* reserved for command */ 704 __le16 default_seid; /* reserved for command */
644 u8 reserved[10]; 705 u8 reserved[10];
645}; 706};
@@ -691,6 +752,7 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
691/* Set Switch Configuration (direct 0x0205) */ 752/* Set Switch Configuration (direct 0x0205) */
692struct i40e_aqc_set_switch_config { 753struct i40e_aqc_set_switch_config {
693 __le16 flags; 754 __le16 flags;
755/* flags used for both fields below */
694#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 756#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
695#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 757#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
696 __le16 valid_flags; 758 __le16 valid_flags;
@@ -1839,11 +1901,12 @@ struct i40e_aqc_get_link_status {
1839#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02 1901#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
1840#define I40E_AQ_CONFIG_CRC_ENA 0x04 1902#define I40E_AQ_CONFIG_CRC_ENA 0x04
1841#define I40E_AQ_CONFIG_PACING_MASK 0x78 1903#define I40E_AQ_CONFIG_PACING_MASK 0x78
1842 u8 external_power_ability; 1904 u8 power_desc;
1843#define I40E_AQ_LINK_POWER_CLASS_1 0x00 1905#define I40E_AQ_LINK_POWER_CLASS_1 0x00
1844#define I40E_AQ_LINK_POWER_CLASS_2 0x01 1906#define I40E_AQ_LINK_POWER_CLASS_2 0x01
1845#define I40E_AQ_LINK_POWER_CLASS_3 0x02 1907#define I40E_AQ_LINK_POWER_CLASS_3 0x02
1846#define I40E_AQ_LINK_POWER_CLASS_4 0x03 1908#define I40E_AQ_LINK_POWER_CLASS_4 0x03
1909#define I40E_AQ_PWR_CLASS_MASK 0x03
1847 u8 reserved[4]; 1910 u8 reserved[4];
1848}; 1911};
1849 1912
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index c91fcf43ccbc..d7790c08e523 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -493,7 +493,6 @@ err:
493 **/ 493 **/
494void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) 494void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
495{ 495{
496 struct device *dev = rx_ring->dev;
497 unsigned long bi_size; 496 unsigned long bi_size;
498 u16 i; 497 u16 i;
499 498
@@ -513,7 +512,20 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
513 if (!rx_bi->page) 512 if (!rx_bi->page)
514 continue; 513 continue;
515 514
516 dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE); 515 /* Invalidate cache lines that may have been written to by
516 * device so that we avoid corrupting memory.
517 */
518 dma_sync_single_range_for_cpu(rx_ring->dev,
519 rx_bi->dma,
520 rx_bi->page_offset,
521 I40E_RXBUFFER_2048,
522 DMA_FROM_DEVICE);
523
524 /* free resources associated with mapping */
525 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
526 PAGE_SIZE,
527 DMA_FROM_DEVICE,
528 I40E_RX_DMA_ATTR);
517 __free_pages(rx_bi->page, 0); 529 __free_pages(rx_bi->page, 0);
518 530
519 rx_bi->page = NULL; 531 rx_bi->page = NULL;
@@ -642,7 +654,10 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
642 } 654 }
643 655
644 /* map page for use */ 656 /* map page for use */
645 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); 657 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
658 PAGE_SIZE,
659 DMA_FROM_DEVICE,
660 I40E_RX_DMA_ATTR);
646 661
647 /* if mapping failed free memory back to system since 662 /* if mapping failed free memory back to system since
648 * there isn't much point in holding memory we can't use 663 * there isn't much point in holding memory we can't use
@@ -702,6 +717,12 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
702 if (!i40e_alloc_mapped_page(rx_ring, bi)) 717 if (!i40e_alloc_mapped_page(rx_ring, bi))
703 goto no_buffers; 718 goto no_buffers;
704 719
720 /* sync the buffer for use by the device */
721 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
722 bi->page_offset,
723 I40E_RXBUFFER_2048,
724 DMA_FROM_DEVICE);
725
705 /* Refresh the desc even if buffer_addrs didn't change 726 /* Refresh the desc even if buffer_addrs didn't change
706 * because each write-back erases this info. 727 * because each write-back erases this info.
707 */ 728 */
@@ -1158,8 +1179,8 @@ struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
1158 rx_ring->rx_stats.page_reuse_count++; 1179 rx_ring->rx_stats.page_reuse_count++;
1159 } else { 1180 } else {
1160 /* we are not reusing the buffer so unmap it */ 1181 /* we are not reusing the buffer so unmap it */
1161 dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE, 1182 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
1162 DMA_FROM_DEVICE); 1183 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
1163 } 1184 }
1164 1185
1165 /* clear contents of buffer_info */ 1186 /* clear contents of buffer_info */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 8274ba68bd32..013512124e6a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -120,6 +120,9 @@ enum i40e_dyn_idx_t {
120#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256 120#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
121#define i40e_rx_desc i40e_32byte_rx_desc 121#define i40e_rx_desc i40e_32byte_rx_desc
122 122
123#define I40E_RX_DMA_ATTR \
124 (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
125
123/** 126/**
124 * i40e_test_staterr - tests bits in Rx descriptor status and error fields 127 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
125 * @rx_desc: pointer to receive descriptor (in le64 format) 128 * @rx_desc: pointer to receive descriptor (in le64 format)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index d38a2b2aea2b..f431fbc4a3e7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -81,7 +81,9 @@ enum i40e_virtchnl_ops {
81 I40E_VIRTCHNL_OP_GET_STATS = 15, 81 I40E_VIRTCHNL_OP_GET_STATS = 15,
82 I40E_VIRTCHNL_OP_FCOE = 16, 82 I40E_VIRTCHNL_OP_FCOE = 16,
83 I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ 83 I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
84 I40E_VIRTCHNL_OP_IWARP = 20,
84 I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, 85 I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
86 I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
85 I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23, 87 I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
86 I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24, 88 I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
87 I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, 89 I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
@@ -393,6 +395,37 @@ struct i40e_virtchnl_pf_event {
393 int severity; 395 int severity;
394}; 396};
395 397
398/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
399 * VF uses this message to request PF to map IWARP vectors to IWARP queues.
400 * The request for this originates from the VF IWARP driver through
401 * a client interface between VF LAN and VF IWARP driver.
402 * A vector could have an AEQ and CEQ attached to it although
403 * there is a single AEQ per VF IWARP instance in which case
404 * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
405 * There will never be a case where there will be multiple CEQs attached
406 * to a single vector.
407 * PF configures interrupt mapping and returns status.
408 */
409
410/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
411 * In order for us to keep the interface simple, SW will define a
412 * unique type value for AEQ.
413 */
414#define I40E_QUEUE_TYPE_PE_AEQ 0x80
415#define I40E_QUEUE_INVALID_IDX 0xFFFF
416
417struct i40e_virtchnl_iwarp_qv_info {
418 u32 v_idx; /* msix_vector */
419 u16 ceq_idx;
420 u16 aeq_idx;
421 u8 itr_idx;
422};
423
424struct i40e_virtchnl_iwarp_qvlist_info {
425 u32 num_vectors;
426 struct i40e_virtchnl_iwarp_qv_info qv_info[1];
427};
428
396/* VF reset states - these are written into the RSTAT register: 429/* VF reset states - these are written into the RSTAT register:
397 * I40E_VFGEN_RSTAT1 on the PF 430 * I40E_VFGEN_RSTAT1 on the PF
398 * I40E_VFGEN_RSTAT on the VF 431 * I40E_VFGEN_RSTAT on the VF
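
The new i40e_virtchnl_iwarp_qvlist_info above ends in a one-element qv_info[] tail, so the message sent over virtchnl has to be sized for num_vectors - 1 extra entries (the first entry is already counted in sizeof). A small, hypothetical userspace sketch of that size calculation with look-alike structs:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Mirrors the shape of the virtchnl qvlist message (illustrative copy). */
struct qv_info {
	uint32_t v_idx;
	uint16_t ceq_idx;
	uint16_t aeq_idx;
	uint8_t  itr_idx;
};

struct qvlist_info {
	uint32_t num_vectors;
	struct qv_info qv_info[1];	/* old-style variable-length tail */
};

int main(void)
{
	uint32_t n = 4;

	/* One qv_info is already inside the struct, so add (n - 1) more. */
	size_t msg_size = sizeof(struct qvlist_info) +
			  sizeof(struct qv_info) * (n - 1);

	struct qvlist_info *msg = calloc(1, msg_size);
	if (!msg)
		return 1;
	msg->num_vectors = n;
	printf("message size for %u vectors: %zu bytes\n", n, msg_size);
	free(msg);
	return 0;
}
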
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 00c42d803276..b2b48511f457 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -60,6 +60,7 @@ struct i40e_vsi {
60 int base_vector; 60 int base_vector;
61 u16 work_limit; 61 u16 work_limit;
62 u16 qs_handle; 62 u16 qs_handle;
63 void *priv; /* client driver data reference. */
63}; 64};
64 65
65/* How many Rx Buffers do we bundle into one write to the hardware ? */ 66/* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -169,6 +170,7 @@ enum i40evf_state_t {
169 170
170enum i40evf_critical_section_t { 171enum i40evf_critical_section_t {
171 __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */ 172 __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */
173 __I40EVF_IN_CLIENT_TASK,
172}; 174};
173/* make common code happy */ 175/* make common code happy */
174#define __I40E_DOWN __I40EVF_DOWN 176#define __I40E_DOWN __I40EVF_DOWN
@@ -178,6 +180,7 @@ struct i40evf_adapter {
178 struct timer_list watchdog_timer; 180 struct timer_list watchdog_timer;
179 struct work_struct reset_task; 181 struct work_struct reset_task;
180 struct work_struct adminq_task; 182 struct work_struct adminq_task;
183 struct delayed_work client_task;
181 struct delayed_work init_task; 184 struct delayed_work init_task;
182 struct i40e_q_vector *q_vectors; 185 struct i40e_q_vector *q_vectors;
183 struct list_head vlan_filter_list; 186 struct list_head vlan_filter_list;
@@ -195,7 +198,10 @@ struct i40evf_adapter {
195 u64 hw_csum_rx_error; 198 u64 hw_csum_rx_error;
196 u32 rx_desc_count; 199 u32 rx_desc_count;
197 int num_msix_vectors; 200 int num_msix_vectors;
201 int num_iwarp_msix;
202 int iwarp_base_vector;
198 u32 client_pending; 203 u32 client_pending;
204 struct i40e_client_instance *cinst;
199 struct msix_entry *msix_entries; 205 struct msix_entry *msix_entries;
200 206
201 u32 flags; 207 u32 flags;
@@ -211,8 +217,11 @@ struct i40evf_adapter {
211#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12) 217#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12)
212#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13) 218#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13)
213#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(14) 219#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(14)
214#define I40EVF_FLAG_PROMISC_ON BIT(15) 220#define I40EVF_FLAG_CLIENT_NEEDS_OPEN BIT(15)
215#define I40EVF_FLAG_ALLMULTI_ON BIT(16) 221#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE BIT(16)
222#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(17)
223#define I40EVF_FLAG_PROMISC_ON BIT(18)
224#define I40EVF_FLAG_ALLMULTI_ON BIT(19)
216/* duplicates for common code */ 225/* duplicates for common code */
217#define I40E_FLAG_FDIR_ATR_ENABLED 0 226#define I40E_FLAG_FDIR_ATR_ENABLED 0
218#define I40E_FLAG_DCB_ENABLED 0 227#define I40E_FLAG_DCB_ENABLED 0
@@ -258,10 +267,11 @@ struct i40evf_adapter {
258 bool link_up; 267 bool link_up;
259 enum i40e_aq_link_speed link_speed; 268 enum i40e_aq_link_speed link_speed;
260 enum i40e_virtchnl_ops current_op; 269 enum i40e_virtchnl_ops current_op;
261#define CLIENT_ENABLED(_a) ((_a)->vf_res ? \ 270#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \
262 (_a)->vf_res->vf_offload_flags & \ 271 (_a)->vf_res->vf_offload_flags & \
263 I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \ 272 I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \
264 0) 273 0)
274#define CLIENT_ENABLED(_a) ((_a)->cinst)
265/* RSS by the PF should be preferred over RSS via other methods. */ 275/* RSS by the PF should be preferred over RSS via other methods. */
266#define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \ 276#define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \
267 I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) 277 I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
@@ -292,6 +302,12 @@ struct i40evf_adapter {
292 302
293/* Ethtool Private Flags */ 303/* Ethtool Private Flags */
294 304
305/* lan device */
306struct i40e_device {
307 struct list_head list;
308 struct i40evf_adapter *vf;
309};
310
295/* needed by i40evf_ethtool.c */ 311/* needed by i40evf_ethtool.c */
296extern char i40evf_driver_name[]; 312extern char i40evf_driver_name[];
297extern const char i40evf_driver_version[]; 313extern const char i40evf_driver_version[];
@@ -337,4 +353,11 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
337 enum i40e_virtchnl_ops v_opcode, 353 enum i40e_virtchnl_ops v_opcode,
338 i40e_status v_retval, u8 *msg, u16 msglen); 354 i40e_status v_retval, u8 *msg, u16 msglen);
339int i40evf_config_rss(struct i40evf_adapter *adapter); 355int i40evf_config_rss(struct i40evf_adapter *adapter);
356int i40evf_lan_add_device(struct i40evf_adapter *adapter);
357int i40evf_lan_del_device(struct i40evf_adapter *adapter);
358void i40evf_client_subtask(struct i40evf_adapter *adapter);
359void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len);
360void i40evf_notify_client_l2_params(struct i40e_vsi *vsi);
361void i40evf_notify_client_open(struct i40e_vsi *vsi);
362void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset);
340#endif /* _I40EVF_H_ */ 363#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
new file mode 100644
index 000000000000..5b43e5b6e2eb
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
@@ -0,0 +1,563 @@
1#include <linux/list.h>
2#include <linux/errno.h>
3
4#include "i40evf.h"
5#include "i40e_prototype.h"
6#include "i40evf_client.h"
7
8static
9const char i40evf_client_interface_version_str[] = I40EVF_CLIENT_VERSION_STR;
10static struct i40e_client *vf_registered_client;
11static LIST_HEAD(i40evf_devices);
12static DEFINE_MUTEX(i40evf_device_mutex);
13
14static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
15 struct i40e_client *client,
16 u8 *msg, u16 len);
17
18static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
19 struct i40e_client *client,
20 struct i40e_qvlist_info *qvlist_info);
21
22static struct i40e_ops i40evf_lan_ops = {
23 .virtchnl_send = i40evf_client_virtchnl_send,
24 .setup_qvlist = i40evf_client_setup_qvlist,
25};
26
27/**
28 * i40evf_notify_client_message - call the client message receive callback
29 * @vsi: the VSI associated with this client
30 * @msg: message buffer
31 * @len: length of message
32 *
33 * If there is a client to this VSI, call the client
34 **/
35void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
36{
37 struct i40evf_adapter *adapter = vsi->back;
38 struct i40e_client_instance *cinst = adapter->cinst;
39
40 if (!vsi)
41 return;
42
43 if (!cinst || !cinst->client || !cinst->client->ops ||
44 !cinst->client->ops->virtchnl_receive) {
45 dev_dbg(&vsi->back->pdev->dev,
46 "Cannot locate client instance virtchnl_receive function\n");
47 return;
48 }
49 cinst->client->ops->virtchnl_receive(&cinst->lan_info, cinst->client,
50 msg, len);
51}
52
53/**
54 * i40evf_notify_client_l2_params - call the client notify callback
55 * @vsi: the VSI with l2 param changes
56 *
57 * If there is a client to this VSI, call the client
58 **/
59void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
60{
61 struct i40evf_adapter *adapter = vsi->back;
62 struct i40e_client_instance *cinst = adapter->cinst;
63 struct i40e_params params;
64
65 if (!vsi)
66 return;
67 memset(&params, 0, sizeof(params));
68 params.mtu = vsi->netdev->mtu;
69 params.link_up = vsi->back->link_up;
70 params.qos.prio_qos[0].qs_handle = vsi->qs_handle;
71
72 if (!cinst || !cinst->client || !cinst->client->ops ||
73 !cinst->client->ops->l2_param_change) {
74 dev_dbg(&vsi->back->pdev->dev,
75 "Cannot locate client instance l2_param_change function\n");
76 return;
77 }
78 cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
79 &params);
80}
81
82/**
83 * i40evf_notify_client_open - call the client open callback
84 * @vsi: the VSI with netdev opened
85 *
86 * If there is a client to this netdev, call the client with open
87 **/
88void i40evf_notify_client_open(struct i40e_vsi *vsi)
89{
90 struct i40evf_adapter *adapter = vsi->back;
91 struct i40e_client_instance *cinst = adapter->cinst;
92 int ret;
93
94 if (!cinst || !cinst->client || !cinst->client->ops ||
95 !cinst->client->ops->open) {
96 dev_dbg(&vsi->back->pdev->dev,
97 "Cannot locate client instance open function\n");
98 return;
99 }
100 if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state))) {
101 ret = cinst->client->ops->open(&cinst->lan_info, cinst->client);
102 if (!ret)
103 set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
104 }
105}
106
107/**
108 * i40evf_client_release_qvlist - send a message to the PF to release iwarp qv map
109 * @ldev: pointer to L2 context.
110 *
111 * Return 0 on success or < 0 on error
112 **/
113static int i40evf_client_release_qvlist(struct i40e_info *ldev)
114{
115 struct i40evf_adapter *adapter = ldev->vf;
116 i40e_status err;
117
118 if (adapter->aq_required)
119 return -EAGAIN;
120
121 err = i40e_aq_send_msg_to_pf(&adapter->hw,
122 I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
123 I40E_SUCCESS, NULL, 0, NULL);
124
125 if (err)
126 dev_err(&adapter->pdev->dev,
127 "Unable to send iWarp vector release message to PF, error %d, aq status %d\n",
128 err, adapter->hw.aq.asq_last_status);
129
130 return err;
131}
132
133/**
134 * i40evf_notify_client_close - call the client close callback
135 * @vsi: the VSI with netdev closed
136 * @reset: true when close called due to reset pending
137 *
138 * If there is a client to this netdev, call the client with close
139 **/
140void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset)
141{
142 struct i40evf_adapter *adapter = vsi->back;
143 struct i40e_client_instance *cinst = adapter->cinst;
144
145 if (!cinst || !cinst->client || !cinst->client->ops ||
146 !cinst->client->ops->close) {
147 dev_dbg(&vsi->back->pdev->dev,
148 "Cannot locate client instance close function\n");
149 return;
150 }
151 cinst->client->ops->close(&cinst->lan_info, cinst->client, reset);
152 i40evf_client_release_qvlist(&cinst->lan_info);
153 clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
154}
155
156/**
157 * i40evf_client_add_instance - add a client instance to the instance list
158 * @adapter: pointer to the board struct
159 * @client: pointer to a client struct in the client list.
160 *
161 * Returns cinst ptr on success, NULL on failure
162 **/
163static struct i40e_client_instance *
164i40evf_client_add_instance(struct i40evf_adapter *adapter)
165{
166 struct i40e_client_instance *cinst = NULL;
167 struct netdev_hw_addr *mac = NULL;
168 struct i40e_vsi *vsi = &adapter->vsi;
169 int i;
170
171 if (!vf_registered_client)
172 goto out;
173
174 if (adapter->cinst) {
175 cinst = adapter->cinst;
176 goto out;
177 }
178
179 cinst = kzalloc(sizeof(*cinst), GFP_KERNEL);
180 if (!cinst)
181 goto out;
182
183 cinst->lan_info.vf = (void *)adapter;
184 cinst->lan_info.netdev = vsi->netdev;
185 cinst->lan_info.pcidev = adapter->pdev;
186 cinst->lan_info.fid = 0;
187 cinst->lan_info.ftype = I40E_CLIENT_FTYPE_VF;
188 cinst->lan_info.hw_addr = adapter->hw.hw_addr;
189 cinst->lan_info.ops = &i40evf_lan_ops;
190 cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR;
191 cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR;
192 cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD;
193 set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
194
195 cinst->lan_info.msix_count = adapter->num_iwarp_msix;
196 cinst->lan_info.msix_entries =
197 &adapter->msix_entries[adapter->iwarp_base_vector];
198
199 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
200 cinst->lan_info.params.qos.prio_qos[i].tc = 0;
201 cinst->lan_info.params.qos.prio_qos[i].qs_handle =
202 vsi->qs_handle;
203 }
204
205 mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
206 struct netdev_hw_addr, list);
207 if (mac)
208 ether_addr_copy(cinst->lan_info.lanmac, mac->addr);
209 else
210 dev_err(&adapter->pdev->dev, "MAC address list is empty!\n");
211
212 cinst->client = vf_registered_client;
213 adapter->cinst = cinst;
214out:
215 return cinst;
216}
217
218/**
219 * i40evf_client_del_instance - removes a client instance from the list
220 * @adapter: pointer to the board struct
221 * @client: pointer to the client struct
222 *
223 **/
224static
225void i40evf_client_del_instance(struct i40evf_adapter *adapter)
226{
227 kfree(adapter->cinst);
228 adapter->cinst = NULL;
229}
230
231/**
232 * i40evf_client_subtask - client maintenance work
233 * @adapter: board private structure
234 **/
235void i40evf_client_subtask(struct i40evf_adapter *adapter)
236{
237 struct i40e_client *client = vf_registered_client;
238 struct i40e_client_instance *cinst;
239 int ret = 0;
240
241 if (adapter->state < __I40EVF_DOWN)
242 return;
243
244 /* first check client is registered */
245 if (!client)
246 return;
247
248 /* Add the client instance to the instance list */
249 cinst = i40evf_client_add_instance(adapter);
250 if (!cinst)
251 return;
252
253 dev_info(&adapter->pdev->dev, "Added instance of Client %s\n",
254 client->name);
255
256 if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
257 /* Send an Open request to the client */
258
259 if (client->ops && client->ops->open)
260 ret = client->ops->open(&cinst->lan_info, client);
261 if (!ret)
262 set_bit(__I40E_CLIENT_INSTANCE_OPENED,
263 &cinst->state);
264 else
265 /* remove client instance */
266 i40evf_client_del_instance(adapter);
267 }
268}
269
270/**
271 * i40evf_lan_add_device - add a lan device struct to the list of lan devices
272 * @adapter: pointer to the board struct
273 *
 274 * Returns 0 on success or non-0 on error
275 **/
276int i40evf_lan_add_device(struct i40evf_adapter *adapter)
277{
278 struct i40e_device *ldev;
279 int ret = 0;
280
281 mutex_lock(&i40evf_device_mutex);
282 list_for_each_entry(ldev, &i40evf_devices, list) {
283 if (ldev->vf == adapter) {
284 ret = -EEXIST;
285 goto out;
286 }
287 }
288 ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
289 if (!ldev) {
290 ret = -ENOMEM;
291 goto out;
292 }
293 ldev->vf = adapter;
294 INIT_LIST_HEAD(&ldev->list);
295 list_add(&ldev->list, &i40evf_devices);
296 dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
297 adapter->hw.bus.bus_id, adapter->hw.bus.device,
298 adapter->hw.bus.func);
299
300 /* Since in some cases register may have happened before a device gets
301 * added, we can schedule a subtask to go initiate the clients.
302 */
303 adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
304
305out:
306 mutex_unlock(&i40evf_device_mutex);
307 return ret;
308}
309
310/**
311 * i40evf_lan_del_device - removes a lan device from the device list
312 * @adapter: pointer to the board struct
313 *
314 * Returns 0 on success or non-0 on error
315 **/
316int i40evf_lan_del_device(struct i40evf_adapter *adapter)
317{
318 struct i40e_device *ldev, *tmp;
319 int ret = -ENODEV;
320
321 mutex_lock(&i40evf_device_mutex);
322 list_for_each_entry_safe(ldev, tmp, &i40evf_devices, list) {
323 if (ldev->vf == adapter) {
324 dev_info(&adapter->pdev->dev,
325 "Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
326 adapter->hw.bus.bus_id, adapter->hw.bus.device,
327 adapter->hw.bus.func);
328 list_del(&ldev->list);
329 kfree(ldev);
330 ret = 0;
331 break;
332 }
333 }
334
335 mutex_unlock(&i40evf_device_mutex);
336 return ret;
337}
338
339/**
340 * i40evf_client_release - release client specific resources
341 * @client: pointer to the registered client
342 *
343 **/
344static void i40evf_client_release(struct i40e_client *client)
345{
346 struct i40e_client_instance *cinst;
347 struct i40e_device *ldev;
348 struct i40evf_adapter *adapter;
349
350 mutex_lock(&i40evf_device_mutex);
351 list_for_each_entry(ldev, &i40evf_devices, list) {
352 adapter = ldev->vf;
353 cinst = adapter->cinst;
354 if (!cinst)
355 continue;
356 if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
357 if (client->ops && client->ops->close)
358 client->ops->close(&cinst->lan_info, client,
359 false);
360 i40evf_client_release_qvlist(&cinst->lan_info);
361 clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
362
363 dev_warn(&adapter->pdev->dev,
364 "Client %s instance closed\n", client->name);
365 }
366 /* delete the client instance */
367 i40evf_client_del_instance(adapter);
368 dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n",
369 client->name);
370 }
371 mutex_unlock(&i40evf_device_mutex);
372}
373
374/**
375 * i40evf_client_prepare - prepare client specific resources
376 * @client: pointer to the registered client
377 *
378 **/
379static void i40evf_client_prepare(struct i40e_client *client)
380{
381 struct i40e_device *ldev;
382 struct i40evf_adapter *adapter;
383
384 mutex_lock(&i40evf_device_mutex);
385 list_for_each_entry(ldev, &i40evf_devices, list) {
386 adapter = ldev->vf;
387 /* Signal the watchdog to service the client */
388 adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
389 }
390 mutex_unlock(&i40evf_device_mutex);
391}
392
393/**
394 * i40evf_client_virtchnl_send - send a message to the PF instance
395 * @ldev: pointer to L2 context.
396 * @client: Client pointer.
397 * @msg: pointer to message buffer
398 * @len: message length
399 *
400 * Return 0 on success or < 0 on error
401 **/
402static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
403 struct i40e_client *client,
404 u8 *msg, u16 len)
405{
406 struct i40evf_adapter *adapter = ldev->vf;
407 i40e_status err;
408
409 if (adapter->aq_required)
410 return -EAGAIN;
411
412 err = i40e_aq_send_msg_to_pf(&adapter->hw, I40E_VIRTCHNL_OP_IWARP,
413 I40E_SUCCESS, msg, len, NULL);
414 if (err)
415 dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n",
416 err, adapter->hw.aq.asq_last_status);
417
418 return err;
419}
420
421/**
422 * i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map
423 * @ldev: pointer to L2 context.
424 * @client: Client pointer.
425 * @qv_info: queue and vector list
426 *
427 * Return 0 on success or < 0 on error
428 **/
429static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
430 struct i40e_client *client,
431 struct i40e_qvlist_info *qvlist_info)
432{
433 struct i40e_virtchnl_iwarp_qvlist_info *v_qvlist_info;
434 struct i40evf_adapter *adapter = ldev->vf;
435 struct i40e_qv_info *qv_info;
436 i40e_status err;
437 u32 v_idx, i;
438 u32 msg_size;
439
440 if (adapter->aq_required)
441 return -EAGAIN;
442
443 /* A quick check on whether the vectors belong to the client */
444 for (i = 0; i < qvlist_info->num_vectors; i++) {
445 qv_info = &qvlist_info->qv_info[i];
446 if (!qv_info)
447 continue;
448 v_idx = qv_info->v_idx;
449 if ((v_idx >=
450 (adapter->iwarp_base_vector + adapter->num_iwarp_msix)) ||
451 (v_idx < adapter->iwarp_base_vector))
452 return -EINVAL;
453 }
454
455 v_qvlist_info = (struct i40e_virtchnl_iwarp_qvlist_info *)qvlist_info;
456 msg_size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
457 (sizeof(struct i40e_virtchnl_iwarp_qv_info) *
458 (v_qvlist_info->num_vectors - 1));
459
460 adapter->client_pending |= BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
461 err = i40e_aq_send_msg_to_pf(&adapter->hw,
462 I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
463 I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL);
464
465 if (err) {
466 dev_err(&adapter->pdev->dev,
467 "Unable to send iWarp vector config message to PF, error %d, aq status %d\n",
468 err, adapter->hw.aq.asq_last_status);
469 goto out;
470 }
471
472 err = -EBUSY;
473 for (i = 0; i < 5; i++) {
474 msleep(100);
475 if (!(adapter->client_pending &
476 BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) {
477 err = 0;
478 break;
479 }
480 }
481out:
482 return err;
483}
484
485/**
 486 * i40evf_register_client - Register an i40e client driver with the L2 driver
487 * @client: pointer to the i40e_client struct
488 *
489 * Returns 0 on success or non-0 on error
490 **/
491int i40evf_register_client(struct i40e_client *client)
492{
493 int ret = 0;
494
495 if (!client) {
496 ret = -EIO;
497 goto out;
498 }
499
500 if (strlen(client->name) == 0) {
501 pr_info("i40evf: Failed to register client with no name\n");
502 ret = -EIO;
503 goto out;
504 }
505
506 if (vf_registered_client) {
507 pr_info("i40evf: Client %s has already been registered!\n",
508 client->name);
509 ret = -EEXIST;
510 goto out;
511 }
512
513 if ((client->version.major != I40EVF_CLIENT_VERSION_MAJOR) ||
514 (client->version.minor != I40EVF_CLIENT_VERSION_MINOR)) {
515 pr_info("i40evf: Failed to register client %s due to mismatched client interface version\n",
516 client->name);
517 pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
518 client->version.major, client->version.minor,
519 client->version.build,
520 i40evf_client_interface_version_str);
521 ret = -EIO;
522 goto out;
523 }
524
525 vf_registered_client = client;
526
527 i40evf_client_prepare(client);
528
529 pr_info("i40evf: Registered client %s with return code %d\n",
530 client->name, ret);
531out:
532 return ret;
533}
534EXPORT_SYMBOL(i40evf_register_client);
535
536/**
 537 * i40evf_unregister_client - Unregister an i40e client driver with the L2 driver
538 * @client: pointer to the i40e_client struct
539 *
540 * Returns 0 on success or non-0 on error
541 **/
542int i40evf_unregister_client(struct i40e_client *client)
543{
544 int ret = 0;
545
546 /* When a unregister request comes through we would have to send
547 * a close for each of the client instances that were opened.
548 * client_release function is called to handle this.
549 */
550 i40evf_client_release(client);
551
552 if (vf_registered_client != client) {
553 pr_info("i40evf: Client %s has not been registered\n",
554 client->name);
555 ret = -ENODEV;
556 goto out;
557 }
558 vf_registered_client = NULL;
559 pr_info("i40evf: Unregistered client %s\n", client->name);
560out:
561 return ret;
562}
563EXPORT_SYMBOL(i40evf_unregister_client);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
new file mode 100644
index 000000000000..7d283c7506a5
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
@@ -0,0 +1,166 @@
1#ifndef _I40E_CLIENT_H_
2#define _I40E_CLIENT_H_
3
4#define I40EVF_CLIENT_STR_LENGTH 10
5
6/* Client interface version should be updated anytime there is a change in the
7 * existing APIs or data structures.
8 */
9#define I40EVF_CLIENT_VERSION_MAJOR 0
10#define I40EVF_CLIENT_VERSION_MINOR 01
11#define I40EVF_CLIENT_VERSION_BUILD 00
12#define I40EVF_CLIENT_VERSION_STR \
13 __stringify(I40EVF_CLIENT_VERSION_MAJOR) "." \
14 __stringify(I40EVF_CLIENT_VERSION_MINOR) "." \
15 __stringify(I40EVF_CLIENT_VERSION_BUILD)
16
17struct i40e_client_version {
18 u8 major;
19 u8 minor;
20 u8 build;
21 u8 rsvd;
22};
23
24enum i40e_client_state {
25 __I40E_CLIENT_NULL,
26 __I40E_CLIENT_REGISTERED
27};
28
29enum i40e_client_instance_state {
30 __I40E_CLIENT_INSTANCE_NONE,
31 __I40E_CLIENT_INSTANCE_OPENED,
32};
33
34struct i40e_ops;
35struct i40e_client;
36
37/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
38 * In order for us to keep the interface simple, SW will define a
39 * unique type value for AEQ.
40 */
41#define I40E_QUEUE_TYPE_PE_AEQ 0x80
42#define I40E_QUEUE_INVALID_IDX 0xFFFF
43
44struct i40e_qv_info {
45 u32 v_idx; /* msix_vector */
46 u16 ceq_idx;
47 u16 aeq_idx;
48 u8 itr_idx;
49};
50
51struct i40e_qvlist_info {
52 u32 num_vectors;
53 struct i40e_qv_info qv_info[1];
54};
55
56#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF
57
58/* set of LAN parameters useful for clients managed by LAN */
59
60/* Struct to hold per priority info */
61struct i40e_prio_qos_params {
62 u16 qs_handle; /* qs handle for prio */
63 u8 tc; /* TC mapped to prio */
64 u8 reserved;
65};
66
67#define I40E_CLIENT_MAX_USER_PRIORITY 8
68/* Struct to hold Client QoS */
69struct i40e_qos_params {
70 struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
71};
72
73struct i40e_params {
74 struct i40e_qos_params qos;
75 u16 mtu;
76 u16 link_up; /* boolean */
77};
78
79/* Structure to hold LAN device info for a client device */
80struct i40e_info {
81 struct i40e_client_version version;
82 u8 lanmac[6];
83 struct net_device *netdev;
84 struct pci_dev *pcidev;
85 u8 __iomem *hw_addr;
86 u8 fid; /* function id, PF id or VF id */
87#define I40E_CLIENT_FTYPE_PF 0
88#define I40E_CLIENT_FTYPE_VF 1
89 u8 ftype; /* function type, PF or VF */
90 void *vf; /* cast to i40evf_adapter */
91
92 /* All L2 params that could change during the life span of the device
93 * and needs to be communicated to the client when they change
94 */
95 struct i40e_params params;
96 struct i40e_ops *ops;
97
 98	u16 msix_count;			 /* number of MSI-X vectors */
 99	/* The array below is dynamically allocated based on msix_count */
100	struct msix_entry *msix_entries;
101	u16 itr_index;			 /* Which ITR index the PE driver is supposed to use */
102};
103
104struct i40e_ops {
105 /* setup_q_vector_list enables queues with a particular vector */
106 int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
107 struct i40e_qvlist_info *qv_info);
108
109 u32 (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
110 u8 *msg, u16 len);
111
112 /* If the PE Engine is unresponsive, RDMA driver can request a reset.*/
113 void (*request_reset)(struct i40e_info *ldev,
114 struct i40e_client *client);
115};
116
117struct i40e_client_ops {
118 /* Should be called from register_client() or whenever the driver is
119 * ready to create a specific client instance.
120 */
121 int (*open)(struct i40e_info *ldev, struct i40e_client *client);
122
123	/* Should be called when the netdev is unavailable or when an unregister
124	 * call comes in. If the close happens due to a reset, set the reset
125	 * bit to true.
126 */
127 void (*close)(struct i40e_info *ldev, struct i40e_client *client,
128 bool reset);
129
130	/* called when some l2 managed parameter changes - mss */
131 void (*l2_param_change)(struct i40e_info *ldev,
132 struct i40e_client *client,
133 struct i40e_params *params);
134
135 /* called when a message is received from the PF */
136 int (*virtchnl_receive)(struct i40e_info *ldev,
137 struct i40e_client *client,
138 u8 *msg, u16 len);
139};
140
141/* Client device */
142struct i40e_client_instance {
143 struct list_head list;
144 struct i40e_info lan_info;
145 struct i40e_client *client;
146 unsigned long state;
147};
148
149struct i40e_client {
150 struct list_head list; /* list of registered clients */
151 char name[I40EVF_CLIENT_STR_LENGTH];
152 struct i40e_client_version version;
153 unsigned long state; /* client state */
154 atomic_t ref_cnt; /* Count of all the client devices of this kind */
155 u32 flags;
156#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
157#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
158 u8 type;
159#define I40E_CLIENT_IWARP 0
160 struct i40e_client_ops *ops; /* client ops provided by the client */
161};
162
163/* used by clients */
164int i40evf_register_client(struct i40e_client *client);
165int i40evf_unregister_client(struct i40e_client *client);
166#endif /* _I40E_CLIENT_H_ */
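
For illustration (not part of the patch): the trailing qv_info[1] member makes struct i40e_qvlist_info variable-length, so a client would allocate it with the usual (n - 1) over-allocation before handing it to ldev->ops->setup_qvlist(). A sketch, assuming one CEQ per MSI-X vector and no AEQ mapping; the helper name is hypothetical:

	#include <linux/slab.h>
	#include "i40evf_client.h"

	static int my_setup_vectors(struct i40e_info *ldev,
				    struct i40e_client *client, u32 num)
	{
		struct i40e_qvlist_info *qvlist;
		u32 i;
		int err;

		qvlist = kzalloc(sizeof(*qvlist) +
				 (num - 1) * sizeof(struct i40e_qv_info),
				 GFP_KERNEL);
		if (!qvlist)
			return -ENOMEM;

		qvlist->num_vectors = num;
		for (i = 0; i < num; i++) {
			qvlist->qv_info[i].v_idx = i;      /* MSI-X vector */
			qvlist->qv_info[i].ceq_idx = i;    /* CEQ bound to it */
			qvlist->qv_info[i].aeq_idx = I40E_QUEUE_INVALID_IDX;
			qvlist->qv_info[i].itr_idx = ldev->itr_index;
		}

		err = ldev->ops->setup_qvlist(ldev, client, qvlist);
		kfree(qvlist);
		return err;
	}
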
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 272d600c1ed0..122efbd29a19 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -64,51 +64,50 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
64 (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev)) 64 (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
65 65
66/** 66/**
67 * i40evf_get_settings - Get Link Speed and Duplex settings 67 * i40evf_get_link_ksettings - Get Link Speed and Duplex settings
68 * @netdev: network interface device structure 68 * @netdev: network interface device structure
69 * @ecmd: ethtool command 69 * @cmd: ethtool command
70 * 70 *
71 * Reports speed/duplex settings. Because this is a VF, we don't know what 71 * Reports speed/duplex settings. Because this is a VF, we don't know what
72 * kind of link we really have, so we fake it. 72 * kind of link we really have, so we fake it.
73 **/ 73 **/
74static int i40evf_get_settings(struct net_device *netdev, 74static int i40evf_get_link_ksettings(struct net_device *netdev,
75 struct ethtool_cmd *ecmd) 75 struct ethtool_link_ksettings *cmd)
76{ 76{
77 struct i40evf_adapter *adapter = netdev_priv(netdev); 77 struct i40evf_adapter *adapter = netdev_priv(netdev);
78 78
79 ecmd->supported = 0; 79 ethtool_link_ksettings_zero_link_mode(cmd, supported);
80 ecmd->autoneg = AUTONEG_DISABLE; 80 cmd->base.autoneg = AUTONEG_DISABLE;
81 ecmd->transceiver = XCVR_DUMMY1; 81 cmd->base.port = PORT_NONE;
82 ecmd->port = PORT_NONE;
83 /* Set speed and duplex */ 82 /* Set speed and duplex */
84 switch (adapter->link_speed) { 83 switch (adapter->link_speed) {
85 case I40E_LINK_SPEED_40GB: 84 case I40E_LINK_SPEED_40GB:
86 ethtool_cmd_speed_set(ecmd, SPEED_40000); 85 cmd->base.speed = SPEED_40000;
87 break; 86 break;
88 case I40E_LINK_SPEED_25GB: 87 case I40E_LINK_SPEED_25GB:
89#ifdef SPEED_25000 88#ifdef SPEED_25000
90 ethtool_cmd_speed_set(ecmd, SPEED_25000); 89 cmd->base.speed = SPEED_25000;
91#else 90#else
92 netdev_info(netdev, 91 netdev_info(netdev,
93 "Speed is 25G, display not supported by this version of ethtool.\n"); 92 "Speed is 25G, display not supported by this version of ethtool.\n");
94#endif 93#endif
95 break; 94 break;
96 case I40E_LINK_SPEED_20GB: 95 case I40E_LINK_SPEED_20GB:
97 ethtool_cmd_speed_set(ecmd, SPEED_20000); 96 cmd->base.speed = SPEED_20000;
98 break; 97 break;
99 case I40E_LINK_SPEED_10GB: 98 case I40E_LINK_SPEED_10GB:
100 ethtool_cmd_speed_set(ecmd, SPEED_10000); 99 cmd->base.speed = SPEED_10000;
101 break; 100 break;
102 case I40E_LINK_SPEED_1GB: 101 case I40E_LINK_SPEED_1GB:
103 ethtool_cmd_speed_set(ecmd, SPEED_1000); 102 cmd->base.speed = SPEED_1000;
104 break; 103 break;
105 case I40E_LINK_SPEED_100MB: 104 case I40E_LINK_SPEED_100MB:
106 ethtool_cmd_speed_set(ecmd, SPEED_100); 105 cmd->base.speed = SPEED_100;
107 break; 106 break;
108 default: 107 default:
109 break; 108 break;
110 } 109 }
111 ecmd->duplex = DUPLEX_FULL; 110 cmd->base.duplex = DUPLEX_FULL;
112 111
113 return 0; 112 return 0;
114} 113}
@@ -643,7 +642,6 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
643} 642}
644 643
645static const struct ethtool_ops i40evf_ethtool_ops = { 644static const struct ethtool_ops i40evf_ethtool_ops = {
646 .get_settings = i40evf_get_settings,
647 .get_drvinfo = i40evf_get_drvinfo, 645 .get_drvinfo = i40evf_get_drvinfo,
648 .get_link = ethtool_op_get_link, 646 .get_link = ethtool_op_get_link,
649 .get_ringparam = i40evf_get_ringparam, 647 .get_ringparam = i40evf_get_ringparam,
@@ -663,6 +661,7 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
663 .set_rxfh = i40evf_set_rxfh, 661 .set_rxfh = i40evf_set_rxfh,
664 .get_channels = i40evf_get_channels, 662 .get_channels = i40evf_get_channels,
665 .get_rxfh_key_size = i40evf_get_rxfh_key_size, 663 .get_rxfh_key_size = i40evf_get_rxfh_key_size,
664 .get_link_ksettings = i40evf_get_link_ksettings,
666}; 665};
667 666
668/** 667/**
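
For context: the conversion above follows the standard ethtool migration, dropping the legacy get_settings()/struct ethtool_cmd pair in favour of struct ethtool_link_ksettings, where speed, duplex, port and autoneg live under cmd->base and the link-mode bitmaps are handled with the ethtool_link_ksettings_*_link_mode() helpers (the old XCVR_DUMMY1 transceiver field has no equivalent and is simply dropped). A generic sketch of the minimum a fixed-speed virtual device needs - the function and ops names are hypothetical, not from the patch:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	static int my_get_link_ksettings(struct net_device *netdev,
					 struct ethtool_link_ksettings *cmd)
	{
		ethtool_link_ksettings_zero_link_mode(cmd, supported);
		ethtool_link_ksettings_zero_link_mode(cmd, advertising);

		cmd->base.speed   = SPEED_10000;
		cmd->base.duplex  = DUPLEX_FULL;
		cmd->base.autoneg = AUTONEG_DISABLE;
		cmd->base.port    = PORT_NONE;

		return 0;
	}

	static const struct ethtool_ops my_ethtool_ops = {
		/* no .get_settings - the ethtool core services the legacy
		 * ETHTOOL_GSET ioctl through .get_link_ksettings instead */
		.get_link_ksettings = my_get_link_ksettings,
	};
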
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index f35dcaac5bb7..6d666bde9df5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -26,6 +26,7 @@
26 26
27#include "i40evf.h" 27#include "i40evf.h"
28#include "i40e_prototype.h" 28#include "i40e_prototype.h"
29#include "i40evf_client.h"
29static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter); 30static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
30static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter); 31static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
31static int i40evf_close(struct net_device *netdev); 32static int i40evf_close(struct net_device *netdev);
@@ -36,9 +37,9 @@ static const char i40evf_driver_string[] =
36 37
37#define DRV_KERN "-k" 38#define DRV_KERN "-k"
38 39
39#define DRV_VERSION_MAJOR 1 40#define DRV_VERSION_MAJOR 2
40#define DRV_VERSION_MINOR 6 41#define DRV_VERSION_MINOR 1
41#define DRV_VERSION_BUILD 27 42#define DRV_VERSION_BUILD 7
42#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 43#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
43 __stringify(DRV_VERSION_MINOR) "." \ 44 __stringify(DRV_VERSION_MINOR) "." \
44 __stringify(DRV_VERSION_BUILD) \ 45 __stringify(DRV_VERSION_BUILD) \
@@ -1058,6 +1059,8 @@ static void i40evf_up_complete(struct i40evf_adapter *adapter)
1058 i40evf_napi_enable_all(adapter); 1059 i40evf_napi_enable_all(adapter);
1059 1060
1060 adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES; 1061 adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
1062 if (CLIENT_ENABLED(adapter))
1063 adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN;
1061 mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); 1064 mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
1062} 1065}
1063 1066
@@ -1685,6 +1688,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
1685 i40evf_set_promiscuous(adapter, 0); 1688 i40evf_set_promiscuous(adapter, 0);
1686 goto watchdog_done; 1689 goto watchdog_done;
1687 } 1690 }
1691 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
1688 1692
1689 if (adapter->state == __I40EVF_RUNNING) 1693 if (adapter->state == __I40EVF_RUNNING)
1690 i40evf_request_stats(adapter); 1694 i40evf_request_stats(adapter);
@@ -1773,10 +1777,17 @@ static void i40evf_reset_task(struct work_struct *work)
1773 u32 reg_val; 1777 u32 reg_val;
1774 int i = 0, err; 1778 int i = 0, err;
1775 1779
1776 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, 1780 while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
1777 &adapter->crit_section)) 1781 &adapter->crit_section))
1778 usleep_range(500, 1000); 1782 usleep_range(500, 1000);
1779 1783 if (CLIENT_ENABLED(adapter)) {
1784 adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN |
1785 I40EVF_FLAG_CLIENT_NEEDS_CLOSE |
1786 I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
1787 I40EVF_FLAG_SERVICE_CLIENT_REQUESTED);
1788 cancel_delayed_work_sync(&adapter->client_task);
1789 i40evf_notify_client_close(&adapter->vsi, true);
1790 }
1780 i40evf_misc_irq_disable(adapter); 1791 i40evf_misc_irq_disable(adapter);
1781 if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) { 1792 if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
1782 adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED; 1793 adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
@@ -1819,6 +1830,7 @@ static void i40evf_reset_task(struct work_struct *work)
1819 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", 1830 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
1820 reg_val); 1831 reg_val);
1821 i40evf_disable_vf(adapter); 1832 i40evf_disable_vf(adapter);
1833 clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
1822 return; /* Do not attempt to reinit. It's dead, Jim. */ 1834 return; /* Do not attempt to reinit. It's dead, Jim. */
1823 } 1835 }
1824 1836
@@ -1861,9 +1873,8 @@ continue_reset:
1861 } 1873 }
1862 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; 1874 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
1863 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; 1875 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
1864 /* Open RDMA Client again */
1865 adapter->aq_required |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
1866 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); 1876 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1877 clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
1867 i40evf_misc_irq_enable(adapter); 1878 i40evf_misc_irq_enable(adapter);
1868 1879
1869 mod_timer(&adapter->watchdog_timer, jiffies + 2); 1880 mod_timer(&adapter->watchdog_timer, jiffies + 2);
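
A note on the locking here: the driver serialises its heavyweight tasks with bit locks in adapter->crit_section rather than mutexes - the reset task spins with test_and_set_bit() until it owns __I40EVF_IN_CLIENT_TASK (so it cannot run concurrently with the client task added later in this patch) and releases it with clear_bit(). A stripped-down sketch of the idiom, with hypothetical names:

	#include <linux/bitops.h>
	#include <linux/delay.h>

	#define MY_IN_TASK_BIT	0

	static unsigned long my_crit_section;

	static void my_exclusive_work(void)
	{
		/* spin until the bit is ours; whoever holds it releases it
		 * with clear_bit() */
		while (test_and_set_bit(MY_IN_TASK_BIT, &my_crit_section))
			usleep_range(500, 1000);

		/* ... work that must not run concurrently with the other task ... */

		clear_bit(MY_IN_TASK_BIT, &my_crit_section);
	}
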
@@ -1980,6 +1991,48 @@ out:
1980} 1991}
1981 1992
1982/** 1993/**
1994 * i40evf_client_task - worker thread to perform client work
1995 * @work: pointer to work_struct containing our data
1996 *
1997 * This task handles client interactions. Because client calls can be
1998 * reentrant, we can't handle them in the watchdog.
1999 **/
2000static void i40evf_client_task(struct work_struct *work)
2001{
2002 struct i40evf_adapter *adapter =
2003 container_of(work, struct i40evf_adapter, client_task.work);
2004
2005 /* If we can't get the client bit, just give up. We'll be rescheduled
2006 * later.
2007 */
2008
2009 if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section))
2010 return;
2011
2012 if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) {
2013 i40evf_client_subtask(adapter);
2014 adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
2015 goto out;
2016 }
2017 if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
2018 i40evf_notify_client_close(&adapter->vsi, false);
2019 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
2020 goto out;
2021 }
2022 if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
2023 i40evf_notify_client_open(&adapter->vsi);
2024 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
2025 goto out;
2026 }
2027 if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
2028 i40evf_notify_client_l2_params(&adapter->vsi);
2029 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
2030 }
2031out:
2032 clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
2033}
2034
2035/**
1983 * i40evf_free_all_tx_resources - Free Tx Resources for All Queues 2036 * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
1984 * @adapter: board private structure 2037 * @adapter: board private structure
1985 * 2038 *
@@ -2148,6 +2201,8 @@ static int i40evf_close(struct net_device *netdev)
2148 2201
2149 2202
2150 set_bit(__I40E_DOWN, &adapter->vsi.state); 2203 set_bit(__I40E_DOWN, &adapter->vsi.state);
2204 if (CLIENT_ENABLED(adapter))
2205 adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
2151 2206
2152 i40evf_down(adapter); 2207 i40evf_down(adapter);
2153 adapter->state = __I40EVF_DOWN_PENDING; 2208 adapter->state = __I40EVF_DOWN_PENDING;
@@ -2188,6 +2243,10 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
2188 struct i40evf_adapter *adapter = netdev_priv(netdev); 2243 struct i40evf_adapter *adapter = netdev_priv(netdev);
2189 2244
2190 netdev->mtu = new_mtu; 2245 netdev->mtu = new_mtu;
2246 if (CLIENT_ENABLED(adapter)) {
2247 i40evf_notify_client_l2_params(&adapter->vsi);
2248 adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
2249 }
2191 adapter->flags |= I40EVF_FLAG_RESET_NEEDED; 2250 adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
2192 schedule_work(&adapter->reset_task); 2251 schedule_work(&adapter->reset_task);
2193 2252
@@ -2581,6 +2640,12 @@ static void i40evf_init_task(struct work_struct *work)
2581 adapter->netdev_registered = true; 2640 adapter->netdev_registered = true;
2582 2641
2583 netif_tx_stop_all_queues(netdev); 2642 netif_tx_stop_all_queues(netdev);
2643 if (CLIENT_ALLOWED(adapter)) {
2644 err = i40evf_lan_add_device(adapter);
2645 if (err)
2646 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
2647 err);
2648 }
2584 2649
2585 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); 2650 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
2586 if (netdev->features & NETIF_F_GRO) 2651 if (netdev->features & NETIF_F_GRO)
@@ -2745,6 +2810,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2745 INIT_WORK(&adapter->reset_task, i40evf_reset_task); 2810 INIT_WORK(&adapter->reset_task, i40evf_reset_task);
2746 INIT_WORK(&adapter->adminq_task, i40evf_adminq_task); 2811 INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
2747 INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task); 2812 INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
2813 INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task);
2748 INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task); 2814 INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
2749 schedule_delayed_work(&adapter->init_task, 2815 schedule_delayed_work(&adapter->init_task,
2750 msecs_to_jiffies(5 * (pdev->devfn & 0x07))); 2816 msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
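
The client task is an ordinary delayed work item, so it follows the usual lifecycle: initialised once here in probe, kicked from the watchdog with schedule_delayed_work(), and cancelled synchronously in the reset and remove paths before the adapter goes away. A generic sketch of that lifecycle (structure and function names are hypothetical):

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	struct my_dev {
		struct delayed_work client_task;
	};

	static void my_client_task(struct work_struct *work)
	{
		struct my_dev *dev = container_of(work, struct my_dev,
						  client_task.work);

		/* ... service whatever request flags are pending for dev ... */
		(void)dev;
	}

	static void my_probe(struct my_dev *dev)
	{
		INIT_DELAYED_WORK(&dev->client_task, my_client_task);
	}

	static void my_kick(struct my_dev *dev)
	{
		schedule_delayed_work(&dev->client_task, msecs_to_jiffies(5));
	}

	static void my_remove(struct my_dev *dev)
	{
		cancel_delayed_work_sync(&dev->client_task);
	}
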
@@ -2857,14 +2923,21 @@ static void i40evf_remove(struct pci_dev *pdev)
2857 struct i40evf_adapter *adapter = netdev_priv(netdev); 2923 struct i40evf_adapter *adapter = netdev_priv(netdev);
2858 struct i40evf_mac_filter *f, *ftmp; 2924 struct i40evf_mac_filter *f, *ftmp;
2859 struct i40e_hw *hw = &adapter->hw; 2925 struct i40e_hw *hw = &adapter->hw;
2926 int err;
2860 2927
2861 cancel_delayed_work_sync(&adapter->init_task); 2928 cancel_delayed_work_sync(&adapter->init_task);
2862 cancel_work_sync(&adapter->reset_task); 2929 cancel_work_sync(&adapter->reset_task);
2863 2930 cancel_delayed_work_sync(&adapter->client_task);
2864 if (adapter->netdev_registered) { 2931 if (adapter->netdev_registered) {
2865 unregister_netdev(netdev); 2932 unregister_netdev(netdev);
2866 adapter->netdev_registered = false; 2933 adapter->netdev_registered = false;
2867 } 2934 }
2935 if (CLIENT_ALLOWED(adapter)) {
2936 err = i40evf_lan_del_device(adapter);
2937 if (err)
2938 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
2939 err);
2940 }
2868 2941
2869 /* Shut down all the garbage mashers on the detention level */ 2942 /* Shut down all the garbage mashers on the detention level */
2870 adapter->state = __I40EVF_REMOVE; 2943 adapter->state = __I40EVF_REMOVE;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index bee58af390e1..a2a7354426a3 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -26,6 +26,7 @@
26 26
27#include "i40evf.h" 27#include "i40evf.h"
28#include "i40e_prototype.h" 28#include "i40e_prototype.h"
29#include "i40evf_client.h"
29 30
30/* busy wait delay in msec */ 31/* busy wait delay in msec */
31#define I40EVF_BUSY_WAIT_DELAY 10 32#define I40EVF_BUSY_WAIT_DELAY 10
@@ -999,6 +1000,16 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
999 if (v_opcode != adapter->current_op) 1000 if (v_opcode != adapter->current_op)
1000 return; 1001 return;
1001 break; 1002 break;
1003 case I40E_VIRTCHNL_OP_IWARP:
1004 /* Gobble zero-length replies from the PF. They indicate that
1005 * a previous message was received OK, and the client doesn't
1006 * care about that.
1007 */
1008 if (msglen && CLIENT_ENABLED(adapter))
1009 i40evf_notify_client_message(&adapter->vsi,
1010 msg, msglen);
1011 break;
1012
1002 case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 1013 case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
1003 adapter->client_pending &= 1014 adapter->client_pending &=
1004 ~(BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP)); 1015 ~(BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
@@ -1014,7 +1025,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
1014 } 1025 }
1015 break; 1026 break;
1016 default: 1027 default:
1017 if (v_opcode != adapter->current_op) 1028 if (adapter->current_op && (v_opcode != adapter->current_op))
1018 dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", 1029 dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
1019 adapter->current_op, v_opcode); 1030 adapter->current_op, v_opcode);
1020 break; 1031 break;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index acbc3abe2ddd..dc6e2980718f 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -142,12 +142,24 @@ struct vf_data_storage {
142/* Supported Rx Buffer Sizes */ 142/* Supported Rx Buffer Sizes */
143#define IGB_RXBUFFER_256 256 143#define IGB_RXBUFFER_256 256
144#define IGB_RXBUFFER_2048 2048 144#define IGB_RXBUFFER_2048 2048
145#define IGB_RXBUFFER_3072 3072
145#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 146#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
146#define IGB_RX_BUFSZ IGB_RXBUFFER_2048 147#define IGB_TS_HDR_LEN 16
148
149#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
150#if (PAGE_SIZE < 8192)
151#define IGB_MAX_FRAME_BUILD_SKB \
152 (SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048) - IGB_SKB_PAD - IGB_TS_HDR_LEN)
153#else
154#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_2048 - IGB_TS_HDR_LEN)
155#endif
147 156
148/* How many Rx Buffers do we bundle into one write to the hardware ? */ 157/* How many Rx Buffers do we bundle into one write to the hardware ? */
149#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ 158#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
150 159
160#define IGB_RX_DMA_ATTR \
161 (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
162
151#define AUTO_ALL_MODES 0 163#define AUTO_ALL_MODES 0
152#define IGB_EEPROM_APME 0x0400 164#define IGB_EEPROM_APME 0x0400
153 165
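
For reference, how the new IGB_MAX_FRAME_BUILD_SKB bound falls out on small-page systems; the concrete byte counts depend on the kernel configuration, so only the accounting is shown:

	/*
	 * With PAGE_SIZE < 8192 each Rx page is split into two 2048-byte
	 * halves, and a frame only qualifies for the build_skb() path if it
	 * fits in one half after reserving:
	 *
	 *   SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048)
	 *       = 2048 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
	 *   IGB_SKB_PAD    = NET_SKB_PAD + NET_IP_ALIGN   (build_skb headroom)
	 *   IGB_TS_HDR_LEN = 16                            (possible HW timestamp)
	 *
	 *   IGB_MAX_FRAME_BUILD_SKB
	 *       = SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048) - IGB_SKB_PAD - IGB_TS_HDR_LEN
	 *
	 * Larger frames push the ring onto the new 3072-byte buffer layout
	 * (or the legacy path when the legacy-rx private flag is set).
	 */
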
@@ -301,12 +313,51 @@ struct igb_q_vector {
301}; 313};
302 314
303enum e1000_ring_flags_t { 315enum e1000_ring_flags_t {
316 IGB_RING_FLAG_RX_3K_BUFFER,
317 IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
304 IGB_RING_FLAG_RX_SCTP_CSUM, 318 IGB_RING_FLAG_RX_SCTP_CSUM,
305 IGB_RING_FLAG_RX_LB_VLAN_BSWAP, 319 IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
306 IGB_RING_FLAG_TX_CTX_IDX, 320 IGB_RING_FLAG_TX_CTX_IDX,
307 IGB_RING_FLAG_TX_DETECT_HANG 321 IGB_RING_FLAG_TX_DETECT_HANG
308}; 322};
309 323
324#define ring_uses_large_buffer(ring) \
325 test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
326#define set_ring_uses_large_buffer(ring) \
327 set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
328#define clear_ring_uses_large_buffer(ring) \
329 clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
330
331#define ring_uses_build_skb(ring) \
332 test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
333#define set_ring_build_skb_enabled(ring) \
334 set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
335#define clear_ring_build_skb_enabled(ring) \
336 clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
337
338static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
339{
340#if (PAGE_SIZE < 8192)
341 if (ring_uses_large_buffer(ring))
342 return IGB_RXBUFFER_3072;
343
344 if (ring_uses_build_skb(ring))
345 return IGB_MAX_FRAME_BUILD_SKB + IGB_TS_HDR_LEN;
346#endif
347 return IGB_RXBUFFER_2048;
348}
349
350static inline unsigned int igb_rx_pg_order(struct igb_ring *ring)
351{
352#if (PAGE_SIZE < 8192)
353 if (ring_uses_large_buffer(ring))
354 return 1;
355#endif
356 return 0;
357}
358
359#define igb_rx_pg_size(_ring) (PAGE_SIZE << igb_rx_pg_order(_ring))
360
310#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) 361#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
311 362
312#define IGB_RX_DESC(R, i) \ 363#define IGB_RX_DESC(R, i) \
@@ -545,6 +596,7 @@ struct igb_adapter {
545#define IGB_FLAG_HAS_MSIX BIT(13) 596#define IGB_FLAG_HAS_MSIX BIT(13)
546#define IGB_FLAG_EEE BIT(14) 597#define IGB_FLAG_EEE BIT(14)
547#define IGB_FLAG_VLAN_PROMISC BIT(15) 598#define IGB_FLAG_VLAN_PROMISC BIT(15)
599#define IGB_FLAG_RX_LEGACY BIT(16)
548 600
549/* Media Auto Sense */ 601/* Media Auto Sense */
550#define IGB_MAS_ENABLE_0 0X0001 602#define IGB_MAS_ENABLE_0 0X0001
@@ -558,7 +610,6 @@ struct igb_adapter {
558#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ 610#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
559 611
560#define IGB_82576_TSYNC_SHIFT 19 612#define IGB_82576_TSYNC_SHIFT 19
561#define IGB_TS_HDR_LEN 16
562enum e1000_state_t { 613enum e1000_state_t {
563 __IGB_TESTING, 614 __IGB_TESTING,
564 __IGB_RESETTING, 615 __IGB_RESETTING,
@@ -591,7 +642,6 @@ void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
591void igb_setup_tctl(struct igb_adapter *); 642void igb_setup_tctl(struct igb_adapter *);
592void igb_setup_rctl(struct igb_adapter *); 643void igb_setup_rctl(struct igb_adapter *);
593netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); 644netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
594void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *);
595void igb_alloc_rx_buffers(struct igb_ring *, u16); 645void igb_alloc_rx_buffers(struct igb_ring *, u16);
596void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *); 646void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
597bool igb_has_link(struct igb_adapter *adapter); 647bool igb_has_link(struct igb_adapter *adapter);
@@ -604,7 +654,7 @@ void igb_ptp_reset(struct igb_adapter *adapter);
604void igb_ptp_suspend(struct igb_adapter *adapter); 654void igb_ptp_suspend(struct igb_adapter *adapter);
605void igb_ptp_rx_hang(struct igb_adapter *adapter); 655void igb_ptp_rx_hang(struct igb_adapter *adapter);
606void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); 656void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
607void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, 657void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
608 struct sk_buff *skb); 658 struct sk_buff *skb);
609int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); 659int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
610int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); 660int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 737b664d004c..797b9daba224 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -144,6 +144,13 @@ static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
144}; 144};
145#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) 145#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
146 146
147static const char igb_priv_flags_strings[][ETH_GSTRING_LEN] = {
148#define IGB_PRIV_FLAGS_LEGACY_RX BIT(0)
149 "legacy-rx",
150};
151
152#define IGB_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igb_priv_flags_strings)
153
147static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 154static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
148{ 155{
149 struct igb_adapter *adapter = netdev_priv(netdev); 156 struct igb_adapter *adapter = netdev_priv(netdev);
@@ -852,6 +859,8 @@ static void igb_get_drvinfo(struct net_device *netdev,
852 sizeof(drvinfo->fw_version)); 859 sizeof(drvinfo->fw_version));
853 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 860 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
854 sizeof(drvinfo->bus_info)); 861 sizeof(drvinfo->bus_info));
862
863 drvinfo->n_priv_flags = IGB_PRIV_FLAGS_STR_LEN;
855} 864}
856 865
857static void igb_get_ringparam(struct net_device *netdev, 866static void igb_get_ringparam(struct net_device *netdev,
@@ -1811,14 +1820,14 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
1811 tx_ntc = tx_ring->next_to_clean; 1820 tx_ntc = tx_ring->next_to_clean;
1812 rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); 1821 rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
1813 1822
1814 while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { 1823 while (rx_desc->wb.upper.length) {
1815 /* check Rx buffer */ 1824 /* check Rx buffer */
1816 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; 1825 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
1817 1826
1818 /* sync Rx buffer for CPU read */ 1827 /* sync Rx buffer for CPU read */
1819 dma_sync_single_for_cpu(rx_ring->dev, 1828 dma_sync_single_for_cpu(rx_ring->dev,
1820 rx_buffer_info->dma, 1829 rx_buffer_info->dma,
1821 IGB_RX_BUFSZ, 1830 size,
1822 DMA_FROM_DEVICE); 1831 DMA_FROM_DEVICE);
1823 1832
1824 /* verify contents of skb */ 1833 /* verify contents of skb */
@@ -1828,12 +1837,21 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
1828 /* sync Rx buffer for device write */ 1837 /* sync Rx buffer for device write */
1829 dma_sync_single_for_device(rx_ring->dev, 1838 dma_sync_single_for_device(rx_ring->dev,
1830 rx_buffer_info->dma, 1839 rx_buffer_info->dma,
1831 IGB_RX_BUFSZ, 1840 size,
1832 DMA_FROM_DEVICE); 1841 DMA_FROM_DEVICE);
1833 1842
1834 /* unmap buffer on Tx side */ 1843 /* unmap buffer on Tx side */
1835 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; 1844 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
1836 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 1845
1846 /* Free all the Tx ring sk_buffs */
1847 dev_kfree_skb_any(tx_buffer_info->skb);
1848
1849 /* unmap skb header data */
1850 dma_unmap_single(tx_ring->dev,
1851 dma_unmap_addr(tx_buffer_info, dma),
1852 dma_unmap_len(tx_buffer_info, len),
1853 DMA_TO_DEVICE);
1854 dma_unmap_len_set(tx_buffer_info, len, 0);
1837 1855
1838 /* increment Rx/Tx next to clean counters */ 1856 /* increment Rx/Tx next to clean counters */
1839 rx_ntc++; 1857 rx_ntc++;
@@ -2271,6 +2289,8 @@ static int igb_get_sset_count(struct net_device *netdev, int sset)
2271 return IGB_STATS_LEN; 2289 return IGB_STATS_LEN;
2272 case ETH_SS_TEST: 2290 case ETH_SS_TEST:
2273 return IGB_TEST_LEN; 2291 return IGB_TEST_LEN;
2292 case ETH_SS_PRIV_FLAGS:
2293 return IGB_PRIV_FLAGS_STR_LEN;
2274 default: 2294 default:
2275 return -ENOTSUPP; 2295 return -ENOTSUPP;
2276 } 2296 }
@@ -2376,6 +2396,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2376 } 2396 }
2377 /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ 2397 /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
2378 break; 2398 break;
2399 case ETH_SS_PRIV_FLAGS:
2400 memcpy(data, igb_priv_flags_strings,
2401 IGB_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
2402 break;
2379 } 2403 }
2380} 2404}
2381 2405
@@ -3388,6 +3412,37 @@ static int igb_set_channels(struct net_device *netdev,
3388 return 0; 3412 return 0;
3389} 3413}
3390 3414
3415static u32 igb_get_priv_flags(struct net_device *netdev)
3416{
3417 struct igb_adapter *adapter = netdev_priv(netdev);
3418 u32 priv_flags = 0;
3419
3420 if (adapter->flags & IGB_FLAG_RX_LEGACY)
3421 priv_flags |= IGB_PRIV_FLAGS_LEGACY_RX;
3422
3423 return priv_flags;
3424}
3425
3426static int igb_set_priv_flags(struct net_device *netdev, u32 priv_flags)
3427{
3428 struct igb_adapter *adapter = netdev_priv(netdev);
3429 unsigned int flags = adapter->flags;
3430
3431 flags &= ~IGB_FLAG_RX_LEGACY;
3432 if (priv_flags & IGB_PRIV_FLAGS_LEGACY_RX)
3433 flags |= IGB_FLAG_RX_LEGACY;
3434
3435 if (flags != adapter->flags) {
3436 adapter->flags = flags;
3437
3438 /* reset interface to repopulate queues */
3439 if (netif_running(netdev))
3440 igb_reinit_locked(adapter);
3441 }
3442
3443 return 0;
3444}
3445
3391static const struct ethtool_ops igb_ethtool_ops = { 3446static const struct ethtool_ops igb_ethtool_ops = {
3392 .get_settings = igb_get_settings, 3447 .get_settings = igb_get_settings,
3393 .set_settings = igb_set_settings, 3448 .set_settings = igb_set_settings,
@@ -3426,6 +3481,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
3426 .set_rxfh = igb_set_rxfh, 3481 .set_rxfh = igb_set_rxfh,
3427 .get_channels = igb_get_channels, 3482 .get_channels = igb_get_channels,
3428 .set_channels = igb_set_channels, 3483 .set_channels = igb_set_channels,
3484 .get_priv_flags = igb_get_priv_flags,
3485 .set_priv_flags = igb_set_priv_flags,
3429 .begin = igb_ethtool_begin, 3486 .begin = igb_ethtool_begin,
3430 .complete = igb_ethtool_complete, 3487 .complete = igb_ethtool_complete,
3431}; 3488};
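
Once the two callbacks are wired into igb_ethtool_ops above, the flag is reachable from userspace through ethtool's private-flags interface - e.g. "ethtool --show-priv-flags eth0" and "ethtool --set-priv-flags eth0 legacy-rx on", assuming an interface named eth0 and an ethtool build with private-flag support. Toggling it while the interface is up goes through igb_reinit_locked(), so the rings are torn down and repopulated with the requested Rx buffer scheme.
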
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index be456bae8169..26a821fcd220 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -554,7 +554,7 @@ rx_ring_summary:
554 16, 1, 554 16, 1,
555 page_address(buffer_info->page) + 555 page_address(buffer_info->page) +
556 buffer_info->page_offset, 556 buffer_info->page_offset,
557 IGB_RX_BUFSZ, true); 557 igb_rx_bufsz(rx_ring), true);
558 } 558 }
559 } 559 }
560 } 560 }
@@ -3293,7 +3293,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
3293 3293
3294 size = sizeof(struct igb_tx_buffer) * tx_ring->count; 3294 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3295 3295
3296 tx_ring->tx_buffer_info = vzalloc(size); 3296 tx_ring->tx_buffer_info = vmalloc(size);
3297 if (!tx_ring->tx_buffer_info) 3297 if (!tx_ring->tx_buffer_info)
3298 goto err; 3298 goto err;
3299 3299
@@ -3404,6 +3404,10 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
3404 txdctl |= IGB_TX_HTHRESH << 8; 3404 txdctl |= IGB_TX_HTHRESH << 8;
3405 txdctl |= IGB_TX_WTHRESH << 16; 3405 txdctl |= IGB_TX_WTHRESH << 16;
3406 3406
3407 /* reinitialize tx_buffer_info */
3408 memset(ring->tx_buffer_info, 0,
3409 sizeof(struct igb_tx_buffer) * ring->count);
3410
3407 txdctl |= E1000_TXDCTL_QUEUE_ENABLE; 3411 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
3408 wr32(E1000_TXDCTL(reg_idx), txdctl); 3412 wr32(E1000_TXDCTL(reg_idx), txdctl);
3409} 3413}
@@ -3435,7 +3439,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
3435 3439
3436 size = sizeof(struct igb_rx_buffer) * rx_ring->count; 3440 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3437 3441
3438 rx_ring->rx_buffer_info = vzalloc(size); 3442 rx_ring->rx_buffer_info = vmalloc(size);
3439 if (!rx_ring->rx_buffer_info) 3443 if (!rx_ring->rx_buffer_info)
3440 goto err; 3444 goto err;
3441 3445
@@ -3720,6 +3724,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
3720 struct igb_ring *ring) 3724 struct igb_ring *ring)
3721{ 3725{
3722 struct e1000_hw *hw = &adapter->hw; 3726 struct e1000_hw *hw = &adapter->hw;
3727 union e1000_adv_rx_desc *rx_desc;
3723 u64 rdba = ring->dma; 3728 u64 rdba = ring->dma;
3724 int reg_idx = ring->reg_idx; 3729 int reg_idx = ring->reg_idx;
3725 u32 srrctl = 0, rxdctl = 0; 3730 u32 srrctl = 0, rxdctl = 0;
@@ -3741,7 +3746,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
3741 3746
3742 /* set descriptor configuration */ 3747 /* set descriptor configuration */
3743 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; 3748 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
3744 srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT; 3749 if (ring_uses_large_buffer(ring))
3750 srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
3751 else
3752 srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
3745 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; 3753 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
3746 if (hw->mac.type >= e1000_82580) 3754 if (hw->mac.type >= e1000_82580)
3747 srrctl |= E1000_SRRCTL_TIMESTAMP; 3755 srrctl |= E1000_SRRCTL_TIMESTAMP;
@@ -3758,11 +3766,39 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
3758 rxdctl |= IGB_RX_HTHRESH << 8; 3766 rxdctl |= IGB_RX_HTHRESH << 8;
3759 rxdctl |= IGB_RX_WTHRESH << 16; 3767 rxdctl |= IGB_RX_WTHRESH << 16;
3760 3768
3769 /* initialize rx_buffer_info */
3770 memset(ring->rx_buffer_info, 0,
3771 sizeof(struct igb_rx_buffer) * ring->count);
3772
3773 /* initialize Rx descriptor 0 */
3774 rx_desc = IGB_RX_DESC(ring, 0);
3775 rx_desc->wb.upper.length = 0;
3776
3761 /* enable receive descriptor fetching */ 3777 /* enable receive descriptor fetching */
3762 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; 3778 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
3763 wr32(E1000_RXDCTL(reg_idx), rxdctl); 3779 wr32(E1000_RXDCTL(reg_idx), rxdctl);
3764} 3780}
3765 3781
3782static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
3783 struct igb_ring *rx_ring)
3784{
3785 /* set build_skb and buffer size flags */
3786 clear_ring_build_skb_enabled(rx_ring);
3787 clear_ring_uses_large_buffer(rx_ring);
3788
3789 if (adapter->flags & IGB_FLAG_RX_LEGACY)
3790 return;
3791
3792 set_ring_build_skb_enabled(rx_ring);
3793
3794#if (PAGE_SIZE < 8192)
3795 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
3796 return;
3797
3798 set_ring_uses_large_buffer(rx_ring);
3799#endif
3800}
3801
3766/** 3802/**
3767 * igb_configure_rx - Configure receive Unit after Reset 3803 * igb_configure_rx - Configure receive Unit after Reset
3768 * @adapter: board private structure 3804 * @adapter: board private structure
@@ -3780,8 +3816,12 @@ static void igb_configure_rx(struct igb_adapter *adapter)
3780 /* Setup the HW Rx Head and Tail Descriptor Pointers and 3816 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3781 * the Base and Length of the Rx Descriptor Ring 3817 * the Base and Length of the Rx Descriptor Ring
3782 */ 3818 */
3783 for (i = 0; i < adapter->num_rx_queues; i++) 3819 for (i = 0; i < adapter->num_rx_queues; i++) {
3784 igb_configure_rx_ring(adapter, adapter->rx_ring[i]); 3820 struct igb_ring *rx_ring = adapter->rx_ring[i];
3821
3822 igb_set_rx_buffer_len(adapter, rx_ring);
3823 igb_configure_rx_ring(adapter, rx_ring);
3824 }
3785} 3825}
3786 3826
3787/** 3827/**
@@ -3822,55 +3862,63 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3822 igb_free_tx_resources(adapter->tx_ring[i]); 3862 igb_free_tx_resources(adapter->tx_ring[i]);
3823} 3863}
3824 3864
3825void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3826 struct igb_tx_buffer *tx_buffer)
3827{
3828 if (tx_buffer->skb) {
3829 dev_kfree_skb_any(tx_buffer->skb);
3830 if (dma_unmap_len(tx_buffer, len))
3831 dma_unmap_single(ring->dev,
3832 dma_unmap_addr(tx_buffer, dma),
3833 dma_unmap_len(tx_buffer, len),
3834 DMA_TO_DEVICE);
3835 } else if (dma_unmap_len(tx_buffer, len)) {
3836 dma_unmap_page(ring->dev,
3837 dma_unmap_addr(tx_buffer, dma),
3838 dma_unmap_len(tx_buffer, len),
3839 DMA_TO_DEVICE);
3840 }
3841 tx_buffer->next_to_watch = NULL;
3842 tx_buffer->skb = NULL;
3843 dma_unmap_len_set(tx_buffer, len, 0);
3844 /* buffer_info must be completely set up in the transmit path */
3845}
3846
3847/** 3865/**
3848 * igb_clean_tx_ring - Free Tx Buffers 3866 * igb_clean_tx_ring - Free Tx Buffers
3849 * @tx_ring: ring to be cleaned 3867 * @tx_ring: ring to be cleaned
3850 **/ 3868 **/
3851static void igb_clean_tx_ring(struct igb_ring *tx_ring) 3869static void igb_clean_tx_ring(struct igb_ring *tx_ring)
3852{ 3870{
3853 struct igb_tx_buffer *buffer_info; 3871 u16 i = tx_ring->next_to_clean;
3854 unsigned long size; 3872 struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
3855 u16 i;
3856 3873
3857 if (!tx_ring->tx_buffer_info) 3874 while (i != tx_ring->next_to_use) {
3858 return; 3875 union e1000_adv_tx_desc *eop_desc, *tx_desc;
3859 /* Free all the Tx ring sk_buffs */
3860 3876
3861 for (i = 0; i < tx_ring->count; i++) { 3877 /* Free all the Tx ring sk_buffs */
3862 buffer_info = &tx_ring->tx_buffer_info[i]; 3878 dev_kfree_skb_any(tx_buffer->skb);
3863 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
3864 }
3865 3879
3866 netdev_tx_reset_queue(txring_txq(tx_ring)); 3880 /* unmap skb header data */
3881 dma_unmap_single(tx_ring->dev,
3882 dma_unmap_addr(tx_buffer, dma),
3883 dma_unmap_len(tx_buffer, len),
3884 DMA_TO_DEVICE);
3867 3885
3868 size = sizeof(struct igb_tx_buffer) * tx_ring->count; 3886 /* check for eop_desc to determine the end of the packet */
3869 memset(tx_ring->tx_buffer_info, 0, size); 3887 eop_desc = tx_buffer->next_to_watch;
3888 tx_desc = IGB_TX_DESC(tx_ring, i);
3889
3890 /* unmap remaining buffers */
3891 while (tx_desc != eop_desc) {
3892 tx_buffer++;
3893 tx_desc++;
3894 i++;
3895 if (unlikely(i == tx_ring->count)) {
3896 i = 0;
3897 tx_buffer = tx_ring->tx_buffer_info;
3898 tx_desc = IGB_TX_DESC(tx_ring, 0);
3899 }
3900
3901 /* unmap any remaining paged data */
3902 if (dma_unmap_len(tx_buffer, len))
3903 dma_unmap_page(tx_ring->dev,
3904 dma_unmap_addr(tx_buffer, dma),
3905 dma_unmap_len(tx_buffer, len),
3906 DMA_TO_DEVICE);
3907 }
3870 3908
3871 /* Zero out the descriptor ring */ 3909 /* move us one more past the eop_desc for start of next pkt */
3872 memset(tx_ring->desc, 0, tx_ring->size); 3910 tx_buffer++;
3911 i++;
3912 if (unlikely(i == tx_ring->count)) {
3913 i = 0;
3914 tx_buffer = tx_ring->tx_buffer_info;
3915 }
3916 }
3873 3917
3918 /* reset BQL for queue */
3919 netdev_tx_reset_queue(txring_txq(tx_ring));
3920
3921 /* reset next_to_use and next_to_clean */
3874 tx_ring->next_to_use = 0; 3922 tx_ring->next_to_use = 0;
3875 tx_ring->next_to_clean = 0; 3923 tx_ring->next_to_clean = 0;
3876} 3924}
@@ -3932,50 +3980,39 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3932 **/ 3980 **/
3933static void igb_clean_rx_ring(struct igb_ring *rx_ring) 3981static void igb_clean_rx_ring(struct igb_ring *rx_ring)
3934{ 3982{
3935 unsigned long size; 3983 u16 i = rx_ring->next_to_clean;
3936 u16 i;
3937 3984
3938 if (rx_ring->skb) 3985 if (rx_ring->skb)
3939 dev_kfree_skb(rx_ring->skb); 3986 dev_kfree_skb(rx_ring->skb);
3940 rx_ring->skb = NULL; 3987 rx_ring->skb = NULL;
3941 3988
3942 if (!rx_ring->rx_buffer_info)
3943 return;
3944
3945 /* Free all the Rx ring sk_buffs */ 3989 /* Free all the Rx ring sk_buffs */
3946 for (i = 0; i < rx_ring->count; i++) { 3990 while (i != rx_ring->next_to_alloc) {
3947 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; 3991 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
3948 3992
3949 if (!buffer_info->page)
3950 continue;
3951
3952 /* Invalidate cache lines that may have been written to by 3993 /* Invalidate cache lines that may have been written to by
3953 * device so that we avoid corrupting memory. 3994 * device so that we avoid corrupting memory.
3954 */ 3995 */
3955 dma_sync_single_range_for_cpu(rx_ring->dev, 3996 dma_sync_single_range_for_cpu(rx_ring->dev,
3956 buffer_info->dma, 3997 buffer_info->dma,
3957 buffer_info->page_offset, 3998 buffer_info->page_offset,
3958 IGB_RX_BUFSZ, 3999 igb_rx_bufsz(rx_ring),
3959 DMA_FROM_DEVICE); 4000 DMA_FROM_DEVICE);
3960 4001
3961 /* free resources associated with mapping */ 4002 /* free resources associated with mapping */
3962 dma_unmap_page_attrs(rx_ring->dev, 4003 dma_unmap_page_attrs(rx_ring->dev,
3963 buffer_info->dma, 4004 buffer_info->dma,
3964 PAGE_SIZE, 4005 igb_rx_pg_size(rx_ring),
3965 DMA_FROM_DEVICE, 4006 DMA_FROM_DEVICE,
3966 DMA_ATTR_SKIP_CPU_SYNC); 4007 IGB_RX_DMA_ATTR);
3967 __page_frag_cache_drain(buffer_info->page, 4008 __page_frag_cache_drain(buffer_info->page,
3968 buffer_info->pagecnt_bias); 4009 buffer_info->pagecnt_bias);
3969 4010
3970 buffer_info->page = NULL; 4011 i++;
4012 if (i == rx_ring->count)
4013 i = 0;
3971 } 4014 }
3972 4015
3973 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3974 memset(rx_ring->rx_buffer_info, 0, size);
3975
3976 /* Zero out the descriptor ring */
3977 memset(rx_ring->desc, 0, rx_ring->size);
3978
3979 rx_ring->next_to_alloc = 0; 4016 rx_ring->next_to_alloc = 0;
3980 rx_ring->next_to_clean = 0; 4017 rx_ring->next_to_clean = 0;
3981 rx_ring->next_to_use = 0; 4018 rx_ring->next_to_use = 0;
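
The unmap above has to mirror whatever the Rx allocation path used when mapping the page: IGB_RX_DMA_ATTR asks the DMA API to skip the implicit CPU sync (the driver now syncs explicitly around each access) and to allow weak ordering, while igb_rx_pg_size() keeps the size consistent with the possible order-1 allocation. A sketch of the pairing this implies; the helper names are hypothetical and the real allocation routine is not part of this hunk:

	#include <linux/dma-mapping.h>

	static dma_addr_t my_map_rx_page(struct device *dev, struct page *page,
					 unsigned int size)
	{
		/* same size and attributes as the unmap in igb_clean_rx_ring() */
		return dma_map_page_attrs(dev, page, 0, size, DMA_FROM_DEVICE,
					  DMA_ATTR_SKIP_CPU_SYNC |
					  DMA_ATTR_WEAK_ORDERING);
	}

	static void my_unmap_rx_page(struct device *dev, dma_addr_t dma,
				     unsigned int size)
	{
		dma_unmap_page_attrs(dev, dma, size, DMA_FROM_DEVICE,
				     DMA_ATTR_SKIP_CPU_SYNC |
				     DMA_ATTR_WEAK_ORDERING);
	}
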
@@ -4240,7 +4277,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
4240 struct igb_adapter *adapter = netdev_priv(netdev); 4277 struct igb_adapter *adapter = netdev_priv(netdev);
4241 struct e1000_hw *hw = &adapter->hw; 4278 struct e1000_hw *hw = &adapter->hw;
4242 unsigned int vfn = adapter->vfs_allocated_count; 4279 unsigned int vfn = adapter->vfs_allocated_count;
4243 u32 rctl = 0, vmolr = 0; 4280 u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
4244 int count; 4281 int count;
4245 4282
4246 /* Check for Promiscuous and All Multicast modes */ 4283 /* Check for Promiscuous and All Multicast modes */
@@ -4298,6 +4335,14 @@ static void igb_set_rx_mode(struct net_device *netdev)
4298 E1000_RCTL_VFE); 4335 E1000_RCTL_VFE);
4299 wr32(E1000_RCTL, rctl); 4336 wr32(E1000_RCTL, rctl);
4300 4337
4338#if (PAGE_SIZE < 8192)
4339 if (!adapter->vfs_allocated_count) {
4340 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4341 rlpml = IGB_MAX_FRAME_BUILD_SKB;
4342 }
4343#endif
4344 wr32(E1000_RLPML, rlpml);
4345
4301 /* In order to support SR-IOV and eventually VMDq it is necessary to set 4346 /* In order to support SR-IOV and eventually VMDq it is necessary to set
4302 * the VMOLR to enable the appropriate modes. Without this workaround 4347 * the VMOLR to enable the appropriate modes. Without this workaround
4303 * we will have issues with VLAN tag stripping not being done for frames 4348 * we will have issues with VLAN tag stripping not being done for frames
@@ -4312,12 +4357,17 @@ static void igb_set_rx_mode(struct net_device *netdev)
4312 vmolr |= rd32(E1000_VMOLR(vfn)) & 4357 vmolr |= rd32(E1000_VMOLR(vfn)) &
4313 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); 4358 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
4314 4359
4315 /* enable Rx jumbo frames, no need for restriction */ 4360 /* enable Rx jumbo frames, restrict as needed to support build_skb */
4316 vmolr &= ~E1000_VMOLR_RLPML_MASK; 4361 vmolr &= ~E1000_VMOLR_RLPML_MASK;
4317 vmolr |= MAX_JUMBO_FRAME_SIZE | E1000_VMOLR_LPE; 4362#if (PAGE_SIZE < 8192)
4363 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4364 vmolr |= IGB_MAX_FRAME_BUILD_SKB;
4365 else
4366#endif
4367 vmolr |= MAX_JUMBO_FRAME_SIZE;
4368 vmolr |= E1000_VMOLR_LPE;
4318 4369
4319 wr32(E1000_VMOLR(vfn), vmolr); 4370 wr32(E1000_VMOLR(vfn), vmolr);
4320 wr32(E1000_RLPML, MAX_JUMBO_FRAME_SIZE);
4321 4371
4322 igb_restore_vf_multicasts(adapter); 4372 igb_restore_vf_multicasts(adapter);
4323} 4373}
@@ -5256,18 +5306,32 @@ static void igb_tx_map(struct igb_ring *tx_ring,
5256 5306
5257dma_error: 5307dma_error:
5258 dev_err(tx_ring->dev, "TX DMA map failed\n"); 5308 dev_err(tx_ring->dev, "TX DMA map failed\n");
5309 tx_buffer = &tx_ring->tx_buffer_info[i];
5259 5310
5260 /* clear dma mappings for failed tx_buffer_info map */ 5311 /* clear dma mappings for failed tx_buffer_info map */
5261 for (;;) { 5312 while (tx_buffer != first) {
5313 if (dma_unmap_len(tx_buffer, len))
5314 dma_unmap_page(tx_ring->dev,
5315 dma_unmap_addr(tx_buffer, dma),
5316 dma_unmap_len(tx_buffer, len),
5317 DMA_TO_DEVICE);
5318 dma_unmap_len_set(tx_buffer, len, 0);
5319
5320		if (i-- == 0)
5321 i += tx_ring->count;
5262 tx_buffer = &tx_ring->tx_buffer_info[i]; 5322 tx_buffer = &tx_ring->tx_buffer_info[i];
5263 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
5264 if (tx_buffer == first)
5265 break;
5266 if (i == 0)
5267 i = tx_ring->count;
5268 i--;
5269 } 5323 }
5270 5324
5325 if (dma_unmap_len(tx_buffer, len))
5326 dma_unmap_single(tx_ring->dev,
5327 dma_unmap_addr(tx_buffer, dma),
5328 dma_unmap_len(tx_buffer, len),
5329 DMA_TO_DEVICE);
5330 dma_unmap_len_set(tx_buffer, len, 0);
5331
5332 dev_kfree_skb_any(tx_buffer->skb);
5333 tx_buffer->skb = NULL;
5334
5271 tx_ring->next_to_use = i; 5335 tx_ring->next_to_use = i;
5272} 5336}
5273 5337
@@ -5339,7 +5403,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
5339 return NETDEV_TX_OK; 5403 return NETDEV_TX_OK;
5340 5404
5341out_drop: 5405out_drop:
5342 igb_unmap_and_free_tx_resource(tx_ring, first); 5406 dev_kfree_skb_any(first->skb);
5407 first->skb = NULL;
5343 5408
5344 return NETDEV_TX_OK; 5409 return NETDEV_TX_OK;
5345} 5410}
@@ -6686,7 +6751,6 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
6686 DMA_TO_DEVICE); 6751 DMA_TO_DEVICE);
6687 6752
6688 /* clear tx_buffer data */ 6753 /* clear tx_buffer data */
6689 tx_buffer->skb = NULL;
6690 dma_unmap_len_set(tx_buffer, len, 0); 6754 dma_unmap_len_set(tx_buffer, len, 0);
6691 6755
6692 /* clear last DMA location and unmap remaining buffers */ 6756 /* clear last DMA location and unmap remaining buffers */
@@ -6822,8 +6886,14 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
6822 nta++; 6886 nta++;
6823 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 6887 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
6824 6888
6825 /* transfer page from old buffer to new buffer */ 6889 /* Transfer page from old buffer to new buffer.
6826 *new_buff = *old_buff; 6890 * Move each member individually to avoid possible store
6891 * forwarding stalls.
6892 */
6893 new_buff->dma = old_buff->dma;
6894 new_buff->page = old_buff->page;
6895 new_buff->page_offset = old_buff->page_offset;
6896 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
6827} 6897}
6828 6898
6829static inline bool igb_page_is_reserved(struct page *page) 6899static inline bool igb_page_is_reserved(struct page *page)
@@ -6831,11 +6901,10 @@ static inline bool igb_page_is_reserved(struct page *page)
6831 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); 6901 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
6832} 6902}
6833 6903
6834static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, 6904static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
6835 struct page *page,
6836 unsigned int truesize)
6837{ 6905{
6838 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--; 6906 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
6907 struct page *page = rx_buffer->page;
6839 6908
6840 /* avoid re-using remote pages */ 6909 /* avoid re-using remote pages */
6841 if (unlikely(igb_page_is_reserved(page))) 6910 if (unlikely(igb_page_is_reserved(page)))
@@ -6843,16 +6912,13 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6843 6912
6844#if (PAGE_SIZE < 8192) 6913#if (PAGE_SIZE < 8192)
6845 /* if we are only owner of page we can reuse it */ 6914 /* if we are only owner of page we can reuse it */
6846 if (unlikely(page_ref_count(page) != pagecnt_bias)) 6915 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
6847 return false; 6916 return false;
6848
6849 /* flip page offset to other buffer */
6850 rx_buffer->page_offset ^= IGB_RX_BUFSZ;
6851#else 6917#else
6852 /* move offset up to the next cache line */ 6918#define IGB_LAST_OFFSET \
6853 rx_buffer->page_offset += truesize; 6919 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
6854 6920
6855 if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ)) 6921 if (rx_buffer->page_offset > IGB_LAST_OFFSET)
6856 return false; 6922 return false;
6857#endif 6923#endif
6858 6924
@@ -6860,7 +6926,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6860 * the pagecnt_bias and page count so that we fully restock the 6926 * the pagecnt_bias and page count so that we fully restock the
6861 * number of references the driver holds. 6927 * number of references the driver holds.
6862 */ 6928 */
6863 if (unlikely(pagecnt_bias == 1)) { 6929 if (unlikely(!pagecnt_bias)) {
6864 page_ref_add(page, USHRT_MAX); 6930 page_ref_add(page, USHRT_MAX);
6865 rx_buffer->pagecnt_bias = USHRT_MAX; 6931 rx_buffer->pagecnt_bias = USHRT_MAX;
6866 } 6932 }
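
The pagecnt_bias accounting above works by pre-charging the page's reference count: the driver takes a large batch of references up front (page_ref_add(page, USHRT_MAX)) and spends them one at a time through pagecnt_bias, so the reuse decision reduces to "nobody outside the driver still holds part of the page". Restated as a standalone helper (hypothetical, equivalent to the small-page test in igb_can_reuse_rx_page()):

	#include <linux/mm.h>

	static bool my_rx_page_reusable(struct page *page,
					unsigned int pagecnt_bias)
	{
		/* recycle only while the sole reference not covered by the
		 * driver's pre-charged share is the buffer being processed */
		return (page_ref_count(page) - pagecnt_bias) <= 1;
	}
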
@@ -6872,34 +6938,56 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6872 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff 6938 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
6873 * @rx_ring: rx descriptor ring to transact packets on 6939 * @rx_ring: rx descriptor ring to transact packets on
6874 * @rx_buffer: buffer containing page to add 6940 * @rx_buffer: buffer containing page to add
6875 * @rx_desc: descriptor containing length of buffer written by hardware
6876 * @skb: sk_buff to place the data into 6941 * @skb: sk_buff to place the data into
6942 * @size: size of buffer to be added
6877 * 6943 *
6878 * This function will add the data contained in rx_buffer->page to the skb. 6944 * This function will add the data contained in rx_buffer->page to the skb.
6879 * This is done either through a direct copy if the data in the buffer is
6880 * less than the skb header size, otherwise it will just attach the page as
6881 * a frag to the skb.
6882 *
6883 * The function will then update the page offset if necessary and return
6884 * true if the buffer can be reused by the adapter.
6885 **/ 6945 **/
6886static bool igb_add_rx_frag(struct igb_ring *rx_ring, 6946static void igb_add_rx_frag(struct igb_ring *rx_ring,
6887 struct igb_rx_buffer *rx_buffer, 6947 struct igb_rx_buffer *rx_buffer,
6888 unsigned int size, 6948 struct sk_buff *skb,
6889 union e1000_adv_rx_desc *rx_desc, 6949 unsigned int size)
6890 struct sk_buff *skb)
6891{ 6950{
6892 struct page *page = rx_buffer->page;
6893 unsigned char *va = page_address(page) + rx_buffer->page_offset;
6894#if (PAGE_SIZE < 8192) 6951#if (PAGE_SIZE < 8192)
6895 unsigned int truesize = IGB_RX_BUFSZ; 6952 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
6953#else
6954 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
6955 SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
6956 SKB_DATA_ALIGN(size);
6957#endif
6958 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
6959 rx_buffer->page_offset, size, truesize);
6960#if (PAGE_SIZE < 8192)
6961 rx_buffer->page_offset ^= truesize;
6962#else
6963 rx_buffer->page_offset += truesize;
6964#endif
6965}
6966
6967static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
6968 struct igb_rx_buffer *rx_buffer,
6969 union e1000_adv_rx_desc *rx_desc,
6970 unsigned int size)
6971{
6972 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
6973#if (PAGE_SIZE < 8192)
6974 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
6896#else 6975#else
6897 unsigned int truesize = SKB_DATA_ALIGN(size); 6976 unsigned int truesize = SKB_DATA_ALIGN(size);
6898#endif 6977#endif
6899 unsigned int pull_len; 6978 unsigned int headlen;
6979 struct sk_buff *skb;
6900 6980
6901 if (unlikely(skb_is_nonlinear(skb))) 6981 /* prefetch first cache line of first page */
6902 goto add_tail_frag; 6982 prefetch(va);
6983#if L1_CACHE_BYTES < 128
6984 prefetch(va + L1_CACHE_BYTES);
6985#endif
6986
6987 /* allocate a skb to store the frags */
6988 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
6989 if (unlikely(!skb))
6990 return NULL;
6903 6991
6904 if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { 6992 if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
6905 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); 6993 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
@@ -6907,95 +6995,73 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
6907 size -= IGB_TS_HDR_LEN; 6995 size -= IGB_TS_HDR_LEN;
6908 } 6996 }
6909 6997
6910 if (likely(size <= IGB_RX_HDR_LEN)) { 6998 /* Determine available headroom for copy */
6911 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); 6999 headlen = size;
6912 7000 if (headlen > IGB_RX_HDR_LEN)
6913 /* page is not reserved, we can reuse buffer as-is */ 7001 headlen = eth_get_headlen(va, IGB_RX_HDR_LEN);
6914 if (likely(!igb_page_is_reserved(page)))
6915 return true;
6916
6917 /* this page cannot be reused so discard it */
6918 return false;
6919 }
6920
6921 /* we need the header to contain the greater of either ETH_HLEN or
6922 * 60 bytes if the skb->len is less than 60 for skb_pad.
6923 */
6924 pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
6925 7002
6926 /* align pull length to size of long to optimize memcpy performance */ 7003 /* align pull length to size of long to optimize memcpy performance */
6927 memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); 7004 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
6928 7005
6929 /* update all of the pointers */ 7006 /* update all of the pointers */
6930 va += pull_len; 7007 size -= headlen;
6931 size -= pull_len; 7008 if (size) {
6932 7009 skb_add_rx_frag(skb, 0, rx_buffer->page,
6933add_tail_frag: 7010 (va + headlen) - page_address(rx_buffer->page),
6934 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 7011 size, truesize);
6935 (unsigned long)va & ~PAGE_MASK, size, truesize); 7012#if (PAGE_SIZE < 8192)
7013 rx_buffer->page_offset ^= truesize;
7014#else
7015 rx_buffer->page_offset += truesize;
7016#endif
7017 } else {
7018 rx_buffer->pagecnt_bias++;
7019 }
6936 7020
6937 return igb_can_reuse_rx_page(rx_buffer, page, truesize); 7021 return skb;
6938} 7022}
6939 7023
6940static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, 7024static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
6941 union e1000_adv_rx_desc *rx_desc, 7025 struct igb_rx_buffer *rx_buffer,
6942 struct sk_buff *skb) 7026 union e1000_adv_rx_desc *rx_desc,
7027 unsigned int size)
6943{ 7028{
6944 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); 7029 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
6945 struct igb_rx_buffer *rx_buffer; 7030#if (PAGE_SIZE < 8192)
6946 struct page *page; 7031 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
6947 7032#else
6948 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; 7033 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
6949 page = rx_buffer->page; 7034 SKB_DATA_ALIGN(IGB_SKB_PAD + size);
6950 prefetchw(page); 7035#endif
6951 7036 struct sk_buff *skb;
6952 /* we are reusing so sync this buffer for CPU use */
6953 dma_sync_single_range_for_cpu(rx_ring->dev,
6954 rx_buffer->dma,
6955 rx_buffer->page_offset,
6956 size,
6957 DMA_FROM_DEVICE);
6958
6959 if (likely(!skb)) {
6960 void *page_addr = page_address(page) +
6961 rx_buffer->page_offset;
6962 7037
6963 /* prefetch first cache line of first page */ 7038 /* prefetch first cache line of first page */
6964 prefetch(page_addr); 7039 prefetch(va);
6965#if L1_CACHE_BYTES < 128 7040#if L1_CACHE_BYTES < 128
6966 prefetch(page_addr + L1_CACHE_BYTES); 7041 prefetch(va + L1_CACHE_BYTES);
6967#endif 7042#endif
6968 7043
6969 /* allocate a skb to store the frags */ 7044 /* build an skb around the page buffer */
6970 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); 7045 skb = build_skb(va - IGB_SKB_PAD, truesize);
6971 if (unlikely(!skb)) { 7046 if (unlikely(!skb))
6972 rx_ring->rx_stats.alloc_failed++; 7047 return NULL;
6973 return NULL;
6974 }
6975 7048
6976 /* we will be copying header into skb->data in 7049 /* update pointers within the skb to store the data */
6977 * pskb_may_pull so it is in our interest to prefetch 7050 skb_reserve(skb, IGB_SKB_PAD);
6978 * it now to avoid a possible cache miss 7051 __skb_put(skb, size);
6979 */
6980 prefetchw(skb->data);
6981 }
6982 7052
6983 /* pull page into skb */ 7053 /* pull timestamp out of packet data */
6984 if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) { 7054 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
6985 /* hand second half of page back to the ring */ 7055 igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
6986 igb_reuse_rx_page(rx_ring, rx_buffer); 7056 __skb_pull(skb, IGB_TS_HDR_LEN);
6987 } else {
6988 /* We are not reusing the buffer so unmap it and free
6989 * any references we are holding to it
6990 */
6991 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
6992 PAGE_SIZE, DMA_FROM_DEVICE,
6993 DMA_ATTR_SKIP_CPU_SYNC);
6994 __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
6995 } 7057 }
6996 7058
6997 /* clear contents of rx_buffer */ 7059 /* update buffer offset */
6998 rx_buffer->page = NULL; 7060#if (PAGE_SIZE < 8192)
7061 rx_buffer->page_offset ^= truesize;
7062#else
7063 rx_buffer->page_offset += truesize;
7064#endif
6999 7065
7000 return skb; 7066 return skb;
7001} 7067}
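The igb_build_skb() path added above wraps an sk_buff directly around the half page the hardware already filled, rather than copying headers into a freshly allocated skb: IGB_SKB_PAD of headroom sits in front of the received data and the skb_shared_info footer sits behind it, which is what the PAGE_SIZE >= 8192 truesize expression accounts for. A minimal standalone sketch of that layout arithmetic follows; SKB_PAD and SHARED_INFO_SZ are illustrative stand-ins, not the kernel's actual values.

/* Truesize accounting assumed by the build_skb receive path:
 * aligned shared-info footer plus aligned (headroom + frame data).
 */
#include <stdio.h>

#define SMP_CACHE_BYTES 64
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

#define SKB_PAD        64    /* stand-in for IGB_SKB_PAD headroom */
#define SHARED_INFO_SZ 320   /* assumed sizeof(struct skb_shared_info) */

static unsigned int buf_truesize(unsigned int size)
{
	return ALIGN_UP(SHARED_INFO_SZ, SMP_CACHE_BYTES) +
	       ALIGN_UP(SKB_PAD + size, SMP_CACHE_BYTES);
}

int main(void)
{
	/* a 1536-byte frame charges 320 + 1600 = 1920 bytes to the socket */
	printf("truesize(1536) = %u\n", buf_truesize(1536));
	return 0;
}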
@@ -7154,6 +7220,47 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
7154 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 7220 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
7155} 7221}
7156 7222
7223static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
7224 const unsigned int size)
7225{
7226 struct igb_rx_buffer *rx_buffer;
7227
7228 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
7229 prefetchw(rx_buffer->page);
7230
7231 /* we are reusing so sync this buffer for CPU use */
7232 dma_sync_single_range_for_cpu(rx_ring->dev,
7233 rx_buffer->dma,
7234 rx_buffer->page_offset,
7235 size,
7236 DMA_FROM_DEVICE);
7237
7238 rx_buffer->pagecnt_bias--;
7239
7240 return rx_buffer;
7241}
7242
7243static void igb_put_rx_buffer(struct igb_ring *rx_ring,
7244 struct igb_rx_buffer *rx_buffer)
7245{
7246 if (igb_can_reuse_rx_page(rx_buffer)) {
7247 /* hand second half of page back to the ring */
7248 igb_reuse_rx_page(rx_ring, rx_buffer);
7249 } else {
7250 /* We are not reusing the buffer so unmap it and free
7251 * any references we are holding to it
7252 */
7253 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
7254 igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
7255 IGB_RX_DMA_ATTR);
7256 __page_frag_cache_drain(rx_buffer->page,
7257 rx_buffer->pagecnt_bias);
7258 }
7259
7260 /* clear contents of rx_buffer */
7261 rx_buffer->page = NULL;
7262}
7263
7157static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) 7264static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
7158{ 7265{
7159 struct igb_ring *rx_ring = q_vector->rx.ring; 7266 struct igb_ring *rx_ring = q_vector->rx.ring;
@@ -7163,6 +7270,8 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
7163 7270
7164 while (likely(total_packets < budget)) { 7271 while (likely(total_packets < budget)) {
7165 union e1000_adv_rx_desc *rx_desc; 7272 union e1000_adv_rx_desc *rx_desc;
7273 struct igb_rx_buffer *rx_buffer;
7274 unsigned int size;
7166 7275
7167 /* return some buffers to hardware, one at a time is too slow */ 7276 /* return some buffers to hardware, one at a time is too slow */
7168 if (cleaned_count >= IGB_RX_BUFFER_WRITE) { 7277 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
@@ -7171,8 +7280,8 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
7171 } 7280 }
7172 7281
7173 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean); 7282 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
7174 7283 size = le16_to_cpu(rx_desc->wb.upper.length);
7175 if (!rx_desc->wb.upper.status_error) 7284 if (!size)
7176 break; 7285 break;
7177 7286
7178 /* This memory barrier is needed to keep us from reading 7287 /* This memory barrier is needed to keep us from reading
@@ -7181,13 +7290,25 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
7181 */ 7290 */
7182 dma_rmb(); 7291 dma_rmb();
7183 7292
7293 rx_buffer = igb_get_rx_buffer(rx_ring, size);
7294
7184 /* retrieve a buffer from the ring */ 7295 /* retrieve a buffer from the ring */
7185 skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb); 7296 if (skb)
7297 igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
7298 else if (ring_uses_build_skb(rx_ring))
7299 skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
7300 else
7301 skb = igb_construct_skb(rx_ring, rx_buffer,
7302 rx_desc, size);
7186 7303
7187 /* exit if we failed to retrieve a buffer */ 7304 /* exit if we failed to retrieve a buffer */
7188 if (!skb) 7305 if (!skb) {
7306 rx_ring->rx_stats.alloc_failed++;
7307 rx_buffer->pagecnt_bias++;
7189 break; 7308 break;
7309 }
7190 7310
7311 igb_put_rx_buffer(rx_ring, rx_buffer);
7191 cleaned_count++; 7312 cleaned_count++;
7192 7313
7193 /* fetch next buffer in frame if non-eop */ 7314 /* fetch next buffer in frame if non-eop */
@@ -7231,6 +7352,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
7231 return total_packets; 7352 return total_packets;
7232} 7353}
7233 7354
7355static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
7356{
7357 return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
7358}
7359
7234static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, 7360static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
7235 struct igb_rx_buffer *bi) 7361 struct igb_rx_buffer *bi)
7236{ 7362{
@@ -7242,21 +7368,23 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
7242 return true; 7368 return true;
7243 7369
7244 /* alloc new page for storage */ 7370 /* alloc new page for storage */
7245 page = dev_alloc_page(); 7371 page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
7246 if (unlikely(!page)) { 7372 if (unlikely(!page)) {
7247 rx_ring->rx_stats.alloc_failed++; 7373 rx_ring->rx_stats.alloc_failed++;
7248 return false; 7374 return false;
7249 } 7375 }
7250 7376
7251 /* map page for use */ 7377 /* map page for use */
7252 dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE, 7378 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
7253 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); 7379 igb_rx_pg_size(rx_ring),
7380 DMA_FROM_DEVICE,
7381 IGB_RX_DMA_ATTR);
7254 7382
7255 /* if mapping failed free memory back to system since 7383 /* if mapping failed free memory back to system since
7256 * there isn't much point in holding memory we can't use 7384 * there isn't much point in holding memory we can't use
7257 */ 7385 */
7258 if (dma_mapping_error(rx_ring->dev, dma)) { 7386 if (dma_mapping_error(rx_ring->dev, dma)) {
7259 __free_page(page); 7387 __free_pages(page, igb_rx_pg_order(rx_ring));
7260 7388
7261 rx_ring->rx_stats.alloc_failed++; 7389 rx_ring->rx_stats.alloc_failed++;
7262 return false; 7390 return false;
@@ -7264,7 +7392,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
7264 7392
7265 bi->dma = dma; 7393 bi->dma = dma;
7266 bi->page = page; 7394 bi->page = page;
7267 bi->page_offset = 0; 7395 bi->page_offset = igb_rx_offset(rx_ring);
7268 bi->pagecnt_bias = 1; 7396 bi->pagecnt_bias = 1;
7269 7397
7270 return true; 7398 return true;
@@ -7279,6 +7407,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
7279 union e1000_adv_rx_desc *rx_desc; 7407 union e1000_adv_rx_desc *rx_desc;
7280 struct igb_rx_buffer *bi; 7408 struct igb_rx_buffer *bi;
7281 u16 i = rx_ring->next_to_use; 7409 u16 i = rx_ring->next_to_use;
7410 u16 bufsz;
7282 7411
7283 /* nothing to do */ 7412 /* nothing to do */
7284 if (!cleaned_count) 7413 if (!cleaned_count)
@@ -7288,14 +7417,15 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
7288 bi = &rx_ring->rx_buffer_info[i]; 7417 bi = &rx_ring->rx_buffer_info[i];
7289 i -= rx_ring->count; 7418 i -= rx_ring->count;
7290 7419
7420 bufsz = igb_rx_bufsz(rx_ring);
7421
7291 do { 7422 do {
7292 if (!igb_alloc_mapped_page(rx_ring, bi)) 7423 if (!igb_alloc_mapped_page(rx_ring, bi))
7293 break; 7424 break;
7294 7425
7295 /* sync the buffer for use by the device */ 7426 /* sync the buffer for use by the device */
7296 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 7427 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
7297 bi->page_offset, 7428 bi->page_offset, bufsz,
7298 IGB_RX_BUFSZ,
7299 DMA_FROM_DEVICE); 7429 DMA_FROM_DEVICE);
7300 7430
7301 /* Refresh the desc even if buffer_addrs didn't change 7431 /* Refresh the desc even if buffer_addrs didn't change
@@ -7312,8 +7442,8 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
7312 i -= rx_ring->count; 7442 i -= rx_ring->count;
7313 } 7443 }
7314 7444
7315 /* clear the status bits for the next_to_use descriptor */ 7445 /* clear the length for the next_to_use descriptor */
7316 rx_desc->wb.upper.status_error = 0; 7446 rx_desc->wb.upper.length = 0;
7317 7447
7318 cleaned_count--; 7448 cleaned_count--;
7319 } while (cleaned_count); 7449 } while (cleaned_count);
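Taken together, the igb_main.c changes above split the old igb_fetch_rx_buffer() into igb_get_rx_buffer()/igb_put_rx_buffer() and track page ownership with a pagecnt_bias counter while flipping the active buffer between the two halves of each page (page_offset ^= truesize when PAGE_SIZE < 8192). The userspace sketch below illustrates that recycling idea with simplified rules and hypothetical names; it is not the driver's logic verbatim.

/* Half-page recycling: hand one half to the stack, arm the other half,
 * and reuse the page while nearly all references still belong to the
 * driver.  pagecnt_bias counts the driver-owned references.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define TRUESIZE  (PAGE_SIZE / 2)

struct rx_buffer {
	unsigned int page_offset;   /* half of the page armed for RX */
	unsigned int pagecnt_bias;  /* references the driver still owns */
	unsigned int page_refs;     /* stand-in for page_ref_count() */
};

/* give the current half to an skb and flip to the other half */
static void take_half(struct rx_buffer *b)
{
	b->pagecnt_bias--;           /* one reference now travels with the skb */
	b->page_offset ^= TRUESIZE;
}

/* simplified reuse rule: at most one reference is outside the bias
 * (the frame currently being processed by the stack)
 */
static bool can_reuse(const struct rx_buffer *b)
{
	return b->page_refs - b->pagecnt_bias <= 1;
}

int main(void)
{
	struct rx_buffer b = { .page_offset = 0, .pagecnt_bias = 2, .page_refs = 2 };

	take_half(&b);
	printf("next offset=%u, reusable=%s\n",
	       b.page_offset, can_reuse(&b) ? "yes" : "no");
	return 0;
}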
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index c4477552ce9e..7a3fd4d74592 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -764,8 +764,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
764 * incoming frame. The value is stored in little endian format starting on 764 * incoming frame. The value is stored in little endian format starting on
765 * byte 8. 765 * byte 8.
766 **/ 766 **/
767void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, 767void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
768 unsigned char *va,
769 struct sk_buff *skb) 768 struct sk_buff *skb)
770{ 769{
771 __le64 *regval = (__le64 *)va; 770 __le64 *regval = (__le64 *)va;
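The igb_ptp_rx_pktstamp() prototype change above (unsigned char * to void *) matches the new callers, which pass skb->data or the raw buffer address and then __skb_pull() the IGB_TS_HDR_LEN prefix. The standalone sketch below shows the general idea of reading a little-endian timestamp that starts at byte 8 of such a prefix and skipping past it; the 16-byte prefix length is an assumption for the demo, and the real driver feeds the raw value through its timecounter rather than printing it.

/* Pull a hardware timestamp prefix off the front of a received frame. */
#include <stdint.h>
#include <stdio.h>

#define TS_HDR_LEN 16   /* assumed prefix length (IGB_TS_HDR_LEN) */

static uint64_t read_le64(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	uint8_t frame[64] = { 0 };
	uint64_t raw = 0x1122334455667788ULL;

	/* the timestamp is stored little endian starting at byte 8 */
	for (int i = 0; i < 8; i++)
		frame[8 + i] = (uint8_t)(raw >> (8 * i));

	uint64_t ts = read_le64(frame + 8);
	const uint8_t *payload = frame + TS_HDR_LEN;   /* __skb_pull() equivalent */

	printf("timestamp=0x%llx, payload offset=%td\n",
	       (unsigned long long)ts, payload - frame);
	return 0;
}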
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 90fa5bf23d1b..0da0752fedef 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -186,60 +186,62 @@ static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
186 } 186 }
187} 187}
188 188
189static int ixgbe_get_settings(struct net_device *netdev, 189static int ixgbe_get_link_ksettings(struct net_device *netdev,
190 struct ethtool_cmd *ecmd) 190 struct ethtool_link_ksettings *cmd)
191{ 191{
192 struct ixgbe_adapter *adapter = netdev_priv(netdev); 192 struct ixgbe_adapter *adapter = netdev_priv(netdev);
193 struct ixgbe_hw *hw = &adapter->hw; 193 struct ixgbe_hw *hw = &adapter->hw;
194 ixgbe_link_speed supported_link; 194 ixgbe_link_speed supported_link;
195 bool autoneg = false; 195 bool autoneg = false;
196 u32 supported, advertising;
197
198 ethtool_convert_link_mode_to_legacy_u32(&supported,
199 cmd->link_modes.supported);
196 200
197 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); 201 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
198 202
199 /* set the supported link speeds */ 203 /* set the supported link speeds */
200 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) 204 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
201 ecmd->supported |= ixgbe_get_supported_10gtypes(hw); 205 supported |= ixgbe_get_supported_10gtypes(hw);
202 if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) 206 if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
203 ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? 207 supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
204 SUPPORTED_1000baseKX_Full : 208 SUPPORTED_1000baseKX_Full :
205 SUPPORTED_1000baseT_Full; 209 SUPPORTED_1000baseT_Full;
206 if (supported_link & IXGBE_LINK_SPEED_100_FULL) 210 if (supported_link & IXGBE_LINK_SPEED_100_FULL)
207 ecmd->supported |= SUPPORTED_100baseT_Full; 211 supported |= SUPPORTED_100baseT_Full;
208 if (supported_link & IXGBE_LINK_SPEED_10_FULL) 212 if (supported_link & IXGBE_LINK_SPEED_10_FULL)
209 ecmd->supported |= SUPPORTED_10baseT_Full; 213 supported |= SUPPORTED_10baseT_Full;
210 214
211 /* default advertised speed if phy.autoneg_advertised isn't set */ 215 /* default advertised speed if phy.autoneg_advertised isn't set */
212 ecmd->advertising = ecmd->supported; 216 advertising = supported;
213 /* set the advertised speeds */ 217 /* set the advertised speeds */
214 if (hw->phy.autoneg_advertised) { 218 if (hw->phy.autoneg_advertised) {
215 ecmd->advertising = 0; 219 advertising = 0;
216 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) 220 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
217 ecmd->advertising |= ADVERTISED_10baseT_Full; 221 advertising |= ADVERTISED_10baseT_Full;
218 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) 222 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
219 ecmd->advertising |= ADVERTISED_100baseT_Full; 223 advertising |= ADVERTISED_100baseT_Full;
220 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) 224 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
221 ecmd->advertising |= ecmd->supported & ADVRTSD_MSK_10G; 225 advertising |= supported & ADVRTSD_MSK_10G;
222 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) { 226 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
223 if (ecmd->supported & SUPPORTED_1000baseKX_Full) 227 if (supported & SUPPORTED_1000baseKX_Full)
224 ecmd->advertising |= ADVERTISED_1000baseKX_Full; 228 advertising |= ADVERTISED_1000baseKX_Full;
225 else 229 else
226 ecmd->advertising |= ADVERTISED_1000baseT_Full; 230 advertising |= ADVERTISED_1000baseT_Full;
227 } 231 }
228 } else { 232 } else {
229 if (hw->phy.multispeed_fiber && !autoneg) { 233 if (hw->phy.multispeed_fiber && !autoneg) {
230 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) 234 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
231 ecmd->advertising = ADVERTISED_10000baseT_Full; 235 advertising = ADVERTISED_10000baseT_Full;
232 } 236 }
233 } 237 }
234 238
235 if (autoneg) { 239 if (autoneg) {
236 ecmd->supported |= SUPPORTED_Autoneg; 240 supported |= SUPPORTED_Autoneg;
237 ecmd->advertising |= ADVERTISED_Autoneg; 241 advertising |= ADVERTISED_Autoneg;
238 ecmd->autoneg = AUTONEG_ENABLE; 242 cmd->base.autoneg = AUTONEG_ENABLE;
239 } else 243 } else
240 ecmd->autoneg = AUTONEG_DISABLE; 244 cmd->base.autoneg = AUTONEG_DISABLE;
241
242 ecmd->transceiver = XCVR_EXTERNAL;
243 245
244 /* Determine the remaining settings based on the PHY type. */ 246 /* Determine the remaining settings based on the PHY type. */
245 switch (adapter->hw.phy.type) { 247 switch (adapter->hw.phy.type) {
@@ -248,14 +250,14 @@ static int ixgbe_get_settings(struct net_device *netdev,
248 case ixgbe_phy_x550em_ext_t: 250 case ixgbe_phy_x550em_ext_t:
249 case ixgbe_phy_fw: 251 case ixgbe_phy_fw:
250 case ixgbe_phy_cu_unknown: 252 case ixgbe_phy_cu_unknown:
251 ecmd->supported |= SUPPORTED_TP; 253 supported |= SUPPORTED_TP;
252 ecmd->advertising |= ADVERTISED_TP; 254 advertising |= ADVERTISED_TP;
253 ecmd->port = PORT_TP; 255 cmd->base.port = PORT_TP;
254 break; 256 break;
255 case ixgbe_phy_qt: 257 case ixgbe_phy_qt:
256 ecmd->supported |= SUPPORTED_FIBRE; 258 supported |= SUPPORTED_FIBRE;
257 ecmd->advertising |= ADVERTISED_FIBRE; 259 advertising |= ADVERTISED_FIBRE;
258 ecmd->port = PORT_FIBRE; 260 cmd->base.port = PORT_FIBRE;
259 break; 261 break;
260 case ixgbe_phy_nl: 262 case ixgbe_phy_nl:
261 case ixgbe_phy_sfp_passive_tyco: 263 case ixgbe_phy_sfp_passive_tyco:
@@ -273,9 +275,9 @@ static int ixgbe_get_settings(struct net_device *netdev,
273 case ixgbe_sfp_type_da_cu: 275 case ixgbe_sfp_type_da_cu:
274 case ixgbe_sfp_type_da_cu_core0: 276 case ixgbe_sfp_type_da_cu_core0:
275 case ixgbe_sfp_type_da_cu_core1: 277 case ixgbe_sfp_type_da_cu_core1:
276 ecmd->supported |= SUPPORTED_FIBRE; 278 supported |= SUPPORTED_FIBRE;
277 ecmd->advertising |= ADVERTISED_FIBRE; 279 advertising |= ADVERTISED_FIBRE;
278 ecmd->port = PORT_DA; 280 cmd->base.port = PORT_DA;
279 break; 281 break;
280 case ixgbe_sfp_type_sr: 282 case ixgbe_sfp_type_sr:
281 case ixgbe_sfp_type_lr: 283 case ixgbe_sfp_type_lr:
@@ -285,102 +287,113 @@ static int ixgbe_get_settings(struct net_device *netdev,
285 case ixgbe_sfp_type_1g_sx_core1: 287 case ixgbe_sfp_type_1g_sx_core1:
286 case ixgbe_sfp_type_1g_lx_core0: 288 case ixgbe_sfp_type_1g_lx_core0:
287 case ixgbe_sfp_type_1g_lx_core1: 289 case ixgbe_sfp_type_1g_lx_core1:
288 ecmd->supported |= SUPPORTED_FIBRE; 290 supported |= SUPPORTED_FIBRE;
289 ecmd->advertising |= ADVERTISED_FIBRE; 291 advertising |= ADVERTISED_FIBRE;
290 ecmd->port = PORT_FIBRE; 292 cmd->base.port = PORT_FIBRE;
291 break; 293 break;
292 case ixgbe_sfp_type_not_present: 294 case ixgbe_sfp_type_not_present:
293 ecmd->supported |= SUPPORTED_FIBRE; 295 supported |= SUPPORTED_FIBRE;
294 ecmd->advertising |= ADVERTISED_FIBRE; 296 advertising |= ADVERTISED_FIBRE;
295 ecmd->port = PORT_NONE; 297 cmd->base.port = PORT_NONE;
296 break; 298 break;
297 case ixgbe_sfp_type_1g_cu_core0: 299 case ixgbe_sfp_type_1g_cu_core0:
298 case ixgbe_sfp_type_1g_cu_core1: 300 case ixgbe_sfp_type_1g_cu_core1:
299 ecmd->supported |= SUPPORTED_TP; 301 supported |= SUPPORTED_TP;
300 ecmd->advertising |= ADVERTISED_TP; 302 advertising |= ADVERTISED_TP;
301 ecmd->port = PORT_TP; 303 cmd->base.port = PORT_TP;
302 break; 304 break;
303 case ixgbe_sfp_type_unknown: 305 case ixgbe_sfp_type_unknown:
304 default: 306 default:
305 ecmd->supported |= SUPPORTED_FIBRE; 307 supported |= SUPPORTED_FIBRE;
306 ecmd->advertising |= ADVERTISED_FIBRE; 308 advertising |= ADVERTISED_FIBRE;
307 ecmd->port = PORT_OTHER; 309 cmd->base.port = PORT_OTHER;
308 break; 310 break;
309 } 311 }
310 break; 312 break;
311 case ixgbe_phy_xaui: 313 case ixgbe_phy_xaui:
312 ecmd->supported |= SUPPORTED_FIBRE; 314 supported |= SUPPORTED_FIBRE;
313 ecmd->advertising |= ADVERTISED_FIBRE; 315 advertising |= ADVERTISED_FIBRE;
314 ecmd->port = PORT_NONE; 316 cmd->base.port = PORT_NONE;
315 break; 317 break;
316 case ixgbe_phy_unknown: 318 case ixgbe_phy_unknown:
317 case ixgbe_phy_generic: 319 case ixgbe_phy_generic:
318 case ixgbe_phy_sfp_unsupported: 320 case ixgbe_phy_sfp_unsupported:
319 default: 321 default:
320 ecmd->supported |= SUPPORTED_FIBRE; 322 supported |= SUPPORTED_FIBRE;
321 ecmd->advertising |= ADVERTISED_FIBRE; 323 advertising |= ADVERTISED_FIBRE;
322 ecmd->port = PORT_OTHER; 324 cmd->base.port = PORT_OTHER;
323 break; 325 break;
324 } 326 }
325 327
326 /* Indicate pause support */ 328 /* Indicate pause support */
327 ecmd->supported |= SUPPORTED_Pause; 329 supported |= SUPPORTED_Pause;
328 330
329 switch (hw->fc.requested_mode) { 331 switch (hw->fc.requested_mode) {
330 case ixgbe_fc_full: 332 case ixgbe_fc_full:
331 ecmd->advertising |= ADVERTISED_Pause; 333 advertising |= ADVERTISED_Pause;
332 break; 334 break;
333 case ixgbe_fc_rx_pause: 335 case ixgbe_fc_rx_pause:
334 ecmd->advertising |= ADVERTISED_Pause | 336 advertising |= ADVERTISED_Pause |
335 ADVERTISED_Asym_Pause; 337 ADVERTISED_Asym_Pause;
336 break; 338 break;
337 case ixgbe_fc_tx_pause: 339 case ixgbe_fc_tx_pause:
338 ecmd->advertising |= ADVERTISED_Asym_Pause; 340 advertising |= ADVERTISED_Asym_Pause;
339 break; 341 break;
340 default: 342 default:
341 ecmd->advertising &= ~(ADVERTISED_Pause | 343 advertising &= ~(ADVERTISED_Pause |
342 ADVERTISED_Asym_Pause); 344 ADVERTISED_Asym_Pause);
343 } 345 }
344 346
345 if (netif_carrier_ok(netdev)) { 347 if (netif_carrier_ok(netdev)) {
346 switch (adapter->link_speed) { 348 switch (adapter->link_speed) {
347 case IXGBE_LINK_SPEED_10GB_FULL: 349 case IXGBE_LINK_SPEED_10GB_FULL:
348 ethtool_cmd_speed_set(ecmd, SPEED_10000); 350 cmd->base.speed = SPEED_10000;
349 break; 351 break;
350 case IXGBE_LINK_SPEED_5GB_FULL: 352 case IXGBE_LINK_SPEED_5GB_FULL:
351 ethtool_cmd_speed_set(ecmd, SPEED_5000); 353 cmd->base.speed = SPEED_5000;
352 break; 354 break;
353 case IXGBE_LINK_SPEED_2_5GB_FULL: 355 case IXGBE_LINK_SPEED_2_5GB_FULL:
354 ethtool_cmd_speed_set(ecmd, SPEED_2500); 356 cmd->base.speed = SPEED_2500;
355 break; 357 break;
356 case IXGBE_LINK_SPEED_1GB_FULL: 358 case IXGBE_LINK_SPEED_1GB_FULL:
357 ethtool_cmd_speed_set(ecmd, SPEED_1000); 359 cmd->base.speed = SPEED_1000;
358 break; 360 break;
359 case IXGBE_LINK_SPEED_100_FULL: 361 case IXGBE_LINK_SPEED_100_FULL:
360 ethtool_cmd_speed_set(ecmd, SPEED_100); 362 cmd->base.speed = SPEED_100;
361 break; 363 break;
362 case IXGBE_LINK_SPEED_10_FULL: 364 case IXGBE_LINK_SPEED_10_FULL:
363 ethtool_cmd_speed_set(ecmd, SPEED_10); 365 cmd->base.speed = SPEED_10;
364 break; 366 break;
365 default: 367 default:
366 break; 368 break;
367 } 369 }
368 ecmd->duplex = DUPLEX_FULL; 370 cmd->base.duplex = DUPLEX_FULL;
369 } else { 371 } else {
370 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); 372 cmd->base.speed = SPEED_UNKNOWN;
371 ecmd->duplex = DUPLEX_UNKNOWN; 373 cmd->base.duplex = DUPLEX_UNKNOWN;
372 } 374 }
373 375
376 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
377 supported);
378 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
379 advertising);
380
374 return 0; 381 return 0;
375} 382}
376 383
377static int ixgbe_set_settings(struct net_device *netdev, 384static int ixgbe_set_link_ksettings(struct net_device *netdev,
378 struct ethtool_cmd *ecmd) 385 const struct ethtool_link_ksettings *cmd)
379{ 386{
380 struct ixgbe_adapter *adapter = netdev_priv(netdev); 387 struct ixgbe_adapter *adapter = netdev_priv(netdev);
381 struct ixgbe_hw *hw = &adapter->hw; 388 struct ixgbe_hw *hw = &adapter->hw;
382 u32 advertised, old; 389 u32 advertised, old;
383 s32 err = 0; 390 s32 err = 0;
391 u32 supported, advertising;
392
393 ethtool_convert_link_mode_to_legacy_u32(&supported,
394 cmd->link_modes.supported);
395 ethtool_convert_link_mode_to_legacy_u32(&advertising,
396 cmd->link_modes.advertising);
384 397
385 if ((hw->phy.media_type == ixgbe_media_type_copper) || 398 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
386 (hw->phy.multispeed_fiber)) { 399 (hw->phy.multispeed_fiber)) {
@@ -388,12 +401,12 @@ static int ixgbe_set_settings(struct net_device *netdev,
388 * this function does not support duplex forcing, but can 401 * this function does not support duplex forcing, but can
389 * limit the advertising of the adapter to the specified speed 402 * limit the advertising of the adapter to the specified speed
390 */ 403 */
391 if (ecmd->advertising & ~ecmd->supported) 404 if (advertising & ~supported)
392 return -EINVAL; 405 return -EINVAL;
393 406
394 /* only allow one speed at a time if no autoneg */ 407 /* only allow one speed at a time if no autoneg */
395 if (!ecmd->autoneg && hw->phy.multispeed_fiber) { 408 if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
396 if (ecmd->advertising == 409 if (advertising ==
397 (ADVERTISED_10000baseT_Full | 410 (ADVERTISED_10000baseT_Full |
398 ADVERTISED_1000baseT_Full)) 411 ADVERTISED_1000baseT_Full))
399 return -EINVAL; 412 return -EINVAL;
@@ -401,16 +414,16 @@ static int ixgbe_set_settings(struct net_device *netdev,
401 414
402 old = hw->phy.autoneg_advertised; 415 old = hw->phy.autoneg_advertised;
403 advertised = 0; 416 advertised = 0;
404 if (ecmd->advertising & ADVERTISED_10000baseT_Full) 417 if (advertising & ADVERTISED_10000baseT_Full)
405 advertised |= IXGBE_LINK_SPEED_10GB_FULL; 418 advertised |= IXGBE_LINK_SPEED_10GB_FULL;
406 419
407 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 420 if (advertising & ADVERTISED_1000baseT_Full)
408 advertised |= IXGBE_LINK_SPEED_1GB_FULL; 421 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
409 422
410 if (ecmd->advertising & ADVERTISED_100baseT_Full) 423 if (advertising & ADVERTISED_100baseT_Full)
411 advertised |= IXGBE_LINK_SPEED_100_FULL; 424 advertised |= IXGBE_LINK_SPEED_100_FULL;
412 425
413 if (ecmd->advertising & ADVERTISED_10baseT_Full) 426 if (advertising & ADVERTISED_10baseT_Full)
414 advertised |= IXGBE_LINK_SPEED_10_FULL; 427 advertised |= IXGBE_LINK_SPEED_10_FULL;
415 428
416 if (old == advertised) 429 if (old == advertised)
@@ -428,10 +441,11 @@ static int ixgbe_set_settings(struct net_device *netdev,
428 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); 441 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
429 } else { 442 } else {
430 /* in this case we currently only support 10Gb/FULL */ 443 /* in this case we currently only support 10Gb/FULL */
431 u32 speed = ethtool_cmd_speed(ecmd); 444 u32 speed = cmd->base.speed;
432 if ((ecmd->autoneg == AUTONEG_ENABLE) || 445
433 (ecmd->advertising != ADVERTISED_10000baseT_Full) || 446 if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
434 (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) 447 (advertising != ADVERTISED_10000baseT_Full) ||
448 (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
435 return -EINVAL; 449 return -EINVAL;
436 } 450 }
437 451
@@ -3402,8 +3416,6 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
3402} 3416}
3403 3417
3404static const struct ethtool_ops ixgbe_ethtool_ops = { 3418static const struct ethtool_ops ixgbe_ethtool_ops = {
3405 .get_settings = ixgbe_get_settings,
3406 .set_settings = ixgbe_set_settings,
3407 .get_drvinfo = ixgbe_get_drvinfo, 3419 .get_drvinfo = ixgbe_get_drvinfo,
3408 .get_regs_len = ixgbe_get_regs_len, 3420 .get_regs_len = ixgbe_get_regs_len,
3409 .get_regs = ixgbe_get_regs, 3421 .get_regs = ixgbe_get_regs,
@@ -3442,6 +3454,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
3442 .get_ts_info = ixgbe_get_ts_info, 3454 .get_ts_info = ixgbe_get_ts_info,
3443 .get_module_info = ixgbe_get_module_info, 3455 .get_module_info = ixgbe_get_module_info,
3444 .get_module_eeprom = ixgbe_get_module_eeprom, 3456 .get_module_eeprom = ixgbe_get_module_eeprom,
3457 .get_link_ksettings = ixgbe_get_link_ksettings,
3458 .set_link_ksettings = ixgbe_set_link_ksettings,
3445}; 3459};
3446 3460
3447void ixgbe_set_ethtool_ops(struct net_device *netdev) 3461void ixgbe_set_ethtool_ops(struct net_device *netdev)
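The ixgbe conversion above replaces the legacy get_settings/set_settings pair with the link_ksettings API: speed masks are still built as legacy u32 values internally and bridged to the new arbitrary-width link-mode bitmaps with ethtool_convert_link_mode_to_legacy_u32() and ethtool_convert_legacy_u32_to_link_mode(). The sketch below re-implements that bridge in miniature to show what the conversion does; it is a simplified stand-in, not the kernel helpers.

/* Legacy u32 <-> link-mode bitmap bridge, simplified. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LINK_MODE_BITS 96                         /* assumed bitmap width */
#define LONGS(bits)    (((bits) + 63) / 64)

static void legacy_u32_to_link_mode(uint64_t *dst, uint32_t legacy)
{
	memset(dst, 0, LONGS(LINK_MODE_BITS) * sizeof(*dst));
	dst[0] = legacy;                          /* legacy modes are the low 32 bits */
}

static int link_mode_to_legacy_u32(uint32_t *legacy, const uint64_t *src)
{
	*legacy = (uint32_t)src[0];
	return (src[0] >> 32) == 0;               /* false if newer modes were lost */
}

int main(void)
{
	uint64_t modes[LONGS(LINK_MODE_BITS)];
	uint32_t advertising = (1u << 12) | (1u << 6); /* 10000baseT_Full | Autoneg */
	uint32_t back;

	legacy_u32_to_link_mode(modes, advertising);
	link_mode_to_legacy_u32(&back, modes);
	printf("round trip intact: %s\n", back == advertising ? "yes" : "no");
	return 0;
}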
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index a7a430a7be2c..852a2e7e25ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2122,7 +2122,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
2122 prefetch(va + L1_CACHE_BYTES); 2122 prefetch(va + L1_CACHE_BYTES);
2123#endif 2123#endif
2124 2124
2125 /* build an skb to around the page buffer */ 2125 /* build an skb around the page buffer */
2126 skb = build_skb(va - IXGBE_SKB_PAD, truesize); 2126 skb = build_skb(va - IXGBE_SKB_PAD, truesize);
2127 if (unlikely(!skb)) 2127 if (unlikely(!skb))
2128 return NULL; 2128 return NULL;
@@ -8948,7 +8948,9 @@ static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
8948 if (tc->type != TC_SETUP_MQPRIO) 8948 if (tc->type != TC_SETUP_MQPRIO)
8949 return -EINVAL; 8949 return -EINVAL;
8950 8950
8951 return ixgbe_setup_tc(dev, tc->tc); 8951 tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
8952
8953 return ixgbe_setup_tc(dev, tc->mqprio->num_tc);
8952} 8954}
8953 8955
8954#ifdef CONFIG_PCI_IOV 8956#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index d2555e8b947e..da6fb825afea 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -82,13 +82,13 @@ config MVNETA_BM
82 that all dependencies are met. 82 that all dependencies are met.
83 83
84config MVPP2 84config MVPP2
85 tristate "Marvell Armada 375 network interface support" 85 tristate "Marvell Armada 375/7K/8K network interface support"
86 depends on ARCH_MVEBU || COMPILE_TEST 86 depends on ARCH_MVEBU || COMPILE_TEST
87 depends on HAS_DMA 87 depends on HAS_DMA
88 select MVMDIO 88 select MVMDIO
89 ---help--- 89 ---help---
90 This driver supports the network interface units in the 90 This driver supports the network interface units in the
91 Marvell ARMADA 375 SoC. 91 Marvell ARMADA 375, 7K and 8K SoCs.
92 92
93config PXA168_ETH 93config PXA168_ETH
94 tristate "Marvell pxa168 ethernet support" 94 tristate "Marvell pxa168 ethernet support"
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 61dd4462411c..aebbc5399a06 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -431,6 +431,7 @@ struct mvneta_port {
431 /* Flags for special SoC configurations */ 431 /* Flags for special SoC configurations */
432 bool neta_armada3700; 432 bool neta_armada3700;
433 u16 rx_offset_correction; 433 u16 rx_offset_correction;
434 const struct mbus_dram_target_info *dram_target_info;
434}; 435};
435 436
436/* The mvneta_tx_desc and mvneta_rx_desc structures describe the 437/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -4118,7 +4119,6 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
4118/* Device initialization routine */ 4119/* Device initialization routine */
4119static int mvneta_probe(struct platform_device *pdev) 4120static int mvneta_probe(struct platform_device *pdev)
4120{ 4121{
4121 const struct mbus_dram_target_info *dram_target_info;
4122 struct resource *res; 4122 struct resource *res;
4123 struct device_node *dn = pdev->dev.of_node; 4123 struct device_node *dn = pdev->dev.of_node;
4124 struct device_node *phy_node; 4124 struct device_node *phy_node;
@@ -4267,13 +4267,13 @@ static int mvneta_probe(struct platform_device *pdev)
4267 4267
4268 pp->tx_csum_limit = tx_csum_limit; 4268 pp->tx_csum_limit = tx_csum_limit;
4269 4269
4270 dram_target_info = mv_mbus_dram_info(); 4270 pp->dram_target_info = mv_mbus_dram_info();
4271 /* Armada3700 requires setting default configuration of Mbus 4271 /* Armada3700 requires setting default configuration of Mbus
4272 * windows, however without using filled mbus_dram_target_info 4272 * windows, however without using filled mbus_dram_target_info
4273 * structure. 4273 * structure.
4274 */ 4274 */
4275 if (dram_target_info || pp->neta_armada3700) 4275 if (pp->dram_target_info || pp->neta_armada3700)
4276 mvneta_conf_mbus_windows(pp, dram_target_info); 4276 mvneta_conf_mbus_windows(pp, pp->dram_target_info);
4277 4277
4278 pp->tx_ring_size = MVNETA_MAX_TXD; 4278 pp->tx_ring_size = MVNETA_MAX_TXD;
4279 pp->rx_ring_size = MVNETA_MAX_RXD; 4279 pp->rx_ring_size = MVNETA_MAX_RXD;
@@ -4405,6 +4405,58 @@ static int mvneta_remove(struct platform_device *pdev)
4405 return 0; 4405 return 0;
4406} 4406}
4407 4407
4408#ifdef CONFIG_PM_SLEEP
4409static int mvneta_suspend(struct device *device)
4410{
4411 struct net_device *dev = dev_get_drvdata(device);
4412 struct mvneta_port *pp = netdev_priv(dev);
4413
4414 if (netif_running(dev))
4415 mvneta_stop(dev);
4416 netif_device_detach(dev);
4417 clk_disable_unprepare(pp->clk_bus);
4418 clk_disable_unprepare(pp->clk);
4419 return 0;
4420}
4421
4422static int mvneta_resume(struct device *device)
4423{
4424 struct platform_device *pdev = to_platform_device(device);
4425 struct net_device *dev = dev_get_drvdata(device);
4426 struct mvneta_port *pp = netdev_priv(dev);
4427 int err;
4428
4429 clk_prepare_enable(pp->clk);
4430 if (!IS_ERR(pp->clk_bus))
4431 clk_prepare_enable(pp->clk_bus);
4432 if (pp->dram_target_info || pp->neta_armada3700)
4433 mvneta_conf_mbus_windows(pp, pp->dram_target_info);
4434 if (pp->bm_priv) {
4435 err = mvneta_bm_port_init(pdev, pp);
4436 if (err < 0) {
4437 dev_info(&pdev->dev, "use SW buffer management\n");
4438 pp->bm_priv = NULL;
4439 }
4440 }
4441 mvneta_defaults_set(pp);
4442 err = mvneta_port_power_up(pp, pp->phy_interface);
4443 if (err < 0) {
4444 dev_err(device, "can't power up port\n");
4445 return err;
4446 }
4447
4448 if (pp->use_inband_status)
4449 mvneta_fixed_link_update(pp, dev->phydev);
4450
4451 netif_device_attach(dev);
4452 if (netif_running(dev))
4453 mvneta_open(dev);
4454 return 0;
4455}
4456#endif
4457
4458static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);
4459
4408static const struct of_device_id mvneta_match[] = { 4460static const struct of_device_id mvneta_match[] = {
4409 { .compatible = "marvell,armada-370-neta" }, 4461 { .compatible = "marvell,armada-370-neta" },
4410 { .compatible = "marvell,armada-xp-neta" }, 4462 { .compatible = "marvell,armada-xp-neta" },
@@ -4419,6 +4471,7 @@ static struct platform_driver mvneta_driver = {
4419 .driver = { 4471 .driver = {
4420 .name = MVNETA_DRIVER_NAME, 4472 .name = MVNETA_DRIVER_NAME,
4421 .of_match_table = mvneta_match, 4473 .of_match_table = mvneta_match,
4474 .pm = &mvneta_pm_ops,
4422 }, 4475 },
4423}; 4476};
4424 4477
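In the mvneta changes above, the MBus DRAM target info is looked up once in probe and cached in struct mvneta_port, so the new resume handler can reprogram the address windows with the same mvneta_conf_mbus_windows() call that probe uses. The sketch below shows just that cache-and-replay structure with stand-in types; the real suspend/resume paths also handle clocks, the BM pool and port power-up, as the hunk shows.

/* Cache probe-time window configuration so resume can replay it. */
#include <stdio.h>

struct dram_target_info { int num_cs; };          /* stand-in type */

struct port_priv {
	const struct dram_target_info *dram_target_info;  /* cached at probe */
	int is_armada3700;
};

static void conf_mbus_windows(const struct dram_target_info *info)
{
	/* the real driver programs window base/size/remap registers here */
	printf("programming %d DRAM window(s)\n", info ? info->num_cs : 0);
}

static void port_probe(struct port_priv *pp, const struct dram_target_info *fw)
{
	pp->dram_target_info = fw;                /* keep the pointer around */
	if (pp->dram_target_info || pp->is_armada3700)
		conf_mbus_windows(pp->dram_target_info);
}

static void port_resume(struct port_priv *pp)
{
	/* replay exactly what probe programmed, from the cached pointer */
	if (pp->dram_target_info || pp->is_armada3700)
		conf_mbus_windows(pp->dram_target_info);
}

int main(void)
{
	struct dram_target_info info = { .num_cs = 4 };
	struct port_priv pp = { 0 };

	port_probe(&pp, &info);
	port_resume(&pp);
	return 0;
}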
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index d00421b9ffea..af5bfa13d976 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -25,6 +25,7 @@
25#include <linux/of_mdio.h> 25#include <linux/of_mdio.h>
26#include <linux/of_net.h> 26#include <linux/of_net.h>
27#include <linux/of_address.h> 27#include <linux/of_address.h>
28#include <linux/of_device.h>
28#include <linux/phy.h> 29#include <linux/phy.h>
29#include <linux/clk.h> 30#include <linux/clk.h>
30#include <linux/hrtimer.h> 31#include <linux/hrtimer.h>
@@ -49,9 +50,11 @@
49#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff 50#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
50#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9) 51#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
51#define MVPP2_RXQ_POOL_SHORT_OFFS 20 52#define MVPP2_RXQ_POOL_SHORT_OFFS 20
52#define MVPP2_RXQ_POOL_SHORT_MASK 0x700000 53#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
54#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
53#define MVPP2_RXQ_POOL_LONG_OFFS 24 55#define MVPP2_RXQ_POOL_LONG_OFFS 24
54#define MVPP2_RXQ_POOL_LONG_MASK 0x7000000 56#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
57#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
55#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28 58#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
56#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000 59#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
57#define MVPP2_RXQ_DISABLE_MASK BIT(31) 60#define MVPP2_RXQ_DISABLE_MASK BIT(31)
@@ -99,6 +102,7 @@
99/* Descriptor Manager Top Registers */ 102/* Descriptor Manager Top Registers */
100#define MVPP2_RXQ_NUM_REG 0x2040 103#define MVPP2_RXQ_NUM_REG 0x2040
101#define MVPP2_RXQ_DESC_ADDR_REG 0x2044 104#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
105#define MVPP22_DESC_ADDR_OFFS 8
102#define MVPP2_RXQ_DESC_SIZE_REG 0x2048 106#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
103#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0 107#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
104#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq)) 108#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
@@ -117,9 +121,6 @@
117#define MVPP2_TXQ_DESC_SIZE_REG 0x2088 121#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
118#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0 122#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
119#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090 123#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
120#define MVPP2_TXQ_THRESH_REG 0x2094
121#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
122#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000
123#define MVPP2_TXQ_INDEX_REG 0x2098 124#define MVPP2_TXQ_INDEX_REG 0x2098
124#define MVPP2_TXQ_PREF_BUF_REG 0x209c 125#define MVPP2_TXQ_PREF_BUF_REG 0x209c
125#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff) 126#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
@@ -140,6 +141,7 @@
140#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8 141#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
141#define MVPP2_TXQ_RSVD_CLR_OFFSET 16 142#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
142#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu)) 143#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
144#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
143#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu)) 145#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
144#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0 146#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
145#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu)) 147#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
@@ -152,10 +154,52 @@
152#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2)) 154#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
153#define MVPP2_BASE_ADDR_ENABLE 0x4060 155#define MVPP2_BASE_ADDR_ENABLE 0x4060
154 156
157/* AXI Bridge Registers */
158#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
159#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
160#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
161#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
162#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
163#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
164#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
165#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
166#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
167#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
168#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
169#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164
170
171/* Values for AXI Bridge registers */
172#define MVPP22_AXI_ATTR_CACHE_OFFS 0
173#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12
174
175#define MVPP22_AXI_CODE_CACHE_OFFS 0
176#define MVPP22_AXI_CODE_DOMAIN_OFFS 4
177
178#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
179#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
180#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb
181
182#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
183#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3
184
155/* Interrupt Cause and Mask registers */ 185/* Interrupt Cause and Mask registers */
156#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) 186#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
157#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0 187#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
158#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq)) 188#define MVPP21_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
189
190#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400
191#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
192#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
193#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
194
195#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
196#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
197
198#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404
199#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f
200#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00
201#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8
202
159#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port)) 203#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
160#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) 204#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
161#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000) 205#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
@@ -210,14 +254,19 @@
210#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4)) 254#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
211#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0) 255#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
212#define MVPP2_BM_VIRT_ALLOC_REG 0x6440 256#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
257#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444
258#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff
259#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00
260#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8
213#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4)) 261#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
214#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0) 262#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
215#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1) 263#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
216#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2) 264#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
217#define MVPP2_BM_VIRT_RLS_REG 0x64c0 265#define MVPP2_BM_VIRT_RLS_REG 0x64c0
218#define MVPP2_BM_MC_RLS_REG 0x64c4 266#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
219#define MVPP2_BM_MC_ID_MASK 0xfff 267#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
220#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12) 268#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
269#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
221 270
222/* TX Scheduler registers */ 271/* TX Scheduler registers */
223#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000 272#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
@@ -287,6 +336,24 @@
287#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 336#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
288#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ 337#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
289 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK) 338 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
339#define MVPP22_GMAC_CTRL_4_REG 0x90
340#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0)
341#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
342#define MVPP22_CTRL4_SYNC_BYPASS BIT(6)
343#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
344
345/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
346 * relative to port->base.
347 */
348#define MVPP22_XLG_CTRL3_REG 0x11c
349#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
350#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
351
352/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
353#define MVPP22_SMI_MISC_CFG_REG 0x1204
354#define MVPP22_SMI_POLLING_EN BIT(10)
355
356#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)
290 357
291#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff 358#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
292 359
@@ -335,15 +402,9 @@
335/* Maximum number of TXQs used by single port */ 402/* Maximum number of TXQs used by single port */
336#define MVPP2_MAX_TXQ 8 403#define MVPP2_MAX_TXQ 8
337 404
338/* Maximum number of RXQs used by single port */
339#define MVPP2_MAX_RXQ 8
340
341/* Default number of RXQs in use */ 405/* Default number of RXQs in use */
342#define MVPP2_DEFAULT_RXQ 4 406#define MVPP2_DEFAULT_RXQ 4
343 407
344/* Total number of RXQs available to all ports */
345#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
346
347/* Max number of Rx descriptors */ 408/* Max number of Rx descriptors */
348#define MVPP2_MAX_RXD 128 409#define MVPP2_MAX_RXD 128
349 410
@@ -615,6 +676,11 @@ enum mvpp2_prs_l3_cast {
615 */ 676 */
616#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512) 677#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
617 678
679#define MVPP21_ADDR_SPACE_SZ 0
680#define MVPP22_ADDR_SPACE_SZ SZ_64K
681
682#define MVPP2_MAX_CPUS 4
683
618enum mvpp2_bm_type { 684enum mvpp2_bm_type {
619 MVPP2_BM_FREE, 685 MVPP2_BM_FREE,
620 MVPP2_BM_SWF_LONG, 686 MVPP2_BM_SWF_LONG,
@@ -626,12 +692,19 @@ enum mvpp2_bm_type {
626/* Shared Packet Processor resources */ 692/* Shared Packet Processor resources */
627struct mvpp2 { 693struct mvpp2 {
628 /* Shared registers' base addresses */ 694 /* Shared registers' base addresses */
629 void __iomem *base;
630 void __iomem *lms_base; 695 void __iomem *lms_base;
696 void __iomem *iface_base;
697
698 /* On PPv2.2, each CPU can access the base register through a
699 * separate address space, each 64 KB apart from each
700 * other.
701 */
702 void __iomem *cpu_base[MVPP2_MAX_CPUS];
631 703
632 /* Common clocks */ 704 /* Common clocks */
633 struct clk *pp_clk; 705 struct clk *pp_clk;
634 struct clk *gop_clk; 706 struct clk *gop_clk;
707 struct clk *mg_clk;
635 708
636 /* List of pointers to port structures */ 709 /* List of pointers to port structures */
637 struct mvpp2_port **port_list; 710 struct mvpp2_port **port_list;
@@ -649,6 +722,12 @@ struct mvpp2 {
649 722
650 /* Tclk value */ 723 /* Tclk value */
651 u32 tclk; 724 u32 tclk;
725
726 /* HW version */
727 enum { MVPP21, MVPP22 } hw_version;
728
729 /* Maximum number of RXQs per port */
730 unsigned int max_port_rxqs;
652}; 731};
653 732
654struct mvpp2_pcpu_stats { 733struct mvpp2_pcpu_stats {
@@ -670,6 +749,11 @@ struct mvpp2_port_pcpu {
670struct mvpp2_port { 749struct mvpp2_port {
671 u8 id; 750 u8 id;
672 751
752 /* Index of the port from the "group of ports" complex point
753 * of view
754 */
755 int gop_id;
756
673 int irq; 757 int irq;
674 758
675 struct mvpp2 *priv; 759 struct mvpp2 *priv;
@@ -741,22 +825,24 @@ struct mvpp2_port {
741#define MVPP2_RXD_L3_IP6 BIT(30) 825#define MVPP2_RXD_L3_IP6 BIT(30)
742#define MVPP2_RXD_BUF_HDR BIT(31) 826#define MVPP2_RXD_BUF_HDR BIT(31)
743 827
744struct mvpp2_tx_desc { 828/* HW TX descriptor for PPv2.1 */
829struct mvpp21_tx_desc {
745 u32 command; /* Options used by HW for packet transmitting.*/ 830 u32 command; /* Options used by HW for packet transmitting.*/
746 u8 packet_offset; /* the offset from the buffer beginning */ 831 u8 packet_offset; /* the offset from the buffer beginning */
747 u8 phys_txq; /* destination queue ID */ 832 u8 phys_txq; /* destination queue ID */
748 u16 data_size; /* data size of transmitted packet in bytes */ 833 u16 data_size; /* data size of transmitted packet in bytes */
749 u32 buf_phys_addr; /* physical addr of transmitted buffer */ 834 u32 buf_dma_addr; /* physical addr of transmitted buffer */
750 u32 buf_cookie; /* cookie for access to TX buffer in tx path */ 835 u32 buf_cookie; /* cookie for access to TX buffer in tx path */
751 u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ 836 u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
752 u32 reserved2; /* reserved (for future use) */ 837 u32 reserved2; /* reserved (for future use) */
753}; 838};
754 839
755struct mvpp2_rx_desc { 840/* HW RX descriptor for PPv2.1 */
841struct mvpp21_rx_desc {
756 u32 status; /* info about received packet */ 842 u32 status; /* info about received packet */
757 u16 reserved1; /* parser_info (for future use, PnC) */ 843 u16 reserved1; /* parser_info (for future use, PnC) */
758 u16 data_size; /* size of received packet in bytes */ 844 u16 data_size; /* size of received packet in bytes */
759 u32 buf_phys_addr; /* physical address of the buffer */ 845 u32 buf_dma_addr; /* physical address of the buffer */
760 u32 buf_cookie; /* cookie for access to RX buffer in rx path */ 846 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
761 u16 reserved2; /* gem_port_id (for future use, PON) */ 847 u16 reserved2; /* gem_port_id (for future use, PON) */
762 u16 reserved3; /* csum_l4 (for future use, PnC) */ 848 u16 reserved3; /* csum_l4 (for future use, PnC) */
@@ -767,12 +853,51 @@ struct mvpp2_rx_desc {
767 u32 reserved8; 853 u32 reserved8;
768}; 854};
769 855
856/* HW TX descriptor for PPv2.2 */
857struct mvpp22_tx_desc {
858 u32 command;
859 u8 packet_offset;
860 u8 phys_txq;
861 u16 data_size;
862 u64 reserved1;
863 u64 buf_dma_addr_ptp;
864 u64 buf_cookie_misc;
865};
866
867/* HW RX descriptor for PPv2.2 */
868struct mvpp22_rx_desc {
869 u32 status;
870 u16 reserved1;
871 u16 data_size;
872 u32 reserved2;
873 u32 reserved3;
874 u64 buf_dma_addr_key_hash;
875 u64 buf_cookie_misc;
876};
877
878/* Opaque type used by the driver to manipulate the HW TX and RX
879 * descriptors
880 */
881struct mvpp2_tx_desc {
882 union {
883 struct mvpp21_tx_desc pp21;
884 struct mvpp22_tx_desc pp22;
885 };
886};
887
888struct mvpp2_rx_desc {
889 union {
890 struct mvpp21_rx_desc pp21;
891 struct mvpp22_rx_desc pp22;
892 };
893};
894
770struct mvpp2_txq_pcpu_buf { 895struct mvpp2_txq_pcpu_buf {
771 /* Transmitted SKB */ 896 /* Transmitted SKB */
772 struct sk_buff *skb; 897 struct sk_buff *skb;
773 898
774 /* Physical address of transmitted buffer */ 899 /* Physical address of transmitted buffer */
775 dma_addr_t phys; 900 dma_addr_t dma;
776 901
777 /* Size transmitted */ 902 /* Size transmitted */
778 size_t size; 903 size_t size;
@@ -825,7 +950,7 @@ struct mvpp2_tx_queue {
825 struct mvpp2_tx_desc *descs; 950 struct mvpp2_tx_desc *descs;
826 951
827 /* DMA address of the Tx DMA descriptors array */ 952 /* DMA address of the Tx DMA descriptors array */
828 dma_addr_t descs_phys; 953 dma_addr_t descs_dma;
829 954
830 /* Index of the last Tx DMA descriptor */ 955 /* Index of the last Tx DMA descriptor */
831 int last_desc; 956 int last_desc;
@@ -848,7 +973,7 @@ struct mvpp2_rx_queue {
848 struct mvpp2_rx_desc *descs; 973 struct mvpp2_rx_desc *descs;
849 974
850 /* DMA address of the RX DMA descriptors array */ 975 /* DMA address of the RX DMA descriptors array */
851 dma_addr_t descs_phys; 976 dma_addr_t descs_dma;
852 977
853 /* Index of the last RX DMA descriptor */ 978 /* Index of the last RX DMA descriptor */
854 int last_desc; 979 int last_desc;
@@ -912,6 +1037,8 @@ struct mvpp2_bm_pool {
912 1037
913 /* Buffer Pointers Pool External (BPPE) size */ 1038 /* Buffer Pointers Pool External (BPPE) size */
914 int size; 1039 int size;
1040 /* BPPE size in bytes */
1041 int size_bytes;
915 /* Number of buffers for this pool */ 1042 /* Number of buffers for this pool */
916 int buf_num; 1043 int buf_num;
917 /* Pool buffer size */ 1044 /* Pool buffer size */
@@ -922,29 +1049,13 @@ struct mvpp2_bm_pool {
922 1049
923 /* BPPE virtual base address */ 1050 /* BPPE virtual base address */
924 u32 *virt_addr; 1051 u32 *virt_addr;
925 /* BPPE physical base address */ 1052 /* BPPE DMA base address */
926 dma_addr_t phys_addr; 1053 dma_addr_t dma_addr;
927 1054
928 /* Ports using BM pool */ 1055 /* Ports using BM pool */
929 u32 port_map; 1056 u32 port_map;
930}; 1057};
931 1058
932struct mvpp2_buff_hdr {
933 u32 next_buff_phys_addr;
934 u32 next_buff_virt_addr;
935 u16 byte_count;
936 u16 info;
937 u8 reserved1; /* bm_qset (for future use, BM) */
938};
939
940/* Buffer header info bits */
941#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
942#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
943#define MVPP2_B_HDR_INFO_LAST_OFFS 12
944#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
945#define MVPP2_B_HDR_INFO_IS_LAST(info) \
946 ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
947
948/* Static declarations */ 1059/* Static declarations */
949 1060
950/* Number of RXQs used by single port */ 1061/* Number of RXQs used by single port */
@@ -959,12 +1070,177 @@ static int txq_number = MVPP2_MAX_TXQ;
959 1070
960static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data) 1071static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
961{ 1072{
962 writel(data, priv->base + offset); 1073 writel(data, priv->cpu_base[0] + offset);
963} 1074}
964 1075
965static u32 mvpp2_read(struct mvpp2 *priv, u32 offset) 1076static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
966{ 1077{
967 return readl(priv->base + offset); 1078 return readl(priv->cpu_base[0] + offset);
1079}
1080
1081/* These accessors should be used to access:
1082 *
1083 * - per-CPU registers, where each CPU has its own copy of the
1084 * register.
1085 *
1086 * MVPP2_BM_VIRT_ALLOC_REG
1087 * MVPP2_BM_ADDR_HIGH_ALLOC
1088 * MVPP22_BM_ADDR_HIGH_RLS_REG
1089 * MVPP2_BM_VIRT_RLS_REG
1090 * MVPP2_ISR_RX_TX_CAUSE_REG
1091 * MVPP2_ISR_RX_TX_MASK_REG
1092 * MVPP2_TXQ_NUM_REG
1093 * MVPP2_AGGR_TXQ_UPDATE_REG
1094 * MVPP2_TXQ_RSVD_REQ_REG
1095 * MVPP2_TXQ_RSVD_RSLT_REG
1096 * MVPP2_TXQ_SENT_REG
1097 * MVPP2_RXQ_NUM_REG
1098 *
1099 * - global registers that must be accessed through a specific CPU
1100 * window, because they are related to an access to a per-CPU
1101 * register
1102 *
1103 * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
1104 * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
1105 * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
1106 * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
1107 * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
1108 * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
1109 * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
1110 * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
1111 * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
1112 * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
1113 * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
1114 * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
1115 * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
1116 */
1117static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
1118 u32 offset, u32 data)
1119{
1120 writel(data, priv->cpu_base[cpu] + offset);
1121}
1122
1123static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
1124 u32 offset)
1125{
1126 return readl(priv->cpu_base[cpu] + offset);
1127}
1128
1129static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
1130 struct mvpp2_tx_desc *tx_desc)
1131{
1132 if (port->priv->hw_version == MVPP21)
1133 return tx_desc->pp21.buf_dma_addr;
1134 else
1135 return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
1136}
1137
1138static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
1139 struct mvpp2_tx_desc *tx_desc,
1140 dma_addr_t dma_addr)
1141{
1142 if (port->priv->hw_version == MVPP21) {
1143 tx_desc->pp21.buf_dma_addr = dma_addr;
1144 } else {
1145 u64 val = (u64)dma_addr;
1146
1147 tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
1148 tx_desc->pp22.buf_dma_addr_ptp |= val;
1149 }
1150}
1151
1152static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
1153 struct mvpp2_tx_desc *tx_desc)
1154{
1155 if (port->priv->hw_version == MVPP21)
1156 return tx_desc->pp21.data_size;
1157 else
1158 return tx_desc->pp22.data_size;
1159}
1160
1161static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
1162 struct mvpp2_tx_desc *tx_desc,
1163 size_t size)
1164{
1165 if (port->priv->hw_version == MVPP21)
1166 tx_desc->pp21.data_size = size;
1167 else
1168 tx_desc->pp22.data_size = size;
1169}
1170
1171static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
1172 struct mvpp2_tx_desc *tx_desc,
1173 unsigned int txq)
1174{
1175 if (port->priv->hw_version == MVPP21)
1176 tx_desc->pp21.phys_txq = txq;
1177 else
1178 tx_desc->pp22.phys_txq = txq;
1179}
1180
1181static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
1182 struct mvpp2_tx_desc *tx_desc,
1183 unsigned int command)
1184{
1185 if (port->priv->hw_version == MVPP21)
1186 tx_desc->pp21.command = command;
1187 else
1188 tx_desc->pp22.command = command;
1189}
1190
1191static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
1192 struct mvpp2_tx_desc *tx_desc,
1193 unsigned int offset)
1194{
1195 if (port->priv->hw_version == MVPP21)
1196 tx_desc->pp21.packet_offset = offset;
1197 else
1198 tx_desc->pp22.packet_offset = offset;
1199}
1200
1201static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
1202 struct mvpp2_tx_desc *tx_desc)
1203{
1204 if (port->priv->hw_version == MVPP21)
1205 return tx_desc->pp21.packet_offset;
1206 else
1207 return tx_desc->pp22.packet_offset;
1208}
1209
1210static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
1211 struct mvpp2_rx_desc *rx_desc)
1212{
1213 if (port->priv->hw_version == MVPP21)
1214 return rx_desc->pp21.buf_dma_addr;
1215 else
1216 return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
1217}
1218
1219static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
1220 struct mvpp2_rx_desc *rx_desc)
1221{
1222 if (port->priv->hw_version == MVPP21)
1223 return rx_desc->pp21.buf_cookie;
1224 else
1225 return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
1226}
1227
1228static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
1229 struct mvpp2_rx_desc *rx_desc)
1230{
1231 if (port->priv->hw_version == MVPP21)
1232 return rx_desc->pp21.data_size;
1233 else
1234 return rx_desc->pp22.data_size;
1235}
1236
1237static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
1238 struct mvpp2_rx_desc *rx_desc)
1239{
1240 if (port->priv->hw_version == MVPP21)
1241 return rx_desc->pp21.status;
1242 else
1243 return rx_desc->pp22.status;
968} 1244}
969 1245
970static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) 1246static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
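The large hunk above prepares mvpp2 for PPv2.2 by hiding the two descriptor layouts (32-bit DMA addresses on PPv2.1, 40-bit addresses packed into 64-bit words on PPv2.2) behind an anonymous union and a set of hw_version-dispatching accessors, and by routing register access through per-CPU windows. The standalone sketch below shows the union-plus-accessor pattern for one field; the layouts are simplified stand-ins and only the 41-bit address masking mirrors the GENMASK_ULL(40, 0) use above.

/* One opaque descriptor type, two hardware layouts, version-dispatching accessors. */
#include <stdint.h>
#include <stdio.h>

enum hw_version { HW_V21, HW_V22 };

struct v21_rx_desc {                /* PPv2.1-style: 32-bit buffer address */
	uint32_t status;
	uint16_t reserved;
	uint16_t data_size;
	uint32_t buf_dma_addr;
};

struct v22_rx_desc {                /* PPv2.2-style: address in the low 41 bits */
	uint32_t status;
	uint16_t reserved;
	uint16_t data_size;
	uint64_t buf_dma_addr_key_hash;
};

struct rx_desc {
	union {
		struct v21_rx_desc pp21;
		struct v22_rx_desc pp22;
	};
};

static uint64_t rxdesc_dma_addr_get(enum hw_version ver, const struct rx_desc *d)
{
	if (ver == HW_V21)
		return d->pp21.buf_dma_addr;
	return d->pp22.buf_dma_addr_key_hash & ((1ULL << 41) - 1); /* bits 40:0 */
}

int main(void)
{
	struct rx_desc d = { .pp22 = { .buf_dma_addr_key_hash =
				       (0xabcULL << 41) | 0x1000 } };

	/* the key/hash bits above bit 40 are masked away */
	printf("dma = 0x%llx\n",
	       (unsigned long long)rxdesc_dma_addr_get(HW_V22, &d));
	return 0;
}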
@@ -974,15 +1250,17 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
974 txq_pcpu->txq_get_index = 0; 1250 txq_pcpu->txq_get_index = 0;
975} 1251}
976 1252
977static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu, 1253static void mvpp2_txq_inc_put(struct mvpp2_port *port,
1254 struct mvpp2_txq_pcpu *txq_pcpu,
978 struct sk_buff *skb, 1255 struct sk_buff *skb,
979 struct mvpp2_tx_desc *tx_desc) 1256 struct mvpp2_tx_desc *tx_desc)
980{ 1257{
981 struct mvpp2_txq_pcpu_buf *tx_buf = 1258 struct mvpp2_txq_pcpu_buf *tx_buf =
982 txq_pcpu->buffs + txq_pcpu->txq_put_index; 1259 txq_pcpu->buffs + txq_pcpu->txq_put_index;
983 tx_buf->skb = skb; 1260 tx_buf->skb = skb;
984 tx_buf->size = tx_desc->data_size; 1261 tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
985 tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset; 1262 tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
1263 mvpp2_txdesc_offset_get(port, tx_desc);
986 txq_pcpu->txq_put_index++; 1264 txq_pcpu->txq_put_index++;
987 if (txq_pcpu->txq_put_index == txq_pcpu->size) 1265 if (txq_pcpu->txq_put_index == txq_pcpu->size)
988 txq_pcpu->txq_put_index = 0; 1266 txq_pcpu->txq_put_index = 0;
@@ -3378,27 +3656,39 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
3378 struct mvpp2 *priv, 3656 struct mvpp2 *priv,
3379 struct mvpp2_bm_pool *bm_pool, int size) 3657 struct mvpp2_bm_pool *bm_pool, int size)
3380{ 3658{
3381 int size_bytes;
3382 u32 val; 3659 u32 val;
3383 3660
3384 size_bytes = sizeof(u32) * size; 3661 /* Number of buffer pointers must be a multiple of 16, as per
3385 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes, 3662 * hardware constraints
3386 &bm_pool->phys_addr, 3663 */
3664 if (!IS_ALIGNED(size, 16))
3665 return -EINVAL;
3666
3667 /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
3668 * bytes per buffer pointer
3669 */
3670 if (priv->hw_version == MVPP21)
3671 bm_pool->size_bytes = 2 * sizeof(u32) * size;
3672 else
3673 bm_pool->size_bytes = 2 * sizeof(u64) * size;
3674
3675 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
3676 &bm_pool->dma_addr,
3387 GFP_KERNEL); 3677 GFP_KERNEL);
3388 if (!bm_pool->virt_addr) 3678 if (!bm_pool->virt_addr)
3389 return -ENOMEM; 3679 return -ENOMEM;
3390 3680
3391 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr, 3681 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
3392 MVPP2_BM_POOL_PTR_ALIGN)) { 3682 MVPP2_BM_POOL_PTR_ALIGN)) {
3393 dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr, 3683 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
3394 bm_pool->phys_addr); 3684 bm_pool->virt_addr, bm_pool->dma_addr);
3395 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", 3685 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3396 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN); 3686 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3397 return -ENOMEM; 3687 return -ENOMEM;
3398 } 3688 }
3399 3689
3400 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id), 3690 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
3401 bm_pool->phys_addr); 3691 lower_32_bits(bm_pool->dma_addr));
3402 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size); 3692 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3403 3693
3404 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); 3694 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
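Editor's sketch: pool sizing follows directly from the comments in this hunk — each buffer pointer occupies two 32-bit words on PPv2.1 and two 64-bit words on PPv2.2, and the number of pointers must be a multiple of 16. A small worked example (numbers in the comments are illustrative):

#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative only: bytes of coherent memory needed for nbufs pointers. */
static ssize_t hypo_bm_pool_bytes(bool is_pp21, int nbufs)
{
	/* hardware requires the pointer count to be a multiple of 16 */
	if (!IS_ALIGNED(nbufs, 16))
		return -EINVAL;

	/* e.g. nbufs = 1024 -> 8 KiB on PPv2.1, 16 KiB on PPv2.2 */
	return is_pp21 ? 2 * sizeof(u32) * nbufs
		       : 2 * sizeof(u64) * nbufs;
}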
@@ -3426,6 +3716,34 @@ static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3426 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val); 3716 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
3427} 3717}
3428 3718
3719static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
3720 struct mvpp2_bm_pool *bm_pool,
3721 dma_addr_t *dma_addr,
3722 phys_addr_t *phys_addr)
3723{
3724 int cpu = smp_processor_id();
3725
3726 *dma_addr = mvpp2_percpu_read(priv, cpu,
3727 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3728 *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
3729
3730 if (priv->hw_version == MVPP22) {
3731 u32 val;
3732 u32 dma_addr_highbits, phys_addr_highbits;
3733
3734 val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
3735 dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
3736 phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
3737 MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
3738
3739 if (sizeof(dma_addr_t) == 8)
3740 *dma_addr |= (u64)dma_addr_highbits << 32;
3741
3742 if (sizeof(phys_addr_t) == 8)
3743 *phys_addr |= (u64)phys_addr_highbits << 32;
3744 }
3745}
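Editor's sketch: the per-CPU allocation registers return only 32 bits; on PPv2.2 the remaining DMA and physical address bits are read from the separate address-high register and spliced back in, guarded by the pointer widths, as mvpp2_bm_bufs_get_addrs() does above. Roughly, under the same assumptions:

#include <linux/types.h>

/* Illustrative only: combine the 32-bit low word with high bits read
 * from a separate register; only meaningful when dma_addr_t is 64-bit.
 */
static dma_addr_t hypo_splice_high_bits(u32 low, u32 high)
{
	dma_addr_t addr = low;

	if (sizeof(dma_addr_t) == 8)
		addr |= (u64)high << 32;

	return addr;
}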
3746
3429/* Free all buffers from the pool */ 3747/* Free all buffers from the pool */
3430static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, 3748static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
3431 struct mvpp2_bm_pool *bm_pool) 3749 struct mvpp2_bm_pool *bm_pool)
@@ -3433,21 +3751,21 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
3433 int i; 3751 int i;
3434 3752
3435 for (i = 0; i < bm_pool->buf_num; i++) { 3753 for (i = 0; i < bm_pool->buf_num; i++) {
3436 dma_addr_t buf_phys_addr; 3754 dma_addr_t buf_dma_addr;
3437 unsigned long vaddr; 3755 phys_addr_t buf_phys_addr;
3756 void *data;
3438 3757
3439 /* Get buffer virtual address (indirect access) */ 3758 mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
3440 buf_phys_addr = mvpp2_read(priv, 3759 &buf_dma_addr, &buf_phys_addr);
3441 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3442 vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
3443 3760
3444 dma_unmap_single(dev, buf_phys_addr, 3761 dma_unmap_single(dev, buf_dma_addr,
3445 bm_pool->buf_size, DMA_FROM_DEVICE); 3762 bm_pool->buf_size, DMA_FROM_DEVICE);
3446 3763
3447 if (!vaddr) 3764 data = (void *)phys_to_virt(buf_phys_addr);
3765 if (!data)
3448 break; 3766 break;
3449 3767
3450 mvpp2_frag_free(bm_pool, (void *)vaddr); 3768 mvpp2_frag_free(bm_pool, data);
3451 } 3769 }
3452 3770
3453 /* Update BM driver with number of buffers removed from pool */ 3771 /* Update BM driver with number of buffers removed from pool */
@@ -3471,9 +3789,9 @@ static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3471 val |= MVPP2_BM_STOP_MASK; 3789 val |= MVPP2_BM_STOP_MASK;
3472 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); 3790 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3473 3791
3474 dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size, 3792 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
3475 bm_pool->virt_addr, 3793 bm_pool->virt_addr,
3476 bm_pool->phys_addr); 3794 bm_pool->dma_addr);
3477 return 0; 3795 return 0;
3478} 3796}
3479 3797
@@ -3529,17 +3847,20 @@ static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
3529static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, 3847static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3530 int lrxq, int long_pool) 3848 int lrxq, int long_pool)
3531{ 3849{
3532 u32 val; 3850 u32 val, mask;
3533 int prxq; 3851 int prxq;
3534 3852
3535 /* Get queue physical ID */ 3853 /* Get queue physical ID */
3536 prxq = port->rxqs[lrxq]->id; 3854 prxq = port->rxqs[lrxq]->id;
3537 3855
3538 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 3856 if (port->priv->hw_version == MVPP21)
3539 val &= ~MVPP2_RXQ_POOL_LONG_MASK; 3857 mask = MVPP21_RXQ_POOL_LONG_MASK;
3540 val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & 3858 else
3541 MVPP2_RXQ_POOL_LONG_MASK); 3859 mask = MVPP22_RXQ_POOL_LONG_MASK;
3542 3860
3861 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3862 val &= ~mask;
3863 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
3543 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 3864 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3544} 3865}
3545 3866
@@ -3547,40 +3868,45 @@ static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3547static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port, 3868static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
3548 int lrxq, int short_pool) 3869 int lrxq, int short_pool)
3549{ 3870{
3550 u32 val; 3871 u32 val, mask;
3551 int prxq; 3872 int prxq;
3552 3873
3553 /* Get queue physical ID */ 3874 /* Get queue physical ID */
3554 prxq = port->rxqs[lrxq]->id; 3875 prxq = port->rxqs[lrxq]->id;
3555 3876
3556 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 3877 if (port->priv->hw_version == MVPP21)
3557 val &= ~MVPP2_RXQ_POOL_SHORT_MASK; 3878 mask = MVPP21_RXQ_POOL_SHORT_MASK;
3558 val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & 3879 else
3559 MVPP2_RXQ_POOL_SHORT_MASK); 3880 mask = MVPP22_RXQ_POOL_SHORT_MASK;
3560 3881
3882 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3883 val &= ~mask;
3884 val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
3561 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 3885 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3562} 3886}
3563 3887
3564static void *mvpp2_buf_alloc(struct mvpp2_port *port, 3888static void *mvpp2_buf_alloc(struct mvpp2_port *port,
3565 struct mvpp2_bm_pool *bm_pool, 3889 struct mvpp2_bm_pool *bm_pool,
3566 dma_addr_t *buf_phys_addr, 3890 dma_addr_t *buf_dma_addr,
3891 phys_addr_t *buf_phys_addr,
3567 gfp_t gfp_mask) 3892 gfp_t gfp_mask)
3568{ 3893{
3569 dma_addr_t phys_addr; 3894 dma_addr_t dma_addr;
3570 void *data; 3895 void *data;
3571 3896
3572 data = mvpp2_frag_alloc(bm_pool); 3897 data = mvpp2_frag_alloc(bm_pool);
3573 if (!data) 3898 if (!data)
3574 return NULL; 3899 return NULL;
3575 3900
3576 phys_addr = dma_map_single(port->dev->dev.parent, data, 3901 dma_addr = dma_map_single(port->dev->dev.parent, data,
3577 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), 3902 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
3578 DMA_FROM_DEVICE); 3903 DMA_FROM_DEVICE);
3579 if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) { 3904 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
3580 mvpp2_frag_free(bm_pool, data); 3905 mvpp2_frag_free(bm_pool, data);
3581 return NULL; 3906 return NULL;
3582 } 3907 }
3583 *buf_phys_addr = phys_addr; 3908 *buf_dma_addr = dma_addr;
3909 *buf_phys_addr = virt_to_phys(data);
3584 3910
3585 return data; 3911 return data;
3586} 3912}
@@ -3604,37 +3930,46 @@ static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
3604 3930
3605/* Release buffer to BM */ 3931/* Release buffer to BM */
3606static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, 3932static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
3607 dma_addr_t buf_phys_addr, 3933 dma_addr_t buf_dma_addr,
3608 unsigned long buf_virt_addr) 3934 phys_addr_t buf_phys_addr)
3609{ 3935{
3610 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr); 3936 int cpu = smp_processor_id();
3611 mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
3612}
3613 3937
3614/* Release multicast buffer */ 3938 if (port->priv->hw_version == MVPP22) {
3615static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool, 3939 u32 val = 0;
3616 dma_addr_t buf_phys_addr, 3940
3617 unsigned long buf_virt_addr, 3941 if (sizeof(dma_addr_t) == 8)
3618 int mc_id) 3942 val |= upper_32_bits(buf_dma_addr) &
3619{ 3943 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
3620 u32 val = 0; 3944
3945 if (sizeof(phys_addr_t) == 8)
3946 val |= (upper_32_bits(buf_phys_addr)
3947 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
3948 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
3621 3949
3622 val |= (mc_id & MVPP2_BM_MC_ID_MASK); 3950 mvpp2_percpu_write(port->priv, cpu,
3623 mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val); 3951 MVPP22_BM_ADDR_HIGH_RLS_REG, val);
3952 }
3624 3953
3625 mvpp2_bm_pool_put(port, pool, 3954 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
3626 buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK, 3955 * returned in the "cookie" field of the RX
3627 buf_virt_addr); 3956 * descriptor. Instead of storing the virtual address, we
3957 * store the physical address
3958 */
3959 mvpp2_percpu_write(port->priv, cpu,
3960 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
3961 mvpp2_percpu_write(port->priv, cpu,
3962 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
3628} 3963}
3629 3964
3630/* Refill BM pool */ 3965/* Refill BM pool */
3631static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm, 3966static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
3632 dma_addr_t phys_addr, 3967 dma_addr_t dma_addr,
3633 unsigned long cookie) 3968 phys_addr_t phys_addr)
3634{ 3969{
3635 int pool = mvpp2_bm_cookie_pool_get(bm); 3970 int pool = mvpp2_bm_cookie_pool_get(bm);
3636 3971
3637 mvpp2_bm_pool_put(port, pool, phys_addr, cookie); 3972 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3638} 3973}
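Editor's sketch: as the comment in this hunk notes, the value written to MVPP2_BM_VIRT_RLS_REG is opaque to the hardware and simply comes back in the RX descriptor's cookie field, so the driver now stores the buffer's physical address there and recovers the pointer with phys_to_virt() on receive; a 64-bit kernel virtual pointer would not fit, which appears to be the motivation. A sketch of the round trip, valid because pool buffers come from the kernel's linear mapping:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative only: cookie <-> buffer pointer round trip. */
static phys_addr_t hypo_cookie_from_buf(void *buf)
{
	return virt_to_phys(buf);	/* high bits go to a separate
					 * register on PPv2.2 */
}

static void *hypo_buf_from_cookie(phys_addr_t cookie)
{
	return phys_to_virt(cookie);
}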
3639 3974
3640/* Allocate buffers for the pool */ 3975/* Allocate buffers for the pool */
@@ -3642,7 +3977,8 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
3642 struct mvpp2_bm_pool *bm_pool, int buf_num) 3977 struct mvpp2_bm_pool *bm_pool, int buf_num)
3643{ 3978{
3644 int i, buf_size, total_size; 3979 int i, buf_size, total_size;
3645 dma_addr_t phys_addr; 3980 dma_addr_t dma_addr;
3981 phys_addr_t phys_addr;
3646 void *buf; 3982 void *buf;
3647 3983
3648 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size); 3984 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
@@ -3657,12 +3993,13 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
3657 } 3993 }
3658 3994
3659 for (i = 0; i < buf_num; i++) { 3995 for (i = 0; i < buf_num; i++) {
3660 buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_KERNEL); 3996 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
3997 &phys_addr, GFP_KERNEL);
3661 if (!buf) 3998 if (!buf)
3662 break; 3999 break;
3663 4000
3664 mvpp2_bm_pool_put(port, bm_pool->id, phys_addr, 4001 mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
3665 (unsigned long)buf); 4002 phys_addr);
3666 } 4003 }
3667 4004
3668 /* Update BM driver with number of buffers added to pool */ 4005 /* Update BM driver with number of buffers added to pool */
@@ -3830,7 +4167,8 @@ static void mvpp2_interrupts_mask(void *arg)
3830{ 4167{
3831 struct mvpp2_port *port = arg; 4168 struct mvpp2_port *port = arg;
3832 4169
3833 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0); 4170 mvpp2_percpu_write(port->priv, smp_processor_id(),
4171 MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
3834} 4172}
3835 4173
3836/* Unmask the current CPU's Rx/Tx interrupts */ 4174/* Unmask the current CPU's Rx/Tx interrupts */
@@ -3838,17 +4176,46 @@ static void mvpp2_interrupts_unmask(void *arg)
3838{ 4176{
3839 struct mvpp2_port *port = arg; 4177 struct mvpp2_port *port = arg;
3840 4178
3841 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 4179 mvpp2_percpu_write(port->priv, smp_processor_id(),
3842 (MVPP2_CAUSE_MISC_SUM_MASK | 4180 MVPP2_ISR_RX_TX_MASK_REG(port->id),
3843 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)); 4181 (MVPP2_CAUSE_MISC_SUM_MASK |
4182 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
3844} 4183}
3845 4184
3846/* Port configuration routines */ 4185/* Port configuration routines */
3847 4186
4187static void mvpp22_port_mii_set(struct mvpp2_port *port)
4188{
4189 u32 val;
4190
4191 return;
4192
4193 /* Only GOP port 0 has an XLG MAC */
4194 if (port->gop_id == 0) {
4195 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
4196 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
4197 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
4198 writel(val, port->base + MVPP22_XLG_CTRL3_REG);
4199 }
4200
4201 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4202 if (port->phy_interface == PHY_INTERFACE_MODE_RGMII)
4203 val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4204 else
4205 val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4206 val &= ~MVPP22_CTRL4_DP_CLK_SEL;
4207 val |= MVPP22_CTRL4_SYNC_BYPASS;
4208 val |= MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4209 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
4210}
4211
3848static void mvpp2_port_mii_set(struct mvpp2_port *port) 4212static void mvpp2_port_mii_set(struct mvpp2_port *port)
3849{ 4213{
3850 u32 val; 4214 u32 val;
3851 4215
4216 if (port->priv->hw_version == MVPP22)
4217 mvpp22_port_mii_set(port);
4218
3852 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); 4219 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3853 4220
3854 switch (port->phy_interface) { 4221 switch (port->phy_interface) {
@@ -3952,16 +4319,18 @@ static void mvpp2_defaults_set(struct mvpp2_port *port)
3952{ 4319{
3953 int tx_port_num, val, queue, ptxq, lrxq; 4320 int tx_port_num, val, queue, ptxq, lrxq;
3954 4321
3955 /* Configure port to loopback if needed */ 4322 if (port->priv->hw_version == MVPP21) {
3956 if (port->flags & MVPP2_F_LOOPBACK) 4323 /* Configure port to loopback if needed */
3957 mvpp2_port_loopback_set(port); 4324 if (port->flags & MVPP2_F_LOOPBACK)
4325 mvpp2_port_loopback_set(port);
3958 4326
3959 /* Update TX FIFO MIN Threshold */ 4327 /* Update TX FIFO MIN Threshold */
3960 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 4328 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3961 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; 4329 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3962 /* Min. TX threshold must be less than minimal packet length */ 4330 /* Min. TX threshold must be less than minimal packet length */
3963 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); 4331 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
3964 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 4332 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
4333 }
3965 4334
3966 /* Disable Legacy WRR, Disable EJP, Release from reset */ 4335 /* Disable Legacy WRR, Disable EJP, Release from reset */
3967 tx_port_num = mvpp2_egress_port(port); 4336 tx_port_num = mvpp2_egress_port(port);
@@ -4149,11 +4518,15 @@ static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4149} 4518}
4150 4519
4151/* Obtain BM cookie information from descriptor */ 4520/* Obtain BM cookie information from descriptor */
4152static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc) 4521static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
4522 struct mvpp2_rx_desc *rx_desc)
4153{ 4523{
4154 int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
4155 MVPP2_RXD_BM_POOL_ID_OFFS;
4156 int cpu = smp_processor_id(); 4524 int cpu = smp_processor_id();
4525 int pool;
4526
4527 pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
4528 MVPP2_RXD_BM_POOL_ID_MASK) >>
4529 MVPP2_RXD_BM_POOL_ID_OFFS;
4157 4530
4158 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) | 4531 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
4159 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS); 4532 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
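Editor's sketch: the BM cookie built above is just the pool id and the issuing CPU packed into one u32, which mvpp2_bm_cookie_pool_get() later unpacks. A sketch with placeholder offsets (not the real MVPP2_BM_COOKIE_* values):

#include <linux/types.h>

#define HYPO_COOKIE_POOL_OFFS	8
#define HYPO_COOKIE_CPU_OFFS	24

/* Illustrative only: pack/unpack of the pool id inside the cookie. */
static u32 hypo_bm_cookie_build(unsigned int pool, unsigned int cpu)
{
	return ((pool & 0xFF) << HYPO_COOKIE_POOL_OFFS) |
	       ((cpu & 0xFF) << HYPO_COOKIE_CPU_OFFS);
}

static unsigned int hypo_bm_cookie_pool_get(u32 cookie)
{
	return (cookie >> HYPO_COOKIE_POOL_OFFS) & 0xFF;
}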
@@ -4161,18 +4534,6 @@ static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
4161 4534
4162/* Tx descriptors helper methods */ 4535/* Tx descriptors helper methods */
4163 4536
4164/* Get number of Tx descriptors waiting to be transmitted by HW */
4165static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
4166 struct mvpp2_tx_queue *txq)
4167{
4168 u32 val;
4169
4170 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4171 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4172
4173 return val & MVPP2_TXQ_PENDING_MASK;
4174}
4175
4176/* Get pointer to next Tx descriptor to be processed (send) by HW */ 4537/* Get pointer to next Tx descriptor to be processed (send) by HW */
4177static struct mvpp2_tx_desc * 4538static struct mvpp2_tx_desc *
4178mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) 4539mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
@@ -4187,7 +4548,8 @@ mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
4187static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) 4548static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
4188{ 4549{
4189 /* aggregated access - relevant TXQ number is written in TX desc */ 4550 /* aggregated access - relevant TXQ number is written in TX desc */
4190 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending); 4551 mvpp2_percpu_write(port->priv, smp_processor_id(),
4552 MVPP2_AGGR_TXQ_UPDATE_REG, pending);
4191} 4553}
4192 4554
4193 4555
@@ -4216,11 +4578,12 @@ static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
4216 struct mvpp2_tx_queue *txq, int num) 4578 struct mvpp2_tx_queue *txq, int num)
4217{ 4579{
4218 u32 val; 4580 u32 val;
4581 int cpu = smp_processor_id();
4219 4582
4220 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; 4583 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
4221 mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val); 4584 mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
4222 4585
4223 val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG); 4586 val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
4224 4587
4225 return val & MVPP2_TXQ_RSVD_RSLT_MASK; 4588 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
4226} 4589}
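Editor's sketch: the switch from mvpp2_write()/mvpp2_read() to per-CPU variants in hunks like this one suggests that PPv2.2 banks the indirect-access registers (queue-number select followed by the data registers) per CPU, keeping the select/access sequence local to the executing CPU — which is why callers pin smp_processor_id() first. What such an accessor could look like, assuming one ioremapped register window per CPU (the real driver's layout may differ):

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative only: per-CPU windowed register access. */
static void hypo_percpu_write(void __iomem * const *cpu_base, int cpu,
			      u32 offset, u32 data)
{
	writel(data, cpu_base[cpu] + offset);
}

static u32 hypo_percpu_read(void __iomem * const *cpu_base, int cpu,
			    u32 offset)
{
	return readl(cpu_base[cpu] + offset);
}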
@@ -4321,7 +4684,8 @@ static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
4321 u32 val; 4684 u32 val;
4322 4685
4323 /* Reading status reg resets transmitted descriptor counter */ 4686 /* Reading status reg resets transmitted descriptor counter */
4324 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id)); 4687 val = mvpp2_percpu_read(port->priv, smp_processor_id(),
4688 MVPP2_TXQ_SENT_REG(txq->id));
4325 4689
4326 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> 4690 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
4327 MVPP2_TRANSMITTED_COUNT_OFFSET; 4691 MVPP2_TRANSMITTED_COUNT_OFFSET;
@@ -4335,7 +4699,8 @@ static void mvpp2_txq_sent_counter_clear(void *arg)
4335 for (queue = 0; queue < txq_number; queue++) { 4699 for (queue = 0; queue < txq_number; queue++) {
4336 int id = port->txqs[queue]->id; 4700 int id = port->txqs[queue]->id;
4337 4701
4338 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id)); 4702 mvpp2_percpu_read(port->priv, smp_processor_id(),
4703 MVPP2_TXQ_SENT_REG(id));
4339 } 4704 }
4340} 4705}
4341 4706
@@ -4394,12 +4759,14 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
4394static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, 4759static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
4395 struct mvpp2_rx_queue *rxq) 4760 struct mvpp2_rx_queue *rxq)
4396{ 4761{
4762 int cpu = smp_processor_id();
4763
4397 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) 4764 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
4398 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; 4765 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
4399 4766
4400 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); 4767 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
4401 mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, 4768 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
4402 rxq->pkts_coal); 4769 rxq->pkts_coal);
4403} 4770}
4404 4771
4405static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) 4772static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
@@ -4449,7 +4816,7 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4449 struct mvpp2_txq_pcpu_buf *tx_buf = 4816 struct mvpp2_txq_pcpu_buf *tx_buf =
4450 txq_pcpu->buffs + txq_pcpu->txq_get_index; 4817 txq_pcpu->buffs + txq_pcpu->txq_get_index;
4451 4818
4452 dma_unmap_single(port->dev->dev.parent, tx_buf->phys, 4819 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
4453 tx_buf->size, DMA_TO_DEVICE); 4820 tx_buf->size, DMA_TO_DEVICE);
4454 if (tx_buf->skb) 4821 if (tx_buf->skb)
4455 dev_kfree_skb_any(tx_buf->skb); 4822 dev_kfree_skb_any(tx_buf->skb);
@@ -4527,10 +4894,12 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
4527 int desc_num, int cpu, 4894 int desc_num, int cpu,
4528 struct mvpp2 *priv) 4895 struct mvpp2 *priv)
4529{ 4896{
4897 u32 txq_dma;
4898
4530 /* Allocate memory for TX descriptors */ 4899 /* Allocate memory for TX descriptors */
4531 aggr_txq->descs = dma_alloc_coherent(&pdev->dev, 4900 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
4532 desc_num * MVPP2_DESC_ALIGNED_SIZE, 4901 desc_num * MVPP2_DESC_ALIGNED_SIZE,
4533 &aggr_txq->descs_phys, GFP_KERNEL); 4902 &aggr_txq->descs_dma, GFP_KERNEL);
4534 if (!aggr_txq->descs) 4903 if (!aggr_txq->descs)
4535 return -ENOMEM; 4904 return -ENOMEM;
4536 4905
@@ -4540,10 +4909,16 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
4540 aggr_txq->next_desc_to_proc = mvpp2_read(priv, 4909 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
4541 MVPP2_AGGR_TXQ_INDEX_REG(cpu)); 4910 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
4542 4911
4543 /* Set Tx descriptors queue starting address */ 4912 /* Set Tx descriptors queue starting address indirect
4544 /* indirect access */ 4913 * access
4545 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), 4914 */
4546 aggr_txq->descs_phys); 4915 if (priv->hw_version == MVPP21)
4916 txq_dma = aggr_txq->descs_dma;
4917 else
4918 txq_dma = aggr_txq->descs_dma >>
4919 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
4920
4921 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
4547 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num); 4922 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
4548 4923
4549 return 0; 4924 return 0;
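Editor's sketch: on PPv2.1 the aggregated ring base fits the 32-bit register as-is, while on PPv2.2 the ring may sit above 4 GB, so the register is apparently programmed with the DMA address shifted right by MVPP22_AGGR_TXQ_DESC_ADDR_OFFS, the low bits being implied by the ring's alignment. A sketch with a placeholder shift (the real constant's value is not shown in this hunk):

#include <linux/kernel.h>
#include <linux/types.h>

#define HYPO_DESC_ADDR_SHIFT	8

/* Illustrative only: value written to the descriptor base register. */
static u32 hypo_ring_base_to_reg(bool is_pp21, dma_addr_t base)
{
	if (is_pp21)
		return lower_32_bits(base);	/* fits as-is */

	return base >> HYPO_DESC_ADDR_SHIFT;	/* alignment implies low bits */
}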
@@ -4554,12 +4929,15 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
4554 struct mvpp2_rx_queue *rxq) 4929 struct mvpp2_rx_queue *rxq)
4555 4930
4556{ 4931{
4932 u32 rxq_dma;
4933 int cpu;
4934
4557 rxq->size = port->rx_ring_size; 4935 rxq->size = port->rx_ring_size;
4558 4936
4559 /* Allocate memory for RX descriptors */ 4937 /* Allocate memory for RX descriptors */
4560 rxq->descs = dma_alloc_coherent(port->dev->dev.parent, 4938 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
4561 rxq->size * MVPP2_DESC_ALIGNED_SIZE, 4939 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4562 &rxq->descs_phys, GFP_KERNEL); 4940 &rxq->descs_dma, GFP_KERNEL);
4563 if (!rxq->descs) 4941 if (!rxq->descs)
4564 return -ENOMEM; 4942 return -ENOMEM;
4565 4943
@@ -4569,10 +4947,15 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
4569 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 4947 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4570 4948
4571 /* Set Rx descriptors queue starting address - indirect access */ 4949 /* Set Rx descriptors queue starting address - indirect access */
4572 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); 4950 cpu = smp_processor_id();
4573 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys); 4951 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
4574 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); 4952 if (port->priv->hw_version == MVPP21)
4575 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0); 4953 rxq_dma = rxq->descs_dma;
4954 else
4955 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
4956 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
4957 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
4958 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
4576 4959
4577 /* Set Offset */ 4960 /* Set Offset */
4578 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); 4961 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
@@ -4599,10 +4982,11 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
4599 4982
4600 for (i = 0; i < rx_received; i++) { 4983 for (i = 0; i < rx_received; i++) {
4601 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 4984 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
4602 u32 bm = mvpp2_bm_cookie_build(rx_desc); 4985 u32 bm = mvpp2_bm_cookie_build(port, rx_desc);
4603 4986
4604 mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr, 4987 mvpp2_pool_refill(port, bm,
4605 rx_desc->buf_cookie); 4988 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
4989 mvpp2_rxdesc_cookie_get(port, rx_desc));
4606 } 4990 }
4607 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); 4991 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
4608} 4992}
@@ -4611,26 +4995,29 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
4611static void mvpp2_rxq_deinit(struct mvpp2_port *port, 4995static void mvpp2_rxq_deinit(struct mvpp2_port *port,
4612 struct mvpp2_rx_queue *rxq) 4996 struct mvpp2_rx_queue *rxq)
4613{ 4997{
4998 int cpu;
4999
4614 mvpp2_rxq_drop_pkts(port, rxq); 5000 mvpp2_rxq_drop_pkts(port, rxq);
4615 5001
4616 if (rxq->descs) 5002 if (rxq->descs)
4617 dma_free_coherent(port->dev->dev.parent, 5003 dma_free_coherent(port->dev->dev.parent,
4618 rxq->size * MVPP2_DESC_ALIGNED_SIZE, 5004 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4619 rxq->descs, 5005 rxq->descs,
4620 rxq->descs_phys); 5006 rxq->descs_dma);
4621 5007
4622 rxq->descs = NULL; 5008 rxq->descs = NULL;
4623 rxq->last_desc = 0; 5009 rxq->last_desc = 0;
4624 rxq->next_desc_to_proc = 0; 5010 rxq->next_desc_to_proc = 0;
4625 rxq->descs_phys = 0; 5011 rxq->descs_dma = 0;
4626 5012
4627 /* Clear Rx descriptors queue starting address and size; 5013 /* Clear Rx descriptors queue starting address and size;
4628 * free descriptor number 5014 * free descriptor number
4629 */ 5015 */
4630 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 5016 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4631 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); 5017 cpu = smp_processor_id();
4632 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0); 5018 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
4633 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0); 5019 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
5020 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
4634} 5021}
4635 5022
4636/* Create and initialize a Tx queue */ 5023/* Create and initialize a Tx queue */
@@ -4646,23 +5033,25 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
4646 /* Allocate memory for Tx descriptors */ 5033 /* Allocate memory for Tx descriptors */
4647 txq->descs = dma_alloc_coherent(port->dev->dev.parent, 5034 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
4648 txq->size * MVPP2_DESC_ALIGNED_SIZE, 5035 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4649 &txq->descs_phys, GFP_KERNEL); 5036 &txq->descs_dma, GFP_KERNEL);
4650 if (!txq->descs) 5037 if (!txq->descs)
4651 return -ENOMEM; 5038 return -ENOMEM;
4652 5039
4653 txq->last_desc = txq->size - 1; 5040 txq->last_desc = txq->size - 1;
4654 5041
4655 /* Set Tx descriptors queue starting address - indirect access */ 5042 /* Set Tx descriptors queue starting address - indirect access */
4656 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 5043 cpu = smp_processor_id();
4657 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys); 5044 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
4658 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size & 5045 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
4659 MVPP2_TXQ_DESC_SIZE_MASK); 5046 txq->descs_dma);
4660 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0); 5047 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
4661 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG, 5048 txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
4662 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); 5049 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
4663 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG); 5050 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
5051 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
5052 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
4664 val &= ~MVPP2_TXQ_PENDING_MASK; 5053 val &= ~MVPP2_TXQ_PENDING_MASK;
4665 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val); 5054 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
4666 5055
4667 /* Calculate base address in prefetch buffer. We reserve 16 descriptors 5056 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
4668 * for each existing TXQ. 5057 * for each existing TXQ.
@@ -4673,9 +5062,9 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
4673 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + 5062 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
4674 (txq->log_id * desc_per_txq); 5063 (txq->log_id * desc_per_txq);
4675 5064
4676 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, 5065 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
4677 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | 5066 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
4678 MVPP2_PREF_BUF_THRESH(desc_per_txq/2)); 5067 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
4679 5068
4680 /* WRR / EJP configuration - indirect access */ 5069 /* WRR / EJP configuration - indirect access */
4681 tx_port_num = mvpp2_egress_port(port); 5070 tx_port_num = mvpp2_egress_port(port);
@@ -4716,7 +5105,7 @@ error:
4716 5105
4717 dma_free_coherent(port->dev->dev.parent, 5106 dma_free_coherent(port->dev->dev.parent,
4718 txq->size * MVPP2_DESC_ALIGNED_SIZE, 5107 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4719 txq->descs, txq->descs_phys); 5108 txq->descs, txq->descs_dma);
4720 5109
4721 return -ENOMEM; 5110 return -ENOMEM;
4722} 5111}
@@ -4736,20 +5125,21 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
4736 if (txq->descs) 5125 if (txq->descs)
4737 dma_free_coherent(port->dev->dev.parent, 5126 dma_free_coherent(port->dev->dev.parent,
4738 txq->size * MVPP2_DESC_ALIGNED_SIZE, 5127 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4739 txq->descs, txq->descs_phys); 5128 txq->descs, txq->descs_dma);
4740 5129
4741 txq->descs = NULL; 5130 txq->descs = NULL;
4742 txq->last_desc = 0; 5131 txq->last_desc = 0;
4743 txq->next_desc_to_proc = 0; 5132 txq->next_desc_to_proc = 0;
4744 txq->descs_phys = 0; 5133 txq->descs_dma = 0;
4745 5134
4746 /* Set minimum bandwidth for disabled TXQs */ 5135 /* Set minimum bandwidth for disabled TXQs */
4747 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); 5136 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
4748 5137
4749 /* Set Tx descriptors queue starting address and size */ 5138 /* Set Tx descriptors queue starting address and size */
4750 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 5139 cpu = smp_processor_id();
4751 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0); 5140 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
4752 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0); 5141 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
5142 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
4753} 5143}
4754 5144
4755/* Cleanup Tx ports */ 5145/* Cleanup Tx ports */
@@ -4759,10 +5149,11 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
4759 int delay, pending, cpu; 5149 int delay, pending, cpu;
4760 u32 val; 5150 u32 val;
4761 5151
4762 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 5152 cpu = smp_processor_id();
4763 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG); 5153 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5154 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
4764 val |= MVPP2_TXQ_DRAIN_EN_MASK; 5155 val |= MVPP2_TXQ_DRAIN_EN_MASK;
4765 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val); 5156 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
4766 5157
4767 /* The napi queue has been stopped so wait for all packets 5158 /* The napi queue has been stopped so wait for all packets
4768 * to be transmitted. 5159 * to be transmitted.
@@ -4778,11 +5169,13 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
4778 mdelay(1); 5169 mdelay(1);
4779 delay++; 5170 delay++;
4780 5171
4781 pending = mvpp2_txq_pend_desc_num_get(port, txq); 5172 pending = mvpp2_percpu_read(port->priv, cpu,
5173 MVPP2_TXQ_PENDING_REG);
5174 pending &= MVPP2_TXQ_PENDING_MASK;
4782 } while (pending); 5175 } while (pending);
4783 5176
4784 val &= ~MVPP2_TXQ_DRAIN_EN_MASK; 5177 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
4785 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val); 5178 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
4786 5179
4787 for_each_present_cpu(cpu) { 5180 for_each_present_cpu(cpu) {
4788 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 5181 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
@@ -4991,20 +5384,21 @@ static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
4991static void mvpp2_rx_error(struct mvpp2_port *port, 5384static void mvpp2_rx_error(struct mvpp2_port *port,
4992 struct mvpp2_rx_desc *rx_desc) 5385 struct mvpp2_rx_desc *rx_desc)
4993{ 5386{
4994 u32 status = rx_desc->status; 5387 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
5388 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
4995 5389
4996 switch (status & MVPP2_RXD_ERR_CODE_MASK) { 5390 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
4997 case MVPP2_RXD_ERR_CRC: 5391 case MVPP2_RXD_ERR_CRC:
4998 netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n", 5392 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
4999 status, rx_desc->data_size); 5393 status, sz);
5000 break; 5394 break;
5001 case MVPP2_RXD_ERR_OVERRUN: 5395 case MVPP2_RXD_ERR_OVERRUN:
5002 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n", 5396 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
5003 status, rx_desc->data_size); 5397 status, sz);
5004 break; 5398 break;
5005 case MVPP2_RXD_ERR_RESOURCE: 5399 case MVPP2_RXD_ERR_RESOURCE:
5006 netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n", 5400 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
5007 status, rx_desc->data_size); 5401 status, sz);
5008 break; 5402 break;
5009 } 5403 }
5010} 5404}
@@ -5031,15 +5425,17 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
5031static int mvpp2_rx_refill(struct mvpp2_port *port, 5425static int mvpp2_rx_refill(struct mvpp2_port *port,
5032 struct mvpp2_bm_pool *bm_pool, u32 bm) 5426 struct mvpp2_bm_pool *bm_pool, u32 bm)
5033{ 5427{
5034 dma_addr_t phys_addr; 5428 dma_addr_t dma_addr;
5429 phys_addr_t phys_addr;
5035 void *buf; 5430 void *buf;
5036 5431
5037 /* No recycle or too many buffers are in use, so allocate a new skb */ 5432 /* No recycle or too many buffers are in use, so allocate a new skb */
5038 buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC); 5433 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
5434 GFP_ATOMIC);
5039 if (!buf) 5435 if (!buf)
5040 return -ENOMEM; 5436 return -ENOMEM;
5041 5437
5042 mvpp2_pool_refill(port, bm, phys_addr, (unsigned long)buf); 5438 mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
5043 5439
5044 return 0; 5440 return 0;
5045} 5441}
@@ -5075,43 +5471,6 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
5075 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; 5471 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
5076} 5472}
5077 5473
5078static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
5079 struct mvpp2_rx_desc *rx_desc)
5080{
5081 struct mvpp2_buff_hdr *buff_hdr;
5082 struct sk_buff *skb;
5083 u32 rx_status = rx_desc->status;
5084 dma_addr_t buff_phys_addr;
5085 unsigned long buff_virt_addr;
5086 dma_addr_t buff_phys_addr_next;
5087 unsigned long buff_virt_addr_next;
5088 int mc_id;
5089 int pool_id;
5090
5091 pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5092 MVPP2_RXD_BM_POOL_ID_OFFS;
5093 buff_phys_addr = rx_desc->buf_phys_addr;
5094 buff_virt_addr = rx_desc->buf_cookie;
5095
5096 do {
5097 skb = (struct sk_buff *)buff_virt_addr;
5098 buff_hdr = (struct mvpp2_buff_hdr *)skb->head;
5099
5100 mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);
5101
5102 buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
5103 buff_virt_addr_next = buff_hdr->next_buff_virt_addr;
5104
5105 /* Release buffer */
5106 mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
5107 buff_virt_addr, mc_id);
5108
5109 buff_phys_addr = buff_phys_addr_next;
5110 buff_virt_addr = buff_virt_addr_next;
5111
5112 } while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
5113}
5114
5115/* Main rx processing */ 5474/* Main rx processing */
5116static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, 5475static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5117 struct mvpp2_rx_queue *rxq) 5476 struct mvpp2_rx_queue *rxq)
@@ -5132,25 +5491,23 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5132 struct mvpp2_bm_pool *bm_pool; 5491 struct mvpp2_bm_pool *bm_pool;
5133 struct sk_buff *skb; 5492 struct sk_buff *skb;
5134 unsigned int frag_size; 5493 unsigned int frag_size;
5135 dma_addr_t phys_addr; 5494 dma_addr_t dma_addr;
5495 phys_addr_t phys_addr;
5136 u32 bm, rx_status; 5496 u32 bm, rx_status;
5137 int pool, rx_bytes, err; 5497 int pool, rx_bytes, err;
5138 void *data; 5498 void *data;
5139 5499
5140 rx_done++; 5500 rx_done++;
5141 rx_status = rx_desc->status; 5501 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
5142 rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE; 5502 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
5143 phys_addr = rx_desc->buf_phys_addr; 5503 rx_bytes -= MVPP2_MH_SIZE;
5144 data = (void *)(uintptr_t)rx_desc->buf_cookie; 5504 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
5145 5505 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
5146 bm = mvpp2_bm_cookie_build(rx_desc); 5506 data = (void *)phys_to_virt(phys_addr);
5507
5508 bm = mvpp2_bm_cookie_build(port, rx_desc);
5147 pool = mvpp2_bm_cookie_pool_get(bm); 5509 pool = mvpp2_bm_cookie_pool_get(bm);
5148 bm_pool = &port->priv->bm_pools[pool]; 5510 bm_pool = &port->priv->bm_pools[pool];
5149 /* Check if buffer header is used */
5150 if (rx_status & MVPP2_RXD_BUF_HDR) {
5151 mvpp2_buff_hdr_rx(port, rx_desc);
5152 continue;
5153 }
5154 5511
5155 /* In case of an error, release the requested buffer pointer 5512 /* In case of an error, release the requested buffer pointer
5156 * to the Buffer Manager. This request process is controlled 5513 * to the Buffer Manager. This request process is controlled
@@ -5162,9 +5519,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5162 dev->stats.rx_errors++; 5519 dev->stats.rx_errors++;
5163 mvpp2_rx_error(port, rx_desc); 5520 mvpp2_rx_error(port, rx_desc);
5164 /* Return the buffer to the pool */ 5521 /* Return the buffer to the pool */
5165 5522 mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
5166 mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
5167 rx_desc->buf_cookie);
5168 continue; 5523 continue;
5169 } 5524 }
5170 5525
@@ -5185,7 +5540,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5185 goto err_drop_frame; 5540 goto err_drop_frame;
5186 } 5541 }
5187 5542
5188 dma_unmap_single(dev->dev.parent, phys_addr, 5543 dma_unmap_single(dev->dev.parent, dma_addr,
5189 bm_pool->buf_size, DMA_FROM_DEVICE); 5544 bm_pool->buf_size, DMA_FROM_DEVICE);
5190 5545
5191 rcvd_pkts++; 5546 rcvd_pkts++;
@@ -5216,11 +5571,15 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5216} 5571}
5217 5572
5218static inline void 5573static inline void
5219tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq, 5574tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
5220 struct mvpp2_tx_desc *desc) 5575 struct mvpp2_tx_desc *desc)
5221{ 5576{
5222 dma_unmap_single(dev, desc->buf_phys_addr, 5577 dma_addr_t buf_dma_addr =
5223 desc->data_size, DMA_TO_DEVICE); 5578 mvpp2_txdesc_dma_addr_get(port, desc);
5579 size_t buf_sz =
5580 mvpp2_txdesc_size_get(port, desc);
5581 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
5582 buf_sz, DMA_TO_DEVICE);
5224 mvpp2_txq_desc_put(txq); 5583 mvpp2_txq_desc_put(txq);
5225} 5584}
5226 5585
@@ -5232,35 +5591,38 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5232 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); 5591 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5233 struct mvpp2_tx_desc *tx_desc; 5592 struct mvpp2_tx_desc *tx_desc;
5234 int i; 5593 int i;
5235 dma_addr_t buf_phys_addr; 5594 dma_addr_t buf_dma_addr;
5236 5595
5237 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 5596 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5238 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 5597 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5239 void *addr = page_address(frag->page.p) + frag->page_offset; 5598 void *addr = page_address(frag->page.p) + frag->page_offset;
5240 5599
5241 tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 5600 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5242 tx_desc->phys_txq = txq->id; 5601 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
5243 tx_desc->data_size = frag->size; 5602 mvpp2_txdesc_size_set(port, tx_desc, frag->size);
5244 5603
5245 buf_phys_addr = dma_map_single(port->dev->dev.parent, addr, 5604 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
5246 tx_desc->data_size, 5605 frag->size,
5247 DMA_TO_DEVICE); 5606 DMA_TO_DEVICE);
5248 if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) { 5607 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
5249 mvpp2_txq_desc_put(txq); 5608 mvpp2_txq_desc_put(txq);
5250 goto error; 5609 goto error;
5251 } 5610 }
5252 5611
5253 tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN; 5612 mvpp2_txdesc_offset_set(port, tx_desc,
5254 tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN); 5613 buf_dma_addr & MVPP2_TX_DESC_ALIGN);
5614 mvpp2_txdesc_dma_addr_set(port, tx_desc,
5615 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
5255 5616
5256 if (i == (skb_shinfo(skb)->nr_frags - 1)) { 5617 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5257 /* Last descriptor */ 5618 /* Last descriptor */
5258 tx_desc->command = MVPP2_TXD_L_DESC; 5619 mvpp2_txdesc_cmd_set(port, tx_desc,
5259 mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc); 5620 MVPP2_TXD_L_DESC);
5621 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
5260 } else { 5622 } else {
5261 /* Descriptor in the middle: Not First, Not Last */ 5623 /* Descriptor in the middle: Not First, Not Last */
5262 tx_desc->command = 0; 5624 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
5263 mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc); 5625 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
5264 } 5626 }
5265 } 5627 }
5266 5628
@@ -5272,7 +5634,7 @@ error:
5272 */ 5634 */
5273 for (i = i - 1; i >= 0; i--) { 5635 for (i = i - 1; i >= 0; i--) {
5274 tx_desc = txq->descs + i; 5636 tx_desc = txq->descs + i;
5275 tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc); 5637 tx_desc_unmap_put(port, txq, tx_desc);
5276 } 5638 }
5277 5639
5278 return -ENOMEM; 5640 return -ENOMEM;
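Editor's sketch: both the first-fragment path and the fragment loop split the mapped address the same way — the aligned part goes into the descriptor's address field and the low bits into the packet-offset field, which the unmap path adds back via the accessors. A sketch with a placeholder alignment mask standing in for MVPP2_TX_DESC_ALIGN (its value is not shown in this hunk):

#include <linux/types.h>

#define HYPO_TX_DESC_ALIGN	0x3f

/* Illustrative only: address/offset split used when filling TX descriptors. */
static void hypo_txdesc_fill_addr(dma_addr_t buf_dma_addr,
				  dma_addr_t *desc_addr,
				  unsigned int *desc_offset)
{
	*desc_offset = buf_dma_addr & HYPO_TX_DESC_ALIGN;
	*desc_addr   = buf_dma_addr & ~(dma_addr_t)HYPO_TX_DESC_ALIGN;
	/* unmap side recomputes: buf_dma_addr == *desc_addr + *desc_offset */
}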
@@ -5285,7 +5647,7 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5285 struct mvpp2_tx_queue *txq, *aggr_txq; 5647 struct mvpp2_tx_queue *txq, *aggr_txq;
5286 struct mvpp2_txq_pcpu *txq_pcpu; 5648 struct mvpp2_txq_pcpu *txq_pcpu;
5287 struct mvpp2_tx_desc *tx_desc; 5649 struct mvpp2_tx_desc *tx_desc;
5288 dma_addr_t buf_phys_addr; 5650 dma_addr_t buf_dma_addr;
5289 int frags = 0; 5651 int frags = 0;
5290 u16 txq_id; 5652 u16 txq_id;
5291 u32 tx_cmd; 5653 u32 tx_cmd;
@@ -5307,35 +5669,38 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5307 5669
5308 /* Get a descriptor for the first part of the packet */ 5670 /* Get a descriptor for the first part of the packet */
5309 tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 5671 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5310 tx_desc->phys_txq = txq->id; 5672 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
5311 tx_desc->data_size = skb_headlen(skb); 5673 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
5312 5674
5313 buf_phys_addr = dma_map_single(dev->dev.parent, skb->data, 5675 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
5314 tx_desc->data_size, DMA_TO_DEVICE); 5676 skb_headlen(skb), DMA_TO_DEVICE);
5315 if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) { 5677 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
5316 mvpp2_txq_desc_put(txq); 5678 mvpp2_txq_desc_put(txq);
5317 frags = 0; 5679 frags = 0;
5318 goto out; 5680 goto out;
5319 } 5681 }
5320 tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN; 5682
5321 tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN; 5683 mvpp2_txdesc_offset_set(port, tx_desc,
5684 buf_dma_addr & MVPP2_TX_DESC_ALIGN);
5685 mvpp2_txdesc_dma_addr_set(port, tx_desc,
5686 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
5322 5687
5323 tx_cmd = mvpp2_skb_tx_csum(port, skb); 5688 tx_cmd = mvpp2_skb_tx_csum(port, skb);
5324 5689
5325 if (frags == 1) { 5690 if (frags == 1) {
5326 /* First and Last descriptor */ 5691 /* First and Last descriptor */
5327 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; 5692 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
5328 tx_desc->command = tx_cmd; 5693 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
5329 mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc); 5694 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
5330 } else { 5695 } else {
5331 /* First but not Last */ 5696 /* First but not Last */
5332 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; 5697 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
5333 tx_desc->command = tx_cmd; 5698 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
5334 mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc); 5699 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
5335 5700
5336 /* Continue with other skb fragments */ 5701 /* Continue with other skb fragments */
5337 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { 5702 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
5338 tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc); 5703 tx_desc_unmap_put(port, txq, tx_desc);
5339 frags = 0; 5704 frags = 0;
5340 goto out; 5705 goto out;
5341 } 5706 }
@@ -5396,6 +5761,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
5396 u32 cause_rx_tx, cause_rx, cause_misc; 5761 u32 cause_rx_tx, cause_rx, cause_misc;
5397 int rx_done = 0; 5762 int rx_done = 0;
5398 struct mvpp2_port *port = netdev_priv(napi->dev); 5763 struct mvpp2_port *port = netdev_priv(napi->dev);
5764 int cpu = smp_processor_id();
5399 5765
5400 /* Rx/Tx cause register 5766 /* Rx/Tx cause register
5401 * 5767 *
@@ -5407,8 +5773,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
5407 * 5773 *
5408 * Each CPU has its own Rx/Tx cause register 5774 * Each CPU has its own Rx/Tx cause register
5409 */ 5775 */
5410 cause_rx_tx = mvpp2_read(port->priv, 5776 cause_rx_tx = mvpp2_percpu_read(port->priv, cpu,
5411 MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); 5777 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5412 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; 5778 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
5413 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; 5779 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
5414 5780
@@ -5417,8 +5783,9 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
5417 5783
5418 /* Clear the cause register */ 5784 /* Clear the cause register */
5419 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); 5785 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
5420 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id), 5786 mvpp2_percpu_write(port->priv, cpu,
5421 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); 5787 MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
5788 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
5422 } 5789 }
5423 5790
5424 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; 5791 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
@@ -5530,7 +5897,7 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
5530 return 0; 5897 return 0;
5531} 5898}
5532 5899
5533static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr) 5900static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
5534{ 5901{
5535 u32 mac_addr_l, mac_addr_m, mac_addr_h; 5902 u32 mac_addr_l, mac_addr_m, mac_addr_h;
5536 5903
@@ -5975,16 +6342,6 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
5975 .set_link_ksettings = phy_ethtool_set_link_ksettings, 6342 .set_link_ksettings = phy_ethtool_set_link_ksettings,
5976}; 6343};
5977 6344
5978/* Driver initialization */
5979
5980static void mvpp2_port_power_up(struct mvpp2_port *port)
5981{
5982 mvpp2_port_mii_set(port);
5983 mvpp2_port_periodic_xon_disable(port);
5984 mvpp2_port_fc_adv_enable(port);
5985 mvpp2_port_reset(port);
5986}
5987
5988/* Initialize port HW */ 6345/* Initialize port HW */
5989static int mvpp2_port_init(struct mvpp2_port *port) 6346static int mvpp2_port_init(struct mvpp2_port *port)
5990{ 6347{
@@ -5993,7 +6350,8 @@ static int mvpp2_port_init(struct mvpp2_port *port)
5993 struct mvpp2_txq_pcpu *txq_pcpu; 6350 struct mvpp2_txq_pcpu *txq_pcpu;
5994 int queue, cpu, err; 6351 int queue, cpu, err;
5995 6352
5996 if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM) 6353 if (port->first_rxq + rxq_number >
6354 MVPP2_MAX_PORTS * priv->max_port_rxqs)
5997 return -EINVAL; 6355 return -EINVAL;
5998 6356
5999 /* Disable port */ 6357 /* Disable port */
@@ -6061,7 +6419,18 @@ static int mvpp2_port_init(struct mvpp2_port *port)
6061 } 6419 }
6062 6420
6063 /* Configure Rx queue group interrupt for this port */ 6421 /* Configure Rx queue group interrupt for this port */
6064 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number); 6422 if (priv->hw_version == MVPP21) {
6423 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
6424 rxq_number);
6425 } else {
6426 u32 val;
6427
6428 val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
6429 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
6430
6431 val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
6432 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
6433 }
6065 6434
6066 /* Create Rx descriptor rings */ 6435 /* Create Rx descriptor rings */
6067 for (queue = 0; queue < rxq_number; queue++) { 6436 for (queue = 0; queue < rxq_number; queue++) {
@@ -6103,8 +6472,7 @@ err_free_percpu:
6103/* Ports initialization */ 6472/* Ports initialization */
6104static int mvpp2_port_probe(struct platform_device *pdev, 6473static int mvpp2_port_probe(struct platform_device *pdev,
6105 struct device_node *port_node, 6474 struct device_node *port_node,
6106 struct mvpp2 *priv, 6475 struct mvpp2 *priv)
6107 int *next_first_rxq)
6108{ 6476{
6109 struct device_node *phy_node; 6477 struct device_node *phy_node;
6110 struct mvpp2_port *port; 6478 struct mvpp2_port *port;
@@ -6117,7 +6485,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6117 u32 id; 6485 u32 id;
6118 int features; 6486 int features;
6119 int phy_mode; 6487 int phy_mode;
6120 int priv_common_regs_num = 2;
6121 int err, i, cpu; 6488 int err, i, cpu;
6122 6489
6123 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number, 6490 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
@@ -6163,16 +6530,30 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6163 6530
6164 port->priv = priv; 6531 port->priv = priv;
6165 port->id = id; 6532 port->id = id;
6166 port->first_rxq = *next_first_rxq; 6533 if (priv->hw_version == MVPP21)
6534 port->first_rxq = port->id * rxq_number;
6535 else
6536 port->first_rxq = port->id * priv->max_port_rxqs;
6537
6167 port->phy_node = phy_node; 6538 port->phy_node = phy_node;
6168 port->phy_interface = phy_mode; 6539 port->phy_interface = phy_mode;
6169 6540
6170 res = platform_get_resource(pdev, IORESOURCE_MEM, 6541 if (priv->hw_version == MVPP21) {
6171 priv_common_regs_num + id); 6542 res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
6172 port->base = devm_ioremap_resource(&pdev->dev, res); 6543 port->base = devm_ioremap_resource(&pdev->dev, res);
6173 if (IS_ERR(port->base)) { 6544 if (IS_ERR(port->base)) {
6174 err = PTR_ERR(port->base); 6545 err = PTR_ERR(port->base);
6175 goto err_free_irq; 6546 goto err_free_irq;
6547 }
6548 } else {
6549 if (of_property_read_u32(port_node, "gop-port-id",
6550 &port->gop_id)) {
6551 err = -EINVAL;
6552 dev_err(&pdev->dev, "missing gop-port-id value\n");
6553 goto err_free_irq;
6554 }
6555
6556 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
6176 } 6557 }
6177 6558
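Editor's sketch: instead of threading a next_first_rxq counter through probe, the first hardware RX queue is now derived from the port id — a slice of rxq_number queues per port on PPv2.1 and a slice of max_port_rxqs on PPv2.2. A sketch (parameter values come from the probe data above):

#include <linux/types.h>

/* Illustrative only: first global RX queue owned by a port. */
static int hypo_first_rxq(bool is_pp21, int port_id,
			  int rxq_number, int max_port_rxqs)
{
	return is_pp21 ? port_id * rxq_number
		       : port_id * max_port_rxqs;
}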
6178 /* Alloc per-cpu stats */ 6559 /* Alloc per-cpu stats */
@@ -6187,7 +6568,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6187 mac_from = "device tree"; 6568 mac_from = "device tree";
6188 ether_addr_copy(dev->dev_addr, dt_mac_addr); 6569 ether_addr_copy(dev->dev_addr, dt_mac_addr);
6189 } else { 6570 } else {
6190 mvpp2_get_mac_address(port, hw_mac_addr); 6571 if (priv->hw_version == MVPP21)
6572 mvpp21_get_mac_address(port, hw_mac_addr);
6191 if (is_valid_ether_addr(hw_mac_addr)) { 6573 if (is_valid_ether_addr(hw_mac_addr)) {
6192 mac_from = "hardware"; 6574 mac_from = "hardware";
6193 ether_addr_copy(dev->dev_addr, hw_mac_addr); 6575 ether_addr_copy(dev->dev_addr, hw_mac_addr);
@@ -6207,7 +6589,14 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6207 dev_err(&pdev->dev, "failed to init port %d\n", id); 6589 dev_err(&pdev->dev, "failed to init port %d\n", id);
6208 goto err_free_stats; 6590 goto err_free_stats;
6209 } 6591 }
6210 mvpp2_port_power_up(port); 6592
6593 mvpp2_port_mii_set(port);
6594 mvpp2_port_periodic_xon_disable(port);
6595
6596 if (priv->hw_version == MVPP21)
6597 mvpp2_port_fc_adv_enable(port);
6598
6599 mvpp2_port_reset(port);
6211 6600
6212 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); 6601 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6213 if (!port->pcpu) { 6602 if (!port->pcpu) {
@@ -6245,8 +6634,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6245 } 6634 }
6246 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); 6635 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
6247 6636
6248 /* Increment the first Rx queue number to be used by the next port */
6249 *next_first_rxq += rxq_number;
6250 priv->port_list[id] = port; 6637 priv->port_list[id] = port;
6251 return 0; 6638 return 0;
6252 6639
@@ -6330,6 +6717,60 @@ static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
6330 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); 6717 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
6331} 6718}
6332 6719
6720static void mvpp2_axi_init(struct mvpp2 *priv)
6721{
6722 u32 val, rdval, wrval;
6723
6724 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
6725
6726 /* AXI Bridge Configuration */
6727
6728 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
6729 << MVPP22_AXI_ATTR_CACHE_OFFS;
6730 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6731 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
6732
6733 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
6734 << MVPP22_AXI_ATTR_CACHE_OFFS;
6735 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6736 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
6737
6738 /* BM */
6739 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
6740 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
6741
6742 /* Descriptors */
6743 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
6744 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
6745 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
6746 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
6747
6748 /* Buffer Data */
6749 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
6750 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
6751
6752 val = MVPP22_AXI_CODE_CACHE_NON_CACHE
6753 << MVPP22_AXI_CODE_CACHE_OFFS;
6754 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
6755 << MVPP22_AXI_CODE_DOMAIN_OFFS;
6756 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
6757 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
6758
6759 val = MVPP22_AXI_CODE_CACHE_RD_CACHE
6760 << MVPP22_AXI_CODE_CACHE_OFFS;
6761 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6762 << MVPP22_AXI_CODE_DOMAIN_OFFS;
6763
6764 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
6765
6766 val = MVPP22_AXI_CODE_CACHE_WR_CACHE
6767 << MVPP22_AXI_CODE_CACHE_OFFS;
6768 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6769 << MVPP22_AXI_CODE_DOMAIN_OFFS;
6770
6771 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
6772}
6773
6333/* Initialize network controller common part HW */ 6774/* Initialize network controller common part HW */
6334static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) 6775static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
6335{ 6776{
@@ -6338,7 +6779,7 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
6338 u32 val; 6779 u32 val;
6339 6780
6340 /* Checks for hardware constraints */ 6781 /* Checks for hardware constraints */
6341 if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) || 6782 if (rxq_number % 4 || (rxq_number > priv->max_port_rxqs) ||
6342 (txq_number > MVPP2_MAX_TXQ)) { 6783 (txq_number > MVPP2_MAX_TXQ)) {
6343 dev_err(&pdev->dev, "invalid queue size parameter\n"); 6784 dev_err(&pdev->dev, "invalid queue size parameter\n");
6344 return -EINVAL; 6785 return -EINVAL;
@@ -6349,10 +6790,19 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
6349 if (dram_target_info) 6790 if (dram_target_info)
6350 mvpp2_conf_mbus_windows(dram_target_info, priv); 6791 mvpp2_conf_mbus_windows(dram_target_info, priv);
6351 6792
6793 if (priv->hw_version == MVPP22)
6794 mvpp2_axi_init(priv);
6795
6352 /* Disable HW PHY polling */ 6796 /* Disable HW PHY polling */
6353 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); 6797 if (priv->hw_version == MVPP21) {
6354 val |= MVPP2_PHY_AN_STOP_SMI0_MASK; 6798 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6355 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); 6799 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
6800 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6801 } else {
6802 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
6803 val &= ~MVPP22_SMI_POLLING_EN;
6804 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
6805 }
6356 6806
6357 /* Allocate and initialize aggregated TXQs */ 6807 /* Allocate and initialize aggregated TXQs */
6358 priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(), 6808 priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
@@ -6374,11 +6824,25 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
6374 mvpp2_rx_fifo_init(priv); 6824 mvpp2_rx_fifo_init(priv);
6375 6825
6376 /* Reset Rx queue group interrupt configuration */ 6826 /* Reset Rx queue group interrupt configuration */
6377 for (i = 0; i < MVPP2_MAX_PORTS; i++) 6827 for (i = 0; i < MVPP2_MAX_PORTS; i++) {
6378 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number); 6828 if (priv->hw_version == MVPP21) {
6829 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(i),
6830 rxq_number);
6831 continue;
6832 } else {
6833 u32 val;
6834
6835 val = (i << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
6836 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
6379 6837
6380 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, 6838 val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
6381 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); 6839 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
6840 }
6841 }
6842
6843 if (priv->hw_version == MVPP21)
6844 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
6845 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
6382 6846
6383 /* Allow cache snoop when transmitting packets */ 6847 /* Allow cache snoop when transmitting packets */
6384 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); 6848 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
@@ -6405,22 +6869,46 @@ static int mvpp2_probe(struct platform_device *pdev)
6405 struct device_node *port_node; 6869 struct device_node *port_node;
6406 struct mvpp2 *priv; 6870 struct mvpp2 *priv;
6407 struct resource *res; 6871 struct resource *res;
6408 int port_count, first_rxq; 6872 void __iomem *base;
6873 int port_count, cpu;
6409 int err; 6874 int err;
6410 6875
6411 priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL); 6876 priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
6412 if (!priv) 6877 if (!priv)
6413 return -ENOMEM; 6878 return -ENOMEM;
6414 6879
6880 priv->hw_version =
6881 (unsigned long)of_device_get_match_data(&pdev->dev);
6882
6415 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 6883 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
6416 priv->base = devm_ioremap_resource(&pdev->dev, res); 6884 base = devm_ioremap_resource(&pdev->dev, res);
6417 if (IS_ERR(priv->base)) 6885 if (IS_ERR(base))
6418 return PTR_ERR(priv->base); 6886 return PTR_ERR(base);
6887
6888 if (priv->hw_version == MVPP21) {
6889 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
6890 priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
6891 if (IS_ERR(priv->lms_base))
6892 return PTR_ERR(priv->lms_base);
6893 } else {
6894 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
6895 priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
6896 if (IS_ERR(priv->iface_base))
6897 return PTR_ERR(priv->iface_base);
6898 }
6899
6900 for_each_present_cpu(cpu) {
6901 u32 addr_space_sz;
6902
6903 addr_space_sz = (priv->hw_version == MVPP21 ?
6904 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
6905 priv->cpu_base[cpu] = base + cpu * addr_space_sz;
6906 }
6419 6907
6420 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 6908 if (priv->hw_version == MVPP21)
6421 priv->lms_base = devm_ioremap_resource(&pdev->dev, res); 6909 priv->max_port_rxqs = 8;
6422 if (IS_ERR(priv->lms_base)) 6910 else
6423 return PTR_ERR(priv->lms_base); 6911 priv->max_port_rxqs = 32;
6424 6912
6425 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk"); 6913 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
6426 if (IS_ERR(priv->pp_clk)) 6914 if (IS_ERR(priv->pp_clk))
@@ -6438,21 +6926,47 @@ static int mvpp2_probe(struct platform_device *pdev)
6438 if (err < 0) 6926 if (err < 0)
6439 goto err_pp_clk; 6927 goto err_pp_clk;
6440 6928
6929 if (priv->hw_version == MVPP22) {
6930 priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
6931 if (IS_ERR(priv->mg_clk)) {
6932 err = PTR_ERR(priv->mg_clk);
6933 goto err_gop_clk;
6934 }
6935
6936 err = clk_prepare_enable(priv->mg_clk);
6937 if (err < 0)
6938 goto err_gop_clk;
6939 }
6940
6441 /* Get system's tclk rate */ 6941 /* Get system's tclk rate */
6442 priv->tclk = clk_get_rate(priv->pp_clk); 6942 priv->tclk = clk_get_rate(priv->pp_clk);
6443 6943
6944 if (priv->hw_version == MVPP22) {
6945 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
6946 if (err)
6947 goto err_mg_clk;
6948 /* Sadly, the BM pools all share the same register to
6949 * store the high 32 bits of their address. So they
6950 * must all have the same high 32 bits, which forces
6951 * us to restrict coherent memory to DMA_BIT_MASK(32).
6952 */
6953 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
6954 if (err)
6955 goto err_mg_clk;
6956 }
6957
6444 /* Initialize network controller */ 6958 /* Initialize network controller */
6445 err = mvpp2_init(pdev, priv); 6959 err = mvpp2_init(pdev, priv);
6446 if (err < 0) { 6960 if (err < 0) {
6447 dev_err(&pdev->dev, "failed to initialize controller\n"); 6961 dev_err(&pdev->dev, "failed to initialize controller\n");
6448 goto err_gop_clk; 6962 goto err_mg_clk;
6449 } 6963 }
6450 6964
6451 port_count = of_get_available_child_count(dn); 6965 port_count = of_get_available_child_count(dn);
6452 if (port_count == 0) { 6966 if (port_count == 0) {
6453 dev_err(&pdev->dev, "no ports enabled\n"); 6967 dev_err(&pdev->dev, "no ports enabled\n");
6454 err = -ENODEV; 6968 err = -ENODEV;
6455 goto err_gop_clk; 6969 goto err_mg_clk;
6456 } 6970 }
6457 6971
6458 priv->port_list = devm_kcalloc(&pdev->dev, port_count, 6972 priv->port_list = devm_kcalloc(&pdev->dev, port_count,
@@ -6460,20 +6974,22 @@ static int mvpp2_probe(struct platform_device *pdev)
6460 GFP_KERNEL); 6974 GFP_KERNEL);
6461 if (!priv->port_list) { 6975 if (!priv->port_list) {
6462 err = -ENOMEM; 6976 err = -ENOMEM;
6463 goto err_gop_clk; 6977 goto err_mg_clk;
6464 } 6978 }
6465 6979
6466 /* Initialize ports */ 6980 /* Initialize ports */
6467 first_rxq = 0;
6468 for_each_available_child_of_node(dn, port_node) { 6981 for_each_available_child_of_node(dn, port_node) {
6469 err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq); 6982 err = mvpp2_port_probe(pdev, port_node, priv);
6470 if (err < 0) 6983 if (err < 0)
6471 goto err_gop_clk; 6984 goto err_mg_clk;
6472 } 6985 }
6473 6986
6474 platform_set_drvdata(pdev, priv); 6987 platform_set_drvdata(pdev, priv);
6475 return 0; 6988 return 0;
6476 6989
6990err_mg_clk:
6991 if (priv->hw_version == MVPP22)
6992 clk_disable_unprepare(priv->mg_clk);
6477err_gop_clk: 6993err_gop_clk:
6478 clk_disable_unprepare(priv->gop_clk); 6994 clk_disable_unprepare(priv->gop_clk);
6479err_pp_clk: 6995err_pp_clk:
@@ -6506,9 +7022,10 @@ static int mvpp2_remove(struct platform_device *pdev)
6506 dma_free_coherent(&pdev->dev, 7022 dma_free_coherent(&pdev->dev,
6507 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 7023 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
6508 aggr_txq->descs, 7024 aggr_txq->descs,
6509 aggr_txq->descs_phys); 7025 aggr_txq->descs_dma);
6510 } 7026 }
6511 7027
7028 clk_disable_unprepare(priv->mg_clk);
6512 clk_disable_unprepare(priv->pp_clk); 7029 clk_disable_unprepare(priv->pp_clk);
6513 clk_disable_unprepare(priv->gop_clk); 7030 clk_disable_unprepare(priv->gop_clk);
6514 7031
@@ -6516,7 +7033,14 @@ static int mvpp2_remove(struct platform_device *pdev)
6516} 7033}
6517 7034
6518static const struct of_device_id mvpp2_match[] = { 7035static const struct of_device_id mvpp2_match[] = {
6519 { .compatible = "marvell,armada-375-pp2" }, 7036 {
7037 .compatible = "marvell,armada-375-pp2",
7038 .data = (void *)MVPP21,
7039 },
7040 {
7041 .compatible = "marvell,armada-7k-pp22",
7042 .data = (void *)MVPP22,
7043 },
6520 { } 7044 { }
6521}; 7045};
6522MODULE_DEVICE_TABLE(of, mvpp2_match); 7046MODULE_DEVICE_TABLE(of, mvpp2_match);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 9e757684816d..bf6317eca2f6 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1908,10 +1908,9 @@ static int __init mtk_init(struct net_device *dev)
1908 1908
1909 /* If the mac address is invalid, use random mac address */ 1909 /* If the mac address is invalid, use random mac address */
1910 if (!is_valid_ether_addr(dev->dev_addr)) { 1910 if (!is_valid_ether_addr(dev->dev_addr)) {
1911 random_ether_addr(dev->dev_addr); 1911 eth_hw_addr_random(dev);
1912 dev_err(eth->dev, "generated random MAC address %pM\n", 1912 dev_err(eth->dev, "generated random MAC address %pM\n",
1913 dev->dev_addr); 1913 dev->dev_addr);
1914 dev->addr_assign_type = NET_ADDR_RANDOM;
1915 } 1914 }
1916 1915
1917 return mtk_phy_connect(dev); 1916 return mtk_phy_connect(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index c4d714fcc7da..ffbcb27c05e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -117,7 +117,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
117 /* port statistics */ 117 /* port statistics */
118 "tso_packets", 118 "tso_packets",
119 "xmit_more", 119 "xmit_more",
120 "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed", 120 "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_pages",
121 "rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload", 121 "rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
122 122
123 /* pf statistics */ 123 /* pf statistics */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 61420473fe5f..94fab20ef146 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -92,7 +92,9 @@ static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
92 if (tc->type != TC_SETUP_MQPRIO) 92 if (tc->type != TC_SETUP_MQPRIO)
93 return -EINVAL; 93 return -EINVAL;
94 94
95 return mlx4_en_setup_tc(dev, tc->tc); 95 tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
96
97 return mlx4_en_setup_tc(dev, tc->mqprio->num_tc);
96} 98}
97 99
98#ifdef CONFIG_RFS_ACCEL 100#ifdef CONFIG_RFS_ACCEL
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 9166d90e7328..e0eb695318e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -213,6 +213,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
213 priv->port_stats.rx_chksum_good = 0; 213 priv->port_stats.rx_chksum_good = 0;
214 priv->port_stats.rx_chksum_none = 0; 214 priv->port_stats.rx_chksum_none = 0;
215 priv->port_stats.rx_chksum_complete = 0; 215 priv->port_stats.rx_chksum_complete = 0;
216 priv->port_stats.rx_alloc_pages = 0;
216 priv->xdp_stats.rx_xdp_drop = 0; 217 priv->xdp_stats.rx_xdp_drop = 0;
217 priv->xdp_stats.rx_xdp_tx = 0; 218 priv->xdp_stats.rx_xdp_tx = 0;
218 priv->xdp_stats.rx_xdp_tx_full = 0; 219 priv->xdp_stats.rx_xdp_tx_full = 0;
@@ -223,6 +224,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
223 priv->port_stats.rx_chksum_good += READ_ONCE(ring->csum_ok); 224 priv->port_stats.rx_chksum_good += READ_ONCE(ring->csum_ok);
224 priv->port_stats.rx_chksum_none += READ_ONCE(ring->csum_none); 225 priv->port_stats.rx_chksum_none += READ_ONCE(ring->csum_none);
225 priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete); 226 priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete);
227 priv->port_stats.rx_alloc_pages += READ_ONCE(ring->rx_alloc_pages);
226 priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop); 228 priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop);
227 priv->xdp_stats.rx_xdp_tx += READ_ONCE(ring->xdp_tx); 229 priv->xdp_stats.rx_xdp_tx += READ_ONCE(ring->xdp_tx);
228 priv->xdp_stats.rx_xdp_tx_full += READ_ONCE(ring->xdp_tx_full); 230 priv->xdp_stats.rx_xdp_tx_full += READ_ONCE(ring->xdp_tx_full);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 867292880c07..aa074e57ce06 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -50,173 +50,62 @@
50 50
51#include "mlx4_en.h" 51#include "mlx4_en.h"
52 52
53static int mlx4_alloc_pages(struct mlx4_en_priv *priv, 53static int mlx4_alloc_page(struct mlx4_en_priv *priv,
54 struct mlx4_en_rx_alloc *page_alloc, 54 struct mlx4_en_rx_alloc *frag,
55 const struct mlx4_en_frag_info *frag_info, 55 gfp_t gfp)
56 gfp_t _gfp)
57{ 56{
58 int order;
59 struct page *page; 57 struct page *page;
60 dma_addr_t dma; 58 dma_addr_t dma;
61 59
62 for (order = frag_info->order; ;) { 60 page = alloc_page(gfp);
63 gfp_t gfp = _gfp; 61 if (unlikely(!page))
64 62 return -ENOMEM;
65 if (order) 63 dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
66 gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
67 page = alloc_pages(gfp, order);
68 if (likely(page))
69 break;
70 if (--order < 0 ||
71 ((PAGE_SIZE << order) < frag_info->frag_size))
72 return -ENOMEM;
73 }
74 dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
75 frag_info->dma_dir);
76 if (unlikely(dma_mapping_error(priv->ddev, dma))) { 64 if (unlikely(dma_mapping_error(priv->ddev, dma))) {
77 put_page(page); 65 __free_page(page);
78 return -ENOMEM; 66 return -ENOMEM;
79 } 67 }
80 page_alloc->page_size = PAGE_SIZE << order; 68 frag->page = page;
81 page_alloc->page = page; 69 frag->dma = dma;
82 page_alloc->dma = dma; 70 frag->page_offset = priv->rx_headroom;
83 page_alloc->page_offset = 0;
84 /* Not doing get_page() for each frag is a big win
85 * on asymetric workloads. Note we can not use atomic_set().
86 */
87 page_ref_add(page, page_alloc->page_size / frag_info->frag_stride - 1);
88 return 0; 71 return 0;
89} 72}
90 73
91static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, 74static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
75 struct mlx4_en_rx_ring *ring,
92 struct mlx4_en_rx_desc *rx_desc, 76 struct mlx4_en_rx_desc *rx_desc,
93 struct mlx4_en_rx_alloc *frags, 77 struct mlx4_en_rx_alloc *frags,
94 struct mlx4_en_rx_alloc *ring_alloc,
95 gfp_t gfp) 78 gfp_t gfp)
96{ 79{
97 struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
98 const struct mlx4_en_frag_info *frag_info;
99 struct page *page;
100 int i; 80 int i;
101 81
102 for (i = 0; i < priv->num_frags; i++) { 82 for (i = 0; i < priv->num_frags; i++, frags++) {
103 frag_info = &priv->frag_info[i]; 83 if (!frags->page) {
104 page_alloc[i] = ring_alloc[i]; 84 if (mlx4_alloc_page(priv, frags, gfp))
105 page_alloc[i].page_offset += frag_info->frag_stride; 85 return -ENOMEM;
106 86 ring->rx_alloc_pages++;
107 if (page_alloc[i].page_offset + frag_info->frag_stride <=
108 ring_alloc[i].page_size)
109 continue;
110
111 if (unlikely(mlx4_alloc_pages(priv, &page_alloc[i],
112 frag_info, gfp)))
113 goto out;
114 }
115
116 for (i = 0; i < priv->num_frags; i++) {
117 frags[i] = ring_alloc[i];
118 frags[i].page_offset += priv->frag_info[i].rx_headroom;
119 rx_desc->data[i].addr = cpu_to_be64(frags[i].dma +
120 frags[i].page_offset);
121 ring_alloc[i] = page_alloc[i];
122 }
123
124 return 0;
125
126out:
127 while (i--) {
128 if (page_alloc[i].page != ring_alloc[i].page) {
129 dma_unmap_page(priv->ddev, page_alloc[i].dma,
130 page_alloc[i].page_size,
131 priv->frag_info[i].dma_dir);
132 page = page_alloc[i].page;
133 /* Revert changes done by mlx4_alloc_pages */
134 page_ref_sub(page, page_alloc[i].page_size /
135 priv->frag_info[i].frag_stride - 1);
136 put_page(page);
137 } 87 }
138 } 88 rx_desc->data[i].addr = cpu_to_be64(frags->dma +
139 return -ENOMEM; 89 frags->page_offset);
140}
141
142static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
143 struct mlx4_en_rx_alloc *frags,
144 int i)
145{
146 const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
147 u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
148
149
150 if (next_frag_end > frags[i].page_size)
151 dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
152 frag_info->dma_dir);
153
154 if (frags[i].page)
155 put_page(frags[i].page);
156}
157
158static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
159 struct mlx4_en_rx_ring *ring)
160{
161 int i;
162 struct mlx4_en_rx_alloc *page_alloc;
163
164 for (i = 0; i < priv->num_frags; i++) {
165 const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
166
167 if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
168 frag_info, GFP_KERNEL | __GFP_COLD))
169 goto out;
170
171 en_dbg(DRV, priv, " frag %d allocator: - size:%d frags:%d\n",
172 i, ring->page_alloc[i].page_size,
173 page_ref_count(ring->page_alloc[i].page));
174 } 90 }
175 return 0; 91 return 0;
176
177out:
178 while (i--) {
179 struct page *page;
180
181 page_alloc = &ring->page_alloc[i];
182 dma_unmap_page(priv->ddev, page_alloc->dma,
183 page_alloc->page_size,
184 priv->frag_info[i].dma_dir);
185 page = page_alloc->page;
186 /* Revert changes done by mlx4_alloc_pages */
187 page_ref_sub(page, page_alloc->page_size /
188 priv->frag_info[i].frag_stride - 1);
189 put_page(page);
190 page_alloc->page = NULL;
191 }
192 return -ENOMEM;
193} 92}
194 93
195static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv, 94static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
196 struct mlx4_en_rx_ring *ring) 95 struct mlx4_en_rx_alloc *frag)
197{ 96{
198 struct mlx4_en_rx_alloc *page_alloc; 97 if (frag->page) {
199 int i; 98 dma_unmap_page(priv->ddev, frag->dma,
200 99 PAGE_SIZE, priv->dma_dir);
201 for (i = 0; i < priv->num_frags; i++) { 100 __free_page(frag->page);
202 const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
203
204 page_alloc = &ring->page_alloc[i];
205 en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
206 i, page_count(page_alloc->page));
207
208 dma_unmap_page(priv->ddev, page_alloc->dma,
209 page_alloc->page_size, frag_info->dma_dir);
210 while (page_alloc->page_offset + frag_info->frag_stride <
211 page_alloc->page_size) {
212 put_page(page_alloc->page);
213 page_alloc->page_offset += frag_info->frag_stride;
214 }
215 page_alloc->page = NULL;
216 } 101 }
102 /* We need to clear all fields, otherwise a change of priv->log_rx_info
 103 * could lead to seeing garbage later in frag->page.
104 */
105 memset(frag, 0, sizeof(*frag));
217} 106}
218 107
219static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv, 108static void mlx4_en_init_rx_desc(const struct mlx4_en_priv *priv,
220 struct mlx4_en_rx_ring *ring, int index) 109 struct mlx4_en_rx_ring *ring, int index)
221{ 110{
222 struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index; 111 struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
@@ -248,18 +137,23 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
248 struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride); 137 struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
249 struct mlx4_en_rx_alloc *frags = ring->rx_info + 138 struct mlx4_en_rx_alloc *frags = ring->rx_info +
250 (index << priv->log_rx_info); 139 (index << priv->log_rx_info);
251
252 if (ring->page_cache.index > 0) { 140 if (ring->page_cache.index > 0) {
253 frags[0] = ring->page_cache.buf[--ring->page_cache.index]; 141 /* XDP uses a single page per frame */
254 rx_desc->data[0].addr = cpu_to_be64(frags[0].dma + 142 if (!frags->page) {
255 frags[0].page_offset); 143 ring->page_cache.index--;
144 frags->page = ring->page_cache.buf[ring->page_cache.index].page;
145 frags->dma = ring->page_cache.buf[ring->page_cache.index].dma;
146 }
147 frags->page_offset = XDP_PACKET_HEADROOM;
148 rx_desc->data[0].addr = cpu_to_be64(frags->dma +
149 XDP_PACKET_HEADROOM);
256 return 0; 150 return 0;
257 } 151 }
258 152
259 return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp); 153 return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
260} 154}
261 155
262static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring) 156static bool mlx4_en_is_ring_empty(const struct mlx4_en_rx_ring *ring)
263{ 157{
264 return ring->prod == ring->cons; 158 return ring->prod == ring->cons;
265} 159}
@@ -269,7 +163,8 @@ static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
269 *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff); 163 *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
270} 164}
271 165
272static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv, 166/* slow path */
167static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
273 struct mlx4_en_rx_ring *ring, 168 struct mlx4_en_rx_ring *ring,
274 int index) 169 int index)
275{ 170{
@@ -279,7 +174,7 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
279 frags = ring->rx_info + (index << priv->log_rx_info); 174 frags = ring->rx_info + (index << priv->log_rx_info);
280 for (nr = 0; nr < priv->num_frags; nr++) { 175 for (nr = 0; nr < priv->num_frags; nr++) {
281 en_dbg(DRV, priv, "Freeing fragment:%d\n", nr); 176 en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
282 mlx4_en_free_frag(priv, frags, nr); 177 mlx4_en_free_frag(priv, frags + nr);
283 } 178 }
284} 179}
285 180
@@ -335,12 +230,12 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
335 ring->cons, ring->prod); 230 ring->cons, ring->prod);
336 231
337 /* Unmap and free Rx buffers */ 232 /* Unmap and free Rx buffers */
338 while (!mlx4_en_is_ring_empty(ring)) { 233 for (index = 0; index < ring->size; index++) {
339 index = ring->cons & ring->size_mask;
340 en_dbg(DRV, priv, "Processing descriptor:%d\n", index); 234 en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
341 mlx4_en_free_rx_desc(priv, ring, index); 235 mlx4_en_free_rx_desc(priv, ring, index);
342 ++ring->cons;
343 } 236 }
237 ring->cons = 0;
238 ring->prod = 0;
344} 239}
345 240
346void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev) 241void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
@@ -392,9 +287,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
392 287
393 tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS * 288 tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
394 sizeof(struct mlx4_en_rx_alloc)); 289 sizeof(struct mlx4_en_rx_alloc));
395 ring->rx_info = vmalloc_node(tmp, node); 290 ring->rx_info = vzalloc_node(tmp, node);
396 if (!ring->rx_info) { 291 if (!ring->rx_info) {
397 ring->rx_info = vmalloc(tmp); 292 ring->rx_info = vzalloc(tmp);
398 if (!ring->rx_info) { 293 if (!ring->rx_info) {
399 err = -ENOMEM; 294 err = -ENOMEM;
400 goto err_ring; 295 goto err_ring;
@@ -464,16 +359,6 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
464 /* Initialize all descriptors */ 359 /* Initialize all descriptors */
465 for (i = 0; i < ring->size; i++) 360 for (i = 0; i < ring->size; i++)
466 mlx4_en_init_rx_desc(priv, ring, i); 361 mlx4_en_init_rx_desc(priv, ring, i);
467
468 /* Initialize page allocators */
469 err = mlx4_en_init_allocator(priv, ring);
470 if (err) {
471 en_err(priv, "Failed initializing ring allocator\n");
472 if (ring->stride <= TXBB_SIZE)
473 ring->buf -= TXBB_SIZE;
474 ring_ind--;
475 goto err_allocator;
476 }
477 } 362 }
478 err = mlx4_en_fill_rx_buffers(priv); 363 err = mlx4_en_fill_rx_buffers(priv);
479 if (err) 364 if (err)
@@ -493,11 +378,9 @@ err_buffers:
493 mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]); 378 mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);
494 379
495 ring_ind = priv->rx_ring_num - 1; 380 ring_ind = priv->rx_ring_num - 1;
496err_allocator:
497 while (ring_ind >= 0) { 381 while (ring_ind >= 0) {
498 if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE) 382 if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
499 priv->rx_ring[ring_ind]->buf -= TXBB_SIZE; 383 priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
500 mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
501 ring_ind--; 384 ring_ind--;
502 } 385 }
503 return err; 386 return err;
@@ -537,7 +420,9 @@ bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
537 if (cache->index >= MLX4_EN_CACHE_SIZE) 420 if (cache->index >= MLX4_EN_CACHE_SIZE)
538 return false; 421 return false;
539 422
540 cache->buf[cache->index++] = *frame; 423 cache->buf[cache->index].page = frame->page;
424 cache->buf[cache->index].dma = frame->dma;
425 cache->index++;
541 return true; 426 return true;
542} 427}
543 428
@@ -567,136 +452,91 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
567 int i; 452 int i;
568 453
569 for (i = 0; i < ring->page_cache.index; i++) { 454 for (i = 0; i < ring->page_cache.index; i++) {
570 struct mlx4_en_rx_alloc *frame = &ring->page_cache.buf[i]; 455 dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
571 456 PAGE_SIZE, priv->dma_dir);
572 dma_unmap_page(priv->ddev, frame->dma, frame->page_size, 457 put_page(ring->page_cache.buf[i].page);
573 priv->frag_info[0].dma_dir);
574 put_page(frame->page);
575 } 458 }
576 ring->page_cache.index = 0; 459 ring->page_cache.index = 0;
577 mlx4_en_free_rx_buf(priv, ring); 460 mlx4_en_free_rx_buf(priv, ring);
578 if (ring->stride <= TXBB_SIZE) 461 if (ring->stride <= TXBB_SIZE)
579 ring->buf -= TXBB_SIZE; 462 ring->buf -= TXBB_SIZE;
580 mlx4_en_destroy_allocator(priv, ring);
581} 463}
582 464
583 465
584static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, 466static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
585 struct mlx4_en_rx_desc *rx_desc,
586 struct mlx4_en_rx_alloc *frags, 467 struct mlx4_en_rx_alloc *frags,
587 struct sk_buff *skb, 468 struct sk_buff *skb,
588 int length) 469 int length)
589{ 470{
590 struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags; 471 const struct mlx4_en_frag_info *frag_info = priv->frag_info;
591 struct mlx4_en_frag_info *frag_info; 472 unsigned int truesize = 0;
592 int nr; 473 int nr, frag_size;
474 struct page *page;
593 dma_addr_t dma; 475 dma_addr_t dma;
476 bool release;
594 477
595 /* Collect used fragments while replacing them in the HW descriptors */ 478 /* Collect used fragments while replacing them in the HW descriptors */
596 for (nr = 0; nr < priv->num_frags; nr++) { 479 for (nr = 0;; frags++) {
597 frag_info = &priv->frag_info[nr]; 480 frag_size = min_t(int, length, frag_info->frag_size);
598 if (length <= frag_info->frag_prefix_size) 481
599 break; 482 page = frags->page;
600 if (unlikely(!frags[nr].page)) 483 if (unlikely(!page))
601 goto fail; 484 goto fail;
602 485
603 dma = be64_to_cpu(rx_desc->data[nr].addr); 486 dma = frags->dma;
604 dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size, 487 dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
605 DMA_FROM_DEVICE); 488 frag_size, priv->dma_dir);
489
490 __skb_fill_page_desc(skb, nr, page, frags->page_offset,
491 frag_size);
606 492
607 __skb_fill_page_desc(skb, nr, frags[nr].page, 493 truesize += frag_info->frag_stride;
608 frags[nr].page_offset, 494 if (frag_info->frag_stride == PAGE_SIZE / 2) {
609 frag_info->frag_size); 495 frags->page_offset ^= PAGE_SIZE / 2;
496 release = page_count(page) != 1 ||
497 page_is_pfmemalloc(page) ||
498 page_to_nid(page) != numa_mem_id();
499 } else {
500 u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);
610 501
611 skb->truesize += frag_info->frag_stride; 502 frags->page_offset += sz_align;
612 frags[nr].page = NULL; 503 release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
504 }
505 if (release) {
506 dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
507 frags->page = NULL;
508 } else {
509 page_ref_inc(page);
510 }
511
512 nr++;
513 length -= frag_size;
514 if (!length)
515 break;
516 frag_info++;
613 } 517 }
614 /* Adjust size of last fragment to match actual length */ 518 skb->truesize += truesize;
615 if (nr > 0)
616 skb_frag_size_set(&skb_frags_rx[nr - 1],
617 length - priv->frag_info[nr - 1].frag_prefix_size);
618 return nr; 519 return nr;
619 520
620fail: 521fail:
621 while (nr > 0) { 522 while (nr > 0) {
622 nr--; 523 nr--;
623 __skb_frag_unref(&skb_frags_rx[nr]); 524 __skb_frag_unref(skb_shinfo(skb)->frags + nr);
624 } 525 }
625 return 0; 526 return 0;
626} 527}
627 528
628 529static void validate_loopback(struct mlx4_en_priv *priv, void *va)
629static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
630 struct mlx4_en_rx_desc *rx_desc,
631 struct mlx4_en_rx_alloc *frags,
632 unsigned int length)
633{
634 struct sk_buff *skb;
635 void *va;
636 int used_frags;
637 dma_addr_t dma;
638
639 skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
640 if (unlikely(!skb)) {
641 en_dbg(RX_ERR, priv, "Failed allocating skb\n");
642 return NULL;
643 }
644 skb_reserve(skb, NET_IP_ALIGN);
645 skb->len = length;
646
647 /* Get pointer to first fragment so we could copy the headers into the
648 * (linear part of the) skb */
649 va = page_address(frags[0].page) + frags[0].page_offset;
650
651 if (length <= SMALL_PACKET_SIZE) {
652 /* We are copying all relevant data to the skb - temporarily
653 * sync buffers for the copy */
654 dma = be64_to_cpu(rx_desc->data[0].addr);
655 dma_sync_single_for_cpu(priv->ddev, dma, length,
656 DMA_FROM_DEVICE);
657 skb_copy_to_linear_data(skb, va, length);
658 skb->tail += length;
659 } else {
660 unsigned int pull_len;
661
662 /* Move relevant fragments to skb */
663 used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
664 skb, length);
665 if (unlikely(!used_frags)) {
666 kfree_skb(skb);
667 return NULL;
668 }
669 skb_shinfo(skb)->nr_frags = used_frags;
670
671 pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE);
672 /* Copy headers into the skb linear buffer */
673 memcpy(skb->data, va, pull_len);
674 skb->tail += pull_len;
675
676 /* Skip headers in first fragment */
677 skb_shinfo(skb)->frags[0].page_offset += pull_len;
678
679 /* Adjust size of first fragment */
680 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len);
681 skb->data_len = length - pull_len;
682 }
683 return skb;
684}
685
686static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
687{ 530{
531 const unsigned char *data = va + ETH_HLEN;
688 int i; 532 int i;
689 int offset = ETH_HLEN;
690 533
691 for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) { 534 for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++) {
692 if (*(skb->data + offset) != (unsigned char) (i & 0xff)) 535 if (data[i] != (unsigned char)i)
693 goto out_loopback; 536 return;
694 } 537 }
695 /* Loopback found */ 538 /* Loopback found */
696 priv->loopback_ok = 1; 539 priv->loopback_ok = 1;
697
698out_loopback:
699 dev_kfree_skb_any(skb);
700} 540}
701 541
702static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv, 542static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
@@ -801,7 +641,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
801 struct mlx4_cqe *cqe; 641 struct mlx4_cqe *cqe;
802 struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring]; 642 struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
803 struct mlx4_en_rx_alloc *frags; 643 struct mlx4_en_rx_alloc *frags;
804 struct mlx4_en_rx_desc *rx_desc;
805 struct bpf_prog *xdp_prog; 644 struct bpf_prog *xdp_prog;
806 int doorbell_pending; 645 int doorbell_pending;
807 struct sk_buff *skb; 646 struct sk_buff *skb;
@@ -834,10 +673,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
834 /* Process all completed CQEs */ 673 /* Process all completed CQEs */
835 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, 674 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
836 cq->mcq.cons_index & cq->size)) { 675 cq->mcq.cons_index & cq->size)) {
676 void *va;
837 677
838 frags = ring->rx_info + (index << priv->log_rx_info); 678 frags = ring->rx_info + (index << priv->log_rx_info);
839 rx_desc = ring->buf + (index << ring->log_stride); 679 va = page_address(frags[0].page) + frags[0].page_offset;
840
841 /* 680 /*
842 * make sure we read the CQE after we read the ownership bit 681 * make sure we read the CQE after we read the ownership bit
843 */ 682 */
@@ -860,16 +699,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
860 * and not performing the selftest or flb disabled 699 * and not performing the selftest or flb disabled
861 */ 700 */
862 if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) { 701 if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
863 struct ethhdr *ethh; 702 const struct ethhdr *ethh = va;
864 dma_addr_t dma; 703 dma_addr_t dma;
865 /* Get pointer to first fragment since we haven't 704 /* Get pointer to first fragment since we haven't
866 * skb yet and cast it to ethhdr struct 705 * skb yet and cast it to ethhdr struct
867 */ 706 */
868 dma = be64_to_cpu(rx_desc->data[0].addr); 707 dma = frags[0].dma + frags[0].page_offset;
869 dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh), 708 dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
870 DMA_FROM_DEVICE); 709 DMA_FROM_DEVICE);
871 ethh = (struct ethhdr *)(page_address(frags[0].page) +
872 frags[0].page_offset);
873 710
874 if (is_multicast_ether_addr(ethh->h_dest)) { 711 if (is_multicast_ether_addr(ethh->h_dest)) {
875 struct mlx4_mac_entry *entry; 712 struct mlx4_mac_entry *entry;
@@ -887,13 +724,16 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
887 } 724 }
888 } 725 }
889 726
727 if (unlikely(priv->validate_loopback)) {
728 validate_loopback(priv, va);
729 goto next;
730 }
731
890 /* 732 /*
891 * Packet is OK - process it. 733 * Packet is OK - process it.
892 */ 734 */
893 length = be32_to_cpu(cqe->byte_cnt); 735 length = be32_to_cpu(cqe->byte_cnt);
894 length -= ring->fcs_del; 736 length -= ring->fcs_del;
895 l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
896 (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
897 737
898 /* A bpf program gets first chance to drop the packet. It may 738 /* A bpf program gets first chance to drop the packet. It may
899 * read bytes but not past the end of the frag. 739 * read bytes but not past the end of the frag.
@@ -904,13 +744,13 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
904 void *orig_data; 744 void *orig_data;
905 u32 act; 745 u32 act;
906 746
907 dma = be64_to_cpu(rx_desc->data[0].addr); 747 dma = frags[0].dma + frags[0].page_offset;
908 dma_sync_single_for_cpu(priv->ddev, dma, 748 dma_sync_single_for_cpu(priv->ddev, dma,
909 priv->frag_info[0].frag_size, 749 priv->frag_info[0].frag_size,
910 DMA_FROM_DEVICE); 750 DMA_FROM_DEVICE);
911 751
912 xdp.data_hard_start = page_address(frags[0].page); 752 xdp.data_hard_start = va - frags[0].page_offset;
913 xdp.data = xdp.data_hard_start + frags[0].page_offset; 753 xdp.data = va;
914 xdp.data_end = xdp.data + length; 754 xdp.data_end = xdp.data + length;
915 orig_data = xdp.data; 755 orig_data = xdp.data;
916 756
@@ -920,6 +760,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
920 length = xdp.data_end - xdp.data; 760 length = xdp.data_end - xdp.data;
921 frags[0].page_offset = xdp.data - 761 frags[0].page_offset = xdp.data -
922 xdp.data_hard_start; 762 xdp.data_hard_start;
763 va = xdp.data;
923 } 764 }
924 765
925 switch (act) { 766 switch (act) {
@@ -928,8 +769,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
928 case XDP_TX: 769 case XDP_TX:
929 if (likely(!mlx4_en_xmit_frame(ring, frags, dev, 770 if (likely(!mlx4_en_xmit_frame(ring, frags, dev,
930 length, cq->ring, 771 length, cq->ring,
931 &doorbell_pending))) 772 &doorbell_pending))) {
932 goto consumed; 773 frags[0].page = NULL;
774 goto next;
775 }
933 trace_xdp_exception(dev, xdp_prog, act); 776 trace_xdp_exception(dev, xdp_prog, act);
934 goto xdp_drop_no_cnt; /* Drop on xmit failure */ 777 goto xdp_drop_no_cnt; /* Drop on xmit failure */
935 default: 778 default:
@@ -939,8 +782,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
939 case XDP_DROP: 782 case XDP_DROP:
940 ring->xdp_drop++; 783 ring->xdp_drop++;
941xdp_drop_no_cnt: 784xdp_drop_no_cnt:
942 if (likely(mlx4_en_rx_recycle(ring, frags)))
943 goto consumed;
944 goto next; 785 goto next;
945 } 786 }
946 } 787 }
@@ -948,129 +789,51 @@ xdp_drop_no_cnt:
948 ring->bytes += length; 789 ring->bytes += length;
949 ring->packets++; 790 ring->packets++;
950 791
792 skb = napi_get_frags(&cq->napi);
793 if (!skb)
794 goto next;
795
796 if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
797 timestamp = mlx4_en_get_cqe_ts(cqe);
798 mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
799 timestamp);
800 }
801 skb_record_rx_queue(skb, cq->ring);
802
951 if (likely(dev->features & NETIF_F_RXCSUM)) { 803 if (likely(dev->features & NETIF_F_RXCSUM)) {
952 if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | 804 if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
953 MLX4_CQE_STATUS_UDP)) { 805 MLX4_CQE_STATUS_UDP)) {
954 if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && 806 if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
955 cqe->checksum == cpu_to_be16(0xffff)) { 807 cqe->checksum == cpu_to_be16(0xffff)) {
956 ip_summed = CHECKSUM_UNNECESSARY; 808 ip_summed = CHECKSUM_UNNECESSARY;
809 l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
810 (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
811 if (l2_tunnel)
812 skb->csum_level = 1;
957 ring->csum_ok++; 813 ring->csum_ok++;
958 } else { 814 } else {
959 ip_summed = CHECKSUM_NONE; 815 goto csum_none;
960 ring->csum_none++;
961 } 816 }
962 } else { 817 } else {
963 if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP && 818 if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
964 (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | 819 (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
965 MLX4_CQE_STATUS_IPV6))) { 820 MLX4_CQE_STATUS_IPV6))) {
966 ip_summed = CHECKSUM_COMPLETE; 821 if (check_csum(cqe, skb, va, dev->features)) {
967 ring->csum_complete++; 822 goto csum_none;
823 } else {
824 ip_summed = CHECKSUM_COMPLETE;
825 ring->csum_complete++;
826 }
968 } else { 827 } else {
969 ip_summed = CHECKSUM_NONE; 828 goto csum_none;
970 ring->csum_none++;
971 } 829 }
972 } 830 }
973 } else { 831 } else {
832csum_none:
974 ip_summed = CHECKSUM_NONE; 833 ip_summed = CHECKSUM_NONE;
975 ring->csum_none++; 834 ring->csum_none++;
976 } 835 }
977
978 /* This packet is eligible for GRO if it is:
979 * - DIX Ethernet (type interpretation)
980 * - TCP/IP (v4)
981 * - without IP options
982 * - not an IP fragment
983 */
984 if (dev->features & NETIF_F_GRO) {
985 struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
986 if (!gro_skb)
987 goto next;
988
989 nr = mlx4_en_complete_rx_desc(priv,
990 rx_desc, frags, gro_skb,
991 length);
992 if (!nr)
993 goto next;
994
995 if (ip_summed == CHECKSUM_COMPLETE) {
996 void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
997 if (check_csum(cqe, gro_skb, va,
998 dev->features)) {
999 ip_summed = CHECKSUM_NONE;
1000 ring->csum_none++;
1001 ring->csum_complete--;
1002 }
1003 }
1004
1005 skb_shinfo(gro_skb)->nr_frags = nr;
1006 gro_skb->len = length;
1007 gro_skb->data_len = length;
1008 gro_skb->ip_summed = ip_summed;
1009
1010 if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
1011 gro_skb->csum_level = 1;
1012
1013 if ((cqe->vlan_my_qpn &
1014 cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
1015 (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1016 u16 vid = be16_to_cpu(cqe->sl_vid);
1017
1018 __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
1019 } else if ((be32_to_cpu(cqe->vlan_my_qpn) &
1020 MLX4_CQE_SVLAN_PRESENT_MASK) &&
1021 (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
1022 __vlan_hwaccel_put_tag(gro_skb,
1023 htons(ETH_P_8021AD),
1024 be16_to_cpu(cqe->sl_vid));
1025 }
1026
1027 if (dev->features & NETIF_F_RXHASH)
1028 skb_set_hash(gro_skb,
1029 be32_to_cpu(cqe->immed_rss_invalid),
1030 (ip_summed == CHECKSUM_UNNECESSARY) ?
1031 PKT_HASH_TYPE_L4 :
1032 PKT_HASH_TYPE_L3);
1033
1034 skb_record_rx_queue(gro_skb, cq->ring);
1035
1036 if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
1037 timestamp = mlx4_en_get_cqe_ts(cqe);
1038 mlx4_en_fill_hwtstamps(mdev,
1039 skb_hwtstamps(gro_skb),
1040 timestamp);
1041 }
1042
1043 napi_gro_frags(&cq->napi);
1044 goto next;
1045 }
1046
1047 /* GRO not possible, complete processing here */
1048 skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
1049 if (unlikely(!skb)) {
1050 ring->dropped++;
1051 goto next;
1052 }
1053
1054 if (unlikely(priv->validate_loopback)) {
1055 validate_loopback(priv, skb);
1056 goto next;
1057 }
1058
1059 if (ip_summed == CHECKSUM_COMPLETE) {
1060 if (check_csum(cqe, skb, skb->data, dev->features)) {
1061 ip_summed = CHECKSUM_NONE;
1062 ring->csum_complete--;
1063 ring->csum_none++;
1064 }
1065 }
1066
1067 skb->ip_summed = ip_summed; 836 skb->ip_summed = ip_summed;
1068 skb->protocol = eth_type_trans(skb, dev);
1069 skb_record_rx_queue(skb, cq->ring);
1070
1071 if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
1072 skb->csum_level = 1;
1073
1074 if (dev->features & NETIF_F_RXHASH) 837 if (dev->features & NETIF_F_RXHASH)
1075 skb_set_hash(skb, 838 skb_set_hash(skb,
1076 be32_to_cpu(cqe->immed_rss_invalid), 839 be32_to_cpu(cqe->immed_rss_invalid),
@@ -1078,36 +841,36 @@ xdp_drop_no_cnt:
1078 PKT_HASH_TYPE_L4 : 841 PKT_HASH_TYPE_L4 :
1079 PKT_HASH_TYPE_L3); 842 PKT_HASH_TYPE_L3);
1080 843
1081 if ((be32_to_cpu(cqe->vlan_my_qpn) & 844
1082 MLX4_CQE_CVLAN_PRESENT_MASK) && 845 if ((cqe->vlan_my_qpn &
846 cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
1083 (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) 847 (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
1084 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid)); 848 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1085 else if ((be32_to_cpu(cqe->vlan_my_qpn) & 849 be16_to_cpu(cqe->sl_vid));
1086 MLX4_CQE_SVLAN_PRESENT_MASK) && 850 else if ((cqe->vlan_my_qpn &
851 cpu_to_be32(MLX4_CQE_SVLAN_PRESENT_MASK)) &&
1087 (dev->features & NETIF_F_HW_VLAN_STAG_RX)) 852 (dev->features & NETIF_F_HW_VLAN_STAG_RX))
1088 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), 853 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
1089 be16_to_cpu(cqe->sl_vid)); 854 be16_to_cpu(cqe->sl_vid));
1090 855
1091 if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { 856 nr = mlx4_en_complete_rx_desc(priv, frags, skb, length);
1092 timestamp = mlx4_en_get_cqe_ts(cqe); 857 if (likely(nr)) {
1093 mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb), 858 skb_shinfo(skb)->nr_frags = nr;
1094 timestamp); 859 skb->len = length;
860 skb->data_len = length;
861 napi_gro_frags(&cq->napi);
862 } else {
863 skb->vlan_tci = 0;
864 skb_clear_hash(skb);
1095 } 865 }
1096
1097 napi_gro_receive(&cq->napi, skb);
1098next: 866next:
1099 for (nr = 0; nr < priv->num_frags; nr++)
1100 mlx4_en_free_frag(priv, frags, nr);
1101
1102consumed:
1103 ++cq->mcq.cons_index; 867 ++cq->mcq.cons_index;
1104 index = (cq->mcq.cons_index) & ring->size_mask; 868 index = (cq->mcq.cons_index) & ring->size_mask;
1105 cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; 869 cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
1106 if (++polled == budget) 870 if (++polled == budget)
1107 goto out; 871 break;
1108 } 872 }
1109 873
1110out:
1111 rcu_read_unlock(); 874 rcu_read_unlock();
1112 875
1113 if (polled) { 876 if (polled) {
@@ -1178,13 +941,6 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
1178 return done; 941 return done;
1179} 942}
1180 943
1181static const int frag_sizes[] = {
1182 FRAG_SZ0,
1183 FRAG_SZ1,
1184 FRAG_SZ2,
1185 FRAG_SZ3
1186};
1187
1188void mlx4_en_calc_rx_buf(struct net_device *dev) 944void mlx4_en_calc_rx_buf(struct net_device *dev)
1189{ 945{
1190 struct mlx4_en_priv *priv = netdev_priv(dev); 946 struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -1195,33 +951,43 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
1195 * This only works when num_frags == 1. 951 * This only works when num_frags == 1.
1196 */ 952 */
1197 if (priv->tx_ring_num[TX_XDP]) { 953 if (priv->tx_ring_num[TX_XDP]) {
1198 priv->frag_info[0].order = 0;
1199 priv->frag_info[0].frag_size = eff_mtu; 954 priv->frag_info[0].frag_size = eff_mtu;
1200 priv->frag_info[0].frag_prefix_size = 0;
1201 /* This will gain efficient xdp frame recycling at the 955 /* This will gain efficient xdp frame recycling at the
1202 * expense of more costly truesize accounting 956 * expense of more costly truesize accounting
1203 */ 957 */
1204 priv->frag_info[0].frag_stride = PAGE_SIZE; 958 priv->frag_info[0].frag_stride = PAGE_SIZE;
1205 priv->frag_info[0].dma_dir = PCI_DMA_BIDIRECTIONAL; 959 priv->dma_dir = PCI_DMA_BIDIRECTIONAL;
1206 priv->frag_info[0].rx_headroom = XDP_PACKET_HEADROOM; 960 priv->rx_headroom = XDP_PACKET_HEADROOM;
1207 i = 1; 961 i = 1;
1208 } else { 962 } else {
1209 int buf_size = 0; 963 int frag_size_max = 2048, buf_size = 0;
964
965 /* should not happen, right ? */
966 if (eff_mtu > PAGE_SIZE + (MLX4_EN_MAX_RX_FRAGS - 1) * 2048)
967 frag_size_max = PAGE_SIZE;
1210 968
1211 while (buf_size < eff_mtu) { 969 while (buf_size < eff_mtu) {
1212 priv->frag_info[i].order = MLX4_EN_ALLOC_PREFER_ORDER; 970 int frag_stride, frag_size = eff_mtu - buf_size;
1213 priv->frag_info[i].frag_size = 971 int pad, nb;
1214 (eff_mtu > buf_size + frag_sizes[i]) ? 972
1215 frag_sizes[i] : eff_mtu - buf_size; 973 if (i < MLX4_EN_MAX_RX_FRAGS - 1)
1216 priv->frag_info[i].frag_prefix_size = buf_size; 974 frag_size = min(frag_size, frag_size_max);
1217 priv->frag_info[i].frag_stride = 975
1218 ALIGN(priv->frag_info[i].frag_size, 976 priv->frag_info[i].frag_size = frag_size;
1219 SMP_CACHE_BYTES); 977 frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES);
1220 priv->frag_info[i].dma_dir = PCI_DMA_FROMDEVICE; 978 /* We can only pack 2 1536-byte frames in one 4K page.
1221 priv->frag_info[i].rx_headroom = 0; 979 * Therefore, each frame would consume more bytes (truesize)
1222 buf_size += priv->frag_info[i].frag_size; 980 */
981 nb = PAGE_SIZE / frag_stride;
982 pad = (PAGE_SIZE - nb * frag_stride) / nb;
983 pad &= ~(SMP_CACHE_BYTES - 1);
984 priv->frag_info[i].frag_stride = frag_stride + pad;
985
986 buf_size += frag_size;
1223 i++; 987 i++;
1224 } 988 }
989 priv->dma_dir = PCI_DMA_FROMDEVICE;
990 priv->rx_headroom = 0;
1225 } 991 }
1226 992
1227 priv->num_frags = i; 993 priv->num_frags = i;
@@ -1232,10 +998,9 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
1232 eff_mtu, priv->num_frags); 998 eff_mtu, priv->num_frags);
1233 for (i = 0; i < priv->num_frags; i++) { 999 for (i = 0; i < priv->num_frags; i++) {
1234 en_err(priv, 1000 en_err(priv,
1235 " frag:%d - size:%d prefix:%d stride:%d\n", 1001 " frag:%d - size:%d stride:%d\n",
1236 i, 1002 i,
1237 priv->frag_info[i].frag_size, 1003 priv->frag_info[i].frag_size,
1238 priv->frag_info[i].frag_prefix_size,
1239 priv->frag_info[i].frag_stride); 1004 priv->frag_info[i].frag_stride);
1240 } 1005 }
1241} 1006}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 95290e1fc9fe..17112faafbcc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -81,14 +81,11 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
81{ 81{
82 u32 loopback_ok = 0; 82 u32 loopback_ok = 0;
83 int i; 83 int i;
84 bool gro_enabled;
85 84
86 priv->loopback_ok = 0; 85 priv->loopback_ok = 0;
87 priv->validate_loopback = 1; 86 priv->validate_loopback = 1;
88 gro_enabled = priv->dev->features & NETIF_F_GRO;
89 87
90 mlx4_en_update_loopback_state(priv->dev, priv->dev->features); 88 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
91 priv->dev->features &= ~NETIF_F_GRO;
92 89
93 /* xmit */ 90 /* xmit */
94 if (mlx4_en_test_loopback_xmit(priv)) { 91 if (mlx4_en_test_loopback_xmit(priv)) {
@@ -111,9 +108,6 @@ mlx4_en_test_loopback_exit:
111 108
112 priv->validate_loopback = 0; 109 priv->validate_loopback = 0;
113 110
114 if (gro_enabled)
115 priv->dev->features |= NETIF_F_GRO;
116
117 mlx4_en_update_loopback_state(priv->dev, priv->dev->features); 111 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
118 return !loopback_ok; 112 return !loopback_ok;
119} 113}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 3ed42199d3f1..e0c5ffb3e3a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -354,13 +354,11 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
354 struct mlx4_en_rx_alloc frame = { 354 struct mlx4_en_rx_alloc frame = {
355 .page = tx_info->page, 355 .page = tx_info->page,
356 .dma = tx_info->map0_dma, 356 .dma = tx_info->map0_dma,
357 .page_offset = XDP_PACKET_HEADROOM,
358 .page_size = PAGE_SIZE,
359 }; 357 };
360 358
361 if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) { 359 if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
362 dma_unmap_page(priv->ddev, tx_info->map0_dma, 360 dma_unmap_page(priv->ddev, tx_info->map0_dma,
363 PAGE_SIZE, priv->frag_info[0].dma_dir); 361 PAGE_SIZE, priv->dma_dir);
364 put_page(tx_info->page); 362 put_page(tx_info->page);
365 } 363 }
366 364
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 3629ce11a68b..39f401aa3047 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -102,17 +102,6 @@
102/* Use the maximum between 16384 and a single page */ 102/* Use the maximum between 16384 and a single page */
103#define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384) 103#define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384)
104 104
105#define MLX4_EN_ALLOC_PREFER_ORDER min_t(int, get_order(32768), \
106 PAGE_ALLOC_COSTLY_ORDER)
107
108/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU
109 * and 4K allocations) */
110enum {
111 FRAG_SZ0 = 1536 - NET_IP_ALIGN,
112 FRAG_SZ1 = 4096,
113 FRAG_SZ2 = 4096,
114 FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
115};
116#define MLX4_EN_MAX_RX_FRAGS 4 105#define MLX4_EN_MAX_RX_FRAGS 4
117 106
118/* Maximum ring sizes */ 107/* Maximum ring sizes */
@@ -264,13 +253,16 @@ struct mlx4_en_rx_alloc {
264 struct page *page; 253 struct page *page;
265 dma_addr_t dma; 254 dma_addr_t dma;
266 u32 page_offset; 255 u32 page_offset;
267 u32 page_size;
268}; 256};
269 257
270#define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT) 258#define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT)
259
271struct mlx4_en_page_cache { 260struct mlx4_en_page_cache {
272 u32 index; 261 u32 index;
273 struct mlx4_en_rx_alloc buf[MLX4_EN_CACHE_SIZE]; 262 struct {
263 struct page *page;
264 dma_addr_t dma;
265 } buf[MLX4_EN_CACHE_SIZE];
274}; 266};
275 267
276struct mlx4_en_priv; 268struct mlx4_en_priv;
@@ -335,7 +327,6 @@ struct mlx4_en_rx_desc {
335 327
336struct mlx4_en_rx_ring { 328struct mlx4_en_rx_ring {
337 struct mlx4_hwq_resources wqres; 329 struct mlx4_hwq_resources wqres;
338 struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
339 u32 size ; /* number of Rx descs*/ 330 u32 size ; /* number of Rx descs*/
340 u32 actual_size; 331 u32 actual_size;
341 u32 size_mask; 332 u32 size_mask;
@@ -355,6 +346,7 @@ struct mlx4_en_rx_ring {
355 unsigned long csum_ok; 346 unsigned long csum_ok;
356 unsigned long csum_none; 347 unsigned long csum_none;
357 unsigned long csum_complete; 348 unsigned long csum_complete;
349 unsigned long rx_alloc_pages;
358 unsigned long xdp_drop; 350 unsigned long xdp_drop;
359 unsigned long xdp_tx; 351 unsigned long xdp_tx;
360 unsigned long xdp_tx_full; 352 unsigned long xdp_tx_full;
@@ -472,11 +464,7 @@ struct mlx4_en_mc_list {
472 464
473struct mlx4_en_frag_info { 465struct mlx4_en_frag_info {
474 u16 frag_size; 466 u16 frag_size;
475 u16 frag_prefix_size;
476 u32 frag_stride; 467 u32 frag_stride;
477 enum dma_data_direction dma_dir;
478 u16 order;
479 u16 rx_headroom;
480}; 468};
481 469
482#ifdef CONFIG_MLX4_EN_DCB 470#ifdef CONFIG_MLX4_EN_DCB
@@ -584,8 +572,10 @@ struct mlx4_en_priv {
584 u32 rx_ring_num; 572 u32 rx_ring_num;
585 u32 rx_skb_size; 573 u32 rx_skb_size;
586 struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS]; 574 struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
587 u16 num_frags; 575 u8 num_frags;
588 u16 log_rx_info; 576 u8 log_rx_info;
577 u8 dma_dir;
578 u16 rx_headroom;
589 579
590 struct mlx4_en_tx_ring **tx_ring[MLX4_EN_NUM_TX_TYPES]; 580 struct mlx4_en_tx_ring **tx_ring[MLX4_EN_NUM_TX_TYPES];
591 struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS]; 581 struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 48641cb0367f..926f3c3f3665 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -37,7 +37,7 @@ struct mlx4_en_port_stats {
37 unsigned long queue_stopped; 37 unsigned long queue_stopped;
38 unsigned long wake_queue; 38 unsigned long wake_queue;
39 unsigned long tx_timeout; 39 unsigned long tx_timeout;
40 unsigned long rx_alloc_failed; 40 unsigned long rx_alloc_pages;
41 unsigned long rx_chksum_good; 41 unsigned long rx_chksum_good;
42 unsigned long rx_chksum_none; 42 unsigned long rx_chksum_none;
43 unsigned long rx_chksum_complete; 43 unsigned long rx_chksum_complete;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 8ef64c4db2c2..f96a73ea8e0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2737,7 +2737,9 @@ mqprio:
2737 if (tc->type != TC_SETUP_MQPRIO) 2737 if (tc->type != TC_SETUP_MQPRIO)
2738 return -EINVAL; 2738 return -EINVAL;
2739 2739
2740 return mlx5e_setup_tc(dev, tc->tc); 2740 tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
2741
2742 return mlx5e_setup_tc(dev, tc->mqprio->num_tc);
2741} 2743}
2742 2744
2743static void 2745static void
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 6b6c30deee83..95fcacf9c8be 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -15,7 +15,8 @@ obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
15mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ 15mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
16 spectrum_switchdev.o spectrum_router.o \ 16 spectrum_switchdev.o spectrum_router.o \
17 spectrum_kvdl.o spectrum_acl_tcam.o \ 17 spectrum_kvdl.o spectrum_acl_tcam.o \
18 spectrum_acl.o spectrum_flower.o 18 spectrum_acl.o spectrum_flower.o \
19 spectrum_cnt.o
19mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o 20mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
20obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o 21obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
21mlxsw_minimal-objs := minimal.o 22mlxsw_minimal-objs := minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index a1b48421648a..479511cf79bc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -1043,13 +1043,6 @@ MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
1043 */ 1043 */
1044MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1); 1044MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1);
1045 1045
1046/* cmd_mbox_sw2hw_cq_oi
1047 * When set, overrun ignore is enabled. When set, updates of
1048 * CQ consumer counter (poll for completion) or Request completion
1049 * notifications (Arm CQ) DoorBells should not be rung on that CQ.
1050 */
1051MLXSW_ITEM32(cmd_mbox, sw2hw_cq, oi, 0x00, 12, 1);
1052
1053/* cmd_mbox_sw2hw_cq_st 1046/* cmd_mbox_sw2hw_cq_st
1054 * Event delivery state machine 1047 * Event delivery state machine
1055 * 0x0 - FIRED 1048 * 0x0 - FIRED
@@ -1132,11 +1125,6 @@ static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core,
1132 */ 1125 */
1133MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1); 1126MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1);
1134 1127
1135/* cmd_mbox_sw2hw_eq_oi
1136 * When set, overrun ignore is enabled.
1137 */
1138MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1);
1139
1140/* cmd_mbox_sw2hw_eq_st 1128/* cmd_mbox_sw2hw_eq_st
1141 * Event delivery state machine 1129 * Event delivery state machine
1142 * 0x0 - FIRED 1130 * 0x0 - FIRED
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 5f337715a4da..a984c361926c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -567,6 +567,89 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
567 return oneact + MLXSW_AFA_PAYLOAD_OFFSET; 567 return oneact + MLXSW_AFA_PAYLOAD_OFFSET;
568} 568}
569 569
570/* VLAN Action
571 * -----------
572 * VLAN action is used for manipulating VLANs. It can be used to implement QinQ,
 573 * VLAN translation, change of PCP bits of the VLAN tag, push, pop or swap VLANs
574 * and more.
575 */
576
577#define MLXSW_AFA_VLAN_CODE 0x02
578#define MLXSW_AFA_VLAN_SIZE 1
579
580enum mlxsw_afa_vlan_vlan_tag_cmd {
581 MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
582 MLXSW_AFA_VLAN_VLAN_TAG_CMD_PUSH_TAG,
583 MLXSW_AFA_VLAN_VLAN_TAG_CMD_POP_TAG,
584};
585
586enum mlxsw_afa_vlan_cmd {
587 MLXSW_AFA_VLAN_CMD_NOP,
588 MLXSW_AFA_VLAN_CMD_SET_OUTER,
589 MLXSW_AFA_VLAN_CMD_SET_INNER,
590 MLXSW_AFA_VLAN_CMD_COPY_OUTER_TO_INNER,
591 MLXSW_AFA_VLAN_CMD_COPY_INNER_TO_OUTER,
592 MLXSW_AFA_VLAN_CMD_SWAP,
593};
594
595/* afa_vlan_vlan_tag_cmd
596 * Tag command: push, pop, nop VLAN header.
597 */
598MLXSW_ITEM32(afa, vlan, vlan_tag_cmd, 0x00, 29, 3);
599
600/* afa_vlan_vid_cmd */
601MLXSW_ITEM32(afa, vlan, vid_cmd, 0x04, 29, 3);
602
603/* afa_vlan_vid */
604MLXSW_ITEM32(afa, vlan, vid, 0x04, 0, 12);
605
606/* afa_vlan_ethertype_cmd */
607MLXSW_ITEM32(afa, vlan, ethertype_cmd, 0x08, 29, 3);
608
609/* afa_vlan_ethertype
610 * Index to EtherTypes in Switch VLAN EtherType Register (SVER).
611 */
612MLXSW_ITEM32(afa, vlan, ethertype, 0x08, 24, 3);
613
614/* afa_vlan_pcp_cmd */
615MLXSW_ITEM32(afa, vlan, pcp_cmd, 0x08, 13, 3);
616
617/* afa_vlan_pcp */
618MLXSW_ITEM32(afa, vlan, pcp, 0x08, 8, 3);
619
620static inline void
621mlxsw_afa_vlan_pack(char *payload,
622 enum mlxsw_afa_vlan_vlan_tag_cmd vlan_tag_cmd,
623 enum mlxsw_afa_vlan_cmd vid_cmd, u16 vid,
624 enum mlxsw_afa_vlan_cmd pcp_cmd, u8 pcp,
625 enum mlxsw_afa_vlan_cmd ethertype_cmd, u8 ethertype)
626{
627 mlxsw_afa_vlan_vlan_tag_cmd_set(payload, vlan_tag_cmd);
628 mlxsw_afa_vlan_vid_cmd_set(payload, vid_cmd);
629 mlxsw_afa_vlan_vid_set(payload, vid);
630 mlxsw_afa_vlan_pcp_cmd_set(payload, pcp_cmd);
631 mlxsw_afa_vlan_pcp_set(payload, pcp);
632 mlxsw_afa_vlan_ethertype_cmd_set(payload, ethertype_cmd);
633 mlxsw_afa_vlan_ethertype_set(payload, ethertype);
634}
635
636int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
637 u16 vid, u8 pcp, u8 et)
638{
639 char *act = mlxsw_afa_block_append_action(block,
640 MLXSW_AFA_VLAN_CODE,
641 MLXSW_AFA_VLAN_SIZE);
642
643 if (!act)
644 return -ENOBUFS;
645 mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
646 MLXSW_AFA_VLAN_CMD_SET_OUTER, vid,
647 MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp,
648 MLXSW_AFA_VLAN_CMD_SET_OUTER, et);
649 return 0;
650}
651EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify);
652
570/* Trap / Discard Action 653/* Trap / Discard Action
571 * --------------------- 654 * ---------------------
572 * The Trap / Discard action enables trapping / mirroring packets to the CPU 655 * The Trap / Discard action enables trapping / mirroring packets to the CPU
@@ -677,3 +760,54 @@ err_append_action:
677 return err; 760 return err;
678} 761}
679EXPORT_SYMBOL(mlxsw_afa_block_append_fwd); 762EXPORT_SYMBOL(mlxsw_afa_block_append_fwd);
763
764/* Policing and Counting Action
765 * ----------------------------
766 * Policing and Counting action is used for binding policer and counter
767 * to ACL rules.
768 */
769
770#define MLXSW_AFA_POLCNT_CODE 0x08
771#define MLXSW_AFA_POLCNT_SIZE 1
772
773enum mlxsw_afa_polcnt_counter_set_type {
774 /* No count */
775 MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_NO_COUNT = 0x00,
776 /* Count packets and bytes */
777 MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
778 /* Count only packets */
779 MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS = 0x05,
780};
781
782/* afa_polcnt_counter_set_type
783 * Counter set type for flow counters.
784 */
785MLXSW_ITEM32(afa, polcnt, counter_set_type, 0x04, 24, 8);
786
787/* afa_polcnt_counter_index
788 * Counter index for flow counters.
789 */
790MLXSW_ITEM32(afa, polcnt, counter_index, 0x04, 0, 24);
791
792static inline void
793mlxsw_afa_polcnt_pack(char *payload,
794 enum mlxsw_afa_polcnt_counter_set_type set_type,
795 u32 counter_index)
796{
797 mlxsw_afa_polcnt_counter_set_type_set(payload, set_type);
798 mlxsw_afa_polcnt_counter_index_set(payload, counter_index);
799}
800
801int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
802 u32 counter_index)
803{
804 char *act = mlxsw_afa_block_append_action(block,
805 MLXSW_AFA_POLCNT_CODE,
806 MLXSW_AFA_POLCNT_SIZE);
807 if (!act)
808 return -ENOBUFS;
809 mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES,
810 counter_index);
811 return 0;
812}
813EXPORT_SYMBOL(mlxsw_afa_block_append_counter);
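Reading aid, not part of the patch: a minimal sketch of how the two flex-action helpers added above might be combined by a caller that already holds a struct mlxsw_afa_block (for instance while building the action block of an ACL rule). The helper name and the error-unwind policy are assumptions for illustration; only the two appender signatures come from the diff itself.

	/* Editorial sketch (assumed caller): append an outer-VLAN rewrite and
	 * bind a previously allocated flow counter to the same action block.
	 */
	static int example_append_vlan_and_count(struct mlxsw_afa_block *block,
						 u16 vid, u8 pcp, u8 et,
						 u32 counter_index)
	{
		int err;

		/* Rewrite the outer tag: VID, PCP and EtherType (SVER index). */
		err = mlxsw_afa_block_append_vlan_modify(block, vid, pcp, et);
		if (err)
			return err;

		/* Count packets and bytes hitting the rule via the given counter. */
		return mlxsw_afa_block_append_counter(block, counter_index);
	}

Both appenders return -ENOBUFS when no room is left in the block, so callers are expected to fail the whole rule rather than retry.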
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index 43f78dcfe394..a03362c1ef32 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -62,5 +62,9 @@ void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
62int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block); 62int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
63int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, 63int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
64 u8 local_port, bool in_port); 64 u8 local_port, bool in_port);
65int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
66 u16 vid, u8 pcp, u8 et);
67int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
68 u32 counter_index);
65 69
66#endif 70#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index e4fcba7c2af2..c75e9141e3ec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -54,6 +54,8 @@ enum mlxsw_afk_element {
54 MLXSW_AFK_ELEMENT_DST_IP6_LO, 54 MLXSW_AFK_ELEMENT_DST_IP6_LO,
55 MLXSW_AFK_ELEMENT_DST_L4_PORT, 55 MLXSW_AFK_ELEMENT_DST_L4_PORT,
56 MLXSW_AFK_ELEMENT_SRC_L4_PORT, 56 MLXSW_AFK_ELEMENT_SRC_L4_PORT,
57 MLXSW_AFK_ELEMENT_VID,
58 MLXSW_AFK_ELEMENT_PCP,
57 MLXSW_AFK_ELEMENT_MAX, 59 MLXSW_AFK_ELEMENT_MAX,
58}; 60};
59 61
@@ -88,7 +90,7 @@ struct mlxsw_afk_element_info {
88 MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_BUF, \ 90 MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_BUF, \
89 _element, _offset, 0, _size) 91 _element, _offset, 0, _size)
90 92
91/* For the purpose of the driver, define a internal storage scratchpad 93/* For the purpose of the driver, define an internal storage scratchpad
92 * that will be used to store key/mask values. For each defined element type 94 * that will be used to store key/mask values. For each defined element type
93 * define an internal storage geometry. 95 * define an internal storage geometry.
94 */ 96 */
@@ -98,6 +100,8 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
98 MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6), 100 MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6),
99 MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16), 101 MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16),
100 MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8), 102 MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
103 MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
104 MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
101 MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32), 105 MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32),
102 MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32), 106 MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32),
103 MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8), 107 MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index a223c85dfde0..ffeb746fe2f4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -580,7 +580,6 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
580 580
581 mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */ 581 mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
582 mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM); 582 mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
583 mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0);
584 mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0); 583 mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
585 mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count)); 584 mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
586 for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { 585 for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
@@ -755,7 +754,6 @@ static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
755 } 754 }
756 755
757 mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */ 756 mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
758 mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0);
759 mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */ 757 mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
760 mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count)); 758 mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
761 for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { 759 for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index d9616daf8a70..e7a652c43b5c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4141,7 +4141,8 @@ static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
4141 4141
4142static inline void mlxsw_reg_ritr_pack(char *payload, bool enable, 4142static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
4143 enum mlxsw_reg_ritr_if_type type, 4143 enum mlxsw_reg_ritr_if_type type,
4144 u16 rif, u16 mtu, const char *mac) 4144 u16 rif, u16 vr_id, u16 mtu,
4145 const char *mac)
4145{ 4146{
4146 bool op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL; 4147 bool op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL;
4147 4148
@@ -4153,6 +4154,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
4153 mlxsw_reg_ritr_rif_set(payload, rif); 4154 mlxsw_reg_ritr_rif_set(payload, rif);
4154 mlxsw_reg_ritr_ipv4_fe_set(payload, 1); 4155 mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
4155 mlxsw_reg_ritr_lb_en_set(payload, 1); 4156 mlxsw_reg_ritr_lb_en_set(payload, 1);
4157 mlxsw_reg_ritr_virtual_router_set(payload, vr_id);
4156 mlxsw_reg_ritr_mtu_set(payload, mtu); 4158 mlxsw_reg_ritr_mtu_set(payload, mtu);
4157 mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac); 4159 mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
4158} 4160}
@@ -5504,6 +5506,70 @@ static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e,
5504 mlxsw_reg_mpsc_rate_set(payload, rate); 5506 mlxsw_reg_mpsc_rate_set(payload, rate);
5505} 5507}
5506 5508
5509/* MGPC - Monitoring General Purpose Counter Set Register
5510 * The MGPC register retrieves and sets the General Purpose Counter Set.
5511 */
5512#define MLXSW_REG_MGPC_ID 0x9081
5513#define MLXSW_REG_MGPC_LEN 0x18
5514
5515MLXSW_REG_DEFINE(mgpc, MLXSW_REG_MGPC_ID, MLXSW_REG_MGPC_LEN);
5516
5517enum mlxsw_reg_mgpc_counter_set_type {
5518 /* No count */
5519 MLXSW_REG_MGPC_COUNTER_SET_TYPE_NO_COUT = 0x00,
5520 /* Count packets and bytes */
5521 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
5522 /* Count only packets */
5523 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS = 0x05,
5524};
5525
5526/* reg_mgpc_counter_set_type
5527 * Counter set type.
5528 * Access: OP
5529 */
5530MLXSW_ITEM32(reg, mgpc, counter_set_type, 0x00, 24, 8);
5531
5532/* reg_mgpc_counter_index
5533 * Counter index.
5534 * Access: Index
5535 */
5536MLXSW_ITEM32(reg, mgpc, counter_index, 0x00, 0, 24);
5537
5538enum mlxsw_reg_mgpc_opcode {
5539 /* Nop */
5540 MLXSW_REG_MGPC_OPCODE_NOP = 0x00,
5541 /* Clear counters */
5542 MLXSW_REG_MGPC_OPCODE_CLEAR = 0x08,
5543};
5544
5545/* reg_mgpc_opcode
5546 * Opcode.
5547 * Access: OP
5548 */
5549MLXSW_ITEM32(reg, mgpc, opcode, 0x04, 28, 4);
5550
5551/* reg_mgpc_byte_counter
5552 * Byte counter value.
5553 * Access: RW
5554 */
5555MLXSW_ITEM64(reg, mgpc, byte_counter, 0x08, 0, 64);
5556
5557/* reg_mgpc_packet_counter
5558 * Packet counter value.
5559 * Access: RW
5560 */
5561MLXSW_ITEM64(reg, mgpc, packet_counter, 0x10, 0, 64);
5562
5563static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
5564 enum mlxsw_reg_mgpc_opcode opcode,
5565 enum mlxsw_reg_mgpc_counter_set_type set_type)
5566{
5567 MLXSW_REG_ZERO(mgpc, payload);
5568 mlxsw_reg_mgpc_counter_index_set(payload, counter_index);
5569 mlxsw_reg_mgpc_counter_set_type_set(payload, set_type);
5570 mlxsw_reg_mgpc_opcode_set(payload, opcode);
5571}
5572
5507/* SBPR - Shared Buffer Pools Register 5573/* SBPR - Shared Buffer Pools Register
5508 * ----------------------------------- 5574 * -----------------------------------
5509 * The SBPR configures and retrieves the shared buffer pools and configuration. 5575 * The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -5977,6 +6043,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
5977 MLXSW_REG(mpar), 6043 MLXSW_REG(mpar),
5978 MLXSW_REG(mlcr), 6044 MLXSW_REG(mlcr),
5979 MLXSW_REG(mpsc), 6045 MLXSW_REG(mpsc),
6046 MLXSW_REG(mgpc),
5980 MLXSW_REG(sbpr), 6047 MLXSW_REG(sbpr),
5981 MLXSW_REG(sbcm), 6048 MLXSW_REG(sbcm),
5982 MLXSW_REG(sbpm), 6049 MLXSW_REG(sbpm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index bce8c2e00630..905a8e269f87 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -43,7 +43,9 @@ enum mlxsw_res_id {
43 MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE, 43 MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
44 MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE, 44 MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
45 MLXSW_RES_ID_MAX_TRAP_GROUPS, 45 MLXSW_RES_ID_MAX_TRAP_GROUPS,
46 MLXSW_RES_ID_COUNTER_POOL_SIZE,
46 MLXSW_RES_ID_MAX_SPAN, 47 MLXSW_RES_ID_MAX_SPAN,
48 MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
47 MLXSW_RES_ID_MAX_SYSTEM_PORT, 49 MLXSW_RES_ID_MAX_SYSTEM_PORT,
48 MLXSW_RES_ID_MAX_LAG, 50 MLXSW_RES_ID_MAX_LAG,
49 MLXSW_RES_ID_MAX_LAG_MEMBERS, 51 MLXSW_RES_ID_MAX_LAG_MEMBERS,
@@ -75,7 +77,9 @@ static u16 mlxsw_res_ids[] = {
75 [MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002, 77 [MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
76 [MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003, 78 [MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
77 [MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201, 79 [MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
80 [MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410,
78 [MLXSW_RES_ID_MAX_SPAN] = 0x2420, 81 [MLXSW_RES_ID_MAX_SPAN] = 0x2420,
82 [MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443,
79 [MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502, 83 [MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502,
80 [MLXSW_RES_ID_MAX_LAG] = 0x2520, 84 [MLXSW_RES_ID_MAX_LAG] = 0x2520,
81 [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521, 85 [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 16484f24b7db..3ed77e10b4d6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -66,6 +66,7 @@
66#include "port.h" 66#include "port.h"
67#include "trap.h" 67#include "trap.h"
68#include "txheader.h" 68#include "txheader.h"
69#include "spectrum_cnt.h"
69 70
70static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum"; 71static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
71static const char mlxsw_sp_driver_version[] = "1.0"; 72static const char mlxsw_sp_driver_version[] = "1.0";
@@ -138,6 +139,60 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
138 */ 139 */
139MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 140MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
140 141
142int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
143 unsigned int counter_index, u64 *packets,
144 u64 *bytes)
145{
146 char mgpc_pl[MLXSW_REG_MGPC_LEN];
147 int err;
148
149 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
150 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
151 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
152 if (err)
153 return err;
154 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
155 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
156 return 0;
157}
158
159static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
160 unsigned int counter_index)
161{
162 char mgpc_pl[MLXSW_REG_MGPC_LEN];
163
164 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
165 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
166 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
167}
168
169int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
170 unsigned int *p_counter_index)
171{
172 int err;
173
174 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
175 p_counter_index);
176 if (err)
177 return err;
178 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
179 if (err)
180 goto err_counter_clear;
181 return 0;
182
183err_counter_clear:
184 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
185 *p_counter_index);
186 return err;
187}
188
189void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
190 unsigned int counter_index)
191{
192 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
193 counter_index);
194}
195
141static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 196static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
142 const struct mlxsw_tx_info *tx_info) 197 const struct mlxsw_tx_info *tx_info)
143{ 198{
@@ -1368,7 +1423,7 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
1368 tc->cls_mall); 1423 tc->cls_mall);
1369 return 0; 1424 return 0;
1370 default: 1425 default:
1371 return -EINVAL; 1426 return -EOPNOTSUPP;
1372 } 1427 }
1373 case TC_SETUP_CLSFLOWER: 1428 case TC_SETUP_CLSFLOWER:
1374 switch (tc->cls_flower->command) { 1429 switch (tc->cls_flower->command) {
@@ -1379,6 +1434,9 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
1379 mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, 1434 mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
1380 tc->cls_flower); 1435 tc->cls_flower);
1381 return 0; 1436 return 0;
1437 case TC_CLSFLOWER_STATS:
1438 return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
1439 tc->cls_flower);
1382 default: 1440 default:
1383 return -EOPNOTSUPP; 1441 return -EOPNOTSUPP;
1384 } 1442 }
@@ -3224,6 +3282,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
3224 goto err_acl_init; 3282 goto err_acl_init;
3225 } 3283 }
3226 3284
3285 err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3286 if (err) {
3287 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3288 goto err_counter_pool_init;
3289 }
3290
3227 err = mlxsw_sp_ports_create(mlxsw_sp); 3291 err = mlxsw_sp_ports_create(mlxsw_sp);
3228 if (err) { 3292 if (err) {
3229 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 3293 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
@@ -3233,6 +3297,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
3233 return 0; 3297 return 0;
3234 3298
3235err_ports_create: 3299err_ports_create:
3300 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3301err_counter_pool_init:
3236 mlxsw_sp_acl_fini(mlxsw_sp); 3302 mlxsw_sp_acl_fini(mlxsw_sp);
3237err_acl_init: 3303err_acl_init:
3238 mlxsw_sp_span_fini(mlxsw_sp); 3304 mlxsw_sp_span_fini(mlxsw_sp);
@@ -3255,6 +3321,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
3255 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3321 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3256 3322
3257 mlxsw_sp_ports_remove(mlxsw_sp); 3323 mlxsw_sp_ports_remove(mlxsw_sp);
3324 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3258 mlxsw_sp_acl_fini(mlxsw_sp); 3325 mlxsw_sp_acl_fini(mlxsw_sp);
3259 mlxsw_sp_span_fini(mlxsw_sp); 3326 mlxsw_sp_span_fini(mlxsw_sp);
3260 mlxsw_sp_router_fini(mlxsw_sp); 3327 mlxsw_sp_router_fini(mlxsw_sp);
@@ -3326,13 +3393,13 @@ bool mlxsw_sp_port_dev_check(const struct net_device *dev)
3326 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 3393 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3327} 3394}
3328 3395
3329static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data) 3396static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
3330{ 3397{
3331 struct mlxsw_sp_port **port = data; 3398 struct mlxsw_sp_port **p_mlxsw_sp_port = data;
3332 int ret = 0; 3399 int ret = 0;
3333 3400
3334 if (mlxsw_sp_port_dev_check(lower_dev)) { 3401 if (mlxsw_sp_port_dev_check(lower_dev)) {
3335 *port = netdev_priv(lower_dev); 3402 *p_mlxsw_sp_port = netdev_priv(lower_dev);
3336 ret = 1; 3403 ret = 1;
3337 } 3404 }
3338 3405
@@ -3341,18 +3408,18 @@ static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data)
3341 3408
3342static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 3409static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3343{ 3410{
3344 struct mlxsw_sp_port *port; 3411 struct mlxsw_sp_port *mlxsw_sp_port;
3345 3412
3346 if (mlxsw_sp_port_dev_check(dev)) 3413 if (mlxsw_sp_port_dev_check(dev))
3347 return netdev_priv(dev); 3414 return netdev_priv(dev);
3348 3415
3349 port = NULL; 3416 mlxsw_sp_port = NULL;
3350 netdev_walk_all_lower_dev(dev, mlxsw_lower_dev_walk, &port); 3417 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
3351 3418
3352 return port; 3419 return mlxsw_sp_port;
3353} 3420}
3354 3421
3355static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 3422struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3356{ 3423{
3357 struct mlxsw_sp_port *mlxsw_sp_port; 3424 struct mlxsw_sp_port *mlxsw_sp_port;
3358 3425
@@ -3362,15 +3429,16 @@ static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3362 3429
3363static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 3430static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3364{ 3431{
3365 struct mlxsw_sp_port *port; 3432 struct mlxsw_sp_port *mlxsw_sp_port;
3366 3433
3367 if (mlxsw_sp_port_dev_check(dev)) 3434 if (mlxsw_sp_port_dev_check(dev))
3368 return netdev_priv(dev); 3435 return netdev_priv(dev);
3369 3436
3370 port = NULL; 3437 mlxsw_sp_port = NULL;
3371 netdev_walk_all_lower_dev_rcu(dev, mlxsw_lower_dev_walk, &port); 3438 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3439 &mlxsw_sp_port);
3372 3440
3373 return port; 3441 return mlxsw_sp_port;
3374} 3442}
3375 3443
3376struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 3444struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
@@ -3390,546 +3458,6 @@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
3390 dev_put(mlxsw_sp_port->dev); 3458 dev_put(mlxsw_sp_port->dev);
3391} 3459}
3392 3460
3393static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
3394 unsigned long event)
3395{
3396 switch (event) {
3397 case NETDEV_UP:
3398 if (!r)
3399 return true;
3400 r->ref_count++;
3401 return false;
3402 case NETDEV_DOWN:
3403 if (r && --r->ref_count == 0)
3404 return true;
3405 /* It is possible we already removed the RIF ourselves
3406 * if it was assigned to a netdev that is now a bridge
3407 * or LAG slave.
3408 */
3409 return false;
3410 }
3411
3412 return false;
3413}
3414
3415static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
3416{
3417 int i;
3418
3419 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
3420 if (!mlxsw_sp->rifs[i])
3421 return i;
3422
3423 return MLXSW_SP_INVALID_RIF;
3424}
3425
3426static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
3427 bool *p_lagged, u16 *p_system_port)
3428{
3429 u8 local_port = mlxsw_sp_vport->local_port;
3430
3431 *p_lagged = mlxsw_sp_vport->lagged;
3432 *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
3433}
3434
3435static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
3436 struct net_device *l3_dev, u16 rif,
3437 bool create)
3438{
3439 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3440 bool lagged = mlxsw_sp_vport->lagged;
3441 char ritr_pl[MLXSW_REG_RITR_LEN];
3442 u16 system_port;
3443
3444 mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
3445 l3_dev->mtu, l3_dev->dev_addr);
3446
3447 mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
3448 mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
3449 mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
3450
3451 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3452}
3453
3454static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
3455
3456static struct mlxsw_sp_fid *
3457mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
3458{
3459 struct mlxsw_sp_fid *f;
3460
3461 f = kzalloc(sizeof(*f), GFP_KERNEL);
3462 if (!f)
3463 return NULL;
3464
3465 f->leave = mlxsw_sp_vport_rif_sp_leave;
3466 f->ref_count = 0;
3467 f->dev = l3_dev;
3468 f->fid = fid;
3469
3470 return f;
3471}
3472
3473static struct mlxsw_sp_rif *
3474mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
3475{
3476 struct mlxsw_sp_rif *r;
3477
3478 r = kzalloc(sizeof(*r), GFP_KERNEL);
3479 if (!r)
3480 return NULL;
3481
3482 INIT_LIST_HEAD(&r->nexthop_list);
3483 INIT_LIST_HEAD(&r->neigh_list);
3484 ether_addr_copy(r->addr, l3_dev->dev_addr);
3485 r->mtu = l3_dev->mtu;
3486 r->ref_count = 1;
3487 r->dev = l3_dev;
3488 r->rif = rif;
3489 r->f = f;
3490
3491 return r;
3492}
3493
3494static struct mlxsw_sp_rif *
3495mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
3496 struct net_device *l3_dev)
3497{
3498 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3499 struct mlxsw_sp_fid *f;
3500 struct mlxsw_sp_rif *r;
3501 u16 fid, rif;
3502 int err;
3503
3504 rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
3505 if (rif == MLXSW_SP_INVALID_RIF)
3506 return ERR_PTR(-ERANGE);
3507
3508 err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
3509 if (err)
3510 return ERR_PTR(err);
3511
3512 fid = mlxsw_sp_rif_sp_to_fid(rif);
3513 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
3514 if (err)
3515 goto err_rif_fdb_op;
3516
3517 f = mlxsw_sp_rfid_alloc(fid, l3_dev);
3518 if (!f) {
3519 err = -ENOMEM;
3520 goto err_rfid_alloc;
3521 }
3522
3523 r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
3524 if (!r) {
3525 err = -ENOMEM;
3526 goto err_rif_alloc;
3527 }
3528
3529 f->r = r;
3530 mlxsw_sp->rifs[rif] = r;
3531
3532 return r;
3533
3534err_rif_alloc:
3535 kfree(f);
3536err_rfid_alloc:
3537 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
3538err_rif_fdb_op:
3539 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
3540 return ERR_PTR(err);
3541}
3542
3543static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
3544 struct mlxsw_sp_rif *r)
3545{
3546 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3547 struct net_device *l3_dev = r->dev;
3548 struct mlxsw_sp_fid *f = r->f;
3549 u16 fid = f->fid;
3550 u16 rif = r->rif;
3551
3552 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
3553
3554 mlxsw_sp->rifs[rif] = NULL;
3555 f->r = NULL;
3556
3557 kfree(r);
3558
3559 kfree(f);
3560
3561 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
3562
3563 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
3564}
3565
3566static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
3567 struct net_device *l3_dev)
3568{
3569 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3570 struct mlxsw_sp_rif *r;
3571
3572 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3573 if (!r) {
3574 r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
3575 if (IS_ERR(r))
3576 return PTR_ERR(r);
3577 }
3578
3579 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
3580 r->f->ref_count++;
3581
3582 netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);
3583
3584 return 0;
3585}
3586
3587static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
3588{
3589 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3590
3591 netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
3592
3593 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
3594 if (--f->ref_count == 0)
3595 mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
3596}
3597
3598static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
3599 struct net_device *port_dev,
3600 unsigned long event, u16 vid)
3601{
3602 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
3603 struct mlxsw_sp_port *mlxsw_sp_vport;
3604
3605 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3606 if (WARN_ON(!mlxsw_sp_vport))
3607 return -EINVAL;
3608
3609 switch (event) {
3610 case NETDEV_UP:
3611 return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
3612 case NETDEV_DOWN:
3613 mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
3614 break;
3615 }
3616
3617 return 0;
3618}
3619
3620static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
3621 unsigned long event)
3622{
3623 if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
3624 return 0;
3625
3626 return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
3627}
3628
3629static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
3630 struct net_device *lag_dev,
3631 unsigned long event, u16 vid)
3632{
3633 struct net_device *port_dev;
3634 struct list_head *iter;
3635 int err;
3636
3637 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
3638 if (mlxsw_sp_port_dev_check(port_dev)) {
3639 err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
3640 event, vid);
3641 if (err)
3642 return err;
3643 }
3644 }
3645
3646 return 0;
3647}
3648
3649static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
3650 unsigned long event)
3651{
3652 if (netif_is_bridge_port(lag_dev))
3653 return 0;
3654
3655 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
3656}
3657
3658static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
3659 struct net_device *l3_dev)
3660{
3661 u16 fid;
3662
3663 if (is_vlan_dev(l3_dev))
3664 fid = vlan_dev_vlan_id(l3_dev);
3665 else if (mlxsw_sp->master_bridge.dev == l3_dev)
3666 fid = 1;
3667 else
3668 return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
3669
3670 return mlxsw_sp_fid_find(mlxsw_sp, fid);
3671}
3672
3673static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
3674{
3675 return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
3676 MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
3677}
3678
3679static u16 mlxsw_sp_flood_table_index_get(u16 fid)
3680{
3681 return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
3682}
3683
3684static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
3685 bool set)
3686{
3687 enum mlxsw_flood_table_type table_type;
3688 char *sftr_pl;
3689 u16 index;
3690 int err;
3691
3692 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
3693 if (!sftr_pl)
3694 return -ENOMEM;
3695
3696 table_type = mlxsw_sp_flood_table_type_get(fid);
3697 index = mlxsw_sp_flood_table_index_get(fid);
3698 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
3699 1, MLXSW_PORT_ROUTER_PORT, set);
3700 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
3701
3702 kfree(sftr_pl);
3703 return err;
3704}
3705
3706static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
3707{
3708 if (mlxsw_sp_fid_is_vfid(fid))
3709 return MLXSW_REG_RITR_FID_IF;
3710 else
3711 return MLXSW_REG_RITR_VLAN_IF;
3712}
3713
3714static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
3715 struct net_device *l3_dev,
3716 u16 fid, u16 rif,
3717 bool create)
3718{
3719 enum mlxsw_reg_ritr_if_type rif_type;
3720 char ritr_pl[MLXSW_REG_RITR_LEN];
3721
3722 rif_type = mlxsw_sp_rif_type_get(fid);
3723 mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
3724 l3_dev->dev_addr);
3725 mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
3726
3727 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3728}
3729
3730static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
3731 struct net_device *l3_dev,
3732 struct mlxsw_sp_fid *f)
3733{
3734 struct mlxsw_sp_rif *r;
3735 u16 rif;
3736 int err;
3737
3738 rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
3739 if (rif == MLXSW_SP_INVALID_RIF)
3740 return -ERANGE;
3741
3742 err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
3743 if (err)
3744 return err;
3745
3746 err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
3747 if (err)
3748 goto err_rif_bridge_op;
3749
3750 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
3751 if (err)
3752 goto err_rif_fdb_op;
3753
3754 r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
3755 if (!r) {
3756 err = -ENOMEM;
3757 goto err_rif_alloc;
3758 }
3759
3760 f->r = r;
3761 mlxsw_sp->rifs[rif] = r;
3762
3763 netdev_dbg(l3_dev, "RIF=%d created\n", rif);
3764
3765 return 0;
3766
3767err_rif_alloc:
3768 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
3769err_rif_fdb_op:
3770 mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
3771err_rif_bridge_op:
3772 mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
3773 return err;
3774}
3775
3776void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
3777 struct mlxsw_sp_rif *r)
3778{
3779 struct net_device *l3_dev = r->dev;
3780 struct mlxsw_sp_fid *f = r->f;
3781 u16 rif = r->rif;
3782
3783 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
3784
3785 mlxsw_sp->rifs[rif] = NULL;
3786 f->r = NULL;
3787
3788 kfree(r);
3789
3790 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
3791
3792 mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
3793
3794 mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
3795
3796 netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
3797}
3798
3799static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
3800 struct net_device *br_dev,
3801 unsigned long event)
3802{
3803 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
3804 struct mlxsw_sp_fid *f;
3805
3806 /* FID can either be an actual FID if the L3 device is the
3807 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
3808 * L3 device is a VLAN-unaware bridge and we get a vFID.
3809 */
3810 f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
3811 if (WARN_ON(!f))
3812 return -EINVAL;
3813
3814 switch (event) {
3815 case NETDEV_UP:
3816 return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
3817 case NETDEV_DOWN:
3818 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
3819 break;
3820 }
3821
3822 return 0;
3823}
3824
3825static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
3826 unsigned long event)
3827{
3828 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3829 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
3830 u16 vid = vlan_dev_vlan_id(vlan_dev);
3831
3832 if (mlxsw_sp_port_dev_check(real_dev))
3833 return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
3834 vid);
3835 else if (netif_is_lag_master(real_dev))
3836 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
3837 vid);
3838 else if (netif_is_bridge_master(real_dev) &&
3839 mlxsw_sp->master_bridge.dev == real_dev)
3840 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
3841 event);
3842
3843 return 0;
3844}
3845
3846static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
3847 unsigned long event, void *ptr)
3848{
3849 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3850 struct net_device *dev = ifa->ifa_dev->dev;
3851 struct mlxsw_sp *mlxsw_sp;
3852 struct mlxsw_sp_rif *r;
3853 int err = 0;
3854
3855 mlxsw_sp = mlxsw_sp_lower_get(dev);
3856 if (!mlxsw_sp)
3857 goto out;
3858
3859 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3860 if (!mlxsw_sp_rif_should_config(r, event))
3861 goto out;
3862
3863 if (mlxsw_sp_port_dev_check(dev))
3864 err = mlxsw_sp_inetaddr_port_event(dev, event);
3865 else if (netif_is_lag_master(dev))
3866 err = mlxsw_sp_inetaddr_lag_event(dev, event);
3867 else if (netif_is_bridge_master(dev))
3868 err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
3869 else if (is_vlan_dev(dev))
3870 err = mlxsw_sp_inetaddr_vlan_event(dev, event);
3871
3872out:
3873 return notifier_from_errno(err);
3874}
3875
3876static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
3877 const char *mac, int mtu)
3878{
3879 char ritr_pl[MLXSW_REG_RITR_LEN];
3880 int err;
3881
3882 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
3883 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3884 if (err)
3885 return err;
3886
3887 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
3888 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
3889 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
3890 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3891}
3892
3893static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
3894{
3895 struct mlxsw_sp *mlxsw_sp;
3896 struct mlxsw_sp_rif *r;
3897 int err;
3898
3899 mlxsw_sp = mlxsw_sp_lower_get(dev);
3900 if (!mlxsw_sp)
3901 return 0;
3902
3903 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3904 if (!r)
3905 return 0;
3906
3907 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
3908 if (err)
3909 return err;
3910
3911 err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
3912 if (err)
3913 goto err_rif_edit;
3914
3915 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
3916 if (err)
3917 goto err_rif_fdb_op;
3918
3919 ether_addr_copy(r->addr, dev->dev_addr);
3920 r->mtu = dev->mtu;
3921
3922 netdev_dbg(dev, "Updated RIF=%d\n", r->rif);
3923
3924 return 0;
3925
3926err_rif_fdb_op:
3927 mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
3928err_rif_edit:
3929 mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
3930 return err;
3931}
3932
3933static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port, 3461static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
3934 u16 fid) 3462 u16 fid)
3935{ 3463{
@@ -4220,7 +3748,7 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4220 3748
4221static void 3749static void
4222mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 3750mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
4223 u16 lag_id) 3751 struct net_device *lag_dev, u16 lag_id)
4224{ 3752{
4225 struct mlxsw_sp_port *mlxsw_sp_vport; 3753 struct mlxsw_sp_port *mlxsw_sp_vport;
4226 struct mlxsw_sp_fid *f; 3754 struct mlxsw_sp_fid *f;
@@ -4238,6 +3766,7 @@ mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
4238 3766
4239 mlxsw_sp_vport->lag_id = lag_id; 3767 mlxsw_sp_vport->lag_id = lag_id;
4240 mlxsw_sp_vport->lagged = 1; 3768 mlxsw_sp_vport->lagged = 1;
3769 mlxsw_sp_vport->dev = lag_dev;
4241} 3770}
4242 3771
4243static void 3772static void
@@ -4254,6 +3783,7 @@ mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4254 if (f) 3783 if (f)
4255 f->leave(mlxsw_sp_vport); 3784 f->leave(mlxsw_sp_vport);
4256 3785
3786 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
4257 mlxsw_sp_vport->lagged = 0; 3787 mlxsw_sp_vport->lagged = 0;
4258} 3788}
4259 3789
@@ -4293,7 +3823,7 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
4293 mlxsw_sp_port->lagged = 1; 3823 mlxsw_sp_port->lagged = 1;
4294 lag->ref_count++; 3824 lag->ref_count++;
4295 3825
4296 mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id); 3826 mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);
4297 3827
4298 return 0; 3828 return 0;
4299 3829
@@ -4421,7 +3951,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
4421 upper_dev = info->upper_dev; 3951 upper_dev = info->upper_dev;
4422 if (!is_vlan_dev(upper_dev) && 3952 if (!is_vlan_dev(upper_dev) &&
4423 !netif_is_lag_master(upper_dev) && 3953 !netif_is_lag_master(upper_dev) &&
4424 !netif_is_bridge_master(upper_dev)) 3954 !netif_is_bridge_master(upper_dev) &&
3955 !netif_is_l3_master(upper_dev))
4425 return -EINVAL; 3956 return -EINVAL;
4426 if (!info->linking) 3957 if (!info->linking)
4427 break; 3958 break;
@@ -4461,6 +3992,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
4461 else 3992 else
4462 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 3993 mlxsw_sp_port_lag_leave(mlxsw_sp_port,
4463 upper_dev); 3994 upper_dev);
3995 } else if (netif_is_l3_master(upper_dev)) {
3996 if (info->linking)
3997 err = mlxsw_sp_port_vrf_join(mlxsw_sp_port);
3998 else
3999 mlxsw_sp_port_vrf_leave(mlxsw_sp_port);
4464 } else { 4000 } else {
4465 err = -EINVAL; 4001 err = -EINVAL;
4466 WARN_ON(1); 4002 WARN_ON(1);
@@ -4552,8 +4088,8 @@ static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
4552 struct mlxsw_sp_fid *f; 4088 struct mlxsw_sp_fid *f;
4553 4089
4554 f = mlxsw_sp_fid_find(mlxsw_sp, fid); 4090 f = mlxsw_sp_fid_find(mlxsw_sp, fid);
4555 if (f && f->r) 4091 if (f && f->rif)
4556 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); 4092 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
4557 if (f && --f->ref_count == 0) 4093 if (f && --f->ref_count == 0)
4558 mlxsw_sp_fid_destroy(mlxsw_sp, f); 4094 mlxsw_sp_fid_destroy(mlxsw_sp, f);
4559} 4095}
@@ -4564,33 +4100,46 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
4564 struct netdev_notifier_changeupper_info *info; 4100 struct netdev_notifier_changeupper_info *info;
4565 struct net_device *upper_dev; 4101 struct net_device *upper_dev;
4566 struct mlxsw_sp *mlxsw_sp; 4102 struct mlxsw_sp *mlxsw_sp;
4567 int err; 4103 int err = 0;
4568 4104
4569 mlxsw_sp = mlxsw_sp_lower_get(br_dev); 4105 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
4570 if (!mlxsw_sp) 4106 if (!mlxsw_sp)
4571 return 0; 4107 return 0;
4572 if (br_dev != mlxsw_sp->master_bridge.dev)
4573 return 0;
4574 4108
4575 info = ptr; 4109 info = ptr;
4576 4110
4577 switch (event) { 4111 switch (event) {
4112 case NETDEV_PRECHANGEUPPER:
4113 upper_dev = info->upper_dev;
4114 if (!is_vlan_dev(upper_dev) && !netif_is_l3_master(upper_dev))
4115 return -EINVAL;
4116 if (is_vlan_dev(upper_dev) &&
4117 br_dev != mlxsw_sp->master_bridge.dev)
4118 return -EINVAL;
4119 break;
4578 case NETDEV_CHANGEUPPER: 4120 case NETDEV_CHANGEUPPER:
4579 upper_dev = info->upper_dev; 4121 upper_dev = info->upper_dev;
4580 if (!is_vlan_dev(upper_dev)) 4122 if (is_vlan_dev(upper_dev)) {
4581 break; 4123 if (info->linking)
4582 if (info->linking) { 4124 err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
4583 err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp, 4125 upper_dev);
4584 upper_dev); 4126 else
4585 if (err) 4127 mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
4586 return err; 4128 upper_dev);
4129 } else if (netif_is_l3_master(upper_dev)) {
4130 if (info->linking)
4131 err = mlxsw_sp_bridge_vrf_join(mlxsw_sp,
4132 br_dev);
4133 else
4134 mlxsw_sp_bridge_vrf_leave(mlxsw_sp, br_dev);
4587 } else { 4135 } else {
4588 mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev); 4136 err = -EINVAL;
4137 WARN_ON(1);
4589 } 4138 }
4590 break; 4139 break;
4591 } 4140 }
4592 4141
4593 return 0; 4142 return err;
4594} 4143}
4595 4144
4596static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp) 4145static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
@@ -4657,8 +4206,8 @@ static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
4657 clear_bit(vfid, mlxsw_sp->vfids.mapped); 4206 clear_bit(vfid, mlxsw_sp->vfids.mapped);
4658 list_del(&f->list); 4207 list_del(&f->list);
4659 4208
4660 if (f->r) 4209 if (f->rif)
4661 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); 4210 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
4662 4211
4663 kfree(f); 4212 kfree(f);
4664 4213
@@ -4810,33 +4359,43 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
4810 int err = 0; 4359 int err = 0;
4811 4360
4812 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); 4361 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
4362 if (!mlxsw_sp_vport)
4363 return 0;
4813 4364
4814 switch (event) { 4365 switch (event) {
4815 case NETDEV_PRECHANGEUPPER: 4366 case NETDEV_PRECHANGEUPPER:
4816 upper_dev = info->upper_dev; 4367 upper_dev = info->upper_dev;
4817 if (!netif_is_bridge_master(upper_dev)) 4368 if (!netif_is_bridge_master(upper_dev) &&
4369 !netif_is_l3_master(upper_dev))
4818 return -EINVAL; 4370 return -EINVAL;
4819 if (!info->linking) 4371 if (!info->linking)
4820 break; 4372 break;
4821 /* We can't have multiple VLAN interfaces configured on 4373 /* We can't have multiple VLAN interfaces configured on
4822 * the same port and being members in the same bridge. 4374 * the same port and being members in the same bridge.
4823 */ 4375 */
4824 if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port, 4376 if (netif_is_bridge_master(upper_dev) &&
4377 !mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
4825 upper_dev)) 4378 upper_dev))
4826 return -EINVAL; 4379 return -EINVAL;
4827 break; 4380 break;
4828 case NETDEV_CHANGEUPPER: 4381 case NETDEV_CHANGEUPPER:
4829 upper_dev = info->upper_dev; 4382 upper_dev = info->upper_dev;
4830 if (info->linking) { 4383 if (netif_is_bridge_master(upper_dev)) {
4831 if (WARN_ON(!mlxsw_sp_vport)) 4384 if (info->linking)
4832 return -EINVAL; 4385 err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
4833 err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport, 4386 upper_dev);
4834 upper_dev); 4387 else
4388 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
4389 } else if (netif_is_l3_master(upper_dev)) {
4390 if (info->linking)
4391 err = mlxsw_sp_vport_vrf_join(mlxsw_sp_vport);
4392 else
4393 mlxsw_sp_vport_vrf_leave(mlxsw_sp_vport);
4835 } else { 4394 } else {
4836 if (!mlxsw_sp_vport) 4395 err = -EINVAL;
4837 return 0; 4396 WARN_ON(1);
4838 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
4839 } 4397 }
4398 break;
4840 } 4399 }
4841 4400
4842 return err; 4401 return err;
@@ -4862,6 +4421,47 @@ static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
4862 return 0; 4421 return 0;
4863} 4422}
4864 4423
4424static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
4425 unsigned long event, void *ptr)
4426{
4427 struct netdev_notifier_changeupper_info *info;
4428 struct mlxsw_sp *mlxsw_sp;
4429 int err = 0;
4430
4431 mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
4432 if (!mlxsw_sp)
4433 return 0;
4434
4435 info = ptr;
4436
4437 switch (event) {
4438 case NETDEV_PRECHANGEUPPER:
4439 /* VLAN devices are only allowed on top of the
4440 * VLAN-aware bridge.
4441 */
4442 if (WARN_ON(vlan_dev_real_dev(vlan_dev) !=
4443 mlxsw_sp->master_bridge.dev))
4444 return -EINVAL;
4445 if (!netif_is_l3_master(info->upper_dev))
4446 return -EINVAL;
4447 break;
4448 case NETDEV_CHANGEUPPER:
4449 if (netif_is_l3_master(info->upper_dev)) {
4450 if (info->linking)
4451 err = mlxsw_sp_bridge_vrf_join(mlxsw_sp,
4452 vlan_dev);
4453 else
4454 mlxsw_sp_bridge_vrf_leave(mlxsw_sp, vlan_dev);
4455 } else {
4456 err = -EINVAL;
4457 WARN_ON(1);
4458 }
4459 break;
4460 }
4461
4462 return err;
4463}
4464
4865static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 4465static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4866 unsigned long event, void *ptr) 4466 unsigned long event, void *ptr)
4867{ 4467{
@@ -4874,6 +4474,9 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4874 else if (netif_is_lag_master(real_dev)) 4474 else if (netif_is_lag_master(real_dev))
4875 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr, 4475 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
4876 vid); 4476 vid);
4477 else if (netif_is_bridge_master(real_dev))
4478 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, event,
4479 ptr);
4877 4480
4878 return 0; 4481 return 0;
4879} 4482}
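Reading aid, not part of the patch: the expected lifecycle of the flow counter helpers introduced in spectrum.c above is allocate from the FLOW sub-pool, query, free. The sketch below uses only the signatures visible in this diff; the wrapper function and the pr_info() reporting are assumptions for illustration.

	/* Editorial sketch (assumed caller): allocate, read and release one
	 * flow counter.  The read path packs MGPC with a NOP opcode, so
	 * querying the counter does not clear it.
	 */
	static int example_flow_counter_roundtrip(struct mlxsw_sp *mlxsw_sp)
	{
		unsigned int counter_index;
		u64 packets, bytes;
		int err;

		err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
		if (err)
			return err;

		err = mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index,
						&packets, &bytes);
		if (!err)
			pr_info("counter %u: %llu packets, %llu bytes\n",
				counter_index, packets, bytes);

		mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
		return err;
	}

Note that mlxsw_sp_flow_counter_alloc() already clears the counter on allocation (via the CLEAR opcode), so the first read reflects only traffic seen after the rule was installed.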
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 13ec85e7c392..bffd9e698eff 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -58,7 +58,6 @@
58#define MLXSW_SP_VFID_MAX 1024 /* Bridged VLAN interfaces */ 58#define MLXSW_SP_VFID_MAX 1024 /* Bridged VLAN interfaces */
59 59
60#define MLXSW_SP_RFID_BASE 15360 60#define MLXSW_SP_RFID_BASE 15360
61#define MLXSW_SP_INVALID_RIF 0xffff
62 61
63#define MLXSW_SP_MID_MAX 7000 62#define MLXSW_SP_MID_MAX 7000
64 63
@@ -92,6 +91,7 @@ static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
92} 91}
93 92
94struct mlxsw_sp_port; 93struct mlxsw_sp_port;
94struct mlxsw_sp_rif;
95 95
96struct mlxsw_sp_upper { 96struct mlxsw_sp_upper {
97 struct net_device *dev; 97 struct net_device *dev;
@@ -103,21 +103,10 @@ struct mlxsw_sp_fid {
103 struct list_head list; 103 struct list_head list;
104 unsigned int ref_count; 104 unsigned int ref_count;
105 struct net_device *dev; 105 struct net_device *dev;
106 struct mlxsw_sp_rif *r; 106 struct mlxsw_sp_rif *rif;
107 u16 fid; 107 u16 fid;
108}; 108};
109 109
110struct mlxsw_sp_rif {
111 struct list_head nexthop_list;
112 struct list_head neigh_list;
113 struct net_device *dev;
114 unsigned int ref_count;
115 struct mlxsw_sp_fid *f;
116 unsigned char addr[ETH_ALEN];
117 int mtu;
118 u16 rif;
119};
120
121struct mlxsw_sp_mid { 110struct mlxsw_sp_mid {
122 struct list_head list; 111 struct list_head list;
123 unsigned char addr[ETH_ALEN]; 112 unsigned char addr[ETH_ALEN];
@@ -141,16 +130,6 @@ static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
141 return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE; 130 return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
142} 131}
143 132
144static inline bool mlxsw_sp_fid_is_rfid(u16 fid)
145{
146 return fid >= MLXSW_SP_RFID_BASE;
147}
148
149static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
150{
151 return MLXSW_SP_RFID_BASE + rif;
152}
153
154struct mlxsw_sp_sb_pr { 133struct mlxsw_sp_sb_pr {
155 enum mlxsw_reg_sbpr_mode mode; 134 enum mlxsw_reg_sbpr_mode mode;
156 u32 size; 135 u32 size;
@@ -207,11 +186,9 @@ struct mlxsw_sp_fib;
207 186
208struct mlxsw_sp_vr { 187struct mlxsw_sp_vr {
209 u16 id; /* virtual router ID */ 188 u16 id; /* virtual router ID */
210 bool used;
211 enum mlxsw_sp_l3proto proto;
212 u32 tb_id; /* kernel fib table id */ 189 u32 tb_id; /* kernel fib table id */
213 struct mlxsw_sp_lpm_tree *lpm_tree; 190 unsigned int rif_count;
214 struct mlxsw_sp_fib *fib; 191 struct mlxsw_sp_fib *fib4;
215}; 192};
216 193
217enum mlxsw_sp_span_type { 194enum mlxsw_sp_span_type {
@@ -269,6 +246,7 @@ struct mlxsw_sp_router {
269}; 246};
270 247
271struct mlxsw_sp_acl; 248struct mlxsw_sp_acl;
249struct mlxsw_sp_counter_pool;
272 250
273struct mlxsw_sp { 251struct mlxsw_sp {
274 struct { 252 struct {
@@ -304,6 +282,7 @@ struct mlxsw_sp {
304 DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE); 282 DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
305 } kvdl; 283 } kvdl;
306 284
285 struct mlxsw_sp_counter_pool *counter_pool;
307 struct { 286 struct {
308 struct mlxsw_sp_span_entry *entries; 287 struct mlxsw_sp_span_entry *entries;
309 int entries_count; 288 int entries_count;
@@ -386,6 +365,7 @@ struct mlxsw_sp_port {
386}; 365};
387 366
388bool mlxsw_sp_port_dev_check(const struct net_device *dev); 367bool mlxsw_sp_port_dev_check(const struct net_device *dev);
368struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
389struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev); 369struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
390void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port); 370void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
391 371
@@ -497,19 +477,6 @@ mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
497 return NULL; 477 return NULL;
498} 478}
499 479
500static inline struct mlxsw_sp_rif *
501mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
502 const struct net_device *dev)
503{
504 int i;
505
506 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
507 if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
508 return mlxsw_sp->rifs[i];
509
510 return NULL;
511}
512
513enum mlxsw_sp_flood_table { 480enum mlxsw_sp_flood_table {
514 MLXSW_SP_FLOOD_TABLE_UC, 481 MLXSW_SP_FLOOD_TABLE_UC,
515 MLXSW_SP_FLOOD_TABLE_BC, 482 MLXSW_SP_FLOOD_TABLE_BC,
@@ -570,8 +537,6 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
570 bool adding); 537 bool adding);
571struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid); 538struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
572void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f); 539void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
573void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
574 struct mlxsw_sp_rif *r);
575int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 540int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
576 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 541 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
577 bool dwrr, u8 dwrr_weight); 542 bool dwrr, u8 dwrr_weight);
@@ -608,8 +573,19 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
608void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); 573void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
609int mlxsw_sp_router_netevent_event(struct notifier_block *unused, 574int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
610 unsigned long event, void *ptr); 575 unsigned long event, void *ptr);
611void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, 576int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
612 struct mlxsw_sp_rif *r); 577int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
578 unsigned long event, void *ptr);
579void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
580 struct mlxsw_sp_rif *rif);
581int mlxsw_sp_vport_vrf_join(struct mlxsw_sp_port *mlxsw_sp_vport);
582void mlxsw_sp_vport_vrf_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
583int mlxsw_sp_port_vrf_join(struct mlxsw_sp_port *mlxsw_sp_port);
584void mlxsw_sp_port_vrf_leave(struct mlxsw_sp_port *mlxsw_sp_port);
585int mlxsw_sp_bridge_vrf_join(struct mlxsw_sp *mlxsw_sp,
586 struct net_device *l3_dev);
587void mlxsw_sp_bridge_vrf_leave(struct mlxsw_sp *mlxsw_sp,
588 struct net_device *l3_dev);
613 589
614int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count); 590int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
615void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); 591void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
@@ -620,6 +596,8 @@ struct mlxsw_sp_acl_rule_info {
620 unsigned int priority; 596 unsigned int priority;
621 struct mlxsw_afk_element_values values; 597 struct mlxsw_afk_element_values values;
622 struct mlxsw_afa_block *act_block; 598 struct mlxsw_afa_block *act_block;
599 unsigned int counter_index;
600 bool counter_valid;
623}; 601};
624 602
625enum mlxsw_sp_acl_profile { 603enum mlxsw_sp_acl_profile {
@@ -639,6 +617,8 @@ struct mlxsw_sp_acl_profile_ops {
639 void *ruleset_priv, void *rule_priv, 617 void *ruleset_priv, void *rule_priv,
640 struct mlxsw_sp_acl_rule_info *rulei); 618 struct mlxsw_sp_acl_rule_info *rulei);
641 void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv); 619 void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
620 int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
621 bool *activity);
642}; 622};
643 623
644struct mlxsw_sp_acl_ops { 624struct mlxsw_sp_acl_ops {
@@ -679,6 +659,11 @@ int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
679int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, 659int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
680 struct mlxsw_sp_acl_rule_info *rulei, 660 struct mlxsw_sp_acl_rule_info *rulei,
681 struct net_device *out_dev); 661 struct net_device *out_dev);
662int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
663 struct mlxsw_sp_acl_rule_info *rulei,
664 u32 action, u16 vid, u16 proto, u8 prio);
665int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
666 struct mlxsw_sp_acl_rule_info *rulei);
682 667
683struct mlxsw_sp_acl_rule; 668struct mlxsw_sp_acl_rule;
684 669
@@ -698,6 +683,9 @@ mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
698 unsigned long cookie); 683 unsigned long cookie);
699struct mlxsw_sp_acl_rule_info * 684struct mlxsw_sp_acl_rule_info *
700mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule); 685mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule);
686int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
687 struct mlxsw_sp_acl_rule *rule,
688 u64 *packets, u64 *bytes, u64 *last_use);
701 689
702int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp); 690int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
703void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp); 691void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
@@ -708,5 +696,14 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
708 __be16 protocol, struct tc_cls_flower_offload *f); 696 __be16 protocol, struct tc_cls_flower_offload *f);
709void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, 697void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
710 struct tc_cls_flower_offload *f); 698 struct tc_cls_flower_offload *f);
699int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
700 struct tc_cls_flower_offload *f);
701int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
702 unsigned int counter_index, u64 *packets,
703 u64 *bytes);
704int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
705 unsigned int *p_counter_index);
706void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
707 unsigned int counter_index);
711 708
712#endif 709#endif
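
The spectrum.h hunks above grow the ACL API in two directions: struct mlxsw_sp_acl_rule_info gains a counter_index/counter_valid pair, the profile ops gain a rule_activity_get() callback, and the header now exports a small flow-counter interface (mlxsw_sp_flow_counter_alloc/get/free) plus mlxsw_sp_acl_rule_get_stats() and mlxsw_sp_flower_stats(). A minimal sketch of the intended counter life cycle, built only from the declarations above; the caller name is hypothetical and error handling is trimmed:

/* Sketch only: allocate a packets/bytes counter from the shared pool, read
 * its absolute hardware values, and return it when the owning rule goes
 * away. Mirrors the spectrum.h declarations above; not driver code.
 */
static int example_flow_counter_lifecycle(struct mlxsw_sp *mlxsw_sp)
{
        unsigned int counter_index;
        u64 packets, bytes;
        int err;

        err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
        if (err)
                return err;

        err = mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index,
                                        &packets, &bytes);

        mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
        return err;
}
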
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 8a18b3aa70dc..4d6920d45026 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -39,6 +39,7 @@
39#include <linux/string.h> 39#include <linux/string.h>
40#include <linux/rhashtable.h> 40#include <linux/rhashtable.h>
41#include <linux/netdevice.h> 41#include <linux/netdevice.h>
42#include <net/tc_act/tc_vlan.h>
42 43
43#include "reg.h" 44#include "reg.h"
44#include "core.h" 45#include "core.h"
@@ -49,10 +50,17 @@
49#include "spectrum_acl_flex_keys.h" 50#include "spectrum_acl_flex_keys.h"
50 51
51struct mlxsw_sp_acl { 52struct mlxsw_sp_acl {
53 struct mlxsw_sp *mlxsw_sp;
52 struct mlxsw_afk *afk; 54 struct mlxsw_afk *afk;
53 struct mlxsw_afa *afa; 55 struct mlxsw_afa *afa;
54 const struct mlxsw_sp_acl_ops *ops; 56 const struct mlxsw_sp_acl_ops *ops;
55 struct rhashtable ruleset_ht; 57 struct rhashtable ruleset_ht;
58 struct list_head rules;
59 struct {
60 struct delayed_work dw;
61 unsigned long interval; /* ms */
62#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
63 } rule_activity_update;
56 unsigned long priv[0]; 64 unsigned long priv[0];
57 /* priv has to be always the last item */ 65 /* priv has to be always the last item */
58}; 66};
@@ -79,9 +87,13 @@ struct mlxsw_sp_acl_ruleset {
79 87
80struct mlxsw_sp_acl_rule { 88struct mlxsw_sp_acl_rule {
81 struct rhash_head ht_node; /* Member of rule HT */ 89 struct rhash_head ht_node; /* Member of rule HT */
90 struct list_head list;
82 unsigned long cookie; /* HT key */ 91 unsigned long cookie; /* HT key */
83 struct mlxsw_sp_acl_ruleset *ruleset; 92 struct mlxsw_sp_acl_ruleset *ruleset;
84 struct mlxsw_sp_acl_rule_info *rulei; 93 struct mlxsw_sp_acl_rule_info *rulei;
94 u64 last_used;
95 u64 last_packets;
96 u64 last_bytes;
85 unsigned long priv[0]; 97 unsigned long priv[0];
86 /* priv has to be always the last item */ 98 /* priv has to be always the last item */
87}; 99};
@@ -237,6 +249,27 @@ void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
237 mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); 249 mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
238} 250}
239 251
252static int
253mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
254 struct mlxsw_sp_acl_rule_info *rulei)
255{
256 int err;
257
258 err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
259 if (err)
260 return err;
261 rulei->counter_valid = true;
262 return 0;
263}
264
265static void
266mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
267 struct mlxsw_sp_acl_rule_info *rulei)
268{
269 rulei->counter_valid = false;
270 mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
271}
272
240struct mlxsw_sp_acl_rule_info * 273struct mlxsw_sp_acl_rule_info *
241mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl) 274mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
242{ 275{
@@ -335,6 +368,41 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
335 local_port, in_port); 368 local_port, in_port);
336} 369}
337 370
371int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
372 struct mlxsw_sp_acl_rule_info *rulei,
373 u32 action, u16 vid, u16 proto, u8 prio)
374{
375 u8 ethertype;
376
377 if (action == TCA_VLAN_ACT_MODIFY) {
378 switch (proto) {
379 case ETH_P_8021Q:
380 ethertype = 0;
381 break;
382 case ETH_P_8021AD:
383 ethertype = 1;
384 break;
385 default:
386 dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
387 proto);
388 return -EINVAL;
389 }
390
391 return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
392 vid, prio, ethertype);
393 } else {
394 dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
395 return -EINVAL;
396 }
397}
398
399int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
400 struct mlxsw_sp_acl_rule_info *rulei)
401{
402 return mlxsw_afa_block_append_counter(rulei->act_block,
403 rulei->counter_index);
404}
405
338struct mlxsw_sp_acl_rule * 406struct mlxsw_sp_acl_rule *
339mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, 407mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
340 struct mlxsw_sp_acl_ruleset *ruleset, 408 struct mlxsw_sp_acl_ruleset *ruleset,
@@ -358,8 +426,14 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
358 err = PTR_ERR(rule->rulei); 426 err = PTR_ERR(rule->rulei);
359 goto err_rulei_create; 427 goto err_rulei_create;
360 } 428 }
429
430 err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
431 if (err)
432 goto err_counter_alloc;
361 return rule; 433 return rule;
362 434
435err_counter_alloc:
436 mlxsw_sp_acl_rulei_destroy(rule->rulei);
363err_rulei_create: 437err_rulei_create:
364 kfree(rule); 438 kfree(rule);
365err_alloc: 439err_alloc:
@@ -372,6 +446,7 @@ void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
372{ 446{
373 struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; 447 struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
374 448
449 mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
375 mlxsw_sp_acl_rulei_destroy(rule->rulei); 450 mlxsw_sp_acl_rulei_destroy(rule->rulei);
376 kfree(rule); 451 kfree(rule);
377 mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); 452 mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
@@ -393,6 +468,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
393 if (err) 468 if (err)
394 goto err_rhashtable_insert; 469 goto err_rhashtable_insert;
395 470
471 list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
396 return 0; 472 return 0;
397 473
398err_rhashtable_insert: 474err_rhashtable_insert:
@@ -406,6 +482,7 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
406 struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; 482 struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
407 const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; 483 const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
408 484
485 list_del(&rule->list);
409 rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node, 486 rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
410 mlxsw_sp_acl_rule_ht_params); 487 mlxsw_sp_acl_rule_ht_params);
411 ops->rule_del(mlxsw_sp, rule->priv); 488 ops->rule_del(mlxsw_sp, rule->priv);
@@ -426,6 +503,90 @@ mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
426 return rule->rulei; 503 return rule->rulei;
427} 504}
428 505
506static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
507 struct mlxsw_sp_acl_rule *rule)
508{
509 struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
510 const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
511 bool active;
512 int err;
513
514 err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
515 if (err)
516 return err;
517 if (active)
518 rule->last_used = jiffies;
519 return 0;
520}
521
522static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
523{
524 struct mlxsw_sp_acl_rule *rule;
525 int err;
526
527 /* Protect internal structures from changes */
528 rtnl_lock();
529 list_for_each_entry(rule, &acl->rules, list) {
530 err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
531 rule);
532 if (err)
533 goto err_rule_update;
534 }
535 rtnl_unlock();
536 return 0;
537
538err_rule_update:
539 rtnl_unlock();
540 return err;
541}
542
543static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
544{
545 unsigned long interval = acl->rule_activity_update.interval;
546
547 mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
548 msecs_to_jiffies(interval));
549}
550
551static void mlxsw_sp_acl_rul_activity_update_work(struct work_struct *work)
552{
553 struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
554 rule_activity_update.dw.work);
555 int err;
556
557 err = mlxsw_sp_acl_rules_activity_update(acl);
558 if (err)
559 dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity");
560
561 mlxsw_sp_acl_rule_activity_work_schedule(acl);
562}
563
564int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
565 struct mlxsw_sp_acl_rule *rule,
566 u64 *packets, u64 *bytes, u64 *last_use)
567
568{
569 struct mlxsw_sp_acl_rule_info *rulei;
570 u64 current_packets;
571 u64 current_bytes;
572 int err;
573
574 rulei = mlxsw_sp_acl_rule_rulei(rule);
575 err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
576 &current_packets, &current_bytes);
577 if (err)
578 return err;
579
580 *packets = current_packets - rule->last_packets;
581 *bytes = current_bytes - rule->last_bytes;
582 *last_use = rule->last_used;
583
584 rule->last_bytes = current_bytes;
585 rule->last_packets = current_packets;
586
587 return 0;
588}
589
429#define MLXSW_SP_KDVL_ACT_EXT_SIZE 1 590#define MLXSW_SP_KDVL_ACT_EXT_SIZE 1
430 591
431static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, 592static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
@@ -518,7 +679,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
518 if (!acl) 679 if (!acl)
519 return -ENOMEM; 680 return -ENOMEM;
520 mlxsw_sp->acl = acl; 681 mlxsw_sp->acl = acl;
521 682 acl->mlxsw_sp = mlxsw_sp;
522 acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, 683 acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
523 ACL_FLEX_KEYS), 684 ACL_FLEX_KEYS),
524 mlxsw_sp_afk_blocks, 685 mlxsw_sp_afk_blocks,
@@ -541,11 +702,18 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
541 if (err) 702 if (err)
542 goto err_rhashtable_init; 703 goto err_rhashtable_init;
543 704
705 INIT_LIST_HEAD(&acl->rules);
544 err = acl_ops->init(mlxsw_sp, acl->priv); 706 err = acl_ops->init(mlxsw_sp, acl->priv);
545 if (err) 707 if (err)
546 goto err_acl_ops_init; 708 goto err_acl_ops_init;
547 709
548 acl->ops = acl_ops; 710 acl->ops = acl_ops;
711
712 /* Create the delayed work for the rule activity_update */
713 INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
714 mlxsw_sp_acl_rul_activity_update_work);
715 acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
716 mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
549 return 0; 717 return 0;
550 718
551err_acl_ops_init: 719err_acl_ops_init:
@@ -564,7 +732,9 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
564 struct mlxsw_sp_acl *acl = mlxsw_sp->acl; 732 struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
565 const struct mlxsw_sp_acl_ops *acl_ops = acl->ops; 733 const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;
566 734
735 cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
567 acl_ops->fini(mlxsw_sp, acl->priv); 736 acl_ops->fini(mlxsw_sp, acl->priv);
737 WARN_ON(!list_empty(&acl->rules));
568 rhashtable_destroy(&acl->ruleset_ht); 738 rhashtable_destroy(&acl->ruleset_ht);
569 mlxsw_afa_destroy(acl->afa); 739 mlxsw_afa_destroy(acl->afa);
570 mlxsw_afk_destroy(acl->afk); 740 mlxsw_afk_destroy(acl->afk);
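
spectrum_acl.c now strings every offloaded rule onto acl->rules and arms a delayed work (rule_activity_update.dw) that fires every MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS (1000 ms), asks each rule's profile ops whether the entry was hit since the last poll, and stamps rule->last_used with jiffies when it was. mlxsw_sp_acl_rule_get_stats() is delta based: the hardware counter is monotonic, so each query reports the difference against the last_packets/last_bytes snapshot and then advances the snapshot. A minimal sketch of that bookkeeping, with illustrative names standing in for the rule state and the raw reads:

/* Sketch only: report the increase since the previous query and move the
 * snapshot forward. hw_packets/hw_bytes stand in for the values returned
 * by mlxsw_sp_flow_counter_get(). Needs <linux/types.h>.
 */
struct example_stats_snapshot {
        u64 last_packets;
        u64 last_bytes;
};

static void example_stats_delta(struct example_stats_snapshot *s,
                                u64 hw_packets, u64 hw_bytes,
                                u64 *packets, u64 *bytes)
{
        *packets = hw_packets - s->last_packets;
        *bytes = hw_bytes - s->last_bytes;
        s->last_packets = hw_packets;
        s->last_bytes = hw_bytes;
}
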
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
index 82b81cf7f4a7..af7b7bad48df 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
@@ -39,11 +39,15 @@
39 39
40static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = { 40static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
41 MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6), 41 MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6),
42 MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
43 MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
42 MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), 44 MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
43}; 45};
44 46
45static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = { 47static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
46 MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6), 48 MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6),
49 MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
50 MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
47 MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), 51 MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
48}; 52};
49 53
@@ -65,6 +69,8 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
65}; 69};
66 70
67static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = { 71static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
72 MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
73 MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
68 MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16), 74 MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
69 MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16), 75 MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
70}; 76};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 7382832215fa..3a24289979d9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -561,6 +561,24 @@ mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
561 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); 561 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
562} 562}
563 563
564static int
565mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
566 struct mlxsw_sp_acl_tcam_region *region,
567 unsigned int offset,
568 bool *activity)
569{
570 char ptce2_pl[MLXSW_REG_PTCE2_LEN];
571 int err;
572
573 mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
574 region->tcam_region_info, offset);
575 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
576 if (err)
577 return err;
578 *activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
579 return 0;
580}
581
564#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U) 582#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
565 583
566static int 584static int
@@ -940,6 +958,19 @@ static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
940 mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); 958 mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
941} 959}
942 960
961static int
962mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
963 struct mlxsw_sp_acl_tcam_entry *entry,
964 bool *activity)
965{
966 struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
967 struct mlxsw_sp_acl_tcam_region *region = chunk->region;
968
969 return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region,
970 entry->parman_item.index,
971 activity);
972}
973
943static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { 974static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
944 MLXSW_AFK_ELEMENT_SRC_SYS_PORT, 975 MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
945 MLXSW_AFK_ELEMENT_DMAC, 976 MLXSW_AFK_ELEMENT_DMAC,
@@ -950,6 +981,8 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
950 MLXSW_AFK_ELEMENT_DST_IP4, 981 MLXSW_AFK_ELEMENT_DST_IP4,
951 MLXSW_AFK_ELEMENT_DST_L4_PORT, 982 MLXSW_AFK_ELEMENT_DST_L4_PORT,
952 MLXSW_AFK_ELEMENT_SRC_L4_PORT, 983 MLXSW_AFK_ELEMENT_SRC_L4_PORT,
984 MLXSW_AFK_ELEMENT_VID,
985 MLXSW_AFK_ELEMENT_PCP,
953}; 986};
954 987
955static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = { 988static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
@@ -1046,6 +1079,16 @@ mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
1046 mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry); 1079 mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
1047} 1080}
1048 1081
1082static int
1083mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
1084 void *rule_priv, bool *activity)
1085{
1086 struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
1087
1088 return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
1089 activity);
1090}
1091
1049static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { 1092static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
1050 .ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset), 1093 .ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
1051 .ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add, 1094 .ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add,
@@ -1055,6 +1098,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
1055 .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule), 1098 .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
1056 .rule_add = mlxsw_sp_acl_tcam_flower_rule_add, 1099 .rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
1057 .rule_del = mlxsw_sp_acl_tcam_flower_rule_del, 1100 .rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
1101 .rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
1058}; 1102};
1059 1103
1060static const struct mlxsw_sp_acl_profile_ops * 1104static const struct mlxsw_sp_acl_profile_ops *
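
In the TCAM backend the new rule_activity_get op reads the entry's activity bit through a PTCE2 query issued with MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ, so the bit answers "was this entry hit since the previous poll?" and is reset by the query itself. The periodic work in spectrum_acl.c therefore folds each poll into rule->last_used rather than re-reading the bit. A toy model of that contract, with hypothetical names:

/* Toy model only: clear-on-read semantics mean two back-to-back polls with
 * no traffic in between report activity at most once, so the caller keeps
 * a timestamp instead of the raw bit.
 */
struct example_entry {
        bool hw_activity;               /* stands in for the PTCE2 'a' bit */
        unsigned long last_used;
};

static void example_activity_poll(struct example_entry *e, unsigned long now)
{
        bool active = e->hw_activity;

        e->hw_activity = false;         /* cleared by the query itself */
        if (active)
                e->last_used = now;
}
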
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
new file mode 100644
index 000000000000..1631e01908c0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -0,0 +1,198 @@
1/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
3 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the names of the copyright holders nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * Alternatively, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") version 2 as published by the Free
20 * Software Foundation.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <linux/kernel.h>
36#include <linux/bitops.h>
37
38#include "spectrum_cnt.h"
39
40#define MLXSW_SP_COUNTER_POOL_BANK_SIZE 4096
41
42struct mlxsw_sp_counter_sub_pool {
43 unsigned int base_index;
44 unsigned int size;
45 unsigned int entry_size;
46 unsigned int bank_count;
47};
48
49struct mlxsw_sp_counter_pool {
50 unsigned int pool_size;
51 unsigned long *usage; /* Usage bitmap */
52 struct mlxsw_sp_counter_sub_pool *sub_pools;
53};
54
55static struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
56 [MLXSW_SP_COUNTER_SUB_POOL_FLOW] = {
57 .bank_count = 6,
58 },
59};
60
61static int mlxsw_sp_counter_pool_validate(struct mlxsw_sp *mlxsw_sp)
62{
63 unsigned int total_bank_config = 0;
64 unsigned int pool_size;
65 int i;
66
67 pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
68 /* Check config is valid, no bank over subscription */
69 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++)
70 total_bank_config += mlxsw_sp_counter_sub_pools[i].bank_count;
71 if (total_bank_config > pool_size / MLXSW_SP_COUNTER_POOL_BANK_SIZE + 1)
72 return -EINVAL;
73 return 0;
74}
75
76static int mlxsw_sp_counter_sub_pools_prepare(struct mlxsw_sp *mlxsw_sp)
77{
78 struct mlxsw_sp_counter_sub_pool *sub_pool;
79
80 /* Prepare generic flow pool*/
81 sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_FLOW];
82 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_PACKETS_BYTES))
83 return -EIO;
84 sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
85 COUNTER_SIZE_PACKETS_BYTES);
86 return 0;
87}
88
89int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
90{
91 struct mlxsw_sp_counter_sub_pool *sub_pool;
92 struct mlxsw_sp_counter_pool *pool;
93 unsigned int base_index;
94 unsigned int map_size;
95 int i;
96 int err;
97
98 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_POOL_SIZE))
99 return -EIO;
100
101 err = mlxsw_sp_counter_pool_validate(mlxsw_sp);
102 if (err)
103 return err;
104
105 err = mlxsw_sp_counter_sub_pools_prepare(mlxsw_sp);
106 if (err)
107 return err;
108
109 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
110 if (!pool)
111 return -ENOMEM;
112
113 pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
114 map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
115
116 pool->usage = kzalloc(map_size, GFP_KERNEL);
117 if (!pool->usage) {
118 err = -ENOMEM;
119 goto err_usage_alloc;
120 }
121
122 pool->sub_pools = mlxsw_sp_counter_sub_pools;
123 /* Allocation is based on bank count which should be
124 * specified for each sub pool statically.
125 */
126 base_index = 0;
127 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
128 sub_pool = &pool->sub_pools[i];
129 sub_pool->size = sub_pool->bank_count *
130 MLXSW_SP_COUNTER_POOL_BANK_SIZE;
131 sub_pool->base_index = base_index;
132 base_index += sub_pool->size;
133 /* The last bank can't be fully used */
134 if (sub_pool->base_index + sub_pool->size > pool->pool_size)
135 sub_pool->size = pool->pool_size - sub_pool->base_index;
136 }
137
138 mlxsw_sp->counter_pool = pool;
139 return 0;
140
141err_usage_alloc:
142 kfree(pool);
143 return err;
144}
145
146void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
147{
148 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
149
150 WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
151 pool->pool_size);
152 kfree(pool->usage);
153 kfree(pool);
154}
155
156int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
157 enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
158 unsigned int *p_counter_index)
159{
160 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
161 struct mlxsw_sp_counter_sub_pool *sub_pool;
162 unsigned int entry_index;
163 unsigned int stop_index;
164 int i;
165
166 sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
167 stop_index = sub_pool->base_index + sub_pool->size;
168 entry_index = sub_pool->base_index;
169
170 entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
171 if (entry_index == stop_index)
172 return -ENOBUFS;
173 /* The sub-pools can contain non-integer number of entries
174 * so we must check for overflow
175 */
176 if (entry_index + sub_pool->entry_size > stop_index)
177 return -ENOBUFS;
178 for (i = 0; i < sub_pool->entry_size; i++)
179 __set_bit(entry_index + i, pool->usage);
180
181 *p_counter_index = entry_index;
182 return 0;
183}
184
185void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
186 enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
187 unsigned int counter_index)
188{
189 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
190 struct mlxsw_sp_counter_sub_pool *sub_pool;
191 int i;
192
193 if (WARN_ON(counter_index >= pool->pool_size))
194 return;
195 sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
196 for (i = 0; i < sub_pool->entry_size; i++)
197 __clear_bit(counter_index + i, pool->usage);
198}
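
The new spectrum_cnt.c allocator carves the hardware counter space (COUNTER_POOL_SIZE entries) into sub-pools built from whole 4096-entry banks; only a flow sub-pool with 6 banks is defined so far, and its entry_size comes from the COUNTER_SIZE_PACKETS_BYTES resource. Usage is tracked with one bit per pool entry, and an allocation claims entry_size consecutive bits starting at the first free index inside the sub-pool's range (the overflow check exists because the last bank may be truncated by the pool size). A standalone sketch of the same bitmap scheme, with illustrative names rather than the driver's API:

/* Sketch only: claim 'entry_size' consecutive bits inside
 * [base_index, base_index + size) of a usage bitmap, as the allocator above
 * does for one counter. Needs <linux/bitops.h> and <linux/errno.h>.
 */
static int example_counter_alloc(unsigned long *usage,
                                 unsigned int base_index, unsigned int size,
                                 unsigned int entry_size,
                                 unsigned int *p_counter_index)
{
        unsigned int stop_index = base_index + size;
        unsigned int entry_index;
        unsigned int i;

        entry_index = find_next_zero_bit(usage, stop_index, base_index);
        if (entry_index == stop_index ||
            entry_index + entry_size > stop_index)
                return -ENOBUFS;        /* sub-pool exhausted */
        for (i = 0; i < entry_size; i++)
                __set_bit(entry_index + i, usage);
        *p_counter_index = entry_index;
        return 0;
}
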
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
new file mode 100644
index 000000000000..031bc4abbe2d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
@@ -0,0 +1,53 @@
1/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
3 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2017 Arkadi Sharshevsky <arkdis@mellanox.com>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the names of the copyright holders nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * Alternatively, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") version 2 as published by the Free
20 * Software Foundation.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#ifndef _MLXSW_SPECTRUM_CNT_H
36#define _MLXSW_SPECTRUM_CNT_H
37
38#include "spectrum.h"
39
40enum mlxsw_sp_counter_sub_pool_id {
41 MLXSW_SP_COUNTER_SUB_POOL_FLOW,
42};
43
44int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
45 enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
46 unsigned int *p_counter_index);
47void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
48 enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
49 unsigned int counter_index);
50int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp);
51void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp);
52
53#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index ae6cccc666e4..e724c6266247 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -39,6 +39,7 @@
39#include <net/pkt_cls.h> 39#include <net/pkt_cls.h>
40#include <net/tc_act/tc_gact.h> 40#include <net/tc_act/tc_gact.h>
41#include <net/tc_act/tc_mirred.h> 41#include <net/tc_act/tc_mirred.h>
42#include <net/tc_act/tc_vlan.h>
42 43
43#include "spectrum.h" 44#include "spectrum.h"
44#include "core_acl_flex_keys.h" 45#include "core_acl_flex_keys.h"
@@ -55,6 +56,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
55 if (tc_no_actions(exts)) 56 if (tc_no_actions(exts))
56 return 0; 57 return 0;
57 58
59 /* Count action is inserted first */
60 err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei);
61 if (err)
62 return err;
63
58 tcf_exts_to_list(exts, &actions); 64 tcf_exts_to_list(exts, &actions);
59 list_for_each_entry(a, &actions, list) { 65 list_for_each_entry(a, &actions, list) {
60 if (is_tcf_gact_shot(a)) { 66 if (is_tcf_gact_shot(a)) {
@@ -73,6 +79,15 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
73 out_dev); 79 out_dev);
74 if (err) 80 if (err)
75 return err; 81 return err;
82 } else if (is_tcf_vlan(a)) {
83 u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
84 u32 action = tcf_vlan_action(a);
85 u8 prio = tcf_vlan_push_prio(a);
86 u16 vid = tcf_vlan_push_vid(a);
87
88 return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
89 action, vid,
90 proto, prio);
76 } else { 91 } else {
77 dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n"); 92 dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
78 return -EOPNOTSUPP; 93 return -EOPNOTSUPP;
@@ -173,7 +188,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
173 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 188 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
174 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 189 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
175 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 190 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
176 BIT(FLOW_DISSECTOR_KEY_PORTS))) { 191 BIT(FLOW_DISSECTOR_KEY_PORTS) |
192 BIT(FLOW_DISSECTOR_KEY_VLAN))) {
177 dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n"); 193 dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
178 return -EOPNOTSUPP; 194 return -EOPNOTSUPP;
179 } 195 }
@@ -234,6 +250,27 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
234 sizeof(key->src)); 250 sizeof(key->src));
235 } 251 }
236 252
253 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
254 struct flow_dissector_key_vlan *key =
255 skb_flow_dissector_target(f->dissector,
256 FLOW_DISSECTOR_KEY_VLAN,
257 f->key);
258 struct flow_dissector_key_vlan *mask =
259 skb_flow_dissector_target(f->dissector,
260 FLOW_DISSECTOR_KEY_VLAN,
261 f->mask);
262 if (mask->vlan_id != 0)
263 mlxsw_sp_acl_rulei_keymask_u32(rulei,
264 MLXSW_AFK_ELEMENT_VID,
265 key->vlan_id,
266 mask->vlan_id);
267 if (mask->vlan_priority != 0)
268 mlxsw_sp_acl_rulei_keymask_u32(rulei,
269 MLXSW_AFK_ELEMENT_PCP,
270 key->vlan_priority,
271 mask->vlan_priority);
272 }
273
237 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) 274 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
238 mlxsw_sp_flower_parse_ipv4(rulei, f); 275 mlxsw_sp_flower_parse_ipv4(rulei, f);
239 276
@@ -314,3 +351,47 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
314 351
315 mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); 352 mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
316} 353}
354
355int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
356 struct tc_cls_flower_offload *f)
357{
358 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
359 struct mlxsw_sp_acl_ruleset *ruleset;
360 struct mlxsw_sp_acl_rule *rule;
361 struct tc_action *a;
362 LIST_HEAD(actions);
363 u64 packets;
364 u64 lastuse;
365 u64 bytes;
366 int err;
367
368 ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
369 ingress,
370 MLXSW_SP_ACL_PROFILE_FLOWER);
371 if (WARN_ON(IS_ERR(ruleset)))
372 return -EINVAL;
373
374 rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
375 if (!rule)
376 return -EINVAL;
377
378 err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &bytes, &packets,
379 &lastuse);
380 if (err)
381 goto err_rule_get_stats;
382
383 preempt_disable();
384
385 tcf_exts_to_list(f->exts, &actions);
386 list_for_each_entry(a, &actions, list)
387 tcf_action_stats_update(a, bytes, packets, lastuse);
388
389 preempt_enable();
390
391 mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
392 return 0;
393
394err_rule_get_stats:
395 mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
396 return err;
397}
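
mlxsw_sp_flower_stats() closes the loop back to TC: it resolves the ruleset for the port and direction, looks the rule up by the flower cookie, pulls the delta statistics from the bound hardware counter via mlxsw_sp_acl_rule_get_stats(), and pushes them into the software actions with tcf_action_stats_update() under preempt_disable(). (Note that the call above passes &bytes and &packets in the opposite order from the prototype's packets/bytes parameters, so the two values appear to reach tcf_action_stats_update() swapped.) A hedged sketch of the dispatch this hook is meant to plug into; the real caller lives in spectrum.c and is not shown in this hunk:

/* Sketch only, assuming the usual cls_flower offload command switch and the
 * spectrum.h declarations above; needs "spectrum.h" and <net/pkt_cls.h>.
 */
static int example_setup_flower(struct mlxsw_sp_port *mlxsw_sp_port,
                                bool ingress, __be16 protocol,
                                struct tc_cls_flower_offload *f)
{
        switch (f->command) {
        case TC_CLSFLOWER_REPLACE:
                return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
                                               protocol, f);
        case TC_CLSFLOWER_DESTROY:
                mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
                return 0;
        case TC_CLSFLOWER_STATS:
                return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
        default:
                return -EOPNOTSUPP;
        }
}
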
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index bd8de6b9be71..fe4a55e3272b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -41,15 +41,33 @@
41#include <linux/in6.h> 41#include <linux/in6.h>
42#include <linux/notifier.h> 42#include <linux/notifier.h>
43#include <linux/inetdevice.h> 43#include <linux/inetdevice.h>
44#include <linux/netdevice.h>
44#include <net/netevent.h> 45#include <net/netevent.h>
45#include <net/neighbour.h> 46#include <net/neighbour.h>
46#include <net/arp.h> 47#include <net/arp.h>
47#include <net/ip_fib.h> 48#include <net/ip_fib.h>
49#include <net/fib_rules.h>
50#include <net/l3mdev.h>
48 51
49#include "spectrum.h" 52#include "spectrum.h"
50#include "core.h" 53#include "core.h"
51#include "reg.h" 54#include "reg.h"
52 55
56struct mlxsw_sp_rif {
57 struct list_head nexthop_list;
58 struct list_head neigh_list;
59 struct net_device *dev;
60 struct mlxsw_sp_fid *f;
61 unsigned char addr[ETH_ALEN];
62 int mtu;
63 u16 rif_index;
64 u16 vr_id;
65};
66
67static struct mlxsw_sp_rif *
68mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
69 const struct net_device *dev);
70
53#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \ 71#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
54 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT) 72 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
55 73
@@ -89,12 +107,6 @@ mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
89} 107}
90 108
91static void 109static void
92mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
93{
94 memset(prefix_usage, 0, sizeof(*prefix_usage));
95}
96
97static void
98mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage, 110mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
99 unsigned char prefix_len) 111 unsigned char prefix_len)
100{ 112{
@@ -125,7 +137,7 @@ struct mlxsw_sp_fib_node {
125 struct list_head entry_list; 137 struct list_head entry_list;
126 struct list_head list; 138 struct list_head list;
127 struct rhash_head ht_node; 139 struct rhash_head ht_node;
128 struct mlxsw_sp_vr *vr; 140 struct mlxsw_sp_fib *fib;
129 struct mlxsw_sp_fib_key key; 141 struct mlxsw_sp_fib_key key;
130}; 142};
131 143
@@ -149,13 +161,17 @@ struct mlxsw_sp_fib_entry {
149struct mlxsw_sp_fib { 161struct mlxsw_sp_fib {
150 struct rhashtable ht; 162 struct rhashtable ht;
151 struct list_head node_list; 163 struct list_head node_list;
164 struct mlxsw_sp_vr *vr;
165 struct mlxsw_sp_lpm_tree *lpm_tree;
152 unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT]; 166 unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
153 struct mlxsw_sp_prefix_usage prefix_usage; 167 struct mlxsw_sp_prefix_usage prefix_usage;
168 enum mlxsw_sp_l3proto proto;
154}; 169};
155 170
156static const struct rhashtable_params mlxsw_sp_fib_ht_params; 171static const struct rhashtable_params mlxsw_sp_fib_ht_params;
157 172
158static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void) 173static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
174 enum mlxsw_sp_l3proto proto)
159{ 175{
160 struct mlxsw_sp_fib *fib; 176 struct mlxsw_sp_fib *fib;
161 int err; 177 int err;
@@ -167,6 +183,8 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
167 if (err) 183 if (err)
168 goto err_rhashtable_init; 184 goto err_rhashtable_init;
169 INIT_LIST_HEAD(&fib->node_list); 185 INIT_LIST_HEAD(&fib->node_list);
186 fib->proto = proto;
187 fib->vr = vr;
170 return fib; 188 return fib;
171 189
172err_rhashtable_init: 190err_rhashtable_init:
@@ -177,24 +195,21 @@ err_rhashtable_init:
177static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib) 195static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
178{ 196{
179 WARN_ON(!list_empty(&fib->node_list)); 197 WARN_ON(!list_empty(&fib->node_list));
198 WARN_ON(fib->lpm_tree);
180 rhashtable_destroy(&fib->ht); 199 rhashtable_destroy(&fib->ht);
181 kfree(fib); 200 kfree(fib);
182} 201}
183 202
184static struct mlxsw_sp_lpm_tree * 203static struct mlxsw_sp_lpm_tree *
185mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved) 204mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
186{ 205{
187 static struct mlxsw_sp_lpm_tree *lpm_tree; 206 static struct mlxsw_sp_lpm_tree *lpm_tree;
188 int i; 207 int i;
189 208
190 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) { 209 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
191 lpm_tree = &mlxsw_sp->router.lpm_trees[i]; 210 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
192 if (lpm_tree->ref_count == 0) { 211 if (lpm_tree->ref_count == 0)
193 if (one_reserved) 212 return lpm_tree;
194 one_reserved = false;
195 else
196 return lpm_tree;
197 }
198 } 213 }
199 return NULL; 214 return NULL;
200} 215}
@@ -248,12 +263,12 @@ mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
248static struct mlxsw_sp_lpm_tree * 263static struct mlxsw_sp_lpm_tree *
249mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp, 264mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
250 struct mlxsw_sp_prefix_usage *prefix_usage, 265 struct mlxsw_sp_prefix_usage *prefix_usage,
251 enum mlxsw_sp_l3proto proto, bool one_reserved) 266 enum mlxsw_sp_l3proto proto)
252{ 267{
253 struct mlxsw_sp_lpm_tree *lpm_tree; 268 struct mlxsw_sp_lpm_tree *lpm_tree;
254 int err; 269 int err;
255 270
256 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved); 271 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
257 if (!lpm_tree) 272 if (!lpm_tree)
258 return ERR_PTR(-EBUSY); 273 return ERR_PTR(-EBUSY);
259 lpm_tree->proto = proto; 274 lpm_tree->proto = proto;
@@ -283,7 +298,7 @@ static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
283static struct mlxsw_sp_lpm_tree * 298static struct mlxsw_sp_lpm_tree *
284mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp, 299mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
285 struct mlxsw_sp_prefix_usage *prefix_usage, 300 struct mlxsw_sp_prefix_usage *prefix_usage,
286 enum mlxsw_sp_l3proto proto, bool one_reserved) 301 enum mlxsw_sp_l3proto proto)
287{ 302{
288 struct mlxsw_sp_lpm_tree *lpm_tree; 303 struct mlxsw_sp_lpm_tree *lpm_tree;
289 int i; 304 int i;
@@ -297,7 +312,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
297 goto inc_ref_count; 312 goto inc_ref_count;
298 } 313 }
299 lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, 314 lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
300 proto, one_reserved); 315 proto);
301 if (IS_ERR(lpm_tree)) 316 if (IS_ERR(lpm_tree))
302 return lpm_tree; 317 return lpm_tree;
303 318
@@ -325,6 +340,11 @@ static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
325 } 340 }
326} 341}
327 342
343static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
344{
345 return !!vr->fib4;
346}
347
328static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp) 348static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
329{ 349{
330 struct mlxsw_sp_vr *vr; 350 struct mlxsw_sp_vr *vr;
@@ -332,31 +352,31 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
332 352
333 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { 353 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
334 vr = &mlxsw_sp->router.vrs[i]; 354 vr = &mlxsw_sp->router.vrs[i];
335 if (!vr->used) 355 if (!mlxsw_sp_vr_is_used(vr))
336 return vr; 356 return vr;
337 } 357 }
338 return NULL; 358 return NULL;
339} 359}
340 360
341static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp, 361static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
342 struct mlxsw_sp_vr *vr) 362 const struct mlxsw_sp_fib *fib)
343{ 363{
344 char raltb_pl[MLXSW_REG_RALTB_LEN]; 364 char raltb_pl[MLXSW_REG_RALTB_LEN];
345 365
346 mlxsw_reg_raltb_pack(raltb_pl, vr->id, 366 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
347 (enum mlxsw_reg_ralxx_protocol) vr->proto, 367 (enum mlxsw_reg_ralxx_protocol) fib->proto,
348 vr->lpm_tree->id); 368 fib->lpm_tree->id);
349 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); 369 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
350} 370}
351 371
352static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp, 372static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
353 struct mlxsw_sp_vr *vr) 373 const struct mlxsw_sp_fib *fib)
354{ 374{
355 char raltb_pl[MLXSW_REG_RALTB_LEN]; 375 char raltb_pl[MLXSW_REG_RALTB_LEN];
356 376
357 /* Bind to tree 0 which is default */ 377 /* Bind to tree 0 which is default */
358 mlxsw_reg_raltb_pack(raltb_pl, vr->id, 378 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
359 (enum mlxsw_reg_ralxx_protocol) vr->proto, 0); 379 (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
360 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); 380 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
361} 381}
362 382
@@ -369,8 +389,7 @@ static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
369} 389}
370 390
371static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp, 391static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
372 u32 tb_id, 392 u32 tb_id)
373 enum mlxsw_sp_l3proto proto)
374{ 393{
375 struct mlxsw_sp_vr *vr; 394 struct mlxsw_sp_vr *vr;
376 int i; 395 int i;
@@ -379,69 +398,50 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
379 398
380 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { 399 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
381 vr = &mlxsw_sp->router.vrs[i]; 400 vr = &mlxsw_sp->router.vrs[i];
382 if (vr->used && vr->proto == proto && vr->tb_id == tb_id) 401 if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
383 return vr; 402 return vr;
384 } 403 }
385 return NULL; 404 return NULL;
386} 405}
387 406
407static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
408 enum mlxsw_sp_l3proto proto)
409{
410 switch (proto) {
411 case MLXSW_SP_L3_PROTO_IPV4:
412 return vr->fib4;
413 case MLXSW_SP_L3_PROTO_IPV6:
414 BUG_ON(1);
415 }
416 return NULL;
417}
418
388static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, 419static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
389 unsigned char prefix_len, 420 u32 tb_id)
390 u32 tb_id,
391 enum mlxsw_sp_l3proto proto)
392{ 421{
393 struct mlxsw_sp_prefix_usage req_prefix_usage;
394 struct mlxsw_sp_lpm_tree *lpm_tree;
395 struct mlxsw_sp_vr *vr; 422 struct mlxsw_sp_vr *vr;
396 int err;
397 423
398 vr = mlxsw_sp_vr_find_unused(mlxsw_sp); 424 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
399 if (!vr) 425 if (!vr)
400 return ERR_PTR(-EBUSY); 426 return ERR_PTR(-EBUSY);
401 vr->fib = mlxsw_sp_fib_create(); 427 vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
402 if (IS_ERR(vr->fib)) 428 if (IS_ERR(vr->fib4))
403 return ERR_CAST(vr->fib); 429 return ERR_CAST(vr->fib4);
404
405 vr->proto = proto;
406 vr->tb_id = tb_id; 430 vr->tb_id = tb_id;
407 mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
408 mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
409 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
410 proto, true);
411 if (IS_ERR(lpm_tree)) {
412 err = PTR_ERR(lpm_tree);
413 goto err_tree_get;
414 }
415 vr->lpm_tree = lpm_tree;
416 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
417 if (err)
418 goto err_tree_bind;
419
420 vr->used = true;
421 return vr; 431 return vr;
422
423err_tree_bind:
424 mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
425err_tree_get:
426 mlxsw_sp_fib_destroy(vr->fib);
427
428 return ERR_PTR(err);
429} 432}
430 433
431static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp, 434static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
432 struct mlxsw_sp_vr *vr)
433{ 435{
434 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr); 436 mlxsw_sp_fib_destroy(vr->fib4);
435 mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree); 437 vr->fib4 = NULL;
436 mlxsw_sp_fib_destroy(vr->fib);
437 vr->used = false;
438} 438}
439 439
440static int 440static int
441mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, 441mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
442 struct mlxsw_sp_prefix_usage *req_prefix_usage) 442 struct mlxsw_sp_prefix_usage *req_prefix_usage)
443{ 443{
444 struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree; 444 struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
445 struct mlxsw_sp_lpm_tree *new_tree; 445 struct mlxsw_sp_lpm_tree *new_tree;
446 int err; 446 int err;
447 447
@@ -449,7 +449,7 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
449 return 0; 449 return 0;
450 450
451 new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage, 451 new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
452 vr->proto, false); 452 fib->proto);
453 if (IS_ERR(new_tree)) { 453 if (IS_ERR(new_tree)) {
454 /* We failed to get a tree according to the required 454 /* We failed to get a tree according to the required
455 * prefix usage. However, the current tree might be still good 455 * prefix usage. However, the current tree might be still good
@@ -463,8 +463,8 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
463 } 463 }
464 464
465 /* Prevent packet loss by overwriting existing binding */ 465 /* Prevent packet loss by overwriting existing binding */
466 vr->lpm_tree = new_tree; 466 fib->lpm_tree = new_tree;
467 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr); 467 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
468 if (err) 468 if (err)
469 goto err_tree_bind; 469 goto err_tree_bind;
470 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); 470 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
@@ -472,53 +472,26 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
472 return 0; 472 return 0;
473 473
474err_tree_bind: 474err_tree_bind:
475 vr->lpm_tree = lpm_tree; 475 fib->lpm_tree = lpm_tree;
476 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree); 476 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
477 return err; 477 return err;
478} 478}
479 479
480static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, 480static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
481 unsigned char prefix_len,
482 u32 tb_id,
483 enum mlxsw_sp_l3proto proto)
484{ 481{
485 struct mlxsw_sp_vr *vr; 482 struct mlxsw_sp_vr *vr;
486 int err;
487 483
488 tb_id = mlxsw_sp_fix_tb_id(tb_id); 484 tb_id = mlxsw_sp_fix_tb_id(tb_id);
489 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto); 485 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
490 if (!vr) { 486 if (!vr)
491 vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto); 487 vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
492 if (IS_ERR(vr))
493 return vr;
494 } else {
495 struct mlxsw_sp_prefix_usage req_prefix_usage;
496
497 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
498 &vr->fib->prefix_usage);
499 mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
500 /* Need to replace LPM tree in case new prefix is required. */
501 err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
502 &req_prefix_usage);
503 if (err)
504 return ERR_PTR(err);
505 }
506 return vr; 488 return vr;
507} 489}
508 490
509static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr) 491static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
510{ 492{
511 /* Destroy virtual router entity in case the associated FIB is empty 493 if (!vr->rif_count && list_empty(&vr->fib4->node_list))
512 * and allow it to be used for other tables in future. Otherwise, 494 mlxsw_sp_vr_destroy(vr);
513 * check if some prefix usage did not disappear and change tree if
514 * that is the case. Note that in case new, smaller tree cannot be
515 * allocated, the original one will be kept being used.
516 */
517 if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
518 mlxsw_sp_vr_destroy(mlxsw_sp, vr);
519 else
520 mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
521 &vr->fib->prefix_usage);
522} 495}
523 496
524static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp) 497static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
@@ -627,14 +600,14 @@ static struct mlxsw_sp_neigh_entry *
627mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) 600mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
628{ 601{
629 struct mlxsw_sp_neigh_entry *neigh_entry; 602 struct mlxsw_sp_neigh_entry *neigh_entry;
630 struct mlxsw_sp_rif *r; 603 struct mlxsw_sp_rif *rif;
631 int err; 604 int err;
632 605
633 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); 606 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
634 if (!r) 607 if (!rif)
635 return ERR_PTR(-EINVAL); 608 return ERR_PTR(-EINVAL);
636 609
637 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, r->rif); 610 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
638 if (!neigh_entry) 611 if (!neigh_entry)
639 return ERR_PTR(-ENOMEM); 612 return ERR_PTR(-ENOMEM);
640 613
@@ -642,7 +615,7 @@ mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
642 if (err) 615 if (err)
643 goto err_neigh_entry_insert; 616 goto err_neigh_entry_insert;
644 617
645 list_add(&neigh_entry->rif_list_node, &r->neigh_list); 618 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
646 619
647 return neigh_entry; 620 return neigh_entry;
648 621
@@ -1050,22 +1023,22 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
1050} 1023}
1051 1024
1052static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp, 1025static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
1053 const struct mlxsw_sp_rif *r) 1026 const struct mlxsw_sp_rif *rif)
1054{ 1027{
1055 char rauht_pl[MLXSW_REG_RAUHT_LEN]; 1028 char rauht_pl[MLXSW_REG_RAUHT_LEN];
1056 1029
1057 mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL, 1030 mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
1058 r->rif, r->addr); 1031 rif->rif_index, rif->addr);
1059 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); 1032 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1060} 1033}
1061 1034
1062static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, 1035static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
1063 struct mlxsw_sp_rif *r) 1036 struct mlxsw_sp_rif *rif)
1064{ 1037{
1065 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp; 1038 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
1066 1039
1067 mlxsw_sp_neigh_rif_flush(mlxsw_sp, r); 1040 mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
1068 list_for_each_entry_safe(neigh_entry, tmp, &r->neigh_list, 1041 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
1069 rif_list_node) 1042 rif_list_node)
1070 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); 1043 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
1071} 1044}
@@ -1082,7 +1055,7 @@ struct mlxsw_sp_nexthop {
1082 */ 1055 */
1083 struct rhash_head ht_node; 1056 struct rhash_head ht_node;
1084 struct mlxsw_sp_nexthop_key key; 1057 struct mlxsw_sp_nexthop_key key;
1085 struct mlxsw_sp_rif *r; 1058 struct mlxsw_sp_rif *rif;
1086 u8 should_offload:1, /* set indicates this neigh is connected and 1059 u8 should_offload:1, /* set indicates this neigh is connected and
1087 * should be put to KVD linear area of this group. 1060 * should be put to KVD linear area of this group.
1088 */ 1061 */
@@ -1109,7 +1082,7 @@ struct mlxsw_sp_nexthop_group {
1109 u16 ecmp_size; 1082 u16 ecmp_size;
1110 u16 count; 1083 u16 count;
1111 struct mlxsw_sp_nexthop nexthops[0]; 1084 struct mlxsw_sp_nexthop nexthops[0];
1112#define nh_rif nexthops[0].r 1085#define nh_rif nexthops[0].rif
1113}; 1086};
1114 1087
1115static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = { 1088static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
@@ -1171,7 +1144,7 @@ mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
1171} 1144}
1172 1145
1173static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp, 1146static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
1174 struct mlxsw_sp_vr *vr, 1147 const struct mlxsw_sp_fib *fib,
1175 u32 adj_index, u16 ecmp_size, 1148 u32 adj_index, u16 ecmp_size,
1176 u32 new_adj_index, 1149 u32 new_adj_index,
1177 u16 new_ecmp_size) 1150 u16 new_ecmp_size)
@@ -1179,8 +1152,8 @@ static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
1179 char raleu_pl[MLXSW_REG_RALEU_LEN]; 1152 char raleu_pl[MLXSW_REG_RALEU_LEN];
1180 1153
1181 mlxsw_reg_raleu_pack(raleu_pl, 1154 mlxsw_reg_raleu_pack(raleu_pl,
1182 (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id, 1155 (enum mlxsw_reg_ralxx_protocol) fib->proto,
1183 adj_index, ecmp_size, new_adj_index, 1156 fib->vr->id, adj_index, ecmp_size, new_adj_index,
1184 new_ecmp_size); 1157 new_ecmp_size);
1185 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl); 1158 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
1186} 1159}
@@ -1190,14 +1163,14 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
1190 u32 old_adj_index, u16 old_ecmp_size) 1163 u32 old_adj_index, u16 old_ecmp_size)
1191{ 1164{
1192 struct mlxsw_sp_fib_entry *fib_entry; 1165 struct mlxsw_sp_fib_entry *fib_entry;
1193 struct mlxsw_sp_vr *vr = NULL; 1166 struct mlxsw_sp_fib *fib = NULL;
1194 int err; 1167 int err;
1195 1168
1196 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { 1169 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1197 if (vr == fib_entry->fib_node->vr) 1170 if (fib == fib_entry->fib_node->fib)
1198 continue; 1171 continue;
1199 vr = fib_entry->fib_node->vr; 1172 fib = fib_entry->fib_node->fib;
1200 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr, 1173 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
1201 old_adj_index, 1174 old_adj_index,
1202 old_ecmp_size, 1175 old_ecmp_size,
1203 nh_grp->adj_index, 1176 nh_grp->adj_index,
@@ -1399,22 +1372,22 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1399} 1372}
1400 1373
1401static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh, 1374static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
1402 struct mlxsw_sp_rif *r) 1375 struct mlxsw_sp_rif *rif)
1403{ 1376{
1404 if (nh->r) 1377 if (nh->rif)
1405 return; 1378 return;
1406 1379
1407 nh->r = r; 1380 nh->rif = rif;
1408 list_add(&nh->rif_list_node, &r->nexthop_list); 1381 list_add(&nh->rif_list_node, &rif->nexthop_list);
1409} 1382}
1410 1383
1411static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh) 1384static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
1412{ 1385{
1413 if (!nh->r) 1386 if (!nh->rif)
1414 return; 1387 return;
1415 1388
1416 list_del(&nh->rif_list_node); 1389 list_del(&nh->rif_list_node);
1417 nh->r = NULL; 1390 nh->rif = NULL;
1418} 1391}
1419 1392
1420static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp, 1393static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
@@ -1505,7 +1478,7 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
1505{ 1478{
1506 struct net_device *dev = fib_nh->nh_dev; 1479 struct net_device *dev = fib_nh->nh_dev;
1507 struct in_device *in_dev; 1480 struct in_device *in_dev;
1508 struct mlxsw_sp_rif *r; 1481 struct mlxsw_sp_rif *rif;
1509 int err; 1482 int err;
1510 1483
1511 nh->nh_grp = nh_grp; 1484 nh->nh_grp = nh_grp;
@@ -1514,15 +1487,18 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
1514 if (err) 1487 if (err)
1515 return err; 1488 return err;
1516 1489
1490 if (!dev)
1491 return 0;
1492
1517 in_dev = __in_dev_get_rtnl(dev); 1493 in_dev = __in_dev_get_rtnl(dev);
1518 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) && 1494 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
1519 fib_nh->nh_flags & RTNH_F_LINKDOWN) 1495 fib_nh->nh_flags & RTNH_F_LINKDOWN)
1520 return 0; 1496 return 0;
1521 1497
1522 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); 1498 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
1523 if (!r) 1499 if (!rif)
1524 return 0; 1500 return 0;
1525 mlxsw_sp_nexthop_rif_init(nh, r); 1501 mlxsw_sp_nexthop_rif_init(nh, rif);
1526 1502
1527 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); 1503 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1528 if (err) 1504 if (err)
@@ -1548,7 +1524,7 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
1548{ 1524{
1549 struct mlxsw_sp_nexthop_key key; 1525 struct mlxsw_sp_nexthop_key key;
1550 struct mlxsw_sp_nexthop *nh; 1526 struct mlxsw_sp_nexthop *nh;
1551 struct mlxsw_sp_rif *r; 1527 struct mlxsw_sp_rif *rif;
1552 1528
1553 if (mlxsw_sp->router.aborted) 1529 if (mlxsw_sp->router.aborted)
1554 return; 1530 return;
@@ -1558,13 +1534,13 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
1558 if (WARN_ON_ONCE(!nh)) 1534 if (WARN_ON_ONCE(!nh))
1559 return; 1535 return;
1560 1536
1561 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev); 1537 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
1562 if (!r) 1538 if (!rif)
1563 return; 1539 return;
1564 1540
1565 switch (event) { 1541 switch (event) {
1566 case FIB_EVENT_NH_ADD: 1542 case FIB_EVENT_NH_ADD:
1567 mlxsw_sp_nexthop_rif_init(nh, r); 1543 mlxsw_sp_nexthop_rif_init(nh, rif);
1568 mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); 1544 mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1569 break; 1545 break;
1570 case FIB_EVENT_NH_DEL: 1546 case FIB_EVENT_NH_DEL:
@@ -1577,11 +1553,11 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
1577} 1553}
1578 1554
1579static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, 1555static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
1580 struct mlxsw_sp_rif *r) 1556 struct mlxsw_sp_rif *rif)
1581{ 1557{
1582 struct mlxsw_sp_nexthop *nh, *tmp; 1558 struct mlxsw_sp_nexthop *nh, *tmp;
1583 1559
1584 list_for_each_entry_safe(nh, tmp, &r->nexthop_list, rif_list_node) { 1560 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
1585 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); 1561 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
1586 mlxsw_sp_nexthop_rif_fini(nh); 1562 mlxsw_sp_nexthop_rif_fini(nh);
1587 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); 1563 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
@@ -1699,7 +1675,7 @@ static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
1699{ 1675{
1700 fib_entry->offloaded = true; 1676 fib_entry->offloaded = true;
1701 1677
1702 switch (fib_entry->fib_node->vr->proto) { 1678 switch (fib_entry->fib_node->fib->proto) {
1703 case MLXSW_SP_L3_PROTO_IPV4: 1679 case MLXSW_SP_L3_PROTO_IPV4:
1704 fib_info_offload_inc(fib_entry->nh_group->key.fi); 1680 fib_info_offload_inc(fib_entry->nh_group->key.fi);
1705 break; 1681 break;
@@ -1711,7 +1687,7 @@ static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
1711static void 1687static void
1712mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) 1688mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
1713{ 1689{
1714 switch (fib_entry->fib_node->vr->proto) { 1690 switch (fib_entry->fib_node->fib->proto) {
1715 case MLXSW_SP_L3_PROTO_IPV4: 1691 case MLXSW_SP_L3_PROTO_IPV4:
1716 fib_info_offload_dec(fib_entry->nh_group->key.fi); 1692 fib_info_offload_dec(fib_entry->nh_group->key.fi);
1717 break; 1693 break;
@@ -1751,8 +1727,8 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
1751 enum mlxsw_reg_ralue_op op) 1727 enum mlxsw_reg_ralue_op op)
1752{ 1728{
1753 char ralue_pl[MLXSW_REG_RALUE_LEN]; 1729 char ralue_pl[MLXSW_REG_RALUE_LEN];
1730 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
1754 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; 1731 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
1755 struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
1756 enum mlxsw_reg_ralue_trap_action trap_action; 1732 enum mlxsw_reg_ralue_trap_action trap_action;
1757 u16 trap_id = 0; 1733 u16 trap_id = 0;
1758 u32 adjacency_index = 0; 1734 u32 adjacency_index = 0;
@@ -1772,8 +1748,8 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
1772 } 1748 }
1773 1749
1774 mlxsw_reg_ralue_pack4(ralue_pl, 1750 mlxsw_reg_ralue_pack4(ralue_pl,
1775 (enum mlxsw_reg_ralxx_protocol) vr->proto, op, 1751 (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
1776 vr->id, fib_entry->fib_node->key.prefix_len, 1752 fib->vr->id, fib_entry->fib_node->key.prefix_len,
1777 *p_dip); 1753 *p_dip);
1778 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id, 1754 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
1779 adjacency_index, ecmp_size); 1755 adjacency_index, ecmp_size);
@@ -1784,27 +1760,28 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
1784 struct mlxsw_sp_fib_entry *fib_entry, 1760 struct mlxsw_sp_fib_entry *fib_entry,
1785 enum mlxsw_reg_ralue_op op) 1761 enum mlxsw_reg_ralue_op op)
1786{ 1762{
1787 struct mlxsw_sp_rif *r = fib_entry->nh_group->nh_rif; 1763 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
1764 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
1788 enum mlxsw_reg_ralue_trap_action trap_action; 1765 enum mlxsw_reg_ralue_trap_action trap_action;
1789 char ralue_pl[MLXSW_REG_RALUE_LEN]; 1766 char ralue_pl[MLXSW_REG_RALUE_LEN];
1790 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; 1767 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
1791 struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
1792 u16 trap_id = 0; 1768 u16 trap_id = 0;
1793 u16 rif = 0; 1769 u16 rif_index = 0;
1794 1770
1795 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) { 1771 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
1796 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP; 1772 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
1797 rif = r->rif; 1773 rif_index = rif->rif_index;
1798 } else { 1774 } else {
1799 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP; 1775 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
1800 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0; 1776 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
1801 } 1777 }
1802 1778
1803 mlxsw_reg_ralue_pack4(ralue_pl, 1779 mlxsw_reg_ralue_pack4(ralue_pl,
1804 (enum mlxsw_reg_ralxx_protocol) vr->proto, op, 1780 (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
1805 vr->id, fib_entry->fib_node->key.prefix_len, 1781 fib->vr->id, fib_entry->fib_node->key.prefix_len,
1806 *p_dip); 1782 *p_dip);
1807 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif); 1783 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
1784 rif_index);
1808 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); 1785 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
1809} 1786}
1810 1787
@@ -1812,13 +1789,13 @@ static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
1812 struct mlxsw_sp_fib_entry *fib_entry, 1789 struct mlxsw_sp_fib_entry *fib_entry,
1813 enum mlxsw_reg_ralue_op op) 1790 enum mlxsw_reg_ralue_op op)
1814{ 1791{
1792 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
1815 char ralue_pl[MLXSW_REG_RALUE_LEN]; 1793 char ralue_pl[MLXSW_REG_RALUE_LEN];
1816 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; 1794 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
1817 struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
1818 1795
1819 mlxsw_reg_ralue_pack4(ralue_pl, 1796 mlxsw_reg_ralue_pack4(ralue_pl,
1820 (enum mlxsw_reg_ralxx_protocol) vr->proto, op, 1797 (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
1821 vr->id, fib_entry->fib_node->key.prefix_len, 1798 fib->vr->id, fib_entry->fib_node->key.prefix_len,
1822 *p_dip); 1799 *p_dip);
1823 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl); 1800 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
1824 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); 1801 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
@@ -1845,7 +1822,7 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
1845{ 1822{
1846 int err = -EINVAL; 1823 int err = -EINVAL;
1847 1824
1848 switch (fib_entry->fib_node->vr->proto) { 1825 switch (fib_entry->fib_node->fib->proto) {
1849 case MLXSW_SP_L3_PROTO_IPV4: 1826 case MLXSW_SP_L3_PROTO_IPV4:
1850 err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op); 1827 err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
1851 break; 1828 break;
@@ -1877,17 +1854,29 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
1877{ 1854{
1878 struct fib_info *fi = fen_info->fi; 1855 struct fib_info *fi = fen_info->fi;
1879 1856
1880 if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) { 1857 switch (fen_info->type) {
1858 case RTN_BROADCAST: /* fall through */
1859 case RTN_LOCAL:
1881 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; 1860 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1882 return 0; 1861 return 0;
1883 } 1862 case RTN_UNREACHABLE: /* fall through */
1884 if (fen_info->type != RTN_UNICAST) 1863 case RTN_BLACKHOLE: /* fall through */
1885 return -EINVAL; 1864 case RTN_PROHIBIT:
1886 if (fi->fib_nh->nh_scope != RT_SCOPE_LINK) 1865 /* Packets hitting these routes need to be trapped, but
1866 * can do so with a lower priority than packets directed
1867 * at the host, so use action type local instead of trap.
1868 */
1887 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; 1869 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
1888 else 1870 return 0;
1889 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; 1871 case RTN_UNICAST:
1890 return 0; 1872 if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
1873 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
1874 else
1875 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
1876 return 0;
1877 default:
1878 return -EINVAL;
1879 }
1891} 1880}
1892 1881
1893static struct mlxsw_sp_fib_entry * 1882static struct mlxsw_sp_fib_entry *
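For quick reference, a minimal standalone sketch of the route-type mapping that the reworked switch statement above implements; the enum and function names below are illustrative stand-ins, not the driver's own types. Broadcast and host-local routes are trapped to the CPU, unreachable/blackhole/prohibit routes use the lower-priority local action, and unicast routes become local or remote depending on whether the nexthop has link scope.

/* Simplified illustration of the route-type mapping; stand-in names only. */
#include <stdio.h>

enum rt_type { RT_UNICAST, RT_LOCAL, RT_BROADCAST, RT_UNREACHABLE,
               RT_BLACKHOLE, RT_PROHIBIT };
enum entry_type { ENTRY_TRAP, ENTRY_LOCAL, ENTRY_REMOTE, ENTRY_INVALID };

static enum entry_type fib4_entry_type(enum rt_type type, int nh_is_link_scope)
{
        switch (type) {
        case RT_BROADCAST:
        case RT_LOCAL:
                return ENTRY_TRAP;      /* packets destined to the host itself */
        case RT_UNREACHABLE:
        case RT_BLACKHOLE:
        case RT_PROHIBIT:
                return ENTRY_LOCAL;     /* still trapped, but at lower priority */
        case RT_UNICAST:
                return nh_is_link_scope ? ENTRY_REMOTE : ENTRY_LOCAL;
        default:
                return ENTRY_INVALID;   /* corresponds to the -EINVAL path */
        }
}

int main(void)
{
        printf("blackhole -> %d, unicast/link-scope nexthop -> %d\n",
               fib4_entry_type(RT_BLACKHOLE, 0),
               fib4_entry_type(RT_UNICAST, 1));
        return 0;
}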
@@ -1996,7 +1985,7 @@ mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1996} 1985}
1997 1986
1998static struct mlxsw_sp_fib_node * 1987static struct mlxsw_sp_fib_node *
1999mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr, 1988mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
2000 size_t addr_len, unsigned char prefix_len) 1989 size_t addr_len, unsigned char prefix_len)
2001{ 1990{
2002 struct mlxsw_sp_fib_node *fib_node; 1991 struct mlxsw_sp_fib_node *fib_node;
@@ -2006,18 +1995,15 @@ mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr,
2006 return NULL; 1995 return NULL;
2007 1996
2008 INIT_LIST_HEAD(&fib_node->entry_list); 1997 INIT_LIST_HEAD(&fib_node->entry_list);
2009 list_add(&fib_node->list, &vr->fib->node_list); 1998 list_add(&fib_node->list, &fib->node_list);
2010 memcpy(fib_node->key.addr, addr, addr_len); 1999 memcpy(fib_node->key.addr, addr, addr_len);
2011 fib_node->key.prefix_len = prefix_len; 2000 fib_node->key.prefix_len = prefix_len;
2012 mlxsw_sp_fib_node_insert(vr->fib, fib_node);
2013 fib_node->vr = vr;
2014 2001
2015 return fib_node; 2002 return fib_node;
2016} 2003}
2017 2004
2018static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node) 2005static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
2019{ 2006{
2020 mlxsw_sp_fib_node_remove(fib_node->vr->fib, fib_node);
2021 list_del(&fib_node->list); 2007 list_del(&fib_node->list);
2022 WARN_ON(!list_empty(&fib_node->entry_list)); 2008 WARN_ON(!list_empty(&fib_node->entry_list));
2023 kfree(fib_node); 2009 kfree(fib_node);
@@ -2034,7 +2020,7 @@ mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2034static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node) 2020static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
2035{ 2021{
2036 unsigned char prefix_len = fib_node->key.prefix_len; 2022 unsigned char prefix_len = fib_node->key.prefix_len;
2037 struct mlxsw_sp_fib *fib = fib_node->vr->fib; 2023 struct mlxsw_sp_fib *fib = fib_node->fib;
2038 2024
2039 if (fib->prefix_ref_count[prefix_len]++ == 0) 2025 if (fib->prefix_ref_count[prefix_len]++ == 0)
2040 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len); 2026 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
@@ -2043,32 +2029,98 @@ static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
2043static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node) 2029static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
2044{ 2030{
2045 unsigned char prefix_len = fib_node->key.prefix_len; 2031 unsigned char prefix_len = fib_node->key.prefix_len;
2046 struct mlxsw_sp_fib *fib = fib_node->vr->fib; 2032 struct mlxsw_sp_fib *fib = fib_node->fib;
2047 2033
2048 if (--fib->prefix_ref_count[prefix_len] == 0) 2034 if (--fib->prefix_ref_count[prefix_len] == 0)
2049 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len); 2035 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
2050} 2036}
2051 2037
2038static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
2039 struct mlxsw_sp_fib_node *fib_node,
2040 struct mlxsw_sp_fib *fib)
2041{
2042 struct mlxsw_sp_prefix_usage req_prefix_usage;
2043 struct mlxsw_sp_lpm_tree *lpm_tree;
2044 int err;
2045
2046 err = mlxsw_sp_fib_node_insert(fib, fib_node);
2047 if (err)
2048 return err;
2049 fib_node->fib = fib;
2050
2051 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
2052 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
2053
2054 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2055 err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
2056 &req_prefix_usage);
2057 if (err)
2058 goto err_tree_check;
2059 } else {
2060 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
2061 fib->proto);
2062 if (IS_ERR(lpm_tree))
2063 return PTR_ERR(lpm_tree);
2064 fib->lpm_tree = lpm_tree;
2065 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
2066 if (err)
2067 goto err_tree_bind;
2068 }
2069
2070 mlxsw_sp_fib_node_prefix_inc(fib_node);
2071
2072 return 0;
2073
2074err_tree_bind:
2075 fib->lpm_tree = NULL;
2076 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2077err_tree_check:
2078 fib_node->fib = NULL;
2079 mlxsw_sp_fib_node_remove(fib, fib_node);
2080 return err;
2081}
2082
2083static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
2084 struct mlxsw_sp_fib_node *fib_node)
2085{
2086 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
2087 struct mlxsw_sp_fib *fib = fib_node->fib;
2088
2089 mlxsw_sp_fib_node_prefix_dec(fib_node);
2090
2091 if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2092 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
2093 fib->lpm_tree = NULL;
2094 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2095 } else {
2096 mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
2097 }
2098
2099 fib_node->fib = NULL;
2100 mlxsw_sp_fib_node_remove(fib, fib_node);
2101}
2102
2052static struct mlxsw_sp_fib_node * 2103static struct mlxsw_sp_fib_node *
2053mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp, 2104mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
2054 const struct fib_entry_notifier_info *fen_info) 2105 const struct fib_entry_notifier_info *fen_info)
2055{ 2106{
2056 struct mlxsw_sp_fib_node *fib_node; 2107 struct mlxsw_sp_fib_node *fib_node;
2108 struct mlxsw_sp_fib *fib;
2057 struct mlxsw_sp_vr *vr; 2109 struct mlxsw_sp_vr *vr;
2058 int err; 2110 int err;
2059 2111
2060 vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id, 2112 vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
2061 MLXSW_SP_L3_PROTO_IPV4);
2062 if (IS_ERR(vr)) 2113 if (IS_ERR(vr))
2063 return ERR_CAST(vr); 2114 return ERR_CAST(vr);
2115 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
2064 2116
2065 fib_node = mlxsw_sp_fib_node_lookup(vr->fib, &fen_info->dst, 2117 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
2066 sizeof(fen_info->dst), 2118 sizeof(fen_info->dst),
2067 fen_info->dst_len); 2119 fen_info->dst_len);
2068 if (fib_node) 2120 if (fib_node)
2069 return fib_node; 2121 return fib_node;
2070 2122
2071 fib_node = mlxsw_sp_fib_node_create(vr, &fen_info->dst, 2123 fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
2072 sizeof(fen_info->dst), 2124 sizeof(fen_info->dst),
2073 fen_info->dst_len); 2125 fen_info->dst_len);
2074 if (!fib_node) { 2126 if (!fib_node) {
@@ -2076,22 +2128,29 @@ mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
2076 goto err_fib_node_create; 2128 goto err_fib_node_create;
2077 } 2129 }
2078 2130
2131 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
2132 if (err)
2133 goto err_fib_node_init;
2134
2079 return fib_node; 2135 return fib_node;
2080 2136
2137err_fib_node_init:
2138 mlxsw_sp_fib_node_destroy(fib_node);
2081err_fib_node_create: 2139err_fib_node_create:
2082 mlxsw_sp_vr_put(mlxsw_sp, vr); 2140 mlxsw_sp_vr_put(vr);
2083 return ERR_PTR(err); 2141 return ERR_PTR(err);
2084} 2142}
2085 2143
2086static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp, 2144static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
2087 struct mlxsw_sp_fib_node *fib_node) 2145 struct mlxsw_sp_fib_node *fib_node)
2088{ 2146{
2089 struct mlxsw_sp_vr *vr = fib_node->vr; 2147 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
2090 2148
2091 if (!list_empty(&fib_node->entry_list)) 2149 if (!list_empty(&fib_node->entry_list))
2092 return; 2150 return;
2151 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
2093 mlxsw_sp_fib_node_destroy(fib_node); 2152 mlxsw_sp_fib_node_destroy(fib_node);
2094 mlxsw_sp_vr_put(mlxsw_sp, vr); 2153 mlxsw_sp_vr_put(vr);
2095} 2154}
2096 2155
2097static struct mlxsw_sp_fib_entry * 2156static struct mlxsw_sp_fib_entry *
@@ -2236,8 +2295,6 @@ static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
2236 if (err) 2295 if (err)
2237 goto err_fib4_node_entry_add; 2296 goto err_fib4_node_entry_add;
2238 2297
2239 mlxsw_sp_fib_node_prefix_inc(fib_node);
2240
2241 return 0; 2298 return 0;
2242 2299
2243err_fib4_node_entry_add: 2300err_fib4_node_entry_add:
@@ -2251,7 +2308,6 @@ mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
2251{ 2308{
2252 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; 2309 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2253 2310
2254 mlxsw_sp_fib_node_prefix_dec(fib_node);
2255 mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry); 2311 mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
2256 mlxsw_sp_fib4_node_list_remove(fib_entry); 2312 mlxsw_sp_fib4_node_list_remove(fib_entry);
2257} 2313}
@@ -2340,9 +2396,7 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
2340{ 2396{
2341 char ralta_pl[MLXSW_REG_RALTA_LEN]; 2397 char ralta_pl[MLXSW_REG_RALTA_LEN];
2342 char ralst_pl[MLXSW_REG_RALST_LEN]; 2398 char ralst_pl[MLXSW_REG_RALST_LEN];
2343 char raltb_pl[MLXSW_REG_RALTB_LEN]; 2399 int i, err;
2344 char ralue_pl[MLXSW_REG_RALUE_LEN];
2345 int err;
2346 2400
2347 mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4, 2401 mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
2348 MLXSW_SP_LPM_TREE_MIN); 2402 MLXSW_SP_LPM_TREE_MIN);
@@ -2355,16 +2409,33 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
2355 if (err) 2409 if (err)
2356 return err; 2410 return err;
2357 2411
2358 mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, 2412 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
2359 MLXSW_SP_LPM_TREE_MIN); 2413 struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
2360 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); 2414 char raltb_pl[MLXSW_REG_RALTB_LEN];
2361 if (err) 2415 char ralue_pl[MLXSW_REG_RALUE_LEN];
2362 return err;
2363 2416
2364 mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4, 2417 if (!mlxsw_sp_vr_is_used(vr))
2365 MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 0, 0); 2418 continue;
2366 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl); 2419
2367 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); 2420 mlxsw_reg_raltb_pack(raltb_pl, vr->id,
2421 MLXSW_REG_RALXX_PROTOCOL_IPV4,
2422 MLXSW_SP_LPM_TREE_MIN);
2423 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
2424 raltb_pl);
2425 if (err)
2426 return err;
2427
2428 mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
2429 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
2430 0);
2431 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
2432 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
2433 ralue_pl);
2434 if (err)
2435 return err;
2436 }
2437
2438 return 0;
2368} 2439}
2369 2440
2370static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp, 2441static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
@@ -2390,7 +2461,7 @@ static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
2390static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp, 2461static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
2391 struct mlxsw_sp_fib_node *fib_node) 2462 struct mlxsw_sp_fib_node *fib_node)
2392{ 2463{
2393 switch (fib_node->vr->proto) { 2464 switch (fib_node->fib->proto) {
2394 case MLXSW_SP_L3_PROTO_IPV4: 2465 case MLXSW_SP_L3_PROTO_IPV4:
2395 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node); 2466 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
2396 break; 2467 break;
@@ -2400,26 +2471,32 @@ static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
2400 } 2471 }
2401} 2472}
2402 2473
2403static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) 2474static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
2475 struct mlxsw_sp_vr *vr,
2476 enum mlxsw_sp_l3proto proto)
2404{ 2477{
2478 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
2405 struct mlxsw_sp_fib_node *fib_node, *tmp; 2479 struct mlxsw_sp_fib_node *fib_node, *tmp;
2406 struct mlxsw_sp_vr *vr; 2480
2481 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
2482 bool do_break = &tmp->list == &fib->node_list;
2483
2484 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
2485 if (do_break)
2486 break;
2487 }
2488}
2489
2490static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
2491{
2407 int i; 2492 int i;
2408 2493
2409 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { 2494 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
2410 vr = &mlxsw_sp->router.vrs[i]; 2495 struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
2411 2496
2412 if (!vr->used) 2497 if (!mlxsw_sp_vr_is_used(vr))
2413 continue; 2498 continue;
2414 2499 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
2415 list_for_each_entry_safe(fib_node, tmp, &vr->fib->node_list,
2416 list) {
2417 bool do_break = &tmp->list == &vr->fib->node_list;
2418
2419 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
2420 if (do_break)
2421 break;
2422 }
2423 } 2500 }
2424} 2501}
2425 2502
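The new mlxsw_sp_vr_fib_flush() loop above records a do_break flag before flushing each node: flushing the last node can drop the virtual router's last reference and free the FIB that owns node_list, so whether the iterator has reached the list head must be decided before the destructive call, and the loop must stop right after it. Below is a standalone sketch of the same pattern with made-up table/node types (not the driver's structures), assuming a container that frees itself once its list becomes empty.

/* Standalone sketch of the "do_break" guard; all names are illustrative. */
#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *prev, *next;
};

struct table {
        struct node head;       /* circular doubly-linked list */
        int count;
};

static struct table *table_new(int n)
{
        struct table *tbl = malloc(sizeof(*tbl));
        int i;

        tbl->head.prev = tbl->head.next = &tbl->head;
        tbl->count = 0;
        for (i = 0; i < n; i++) {
                struct node *nd = malloc(sizeof(*nd));

                nd->next = tbl->head.next;
                nd->prev = &tbl->head;
                tbl->head.next->prev = nd;
                tbl->head.next = nd;
                tbl->count++;
        }
        return tbl;
}

/* Removes and frees @nd; frees the whole table once it becomes empty. */
static void node_flush(struct table **ptbl, struct node *nd)
{
        nd->prev->next = nd->next;
        nd->next->prev = nd->prev;
        free(nd);
        if (--(*ptbl)->count == 0) {
                free(*ptbl);
                *ptbl = NULL;
        }
}

static void table_flush(struct table **ptbl)
{
        struct node *nd = (*ptbl)->head.next;

        while (nd != &(*ptbl)->head) {
                struct node *tmp = nd->next;
                /* Decide *now* whether this is the last node ... */
                int do_break = tmp == &(*ptbl)->head;

                node_flush(ptbl, nd);   /* ... because this may free *ptbl */
                if (do_break)
                        break;
                nd = tmp;
        }
}

int main(void)
{
        struct table *tbl = table_new(3);

        table_flush(&tbl);
        printf("table %s\n", tbl ? "still allocated" : "fully flushed and freed");
        return 0;
}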
@@ -2437,74 +2514,11 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
2437 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n"); 2514 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
2438} 2515}
2439 2516
2440static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
2441{
2442 char ritr_pl[MLXSW_REG_RITR_LEN];
2443 int err;
2444
2445 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
2446 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2447 if (WARN_ON_ONCE(err))
2448 return err;
2449
2450 mlxsw_reg_ritr_enable_set(ritr_pl, false);
2451 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2452}
2453
2454void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2455 struct mlxsw_sp_rif *r)
2456{
2457 mlxsw_sp_router_rif_disable(mlxsw_sp, r->rif);
2458 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, r);
2459 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, r);
2460}
2461
2462static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
2463{
2464 char rgcr_pl[MLXSW_REG_RGCR_LEN];
2465 u64 max_rifs;
2466 int err;
2467
2468 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
2469 return -EIO;
2470
2471 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
2472 mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
2473 GFP_KERNEL);
2474 if (!mlxsw_sp->rifs)
2475 return -ENOMEM;
2476
2477 mlxsw_reg_rgcr_pack(rgcr_pl, true);
2478 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
2479 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
2480 if (err)
2481 goto err_rgcr_fail;
2482
2483 return 0;
2484
2485err_rgcr_fail:
2486 kfree(mlxsw_sp->rifs);
2487 return err;
2488}
2489
2490static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
2491{
2492 char rgcr_pl[MLXSW_REG_RGCR_LEN];
2493 int i;
2494
2495 mlxsw_reg_rgcr_pack(rgcr_pl, false);
2496 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
2497
2498 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
2499 WARN_ON_ONCE(mlxsw_sp->rifs[i]);
2500
2501 kfree(mlxsw_sp->rifs);
2502}
2503
2504struct mlxsw_sp_fib_event_work { 2517struct mlxsw_sp_fib_event_work {
2505 struct work_struct work; 2518 struct work_struct work;
2506 union { 2519 union {
2507 struct fib_entry_notifier_info fen_info; 2520 struct fib_entry_notifier_info fen_info;
2521 struct fib_rule_notifier_info fr_info;
2508 struct fib_nh_notifier_info fnh_info; 2522 struct fib_nh_notifier_info fnh_info;
2509 }; 2523 };
2510 struct mlxsw_sp *mlxsw_sp; 2524 struct mlxsw_sp *mlxsw_sp;
@@ -2516,6 +2530,7 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
2516 struct mlxsw_sp_fib_event_work *fib_work = 2530 struct mlxsw_sp_fib_event_work *fib_work =
2517 container_of(work, struct mlxsw_sp_fib_event_work, work); 2531 container_of(work, struct mlxsw_sp_fib_event_work, work);
2518 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; 2532 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
2533 struct fib_rule *rule;
2519 bool replace, append; 2534 bool replace, append;
2520 int err; 2535 int err;
2521 2536
@@ -2539,7 +2554,10 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
2539 break; 2554 break;
2540 case FIB_EVENT_RULE_ADD: /* fall through */ 2555 case FIB_EVENT_RULE_ADD: /* fall through */
2541 case FIB_EVENT_RULE_DEL: 2556 case FIB_EVENT_RULE_DEL:
2542 mlxsw_sp_router_fib4_abort(mlxsw_sp); 2557 rule = fib_work->fr_info.rule;
2558 if (!fib4_rule_default(rule) && !rule->l3mdev)
2559 mlxsw_sp_router_fib4_abort(mlxsw_sp);
2560 fib_rule_put(rule);
2543 break; 2561 break;
2544 case FIB_EVENT_NH_ADD: /* fall through */ 2562 case FIB_EVENT_NH_ADD: /* fall through */
2545 case FIB_EVENT_NH_DEL: 2563 case FIB_EVENT_NH_DEL:
@@ -2582,6 +2600,11 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
2582 */ 2600 */
2583 fib_info_hold(fib_work->fen_info.fi); 2601 fib_info_hold(fib_work->fen_info.fi);
2584 break; 2602 break;
2603 case FIB_EVENT_RULE_ADD: /* fall through */
2604 case FIB_EVENT_RULE_DEL:
2605 memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
2606 fib_rule_get(fib_work->fr_info.rule);
2607 break;
2585 case FIB_EVENT_NH_ADD: /* fall through */ 2608 case FIB_EVENT_NH_ADD: /* fall through */
2586 case FIB_EVENT_NH_DEL: 2609 case FIB_EVENT_NH_DEL:
2587 memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info)); 2610 memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
@@ -2594,6 +2617,688 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
2594 return NOTIFY_DONE; 2617 return NOTIFY_DONE;
2595} 2618}
2596 2619
2620static struct mlxsw_sp_rif *
2621mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
2622 const struct net_device *dev)
2623{
2624 int i;
2625
2626 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
2627 if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
2628 return mlxsw_sp->rifs[i];
2629
2630 return NULL;
2631}
2632
2633static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
2634{
2635 char ritr_pl[MLXSW_REG_RITR_LEN];
2636 int err;
2637
2638 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
2639 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2640 if (WARN_ON_ONCE(err))
2641 return err;
2642
2643 mlxsw_reg_ritr_enable_set(ritr_pl, false);
2644 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2645}
2646
2647static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2648 struct mlxsw_sp_rif *rif)
2649{
2650 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
2651 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
2652 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
2653}
2654
2655static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif,
2656 const struct in_device *in_dev,
2657 unsigned long event)
2658{
2659 switch (event) {
2660 case NETDEV_UP:
2661 if (!rif)
2662 return true;
2663 return false;
2664 case NETDEV_DOWN:
2665 if (rif && !in_dev->ifa_list &&
2666 !netif_is_l3_slave(rif->dev))
2667 return true;
2668 /* It is possible we already removed the RIF ourselves
2669 * if it was assigned to a netdev that is now a bridge
2670 * or LAG slave.
2671 */
2672 return false;
2673 }
2674
2675 return false;
2676}
2677
2678#define MLXSW_SP_INVALID_INDEX_RIF 0xffff
2679static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
2680{
2681 int i;
2682
2683 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
2684 if (!mlxsw_sp->rifs[i])
2685 return i;
2686
2687 return MLXSW_SP_INVALID_INDEX_RIF;
2688}
2689
2690static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
2691 bool *p_lagged, u16 *p_system_port)
2692{
2693 u8 local_port = mlxsw_sp_vport->local_port;
2694
2695 *p_lagged = mlxsw_sp_vport->lagged;
2696 *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
2697}
2698
2699static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
2700 u16 vr_id, struct net_device *l3_dev,
2701 u16 rif_index, bool create)
2702{
2703 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2704 bool lagged = mlxsw_sp_vport->lagged;
2705 char ritr_pl[MLXSW_REG_RITR_LEN];
2706 u16 system_port;
2707
2708 mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif_index,
2709 vr_id, l3_dev->mtu, l3_dev->dev_addr);
2710
2711 mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
2712 mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
2713 mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
2714
2715 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2716}
2717
2718static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
2719
2720static u16 mlxsw_sp_rif_sp_to_fid(u16 rif_index)
2721{
2722 return MLXSW_SP_RFID_BASE + rif_index;
2723}
2724
2725static struct mlxsw_sp_fid *
2726mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
2727{
2728 struct mlxsw_sp_fid *f;
2729
2730 f = kzalloc(sizeof(*f), GFP_KERNEL);
2731 if (!f)
2732 return NULL;
2733
2734 f->leave = mlxsw_sp_vport_rif_sp_leave;
2735 f->ref_count = 0;
2736 f->dev = l3_dev;
2737 f->fid = fid;
2738
2739 return f;
2740}
2741
2742static struct mlxsw_sp_rif *
2743mlxsw_sp_rif_alloc(u16 rif_index, u16 vr_id, struct net_device *l3_dev,
2744 struct mlxsw_sp_fid *f)
2745{
2746 struct mlxsw_sp_rif *rif;
2747
2748 rif = kzalloc(sizeof(*rif), GFP_KERNEL);
2749 if (!rif)
2750 return NULL;
2751
2752 INIT_LIST_HEAD(&rif->nexthop_list);
2753 INIT_LIST_HEAD(&rif->neigh_list);
2754 ether_addr_copy(rif->addr, l3_dev->dev_addr);
2755 rif->mtu = l3_dev->mtu;
2756 rif->vr_id = vr_id;
2757 rif->dev = l3_dev;
2758 rif->rif_index = rif_index;
2759 rif->f = f;
2760
2761 return rif;
2762}
2763
2764static struct mlxsw_sp_rif *
2765mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
2766 struct net_device *l3_dev)
2767{
2768 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2769 u32 tb_id = l3mdev_fib_table(l3_dev);
2770 struct mlxsw_sp_vr *vr;
2771 struct mlxsw_sp_fid *f;
2772 struct mlxsw_sp_rif *rif;
2773 u16 fid, rif_index;
2774 int err;
2775
2776 rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
2777 if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
2778 return ERR_PTR(-ERANGE);
2779
2780 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
2781 if (IS_ERR(vr))
2782 return ERR_CAST(vr);
2783
2784 err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev,
2785 rif_index, true);
2786 if (err)
2787 goto err_vport_rif_sp_op;
2788
2789 fid = mlxsw_sp_rif_sp_to_fid(rif_index);
2790 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
2791 if (err)
2792 goto err_rif_fdb_op;
2793
2794 f = mlxsw_sp_rfid_alloc(fid, l3_dev);
2795 if (!f) {
2796 err = -ENOMEM;
2797 goto err_rfid_alloc;
2798 }
2799
2800 rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
2801 if (!rif) {
2802 err = -ENOMEM;
2803 goto err_rif_alloc;
2804 }
2805
2806 f->rif = rif;
2807 mlxsw_sp->rifs[rif_index] = rif;
2808 vr->rif_count++;
2809
2810 return rif;
2811
2812err_rif_alloc:
2813 kfree(f);
2814err_rfid_alloc:
2815 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
2816err_rif_fdb_op:
2817 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
2818 false);
2819err_vport_rif_sp_op:
2820 mlxsw_sp_vr_put(vr);
2821 return ERR_PTR(err);
2822}
2823
2824static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
2825 struct mlxsw_sp_rif *rif)
2826{
2827 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2828 struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
2829 struct net_device *l3_dev = rif->dev;
2830 struct mlxsw_sp_fid *f = rif->f;
2831 u16 rif_index = rif->rif_index;
2832 u16 fid = f->fid;
2833
2834 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
2835
2836 vr->rif_count--;
2837 mlxsw_sp->rifs[rif_index] = NULL;
2838 f->rif = NULL;
2839
2840 kfree(rif);
2841
2842 kfree(f);
2843
2844 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
2845
2846 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
2847 false);
2848 mlxsw_sp_vr_put(vr);
2849}
2850
2851static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
2852 struct net_device *l3_dev)
2853{
2854 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2855 struct mlxsw_sp_rif *rif;
2856
2857 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
2858 if (!rif) {
2859 rif = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
2860 if (IS_ERR(rif))
2861 return PTR_ERR(rif);
2862 }
2863
2864 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, rif->f);
2865 rif->f->ref_count++;
2866
2867 netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", rif->f->fid);
2868
2869 return 0;
2870}
2871
2872static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
2873{
2874 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
2875
2876 netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
2877
2878 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
2879 if (--f->ref_count == 0)
2880 mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->rif);
2881}
2882
2883static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
2884 struct net_device *port_dev,
2885 unsigned long event, u16 vid)
2886{
2887 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
2888 struct mlxsw_sp_port *mlxsw_sp_vport;
2889
2890 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2891 if (WARN_ON(!mlxsw_sp_vport))
2892 return -EINVAL;
2893
2894 switch (event) {
2895 case NETDEV_UP:
2896 return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
2897 case NETDEV_DOWN:
2898 mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
2899 break;
2900 }
2901
2902 return 0;
2903}
2904
2905static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
2906 unsigned long event)
2907{
2908 if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
2909 return 0;
2910
2911 return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
2912}
2913
2914static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
2915 struct net_device *lag_dev,
2916 unsigned long event, u16 vid)
2917{
2918 struct net_device *port_dev;
2919 struct list_head *iter;
2920 int err;
2921
2922 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
2923 if (mlxsw_sp_port_dev_check(port_dev)) {
2924 err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
2925 event, vid);
2926 if (err)
2927 return err;
2928 }
2929 }
2930
2931 return 0;
2932}
2933
2934static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
2935 unsigned long event)
2936{
2937 if (netif_is_bridge_port(lag_dev))
2938 return 0;
2939
2940 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
2941}
2942
2943static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
2944 struct net_device *l3_dev)
2945{
2946 u16 fid;
2947
2948 if (is_vlan_dev(l3_dev))
2949 fid = vlan_dev_vlan_id(l3_dev);
2950 else if (mlxsw_sp->master_bridge.dev == l3_dev)
2951 fid = 1;
2952 else
2953 return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
2954
2955 return mlxsw_sp_fid_find(mlxsw_sp, fid);
2956}
2957
2958static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
2959{
2960 return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
2961 MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2962}
2963
2964static u16 mlxsw_sp_flood_table_index_get(u16 fid)
2965{
2966 return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
2967}
2968
2969static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
2970 bool set)
2971{
2972 enum mlxsw_flood_table_type table_type;
2973 char *sftr_pl;
2974 u16 index;
2975 int err;
2976
2977 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
2978 if (!sftr_pl)
2979 return -ENOMEM;
2980
2981 table_type = mlxsw_sp_flood_table_type_get(fid);
2982 index = mlxsw_sp_flood_table_index_get(fid);
2983 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
2984 1, MLXSW_PORT_ROUTER_PORT, set);
2985 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
2986
2987 kfree(sftr_pl);
2988 return err;
2989}
2990
2991static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
2992{
2993 if (mlxsw_sp_fid_is_vfid(fid))
2994 return MLXSW_REG_RITR_FID_IF;
2995 else
2996 return MLXSW_REG_RITR_VLAN_IF;
2997}
2998
2999static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
3000 struct net_device *l3_dev,
3001 u16 fid, u16 rif,
3002 bool create)
3003{
3004 enum mlxsw_reg_ritr_if_type rif_type;
3005 char ritr_pl[MLXSW_REG_RITR_LEN];
3006
3007 rif_type = mlxsw_sp_rif_type_get(fid);
3008 mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu,
3009 l3_dev->dev_addr);
3010 mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
3011
3012 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3013}
3014
3015static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
3016 struct net_device *l3_dev,
3017 struct mlxsw_sp_fid *f)
3018{
3019 u32 tb_id = l3mdev_fib_table(l3_dev);
3020 struct mlxsw_sp_rif *rif;
3021 struct mlxsw_sp_vr *vr;
3022 u16 rif_index;
3023 int err;
3024
3025 rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
3026 if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
3027 return -ERANGE;
3028
3029 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
3030 if (IS_ERR(vr))
3031 return PTR_ERR(vr);
3032
3033 err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
3034 if (err)
3035 goto err_port_flood_set;
3036
3037 err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid,
3038 rif_index, true);
3039 if (err)
3040 goto err_rif_bridge_op;
3041
3042 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
3043 if (err)
3044 goto err_rif_fdb_op;
3045
3046 rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
3047 if (!rif) {
3048 err = -ENOMEM;
3049 goto err_rif_alloc;
3050 }
3051
3052 f->rif = rif;
3053 mlxsw_sp->rifs[rif_index] = rif;
3054 vr->rif_count++;
3055
3056 netdev_dbg(l3_dev, "RIF=%d created\n", rif_index);
3057
3058 return 0;
3059
3060err_rif_alloc:
3061 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
3062err_rif_fdb_op:
3063 mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
3064 false);
3065err_rif_bridge_op:
3066 mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
3067err_port_flood_set:
3068 mlxsw_sp_vr_put(vr);
3069 return err;
3070}
3071
3072void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
3073 struct mlxsw_sp_rif *rif)
3074{
3075 struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
3076 struct net_device *l3_dev = rif->dev;
3077 struct mlxsw_sp_fid *f = rif->f;
3078 u16 rif_index = rif->rif_index;
3079
3080 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
3081
3082 vr->rif_count--;
3083 mlxsw_sp->rifs[rif_index] = NULL;
3084 f->rif = NULL;
3085
3086 kfree(rif);
3087
3088 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
3089
3090 mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
3091 false);
3092
3093 mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
3094
3095 mlxsw_sp_vr_put(vr);
3096
3097 netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif_index);
3098}
3099
3100static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
3101 struct net_device *br_dev,
3102 unsigned long event)
3103{
3104 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
3105 struct mlxsw_sp_fid *f;
3106
3107 /* FID can either be an actual FID if the L3 device is the
3108 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
3109 * L3 device is a VLAN-unaware bridge and we get a vFID.
3110 */
3111 f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
3112 if (WARN_ON(!f))
3113 return -EINVAL;
3114
3115 switch (event) {
3116 case NETDEV_UP:
3117 return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
3118 case NETDEV_DOWN:
3119 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
3120 break;
3121 }
3122
3123 return 0;
3124}
3125
3126static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
3127 unsigned long event)
3128{
3129 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3130 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
3131 u16 vid = vlan_dev_vlan_id(vlan_dev);
3132
3133 if (mlxsw_sp_port_dev_check(real_dev))
3134 return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
3135 vid);
3136 else if (netif_is_lag_master(real_dev))
3137 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
3138 vid);
3139 else if (netif_is_bridge_master(real_dev) &&
3140 mlxsw_sp->master_bridge.dev == real_dev)
3141 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
3142 event);
3143
3144 return 0;
3145}
3146
3147int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
3148 unsigned long event, void *ptr)
3149{
3150 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3151 struct net_device *dev = ifa->ifa_dev->dev;
3152 struct mlxsw_sp *mlxsw_sp;
3153 struct mlxsw_sp_rif *rif;
3154 int err = 0;
3155
3156 mlxsw_sp = mlxsw_sp_lower_get(dev);
3157 if (!mlxsw_sp)
3158 goto out;
3159
3160 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3161 if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event))
3162 goto out;
3163
3164 if (mlxsw_sp_port_dev_check(dev))
3165 err = mlxsw_sp_inetaddr_port_event(dev, event);
3166 else if (netif_is_lag_master(dev))
3167 err = mlxsw_sp_inetaddr_lag_event(dev, event);
3168 else if (netif_is_bridge_master(dev))
3169 err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
3170 else if (is_vlan_dev(dev))
3171 err = mlxsw_sp_inetaddr_vlan_event(dev, event);
3172
3173out:
3174 return notifier_from_errno(err);
3175}
3176
3177static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
3178 const char *mac, int mtu)
3179{
3180 char ritr_pl[MLXSW_REG_RITR_LEN];
3181 int err;
3182
3183 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
3184 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3185 if (err)
3186 return err;
3187
3188 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
3189 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
3190 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
3191 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3192}
3193
3194int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
3195{
3196 struct mlxsw_sp *mlxsw_sp;
3197 struct mlxsw_sp_rif *rif;
3198 int err;
3199
3200 mlxsw_sp = mlxsw_sp_lower_get(dev);
3201 if (!mlxsw_sp)
3202 return 0;
3203
3204 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3205 if (!rif)
3206 return 0;
3207
3208 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, false);
3209 if (err)
3210 return err;
3211
3212 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
3213 dev->mtu);
3214 if (err)
3215 goto err_rif_edit;
3216
3217 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, rif->f->fid, true);
3218 if (err)
3219 goto err_rif_fdb_op;
3220
3221 ether_addr_copy(rif->addr, dev->dev_addr);
3222 rif->mtu = dev->mtu;
3223
3224 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
3225
3226 return 0;
3227
3228err_rif_fdb_op:
3229 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
3230err_rif_edit:
3231 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, true);
3232 return err;
3233}
3234
3235int mlxsw_sp_vport_vrf_join(struct mlxsw_sp_port *mlxsw_sp_vport)
3236{
3237 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3238 struct net_device *dev = mlxsw_sp_vport->dev;
3239
3240 /* In case vPort already has a RIF, then we need to drop it.
3241 * A new one will be created using the VRF's VR.
3242 */
3243 if (f && f->rif)
3244 mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
3245
3246 return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, dev);
3247}
3248
3249void mlxsw_sp_vport_vrf_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
3250{
3251 mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
3252}
3253
3254int mlxsw_sp_port_vrf_join(struct mlxsw_sp_port *mlxsw_sp_port)
3255{
3256 struct mlxsw_sp_port *mlxsw_sp_vport;
3257
3258 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3259 if (WARN_ON(!mlxsw_sp_vport))
3260 return -EINVAL;
3261
3262 return mlxsw_sp_vport_vrf_join(mlxsw_sp_vport);
3263}
3264
3265void mlxsw_sp_port_vrf_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3266{
3267 struct mlxsw_sp_port *mlxsw_sp_vport;
3268
3269 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3270 if (WARN_ON(!mlxsw_sp_vport))
3271 return;
3272
3273 mlxsw_sp_vport_vrf_leave(mlxsw_sp_vport);
3274}
3275
3276int mlxsw_sp_bridge_vrf_join(struct mlxsw_sp *mlxsw_sp,
3277 struct net_device *l3_dev)
3278{
3279 struct mlxsw_sp_fid *f;
3280
3281 f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
3282 if (WARN_ON(!f))
3283 return -EINVAL;
3284
3285 if (f->rif)
3286 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
3287
3288 return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
3289}
3290
3291void mlxsw_sp_bridge_vrf_leave(struct mlxsw_sp *mlxsw_sp,
3292 struct net_device *l3_dev)
3293{
3294 struct mlxsw_sp_fid *f;
3295
3296 f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
3297 if (WARN_ON(!f))
3298 return;
3299 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
3300}
3301
2597static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb) 3302static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
2598{ 3303{
2599 struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb); 3304 struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
@@ -2606,6 +3311,48 @@ static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
2606 mlxsw_sp_router_fib_flush(mlxsw_sp); 3311 mlxsw_sp_router_fib_flush(mlxsw_sp);
2607} 3312}
2608 3313
3314static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
3315{
3316 char rgcr_pl[MLXSW_REG_RGCR_LEN];
3317 u64 max_rifs;
3318 int err;
3319
3320 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
3321 return -EIO;
3322
3323 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
3324 mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
3325 GFP_KERNEL);
3326 if (!mlxsw_sp->rifs)
3327 return -ENOMEM;
3328
3329 mlxsw_reg_rgcr_pack(rgcr_pl, true);
3330 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
3331 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
3332 if (err)
3333 goto err_rgcr_fail;
3334
3335 return 0;
3336
3337err_rgcr_fail:
3338 kfree(mlxsw_sp->rifs);
3339 return err;
3340}
3341
3342static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
3343{
3344 char rgcr_pl[MLXSW_REG_RGCR_LEN];
3345 int i;
3346
3347 mlxsw_reg_rgcr_pack(rgcr_pl, false);
3348 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
3349
3350 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
3351 WARN_ON_ONCE(mlxsw_sp->rifs[i]);
3352
3353 kfree(mlxsw_sp->rifs);
3354}
3355
2609int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) 3356int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
2610{ 3357{
2611 int err; 3358 int err;
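One behavioural change worth calling out from the router diff above: FIB rule notifications no longer abort offload unconditionally. The handler now aborts only for custom policy rules, on the reasoning that the kernel's default rules and l3mdev (VRF) rules do not change what the device can offload. A standalone sketch of that check follows, using stand-in fields rather than the kernel's struct fib_rule and fib4_rule_default() helper.

/* Illustrative sketch of the rule check added to the FIB event handler. */
#include <stdbool.h>
#include <stdio.h>

struct fib_rule_info {
        bool is_default;        /* one of the kernel's default rules */
        bool l3mdev;            /* VRF / l3mdev rule */
};

static bool rule_requires_abort(const struct fib_rule_info *rule)
{
        /* Only custom policy rules force the driver to stop offloading. */
        return !rule->is_default && !rule->l3mdev;
}

int main(void)
{
        struct fib_rule_info vrf_rule = { .is_default = false, .l3mdev = true };
        struct fib_rule_info custom   = { .is_default = false, .l3mdev = false };

        printf("vrf rule aborts offload: %d\n", rule_requires_abort(&vrf_rule));
        printf("custom rule aborts offload: %d\n", rule_requires_abort(&custom));
        return 0;
}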
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 598727d578c1..d44d92fe7ff3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -568,8 +568,8 @@ void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
568 568
569 list_del(&f->list); 569 list_del(&f->list);
570 570
571 if (f->r) 571 if (f->rif)
572 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); 572 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
573 573
574 kfree(f); 574 kfree(f);
575 575
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 279ee4612981..20358f87de57 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -212,25 +212,6 @@ static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
212} 212}
213 213
214/** 214/**
215 * ks8851_rx_1msg - select whether to use one or two messages for spi read
216 * @ks: The device structure
217 *
218 * Return whether to generate a single message with a tx and rx buffer
219 * supplied to spi_sync(), or alternatively send the tx and rx buffers
220 * as separate messages.
221 *
222 * Depending on the hardware in use, a single message may be more efficient
223 * on interrupts or work done by the driver.
224 *
225 * This currently always returns true until we add some per-device data passed
226 * from the platform code to specify which mode is better.
227 */
228static inline bool ks8851_rx_1msg(struct ks8851_net *ks)
229{
230 return true;
231}
232
233/**
234 * ks8851_rdreg - issue read register command and return the data 215 * ks8851_rdreg - issue read register command and return the data
235 * @ks: The device state 216 * @ks: The device state
236 * @op: The register address and byte enables in message format. 217 * @op: The register address and byte enables in message format.
@@ -251,14 +232,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
251 232
252 txb[0] = cpu_to_le16(op | KS_SPIOP_RD); 233 txb[0] = cpu_to_le16(op | KS_SPIOP_RD);
253 234
254 if (ks8851_rx_1msg(ks)) { 235 if (ks->spidev->master->flags & SPI_MASTER_HALF_DUPLEX) {
255 msg = &ks->spi_msg1;
256 xfer = &ks->spi_xfer1;
257
258 xfer->tx_buf = txb;
259 xfer->rx_buf = trx;
260 xfer->len = rxl + 2;
261 } else {
262 msg = &ks->spi_msg2; 236 msg = &ks->spi_msg2;
263 xfer = ks->spi_xfer2; 237 xfer = ks->spi_xfer2;
264 238
@@ -270,15 +244,22 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
270 xfer->tx_buf = NULL; 244 xfer->tx_buf = NULL;
271 xfer->rx_buf = trx; 245 xfer->rx_buf = trx;
272 xfer->len = rxl; 246 xfer->len = rxl;
247 } else {
248 msg = &ks->spi_msg1;
249 xfer = &ks->spi_xfer1;
250
251 xfer->tx_buf = txb;
252 xfer->rx_buf = trx;
253 xfer->len = rxl + 2;
273 } 254 }
274 255
275 ret = spi_sync(ks->spidev, msg); 256 ret = spi_sync(ks->spidev, msg);
276 if (ret < 0) 257 if (ret < 0)
277 netdev_err(ks->netdev, "read: spi_sync() failed\n"); 258 netdev_err(ks->netdev, "read: spi_sync() failed\n");
278 else if (ks8851_rx_1msg(ks)) 259 else if (ks->spidev->master->flags & SPI_MASTER_HALF_DUPLEX)
279 memcpy(rxb, trx + 2, rxl);
280 else
281 memcpy(rxb, trx, rxl); 260 memcpy(rxb, trx, rxl);
261 else
262 memcpy(rxb, trx + 2, rxl);
282} 263}
283 264
284/** 265/**
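The ks8851 hunk above drops the always-true ks8851_rx_1msg() helper and instead keys the read path off the SPI master's capabilities: a half-duplex master needs the command and the reply as two separate transfers, so the payload lands at the start of the receive buffer, while a full-duplex master clocks the reply in during the same transfer as the 2-byte command, so the payload starts two bytes in. A simplified standalone sketch of the resulting copy-out step (illustrative names, not the driver's):

/* Sketch of the payload extraction choice; CMD_LEN mirrors the 2-byte command. */
#include <stdbool.h>
#include <string.h>
#include <stdio.h>

#define CMD_LEN 2

static void extract_payload(bool half_duplex, const unsigned char *trx,
                            unsigned char *rxb, size_t rxl)
{
        if (half_duplex)
                memcpy(rxb, trx, rxl);           /* data from a separate RX transfer */
        else
                memcpy(rxb, trx + CMD_LEN, rxl); /* skip bytes clocked in with the command */
}

int main(void)
{
        unsigned char full_duplex_buf[] = { 0x00, 0x00, 0xAB, 0xCD }; /* cmd + data */
        unsigned char out[2];

        extract_payload(false, full_duplex_buf, out, sizeof(out));
        printf("payload: %02x%02x\n", out[0], out[1]);
        return 0;
}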
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index e614a376b595..4d45f4573b57 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -50,14 +50,14 @@
50 50
51#include "nfp_net_ctrl.h" 51#include "nfp_net_ctrl.h"
52 52
53#define nn_err(nn, fmt, args...) netdev_err((nn)->netdev, fmt, ## args) 53#define nn_err(nn, fmt, args...) netdev_err((nn)->dp.netdev, fmt, ## args)
54#define nn_warn(nn, fmt, args...) netdev_warn((nn)->netdev, fmt, ## args) 54#define nn_warn(nn, fmt, args...) netdev_warn((nn)->dp.netdev, fmt, ## args)
55#define nn_info(nn, fmt, args...) netdev_info((nn)->netdev, fmt, ## args) 55#define nn_info(nn, fmt, args...) netdev_info((nn)->dp.netdev, fmt, ## args)
56#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->netdev, fmt, ## args) 56#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->dp.netdev, fmt, ## args)
57#define nn_warn_ratelimit(nn, fmt, args...) \ 57#define nn_dp_warn(dp, fmt, args...) \
58 do { \ 58 do { \
59 if (unlikely(net_ratelimit())) \ 59 if (unlikely(net_ratelimit())) \
60 netdev_warn((nn)->netdev, fmt, ## args); \ 60 netdev_warn((dp)->netdev, fmt, ## args); \
61 } while (0) 61 } while (0)
62 62
63/* Max time to wait for NFP to respond on updates (in seconds) */ 63/* Max time to wait for NFP to respond on updates (in seconds) */
@@ -112,6 +112,7 @@
112 112
113/* Forward declarations */ 113/* Forward declarations */
114struct nfp_cpp; 114struct nfp_cpp;
115struct nfp_eth_table_port;
115struct nfp_net; 116struct nfp_net;
116struct nfp_net_r_vector; 117struct nfp_net_r_vector;
117 118
@@ -315,8 +316,6 @@ struct nfp_net_rx_buf {
315 * @rxds: Virtual address of FL/RX ring in host memory 316 * @rxds: Virtual address of FL/RX ring in host memory
316 * @dma: DMA address of the FL/RX ring 317 * @dma: DMA address of the FL/RX ring
317 * @size: Size, in bytes, of the FL/RX ring (needed to free) 318 * @size: Size, in bytes, of the FL/RX ring (needed to free)
318 * @bufsz: Buffer allocation size for convenience of management routines
319 * (NOTE: this is in second cache line, do not use on fast path!)
320 */ 319 */
321struct nfp_net_rx_ring { 320struct nfp_net_rx_ring {
322 struct nfp_net_r_vector *r_vec; 321 struct nfp_net_r_vector *r_vec;
@@ -338,7 +337,6 @@ struct nfp_net_rx_ring {
338 337
339 dma_addr_t dma; 338 dma_addr_t dma;
340 unsigned int size; 339 unsigned int size;
341 unsigned int bufsz;
342} ____cacheline_aligned; 340} ____cacheline_aligned;
343 341
344/** 342/**
@@ -433,19 +431,76 @@ struct nfp_stat_pair {
433}; 431};
434 432
435/** 433/**
436 * struct nfp_net - NFP network device structure 434 * struct nfp_net_dp - NFP network device datapath data structure
437 * @pdev: Backpointer to PCI device 435 * @dev: Backpointer to struct device
438 * @netdev: Backpointer to net_device structure 436 * @netdev: Backpointer to net_device structure
439 * @is_vf: Is the driver attached to a VF? 437 * @is_vf: Is the driver attached to a VF?
440 * @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf 438 * @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf
441 * @bpf_offload_xdp: Offloaded BPF program is XDP 439 * @bpf_offload_xdp: Offloaded BPF program is XDP
 442 * @ctrl: Local copy of the control register/word. 440 * @chained_metadata_format: Firmware will use new metadata format
443 * @fl_bufsz: Currently configured size of the freelist buffers 441 * @rx_dma_dir: Mapping direction for RX buffers
 442 * @rx_dma_off: Offset at which DMA packets are mapped (for XDP headroom)
444 * @rx_offset: Offset in the RX buffers where packet data starts 443 * @rx_offset: Offset in the RX buffers where packet data starts
444 * @ctrl: Local copy of the control register/word.
445 * @fl_bufsz: Currently configured size of the freelist buffers
445 * @xdp_prog: Installed XDP program 446 * @xdp_prog: Installed XDP program
446 * @fw_ver: Firmware version 447 * @tx_rings: Array of pre-allocated TX ring structures
448 * @rx_rings: Array of pre-allocated RX ring structures
449 * @ctrl_bar: Pointer to mapped control BAR
450 *
451 * @txd_cnt: Size of the TX ring in number of descriptors
452 * @rxd_cnt: Size of the RX ring in number of descriptors
453 * @num_r_vecs: Number of used ring vectors
454 * @num_tx_rings: Currently configured number of TX rings
455 * @num_stack_tx_rings: Number of TX rings used by the stack (not XDP)
456 * @num_rx_rings: Currently configured number of RX rings
457 * @mtu: Device MTU
458 */
459struct nfp_net_dp {
460 struct device *dev;
461 struct net_device *netdev;
462
463 u8 is_vf:1;
464 u8 bpf_offload_skip_sw:1;
465 u8 bpf_offload_xdp:1;
466 u8 chained_metadata_format:1;
467
468 u8 rx_dma_dir;
469 u8 rx_dma_off;
470
471 u8 rx_offset;
472
473 u32 ctrl;
474 u32 fl_bufsz;
475
476 struct bpf_prog *xdp_prog;
477
478 struct nfp_net_tx_ring *tx_rings;
479 struct nfp_net_rx_ring *rx_rings;
480
481 u8 __iomem *ctrl_bar;
482
483 /* Cold data follows */
484
485 unsigned int txd_cnt;
486 unsigned int rxd_cnt;
487
488 unsigned int num_r_vecs;
489
490 unsigned int num_tx_rings;
491 unsigned int num_stack_tx_rings;
492 unsigned int num_rx_rings;
493
494 unsigned int mtu;
495};
496
497/**
498 * struct nfp_net - NFP network device structure
499 * @dp: Datapath structure
500 * @fw_ver: Firmware version
447 * @cap: Capabilities advertised by the Firmware 501 * @cap: Capabilities advertised by the Firmware
448 * @max_mtu: Maximum supported MTU advertised by the Firmware 502
503 * @rss_hfunc: RSS selected hash function
449 * @rss_cfg: RSS configuration 504 * @rss_cfg: RSS configuration
450 * @rss_key: RSS secret key 505 * @rss_key: RSS secret key
451 * @rss_itbl: RSS indirection table 506 * @rss_itbl: RSS indirection table
@@ -454,17 +509,9 @@ struct nfp_stat_pair {
454 * @rx_filter_change: Jiffies when statistics last changed 509 * @rx_filter_change: Jiffies when statistics last changed
455 * @rx_filter_stats_timer: Timer for polling filter offload statistics 510 * @rx_filter_stats_timer: Timer for polling filter offload statistics
456 * @rx_filter_lock: Lock protecting timer state changes (teardown) 511 * @rx_filter_lock: Lock protecting timer state changes (teardown)
512 * @max_r_vecs: Number of allocated interrupt vectors for RX/TX
457 * @max_tx_rings: Maximum number of TX rings supported by the Firmware 513 * @max_tx_rings: Maximum number of TX rings supported by the Firmware
458 * @max_rx_rings: Maximum number of RX rings supported by the Firmware 514 * @max_rx_rings: Maximum number of RX rings supported by the Firmware
459 * @num_tx_rings: Currently configured number of TX rings
460 * @num_stack_tx_rings: Number of TX rings used by the stack (not XDP)
461 * @num_rx_rings: Currently configured number of RX rings
462 * @txd_cnt: Size of the TX ring in number of descriptors
463 * @rxd_cnt: Size of the RX ring in number of descriptors
464 * @tx_rings: Array of pre-allocated TX ring structures
465 * @rx_rings: Array of pre-allocated RX ring structures
466 * @max_r_vecs: Number of allocated interrupt vectors for RX/TX
467 * @num_r_vecs: Number of used ring vectors
468 * @r_vecs: Pre-allocated array of ring vectors 515 * @r_vecs: Pre-allocated array of ring vectors
469 * @irq_entries: Pre-allocated array of MSI-X entries 516 * @irq_entries: Pre-allocated array of MSI-X entries
470 * @lsc_handler: Handler for Link State Change interrupt 517 * @lsc_handler: Handler for Link State Change interrupt
@@ -488,36 +535,24 @@ struct nfp_stat_pair {
488 * @vxlan_ports: VXLAN ports for RX inner csum offload communicated to HW 535 * @vxlan_ports: VXLAN ports for RX inner csum offload communicated to HW
489 * @vxlan_usecnt: IPv4/IPv6 VXLAN port use counts 536 * @vxlan_usecnt: IPv4/IPv6 VXLAN port use counts
490 * @qcp_cfg: Pointer to QCP queue used for configuration notification 537 * @qcp_cfg: Pointer to QCP queue used for configuration notification
491 * @ctrl_bar: Pointer to mapped control BAR
492 * @tx_bar: Pointer to mapped TX queues 538 * @tx_bar: Pointer to mapped TX queues
493 * @rx_bar: Pointer to mapped FL/RX queues 539 * @rx_bar: Pointer to mapped FL/RX queues
494 * @debugfs_dir: Device directory in debugfs 540 * @debugfs_dir: Device directory in debugfs
495 * @ethtool_dump_flag: Ethtool dump flag 541 * @ethtool_dump_flag: Ethtool dump flag
496 * @port_list: Entry on device port list 542 * @port_list: Entry on device port list
543 * @pdev: Backpointer to PCI device
497 * @cpp: CPP device handle if available 544 * @cpp: CPP device handle if available
545 * @eth_port: Translated ETH Table port entry
498 */ 546 */
499struct nfp_net { 547struct nfp_net {
500 struct pci_dev *pdev; 548 struct nfp_net_dp dp;
501 struct net_device *netdev;
502
503 unsigned is_vf:1;
504 unsigned bpf_offload_skip_sw:1;
505 unsigned bpf_offload_xdp:1;
506
507 u32 ctrl;
508 u32 fl_bufsz;
509
510 u32 rx_offset;
511
512 struct bpf_prog *xdp_prog;
513
514 struct nfp_net_tx_ring *tx_rings;
515 struct nfp_net_rx_ring *rx_rings;
516 549
517 struct nfp_net_fw_version fw_ver; 550 struct nfp_net_fw_version fw_ver;
551
518 u32 cap; 552 u32 cap;
519 u32 max_mtu; 553 u32 max_mtu;
520 554
555 u8 rss_hfunc;
521 u32 rss_cfg; 556 u32 rss_cfg;
522 u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ]; 557 u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
523 u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ]; 558 u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];
@@ -530,18 +565,10 @@ struct nfp_net {
530 unsigned int max_tx_rings; 565 unsigned int max_tx_rings;
531 unsigned int max_rx_rings; 566 unsigned int max_rx_rings;
532 567
533 unsigned int num_tx_rings;
534 unsigned int num_stack_tx_rings;
535 unsigned int num_rx_rings;
536
537 int stride_tx; 568 int stride_tx;
538 int stride_rx; 569 int stride_rx;
539 570
540 int txd_cnt;
541 int rxd_cnt;
542
543 unsigned int max_r_vecs; 571 unsigned int max_r_vecs;
544 unsigned int num_r_vecs;
545 struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS]; 572 struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS];
546 struct msix_entry irq_entries[NFP_NET_MAX_IRQS]; 573 struct msix_entry irq_entries[NFP_NET_MAX_IRQS];
547 574
@@ -575,7 +602,6 @@ struct nfp_net {
575 602
576 u8 __iomem *qcp_cfg; 603 u8 __iomem *qcp_cfg;
577 604
578 u8 __iomem *ctrl_bar;
579 u8 __iomem *tx_bar; 605 u8 __iomem *tx_bar;
580 u8 __iomem *rx_bar; 606 u8 __iomem *rx_bar;
581 607
@@ -584,14 +610,10 @@ struct nfp_net {
584 610
585 struct list_head port_list; 611 struct list_head port_list;
586 612
613 struct pci_dev *pdev;
587 struct nfp_cpp *cpp; 614 struct nfp_cpp *cpp;
588};
589 615
590struct nfp_net_ring_set { 616 struct nfp_eth_table_port *eth_port;
591 unsigned int n_rings;
592 unsigned int mtu;
593 unsigned int dcnt;
594 void *rings;
595}; 617};
596 618
597/* Functions to read/write from/to a BAR 619/* Functions to read/write from/to a BAR
@@ -599,42 +621,42 @@ struct nfp_net_ring_set {
599 */ 621 */
600static inline u16 nn_readb(struct nfp_net *nn, int off) 622static inline u16 nn_readb(struct nfp_net *nn, int off)
601{ 623{
602 return readb(nn->ctrl_bar + off); 624 return readb(nn->dp.ctrl_bar + off);
603} 625}
604 626
605static inline void nn_writeb(struct nfp_net *nn, int off, u8 val) 627static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
606{ 628{
607 writeb(val, nn->ctrl_bar + off); 629 writeb(val, nn->dp.ctrl_bar + off);
608} 630}
609 631
610static inline u16 nn_readw(struct nfp_net *nn, int off) 632static inline u16 nn_readw(struct nfp_net *nn, int off)
611{ 633{
612 return readw(nn->ctrl_bar + off); 634 return readw(nn->dp.ctrl_bar + off);
613} 635}
614 636
615static inline void nn_writew(struct nfp_net *nn, int off, u16 val) 637static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
616{ 638{
617 writew(val, nn->ctrl_bar + off); 639 writew(val, nn->dp.ctrl_bar + off);
618} 640}
619 641
620static inline u32 nn_readl(struct nfp_net *nn, int off) 642static inline u32 nn_readl(struct nfp_net *nn, int off)
621{ 643{
622 return readl(nn->ctrl_bar + off); 644 return readl(nn->dp.ctrl_bar + off);
623} 645}
624 646
625static inline void nn_writel(struct nfp_net *nn, int off, u32 val) 647static inline void nn_writel(struct nfp_net *nn, int off, u32 val)
626{ 648{
627 writel(val, nn->ctrl_bar + off); 649 writel(val, nn->dp.ctrl_bar + off);
628} 650}
629 651
630static inline u64 nn_readq(struct nfp_net *nn, int off) 652static inline u64 nn_readq(struct nfp_net *nn, int off)
631{ 653{
632 return readq(nn->ctrl_bar + off); 654 return readq(nn->dp.ctrl_bar + off);
633} 655}
634 656
635static inline void nn_writeq(struct nfp_net *nn, int off, u64 val) 657static inline void nn_writeq(struct nfp_net *nn, int off, u64 val)
636{ 658{
637 writeq(val, nn->ctrl_bar + off); 659 writeq(val, nn->dp.ctrl_bar + off);
638} 660}
639 661
640/* Flush posted PCI writes by reading something without side effects */ 662/* Flush posted PCI writes by reading something without side effects */
@@ -776,6 +798,7 @@ void nfp_net_netdev_clean(struct net_device *netdev);
776void nfp_net_set_ethtool_ops(struct net_device *netdev); 798void nfp_net_set_ethtool_ops(struct net_device *netdev);
777void nfp_net_info(struct nfp_net *nn); 799void nfp_net_info(struct nfp_net *nn);
778int nfp_net_reconfig(struct nfp_net *nn, u32 update); 800int nfp_net_reconfig(struct nfp_net *nn, u32 update);
801unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
779void nfp_net_rss_write_itbl(struct nfp_net *nn); 802void nfp_net_rss_write_itbl(struct nfp_net *nn);
780void nfp_net_rss_write_key(struct nfp_net *nn); 803void nfp_net_rss_write_key(struct nfp_net *nn);
781void nfp_net_coalesce_write_cfg(struct nfp_net *nn); 804void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
@@ -787,9 +810,9 @@ void nfp_net_irqs_disable(struct pci_dev *pdev);
787void 810void
788nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries, 811nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
789 unsigned int n); 812 unsigned int n);
790int 813
791nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog, 814struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
792 struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx); 815int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new);
793 816
794#ifdef CONFIG_NFP_DEBUG 817#ifdef CONFIG_NFP_DEBUG
795void nfp_net_debugfs_create(void); 818void nfp_net_debugfs_create(void);
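The new prototypes above (nfp_net_clone_dp() and the two-argument nfp_net_ring_reconfig()) point at a prepare/commit pattern for datapath changes: copy the live nfp_net_dp, edit the copy, and hand it back to be swapped in. A minimal caller sketch follows, assuming only those two declarations; demo_set_ring_size() is hypothetical, and ownership of the clone after the call as well as any locking requirements are not shown in this hunk.

#include "nfp_net.h"

static int demo_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
{
	struct nfp_net_dp *new;

	new = nfp_net_clone_dp(nn);	/* snapshot of the live datapath */
	if (!new)
		return -ENOMEM;

	/* Mutate only the copy; nn->dp stays untouched until the reconfig
	 * helper accepts (or rejects) the new settings. */
	new->rxd_cnt = rxd_cnt;
	new->txd_cnt = txd_cnt;

	return nfp_net_ring_reconfig(nn, new);
}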
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 9179a99563af..f134f1808b9a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -41,6 +41,7 @@
41 * Chris Telfer <chris.telfer@netronome.com> 41 * Chris Telfer <chris.telfer@netronome.com>
42 */ 42 */
43 43
44#include <linux/bitfield.h>
44#include <linux/bpf.h> 45#include <linux/bpf.h>
45#include <linux/bpf_trace.h> 46#include <linux/bpf_trace.h>
46#include <linux/module.h> 47#include <linux/module.h>
@@ -66,6 +67,7 @@
66#include <net/pkt_cls.h> 67#include <net/pkt_cls.h>
67#include <net/vxlan.h> 68#include <net/vxlan.h>
68 69
70#include "nfpcore/nfp_nsp_eth.h"
69#include "nfp_net_ctrl.h" 71#include "nfp_net_ctrl.h"
70#include "nfp_net.h" 72#include "nfp_net.h"
71 73
@@ -83,20 +85,18 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
83 put_unaligned_le32(reg, fw_ver); 85 put_unaligned_le32(reg, fw_ver);
84} 86}
85 87
86static dma_addr_t 88static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
87nfp_net_dma_map_rx(struct nfp_net *nn, void *frag, unsigned int bufsz,
88 int direction)
89{ 89{
90 return dma_map_single(&nn->pdev->dev, frag + NFP_NET_RX_BUF_HEADROOM, 90 return dma_map_single(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
91 bufsz - NFP_NET_RX_BUF_NON_DATA, direction); 91 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
92 dp->rx_dma_dir);
92} 93}
93 94
94static void 95static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
95nfp_net_dma_unmap_rx(struct nfp_net *nn, dma_addr_t dma_addr,
96 unsigned int bufsz, int direction)
97{ 96{
98 dma_unmap_single(&nn->pdev->dev, dma_addr, 97 dma_unmap_single(dp->dev, dma_addr,
99 bufsz - NFP_NET_RX_BUF_NON_DATA, direction); 98 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
99 dp->rx_dma_dir);
100} 100}
101 101
102/* Firmware reconfig 102/* Firmware reconfig
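With the buffer size and DMA direction folded into struct nfp_net_dp, the RX map/unmap helpers above shrink to a single dp argument. The standalone sketch below shows the same pattern in isolation, assuming a frag laid out as [headroom][data]; the demo_ name and parameters are illustrative.

#include <linux/dma-mapping.h>

static dma_addr_t demo_map_rx_frag(struct device *dev, void *frag,
				   unsigned int headroom,
				   unsigned int data_sz, bool xdp)
{
	/* Map only the data area, skipping the build_skb()/XDP headroom;
	 * XDP may rewrite the packet, so it needs a bidirectional mapping. */
	return dma_map_single(dev, frag + headroom, data_sz,
			      xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
}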
@@ -327,19 +327,22 @@ void
327nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries, 327nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
328 unsigned int n) 328 unsigned int n)
329{ 329{
330 struct nfp_net_dp *dp = &nn->dp;
331
330 nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS; 332 nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
331 nn->num_r_vecs = nn->max_r_vecs; 333 dp->num_r_vecs = nn->max_r_vecs;
332 334
333 memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n); 335 memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);
334 336
335 if (nn->num_rx_rings > nn->num_r_vecs || 337 if (dp->num_rx_rings > dp->num_r_vecs ||
336 nn->num_tx_rings > nn->num_r_vecs) 338 dp->num_tx_rings > dp->num_r_vecs)
337 nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n", 339 nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n",
338 nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs); 340 dp->num_rx_rings, dp->num_tx_rings,
341 dp->num_r_vecs);
339 342
340 nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings); 343 dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
341 nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings); 344 dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
342 nn->num_stack_tx_rings = nn->num_tx_rings; 345 dp->num_stack_tx_rings = dp->num_tx_rings;
343} 346}
344 347
345/** 348/**
@@ -394,11 +397,11 @@ static void nfp_net_read_link_status(struct nfp_net *nn)
394 nn->link_up = link_up; 397 nn->link_up = link_up;
395 398
396 if (nn->link_up) { 399 if (nn->link_up) {
397 netif_carrier_on(nn->netdev); 400 netif_carrier_on(nn->dp.netdev);
398 netdev_info(nn->netdev, "NIC Link is Up\n"); 401 netdev_info(nn->dp.netdev, "NIC Link is Up\n");
399 } else { 402 } else {
400 netif_carrier_off(nn->netdev); 403 netif_carrier_off(nn->dp.netdev);
401 netdev_info(nn->netdev, "NIC Link is Down\n"); 404 netdev_info(nn->dp.netdev, "NIC Link is Down\n");
402 } 405 }
403out: 406out:
404 spin_unlock_irqrestore(&nn->link_status_lock, flags); 407 spin_unlock_irqrestore(&nn->link_status_lock, flags);
@@ -530,7 +533,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
530 533
531 entry = &nn->irq_entries[vector_idx]; 534 entry = &nn->irq_entries[vector_idx];
532 535
533 snprintf(name, name_sz, format, netdev_name(nn->netdev)); 536 snprintf(name, name_sz, format, netdev_name(nn->dp.netdev));
534 err = request_irq(entry->vector, handler, 0, name, nn); 537 err = request_irq(entry->vector, handler, 0, name, nn);
535 if (err) { 538 if (err) {
536 nn_err(nn, "Failed to request IRQ %d (err=%d).\n", 539 nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
@@ -617,7 +620,6 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
617 620
618/** 621/**
619 * nfp_net_tx_tso() - Set up Tx descriptor for LSO 622 * nfp_net_tx_tso() - Set up Tx descriptor for LSO
620 * @nn: NFP Net device
621 * @r_vec: per-ring structure 623 * @r_vec: per-ring structure
622 * @txbuf: Pointer to driver soft TX descriptor 624 * @txbuf: Pointer to driver soft TX descriptor
623 * @txd: Pointer to HW TX descriptor 625 * @txd: Pointer to HW TX descriptor
@@ -626,7 +628,7 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
626 * Set up Tx descriptor for LSO, do nothing for non-LSO skbs. 628 * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
627 * Return error on packet header greater than maximum supported LSO header size. 629 * Return error on packet header greater than maximum supported LSO header size.
628 */ 630 */
629static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, 631static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
630 struct nfp_net_tx_buf *txbuf, 632 struct nfp_net_tx_buf *txbuf,
631 struct nfp_net_tx_desc *txd, struct sk_buff *skb) 633 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
632{ 634{
@@ -657,7 +659,7 @@ static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
657 659
658/** 660/**
659 * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor 661 * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
660 * @nn: NFP Net device 662 * @dp: NFP Net data path struct
661 * @r_vec: per-ring structure 663 * @r_vec: per-ring structure
662 * @txbuf: Pointer to driver soft TX descriptor 664 * @txbuf: Pointer to driver soft TX descriptor
663 * @txd: Pointer to TX descriptor 665 * @txd: Pointer to TX descriptor
@@ -666,7 +668,8 @@ static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
666 * This function sets the TX checksum flags in the TX descriptor based 668 * This function sets the TX checksum flags in the TX descriptor based
667 * on the configuration and the protocol of the packet to be transmitted. 669 * on the configuration and the protocol of the packet to be transmitted.
668 */ 670 */
669static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, 671static void nfp_net_tx_csum(struct nfp_net_dp *dp,
672 struct nfp_net_r_vector *r_vec,
670 struct nfp_net_tx_buf *txbuf, 673 struct nfp_net_tx_buf *txbuf,
671 struct nfp_net_tx_desc *txd, struct sk_buff *skb) 674 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
672{ 675{
@@ -674,7 +677,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
674 struct iphdr *iph; 677 struct iphdr *iph;
675 u8 l4_hdr; 678 u8 l4_hdr;
676 679
677 if (!(nn->ctrl & NFP_NET_CFG_CTRL_TXCSUM)) 680 if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
678 return; 681 return;
679 682
680 if (skb->ip_summed != CHECKSUM_PARTIAL) 683 if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -693,8 +696,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
693 } else if (ipv6h->version == 6) { 696 } else if (ipv6h->version == 6) {
694 l4_hdr = ipv6h->nexthdr; 697 l4_hdr = ipv6h->nexthdr;
695 } else { 698 } else {
696 nn_warn_ratelimit(nn, "partial checksum but ipv=%x!\n", 699 nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
697 iph->version);
698 return; 700 return;
699 } 701 }
700 702
@@ -706,8 +708,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
706 txd->flags |= PCIE_DESC_TX_UDP_CSUM; 708 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
707 break; 709 break;
708 default: 710 default:
709 nn_warn_ratelimit(nn, "partial checksum but l4 proto=%x!\n", 711 nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
710 l4_hdr);
711 return; 712 return;
712 } 713 }
713 714
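The checksum hunks above only change where the offload enable bit and the ratelimited warning come from (dp->ctrl and nn_dp_warn()); the protocol decision itself is unchanged. For reference, a hedged sketch of that decision is below: the helper name is hypothetical, it assumes the network header is already set, and it returns the L4 protocol the caller would translate into the corresponding PCIE_DESC_TX_* flag (the UDP case is visible in the hunk above); 0 means no offload should be requested.

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>

static u8 demo_tx_csum_proto(const struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;			/* stack did not request offload */

	if (ip_hdr(skb)->version == 4)
		return ip_hdr(skb)->protocol;	/* e.g. IPPROTO_TCP, IPPROTO_UDP */
	if (ipv6_hdr(skb)->version == 6)
		return ipv6_hdr(skb)->nexthdr;

	return 0;				/* unknown L3 header: punt to the stack */
}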
@@ -737,27 +738,29 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
737{ 738{
738 struct nfp_net *nn = netdev_priv(netdev); 739 struct nfp_net *nn = netdev_priv(netdev);
739 const struct skb_frag_struct *frag; 740 const struct skb_frag_struct *frag;
740 struct nfp_net_r_vector *r_vec;
741 struct nfp_net_tx_desc *txd, txdg; 741 struct nfp_net_tx_desc *txd, txdg;
742 struct nfp_net_tx_buf *txbuf;
743 struct nfp_net_tx_ring *tx_ring; 742 struct nfp_net_tx_ring *tx_ring;
743 struct nfp_net_r_vector *r_vec;
744 struct nfp_net_tx_buf *txbuf;
744 struct netdev_queue *nd_q; 745 struct netdev_queue *nd_q;
746 struct nfp_net_dp *dp;
745 dma_addr_t dma_addr; 747 dma_addr_t dma_addr;
746 unsigned int fsize; 748 unsigned int fsize;
747 int f, nr_frags; 749 int f, nr_frags;
748 int wr_idx; 750 int wr_idx;
749 u16 qidx; 751 u16 qidx;
750 752
753 dp = &nn->dp;
751 qidx = skb_get_queue_mapping(skb); 754 qidx = skb_get_queue_mapping(skb);
752 tx_ring = &nn->tx_rings[qidx]; 755 tx_ring = &dp->tx_rings[qidx];
753 r_vec = tx_ring->r_vec; 756 r_vec = tx_ring->r_vec;
754 nd_q = netdev_get_tx_queue(nn->netdev, qidx); 757 nd_q = netdev_get_tx_queue(dp->netdev, qidx);
755 758
756 nr_frags = skb_shinfo(skb)->nr_frags; 759 nr_frags = skb_shinfo(skb)->nr_frags;
757 760
758 if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) { 761 if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
759 nn_warn_ratelimit(nn, "TX ring %d busy. wrp=%u rdp=%u\n", 762 nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
760 qidx, tx_ring->wr_p, tx_ring->rd_p); 763 qidx, tx_ring->wr_p, tx_ring->rd_p);
761 netif_tx_stop_queue(nd_q); 764 netif_tx_stop_queue(nd_q);
762 u64_stats_update_begin(&r_vec->tx_sync); 765 u64_stats_update_begin(&r_vec->tx_sync);
763 r_vec->tx_busy++; 766 r_vec->tx_busy++;
@@ -766,9 +769,9 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
766 } 769 }
767 770
768 /* Start with the head skbuf */ 771 /* Start with the head skbuf */
769 dma_addr = dma_map_single(&nn->pdev->dev, skb->data, skb_headlen(skb), 772 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
770 DMA_TO_DEVICE); 773 DMA_TO_DEVICE);
771 if (dma_mapping_error(&nn->pdev->dev, dma_addr)) 774 if (dma_mapping_error(dp->dev, dma_addr))
772 goto err_free; 775 goto err_free;
773 776
774 wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1); 777 wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
@@ -792,11 +795,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
792 txd->mss = 0; 795 txd->mss = 0;
793 txd->l4_offset = 0; 796 txd->l4_offset = 0;
794 797
795 nfp_net_tx_tso(nn, r_vec, txbuf, txd, skb); 798 nfp_net_tx_tso(r_vec, txbuf, txd, skb);
796 799
797 nfp_net_tx_csum(nn, r_vec, txbuf, txd, skb); 800 nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
798 801
799 if (skb_vlan_tag_present(skb) && nn->ctrl & NFP_NET_CFG_CTRL_TXVLAN) { 802 if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
800 txd->flags |= PCIE_DESC_TX_VLAN; 803 txd->flags |= PCIE_DESC_TX_VLAN;
801 txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb)); 804 txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
802 } 805 }
@@ -810,9 +813,9 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
810 frag = &skb_shinfo(skb)->frags[f]; 813 frag = &skb_shinfo(skb)->frags[f];
811 fsize = skb_frag_size(frag); 814 fsize = skb_frag_size(frag);
812 815
813 dma_addr = skb_frag_dma_map(&nn->pdev->dev, frag, 0, 816 dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
814 fsize, DMA_TO_DEVICE); 817 fsize, DMA_TO_DEVICE);
815 if (dma_mapping_error(&nn->pdev->dev, dma_addr)) 818 if (dma_mapping_error(dp->dev, dma_addr))
816 goto err_unmap; 819 goto err_unmap;
817 820
818 wr_idx = (wr_idx + 1) & (tx_ring->cnt - 1); 821 wr_idx = (wr_idx + 1) & (tx_ring->cnt - 1);
@@ -851,8 +854,7 @@ err_unmap:
851 --f; 854 --f;
852 while (f >= 0) { 855 while (f >= 0) {
853 frag = &skb_shinfo(skb)->frags[f]; 856 frag = &skb_shinfo(skb)->frags[f];
854 dma_unmap_page(&nn->pdev->dev, 857 dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
855 tx_ring->txbufs[wr_idx].dma_addr,
856 skb_frag_size(frag), DMA_TO_DEVICE); 858 skb_frag_size(frag), DMA_TO_DEVICE);
857 tx_ring->txbufs[wr_idx].skb = NULL; 859 tx_ring->txbufs[wr_idx].skb = NULL;
858 tx_ring->txbufs[wr_idx].dma_addr = 0; 860 tx_ring->txbufs[wr_idx].dma_addr = 0;
@@ -861,13 +863,13 @@ err_unmap:
861 if (wr_idx < 0) 863 if (wr_idx < 0)
862 wr_idx += tx_ring->cnt; 864 wr_idx += tx_ring->cnt;
863 } 865 }
864 dma_unmap_single(&nn->pdev->dev, tx_ring->txbufs[wr_idx].dma_addr, 866 dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
865 skb_headlen(skb), DMA_TO_DEVICE); 867 skb_headlen(skb), DMA_TO_DEVICE);
866 tx_ring->txbufs[wr_idx].skb = NULL; 868 tx_ring->txbufs[wr_idx].skb = NULL;
867 tx_ring->txbufs[wr_idx].dma_addr = 0; 869 tx_ring->txbufs[wr_idx].dma_addr = 0;
868 tx_ring->txbufs[wr_idx].fidx = -2; 870 tx_ring->txbufs[wr_idx].fidx = -2;
869err_free: 871err_free:
870 nn_warn_ratelimit(nn, "Failed to map DMA TX buffer\n"); 872 nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
871 u64_stats_update_begin(&r_vec->tx_sync); 873 u64_stats_update_begin(&r_vec->tx_sync);
872 r_vec->tx_errors++; 874 r_vec->tx_errors++;
873 u64_stats_update_end(&r_vec->tx_sync); 875 u64_stats_update_end(&r_vec->tx_sync);
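The nfp_net_tx() hunks above replace &nn->pdev->dev with dp->dev throughout the head/fragment mapping and the error unwind. The pattern itself, stripped of descriptor handling, is sketched below; demo_map_skb() is hypothetical and expects an addrs[] array with room for the head plus every fragment.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int demo_map_skb(struct device *dev, struct sk_buff *skb,
			dma_addr_t *addrs)
{
	int f, nr_frags = skb_shinfo(skb)->nr_frags;

	/* Linear head first. */
	addrs[0] = dma_map_single(dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addrs[0]))
		return -ENOMEM;

	/* Then every page fragment. */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		addrs[f + 1] = skb_frag_dma_map(dev, frag, 0,
						skb_frag_size(frag),
						DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addrs[f + 1]))
			goto err_unwind;
	}
	return 0;

err_unwind:
	/* Undo every fragment mapped so far, then the head. */
	while (--f >= 0)
		dma_unmap_page(dev, addrs[f + 1],
			       skb_frag_size(&skb_shinfo(skb)->frags[f]),
			       DMA_TO_DEVICE);
	dma_unmap_single(dev, addrs[0], skb_headlen(skb), DMA_TO_DEVICE);
	return -ENOMEM;
}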
@@ -884,7 +886,7 @@ err_free:
884static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) 886static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
885{ 887{
886 struct nfp_net_r_vector *r_vec = tx_ring->r_vec; 888 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
887 struct nfp_net *nn = r_vec->nfp_net; 889 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
888 const struct skb_frag_struct *frag; 890 const struct skb_frag_struct *frag;
889 struct netdev_queue *nd_q; 891 struct netdev_queue *nd_q;
890 u32 done_pkts = 0, done_bytes = 0; 892 u32 done_pkts = 0, done_bytes = 0;
@@ -918,8 +920,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
918 920
919 if (fidx == -1) { 921 if (fidx == -1) {
920 /* unmap head */ 922 /* unmap head */
921 dma_unmap_single(&nn->pdev->dev, 923 dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr,
922 tx_ring->txbufs[idx].dma_addr,
923 skb_headlen(skb), DMA_TO_DEVICE); 924 skb_headlen(skb), DMA_TO_DEVICE);
924 925
925 done_pkts += tx_ring->txbufs[idx].pkt_cnt; 926 done_pkts += tx_ring->txbufs[idx].pkt_cnt;
@@ -927,8 +928,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
927 } else { 928 } else {
928 /* unmap fragment */ 929 /* unmap fragment */
929 frag = &skb_shinfo(skb)->frags[fidx]; 930 frag = &skb_shinfo(skb)->frags[fidx];
930 dma_unmap_page(&nn->pdev->dev, 931 dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr,
931 tx_ring->txbufs[idx].dma_addr,
932 skb_frag_size(frag), DMA_TO_DEVICE); 932 skb_frag_size(frag), DMA_TO_DEVICE);
933 } 933 }
934 934
@@ -948,7 +948,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
948 r_vec->tx_pkts += done_pkts; 948 r_vec->tx_pkts += done_pkts;
949 u64_stats_update_end(&r_vec->tx_sync); 949 u64_stats_update_end(&r_vec->tx_sync);
950 950
951 nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx); 951 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
952 netdev_tx_completed_queue(nd_q, done_pkts, done_bytes); 952 netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
953 if (nfp_net_tx_ring_should_wake(tx_ring)) { 953 if (nfp_net_tx_ring_should_wake(tx_ring)) {
954 /* Make sure TX thread will see updated tx_ring->rd_p */ 954 /* Make sure TX thread will see updated tx_ring->rd_p */
@@ -966,7 +966,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
966static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring) 966static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
967{ 967{
968 struct nfp_net_r_vector *r_vec = tx_ring->r_vec; 968 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
969 struct nfp_net *nn = r_vec->nfp_net; 969 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
970 u32 done_pkts = 0, done_bytes = 0; 970 u32 done_pkts = 0, done_bytes = 0;
971 int idx, todo; 971 int idx, todo;
972 u32 qcp_rd_p; 972 u32 qcp_rd_p;
@@ -989,8 +989,7 @@ static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
989 if (!tx_ring->txbufs[idx].frag) 989 if (!tx_ring->txbufs[idx].frag)
990 continue; 990 continue;
991 991
992 nfp_net_dma_unmap_rx(nn, tx_ring->txbufs[idx].dma_addr, 992 nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[idx].dma_addr);
993 nn->fl_bufsz, DMA_BIDIRECTIONAL);
994 __free_page(virt_to_page(tx_ring->txbufs[idx].frag)); 993 __free_page(virt_to_page(tx_ring->txbufs[idx].frag));
995 994
996 done_pkts++; 995 done_pkts++;
@@ -1015,17 +1014,16 @@ static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
1015 1014
1016/** 1015/**
1017 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers 1016 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
1018 * @nn: NFP Net device 1017 * @dp: NFP Net data path struct
1019 * @tx_ring: TX ring structure 1018 * @tx_ring: TX ring structure
1020 * 1019 *
1021 * Assumes that the device is stopped 1020 * Assumes that the device is stopped
1022 */ 1021 */
1023static void 1022static void
1024nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring) 1023nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
1025{ 1024{
1026 struct nfp_net_r_vector *r_vec = tx_ring->r_vec; 1025 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1027 const struct skb_frag_struct *frag; 1026 const struct skb_frag_struct *frag;
1028 struct pci_dev *pdev = nn->pdev;
1029 struct netdev_queue *nd_q; 1027 struct netdev_queue *nd_q;
1030 1028
1031 while (tx_ring->rd_p != tx_ring->wr_p) { 1029 while (tx_ring->rd_p != tx_ring->wr_p) {
@@ -1036,8 +1034,7 @@ nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
1036 tx_buf = &tx_ring->txbufs[idx]; 1034 tx_buf = &tx_ring->txbufs[idx];
1037 1035
1038 if (tx_ring == r_vec->xdp_ring) { 1036 if (tx_ring == r_vec->xdp_ring) {
1039 nfp_net_dma_unmap_rx(nn, tx_buf->dma_addr, 1037 nfp_net_dma_unmap_rx(dp, tx_buf->dma_addr);
1040 nn->fl_bufsz, DMA_BIDIRECTIONAL);
1041 __free_page(virt_to_page(tx_ring->txbufs[idx].frag)); 1038 __free_page(virt_to_page(tx_ring->txbufs[idx].frag));
1042 } else { 1039 } else {
1043 struct sk_buff *skb = tx_ring->txbufs[idx].skb; 1040 struct sk_buff *skb = tx_ring->txbufs[idx].skb;
@@ -1045,13 +1042,13 @@ nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
1045 1042
1046 if (tx_buf->fidx == -1) { 1043 if (tx_buf->fidx == -1) {
1047 /* unmap head */ 1044 /* unmap head */
1048 dma_unmap_single(&pdev->dev, tx_buf->dma_addr, 1045 dma_unmap_single(dp->dev, tx_buf->dma_addr,
1049 skb_headlen(skb), 1046 skb_headlen(skb),
1050 DMA_TO_DEVICE); 1047 DMA_TO_DEVICE);
1051 } else { 1048 } else {
1052 /* unmap fragment */ 1049 /* unmap fragment */
1053 frag = &skb_shinfo(skb)->frags[tx_buf->fidx]; 1050 frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
1054 dma_unmap_page(&pdev->dev, tx_buf->dma_addr, 1051 dma_unmap_page(dp->dev, tx_buf->dma_addr,
1055 skb_frag_size(frag), 1052 skb_frag_size(frag),
1056 DMA_TO_DEVICE); 1053 DMA_TO_DEVICE);
1057 } 1054 }
@@ -1078,7 +1075,7 @@ nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
1078 if (tx_ring == r_vec->xdp_ring) 1075 if (tx_ring == r_vec->xdp_ring)
1079 return; 1076 return;
1080 1077
1081 nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx); 1078 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
1082 netdev_tx_reset_queue(nd_q); 1079 netdev_tx_reset_queue(nd_q);
1083} 1080}
1084 1081
@@ -1087,7 +1084,7 @@ static void nfp_net_tx_timeout(struct net_device *netdev)
1087 struct nfp_net *nn = netdev_priv(netdev); 1084 struct nfp_net *nn = netdev_priv(netdev);
1088 int i; 1085 int i;
1089 1086
1090 for (i = 0; i < nn->netdev->real_num_tx_queues; i++) { 1087 for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
1091 if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i))) 1088 if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
1092 continue; 1089 continue;
1093 nn_warn(nn, "TX timeout on ring: %d\n", i); 1090 nn_warn(nn, "TX timeout on ring: %d\n", i);
@@ -1098,16 +1095,17 @@ static void nfp_net_tx_timeout(struct net_device *netdev)
1098/* Receive processing 1095/* Receive processing
1099 */ 1096 */
1100static unsigned int 1097static unsigned int
1101nfp_net_calc_fl_bufsz(struct nfp_net *nn, unsigned int mtu) 1098nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
1102{ 1099{
1103 unsigned int fl_bufsz; 1100 unsigned int fl_bufsz;
1104 1101
1105 fl_bufsz = NFP_NET_RX_BUF_HEADROOM; 1102 fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
1106 if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) 1103 fl_bufsz += dp->rx_dma_off;
1104 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1107 fl_bufsz += NFP_NET_MAX_PREPEND; 1105 fl_bufsz += NFP_NET_MAX_PREPEND;
1108 else 1106 else
1109 fl_bufsz += nn->rx_offset; 1107 fl_bufsz += dp->rx_offset;
1110 fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + mtu; 1108 fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;
1111 1109
1112 fl_bufsz = SKB_DATA_ALIGN(fl_bufsz); 1110 fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
1113 fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1111 fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
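nfp_net_calc_fl_bufsz() above now reads everything from the dp and adds the new rx_dma_off term. A standalone restatement of the arithmetic follows, as a sketch: headroom, the XDP DMA offset, the metadata prepend (worst case when the offset is dynamic), Ethernet plus double-VLAN headers and the MTU, all sized so build_skb() can still append its skb_shared_info. Parameter names are illustrative.

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static unsigned int demo_fl_bufsz(unsigned int headroom, unsigned int dma_off,
				  unsigned int rx_offset, bool dynamic_offset,
				  unsigned int max_prepend, unsigned int mtu)
{
	unsigned int sz;

	sz = headroom + dma_off;
	sz += dynamic_offset ? max_prepend : rx_offset;
	sz += ETH_HLEN + VLAN_HLEN * 2 + mtu;	/* worst-case frame */
	sz = SKB_DATA_ALIGN(sz);
	sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return sz;
}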
@@ -1126,62 +1124,56 @@ nfp_net_free_frag(void *frag, bool xdp)
1126 1124
1127/** 1125/**
1128 * nfp_net_rx_alloc_one() - Allocate and map page frag for RX 1126 * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
1127 * @dp: NFP Net data path struct
1129 * @rx_ring: RX ring structure of the skb 1128 * @rx_ring: RX ring structure of the skb
1130 * @dma_addr: Pointer to storage for DMA address (output param) 1129 * @dma_addr: Pointer to storage for DMA address (output param)
1131 * @fl_bufsz: size of freelist buffers
1132 * @xdp: Whether XDP is enabled
1133 * 1130 *
 1134 * This function will allocate a new page frag and map it for DMA. 1131
1135 * 1132 *
1136 * Return: allocated page frag or NULL on failure. 1133 * Return: allocated page frag or NULL on failure.
1137 */ 1134 */
1138static void * 1135static void *
1139nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr, 1136nfp_net_rx_alloc_one(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
1140 unsigned int fl_bufsz, bool xdp) 1137 dma_addr_t *dma_addr)
1141{ 1138{
1142 struct nfp_net *nn = rx_ring->r_vec->nfp_net;
1143 int direction;
1144 void *frag; 1139 void *frag;
1145 1140
1146 if (!xdp) 1141 if (!dp->xdp_prog)
1147 frag = netdev_alloc_frag(fl_bufsz); 1142 frag = netdev_alloc_frag(dp->fl_bufsz);
1148 else 1143 else
1149 frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD)); 1144 frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD));
1150 if (!frag) { 1145 if (!frag) {
1151 nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n"); 1146 nn_dp_warn(dp, "Failed to alloc receive page frag\n");
1152 return NULL; 1147 return NULL;
1153 } 1148 }
1154 1149
1155 direction = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; 1150 *dma_addr = nfp_net_dma_map_rx(dp, frag);
1156 1151 if (dma_mapping_error(dp->dev, *dma_addr)) {
1157 *dma_addr = nfp_net_dma_map_rx(nn, frag, fl_bufsz, direction); 1152 nfp_net_free_frag(frag, dp->xdp_prog);
1158 if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) { 1153 nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
1159 nfp_net_free_frag(frag, xdp);
1160 nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
1161 return NULL; 1154 return NULL;
1162 } 1155 }
1163 1156
1164 return frag; 1157 return frag;
1165} 1158}
1166 1159
1167static void * 1160static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1168nfp_net_napi_alloc_one(struct nfp_net *nn, int direction, dma_addr_t *dma_addr)
1169{ 1161{
1170 void *frag; 1162 void *frag;
1171 1163
1172 if (!nn->xdp_prog) 1164 if (!dp->xdp_prog)
1173 frag = napi_alloc_frag(nn->fl_bufsz); 1165 frag = napi_alloc_frag(dp->fl_bufsz);
1174 else 1166 else
1175 frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD)); 1167 frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD));
1176 if (!frag) { 1168 if (!frag) {
1177 nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n"); 1169 nn_dp_warn(dp, "Failed to alloc receive page frag\n");
1178 return NULL; 1170 return NULL;
1179 } 1171 }
1180 1172
1181 *dma_addr = nfp_net_dma_map_rx(nn, frag, nn->fl_bufsz, direction); 1173 *dma_addr = nfp_net_dma_map_rx(dp, frag);
1182 if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) { 1174 if (dma_mapping_error(dp->dev, *dma_addr)) {
1183 nfp_net_free_frag(frag, nn->xdp_prog); 1175 nfp_net_free_frag(frag, dp->xdp_prog);
1184 nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n"); 1176 nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
1185 return NULL; 1177 return NULL;
1186 } 1178 }
1187 1179
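Both allocation helpers above now take the dp and decide between a page fragment and a full page based on dp->xdp_prog instead of explicit xdp/fl_bufsz arguments. The rule, in isolation and with hypothetical names, looks like this; the GFP flags mirror the process-context vs. NAPI-context callers seen above.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/skbuff.h>

static void *demo_alloc_rx_frag(unsigned int bufsz, bool xdp, bool in_napi)
{
	struct page *page;

	if (!xdp)
		return in_napi ? napi_alloc_frag(bufsz) :
				 netdev_alloc_frag(bufsz);

	/* XDP path: a whole page per buffer, so the program has room to
	 * adjust headers and the frame can be flipped onto the TX ring. */
	page = alloc_page(in_napi ? GFP_ATOMIC : GFP_KERNEL);
	return page ? page_address(page) : NULL;
}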
@@ -1190,11 +1182,13 @@ nfp_net_napi_alloc_one(struct nfp_net *nn, int direction, dma_addr_t *dma_addr)
1190 1182
1191/** 1183/**
1192 * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings 1184 * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
1185 * @dp: NFP Net data path struct
1193 * @rx_ring: RX ring structure 1186 * @rx_ring: RX ring structure
1194 * @frag: page fragment buffer 1187 * @frag: page fragment buffer
1195 * @dma_addr: DMA address of skb mapping 1188 * @dma_addr: DMA address of skb mapping
1196 */ 1189 */
1197static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring, 1190static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
1191 struct nfp_net_rx_ring *rx_ring,
1198 void *frag, dma_addr_t dma_addr) 1192 void *frag, dma_addr_t dma_addr)
1199{ 1193{
1200 unsigned int wr_idx; 1194 unsigned int wr_idx;
@@ -1208,7 +1202,8 @@ static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
1208 /* Fill freelist descriptor */ 1202 /* Fill freelist descriptor */
1209 rx_ring->rxds[wr_idx].fld.reserved = 0; 1203 rx_ring->rxds[wr_idx].fld.reserved = 0;
1210 rx_ring->rxds[wr_idx].fld.meta_len_dd = 0; 1204 rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
1211 nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, dma_addr); 1205 nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
1206 dma_addr + dp->rx_dma_off);
1212 1207
1213 rx_ring->wr_p++; 1208 rx_ring->wr_p++;
1214 rx_ring->wr_ptr_add++; 1209 rx_ring->wr_ptr_add++;
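The nfp_net_rx_give_one() change above is the core of the XDP-headroom support: software keeps the frag's base DMA address (that is what gets unmapped later), while the address written into the freelist descriptor is advanced by dp->rx_dma_off so the NIC deposits the packet after the reserved headroom. A tiny illustrative restatement, with made-up names and without the real descriptor layout:

#include <linux/types.h>

struct demo_rx_slot {
	void *frag;
	dma_addr_t dma_addr;		/* base address, used for dma_unmap */
};

static dma_addr_t demo_hw_rx_addr(const struct demo_rx_slot *slot,
				  unsigned int rx_dma_off)
{
	/* What the NIC is told to DMA into (cf. nfp_desc_set_dma_addr()
	 * above): base plus the headroom the XDP program may claim. */
	return slot->dma_addr + rx_dma_off;
}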
@@ -1249,19 +1244,17 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
1249 1244
1250/** 1245/**
1251 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring 1246 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
1252 * @nn: NFP Net device 1247 * @dp: NFP Net data path struct
1253 * @rx_ring: RX ring to remove buffers from 1248 * @rx_ring: RX ring to remove buffers from
1254 * @xdp: Whether XDP is enabled
1255 * 1249 *
1256 * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1) 1250 * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
1257 * entries. After device is disabled nfp_net_rx_ring_reset() must be called 1251 * entries. After device is disabled nfp_net_rx_ring_reset() must be called
1258 * to restore required ring geometry. 1252 * to restore required ring geometry.
1259 */ 1253 */
1260static void 1254static void
1261nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, 1255nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
1262 bool xdp) 1256 struct nfp_net_rx_ring *rx_ring)
1263{ 1257{
1264 int direction = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
1265 unsigned int i; 1258 unsigned int i;
1266 1259
1267 for (i = 0; i < rx_ring->cnt - 1; i++) { 1260 for (i = 0; i < rx_ring->cnt - 1; i++) {
@@ -1272,9 +1265,8 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
1272 if (!rx_ring->rxbufs[i].frag) 1265 if (!rx_ring->rxbufs[i].frag)
1273 continue; 1266 continue;
1274 1267
1275 nfp_net_dma_unmap_rx(nn, rx_ring->rxbufs[i].dma_addr, 1268 nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
1276 rx_ring->bufsz, direction); 1269 nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
1277 nfp_net_free_frag(rx_ring->rxbufs[i].frag, xdp);
1278 rx_ring->rxbufs[i].dma_addr = 0; 1270 rx_ring->rxbufs[i].dma_addr = 0;
1279 rx_ring->rxbufs[i].frag = NULL; 1271 rx_ring->rxbufs[i].frag = NULL;
1280 } 1272 }
@@ -1282,13 +1274,12 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
1282 1274
1283/** 1275/**
1284 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW) 1276 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
1285 * @nn: NFP Net device 1277 * @dp: NFP Net data path struct
 1286 * @rx_ring: RX ring to allocate buffers for 1278
1287 * @xdp: Whether XDP is enabled
1288 */ 1279 */
1289static int 1280static int
1290nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, 1281nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
1291 bool xdp) 1282 struct nfp_net_rx_ring *rx_ring)
1292{ 1283{
1293 struct nfp_net_rx_buf *rxbufs; 1284 struct nfp_net_rx_buf *rxbufs;
1294 unsigned int i; 1285 unsigned int i;
@@ -1297,10 +1288,9 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
1297 1288
1298 for (i = 0; i < rx_ring->cnt - 1; i++) { 1289 for (i = 0; i < rx_ring->cnt - 1; i++) {
1299 rxbufs[i].frag = 1290 rxbufs[i].frag =
1300 nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr, 1291 nfp_net_rx_alloc_one(dp, rx_ring, &rxbufs[i].dma_addr);
1301 rx_ring->bufsz, xdp);
1302 if (!rxbufs[i].frag) { 1292 if (!rxbufs[i].frag) {
1303 nfp_net_rx_ring_bufs_free(nn, rx_ring, xdp); 1293 nfp_net_rx_ring_bufs_free(dp, rx_ring);
1304 return -ENOMEM; 1294 return -ENOMEM;
1305 } 1295 }
1306 } 1296 }
@@ -1310,14 +1300,17 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
1310 1300
1311/** 1301/**
1312 * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW 1302 * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
1303 * @dp: NFP Net data path struct
1313 * @rx_ring: RX ring to fill 1304 * @rx_ring: RX ring to fill
1314 */ 1305 */
1315static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring) 1306static void
1307nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
1308 struct nfp_net_rx_ring *rx_ring)
1316{ 1309{
1317 unsigned int i; 1310 unsigned int i;
1318 1311
1319 for (i = 0; i < rx_ring->cnt - 1; i++) 1312 for (i = 0; i < rx_ring->cnt - 1; i++)
1320 nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].frag, 1313 nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
1321 rx_ring->rxbufs[i].dma_addr); 1314 rx_ring->rxbufs[i].dma_addr);
1322} 1315}
1323 1316
@@ -1337,17 +1330,18 @@ static int nfp_net_rx_csum_has_errors(u16 flags)
1337 1330
1338/** 1331/**
1339 * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags 1332 * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
1340 * @nn: NFP Net device 1333 * @dp: NFP Net data path struct
1341 * @r_vec: per-ring structure 1334 * @r_vec: per-ring structure
1342 * @rxd: Pointer to RX descriptor 1335 * @rxd: Pointer to RX descriptor
1343 * @skb: Pointer to SKB 1336 * @skb: Pointer to SKB
1344 */ 1337 */
1345static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, 1338static void nfp_net_rx_csum(struct nfp_net_dp *dp,
1339 struct nfp_net_r_vector *r_vec,
1346 struct nfp_net_rx_desc *rxd, struct sk_buff *skb) 1340 struct nfp_net_rx_desc *rxd, struct sk_buff *skb)
1347{ 1341{
1348 skb_checksum_none_assert(skb); 1342 skb_checksum_none_assert(skb);
1349 1343
1350 if (!(nn->netdev->features & NETIF_F_RXCSUM)) 1344 if (!(dp->netdev->features & NETIF_F_RXCSUM))
1351 return; 1345 return;
1352 1346
1353 if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) { 1347 if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
@@ -1398,24 +1392,21 @@ static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
1398 1392
1399static void 1393static void
1400nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb, 1394nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb,
1401 struct nfp_net_rx_desc *rxd) 1395 void *data, struct nfp_net_rx_desc *rxd)
1402{ 1396{
1403 struct nfp_net_rx_hash *rx_hash; 1397 struct nfp_net_rx_hash *rx_hash = data;
1404 1398
1405 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS)) 1399 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
1406 return; 1400 return;
1407 1401
1408 rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash));
1409
1410 nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type), 1402 nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type),
1411 &rx_hash->hash); 1403 &rx_hash->hash);
1412} 1404}
1413 1405
1414static void * 1406static void *
1415nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb, 1407nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
1416 int meta_len) 1408 void *data, int meta_len)
1417{ 1409{
1418 u8 *data = skb->data - meta_len;
1419 u32 meta_info; 1410 u32 meta_info;
1420 1411
1421 meta_info = get_unaligned_be32(data); 1412 meta_info = get_unaligned_be32(data);
@@ -1445,8 +1436,9 @@ nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
1445} 1436}
1446 1437
1447static void 1438static void
1448nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring, 1439nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
1449 struct nfp_net_rx_buf *rxbuf, struct sk_buff *skb) 1440 struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
1441 struct sk_buff *skb)
1450{ 1442{
1451 u64_stats_update_begin(&r_vec->rx_sync); 1443 u64_stats_update_begin(&r_vec->rx_sync);
1452 r_vec->rx_drops++; 1444 r_vec->rx_drops++;
@@ -1458,15 +1450,15 @@ nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring,
1458 if (skb && rxbuf && skb->head == rxbuf->frag) 1450 if (skb && rxbuf && skb->head == rxbuf->frag)
1459 page_ref_inc(virt_to_head_page(rxbuf->frag)); 1451 page_ref_inc(virt_to_head_page(rxbuf->frag));
1460 if (rxbuf) 1452 if (rxbuf)
1461 nfp_net_rx_give_one(rx_ring, rxbuf->frag, rxbuf->dma_addr); 1453 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
1462 if (skb) 1454 if (skb)
1463 dev_kfree_skb_any(skb); 1455 dev_kfree_skb_any(skb);
1464} 1456}
1465 1457
1466static bool 1458static bool
1467nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, 1459nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
1468 struct nfp_net_tx_ring *tx_ring, 1460 struct nfp_net_tx_ring *tx_ring,
1469 struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off, 1461 struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
1470 unsigned int pkt_len) 1462 unsigned int pkt_len)
1471{ 1463{
1472 struct nfp_net_tx_buf *txbuf; 1464 struct nfp_net_tx_buf *txbuf;
@@ -1476,16 +1468,16 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
1476 int wr_idx; 1468 int wr_idx;
1477 1469
1478 if (unlikely(nfp_net_tx_full(tx_ring, 1))) { 1470 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
1479 nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL); 1471 nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, NULL);
1480 return false; 1472 return false;
1481 } 1473 }
1482 1474
1483 new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr); 1475 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
1484 if (unlikely(!new_frag)) { 1476 if (unlikely(!new_frag)) {
1485 nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL); 1477 nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, NULL);
1486 return false; 1478 return false;
1487 } 1479 }
1488 nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr); 1480 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
1489 1481
1490 wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1); 1482 wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
1491 1483
@@ -1497,14 +1489,14 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
1497 txbuf->pkt_cnt = 1; 1489 txbuf->pkt_cnt = 1;
1498 txbuf->real_len = pkt_len; 1490 txbuf->real_len = pkt_len;
1499 1491
1500 dma_sync_single_for_device(&nn->pdev->dev, rxbuf->dma_addr + pkt_off, 1492 dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
1501 pkt_len, DMA_BIDIRECTIONAL); 1493 pkt_len, DMA_BIDIRECTIONAL);
1502 1494
1503 /* Build TX descriptor */ 1495 /* Build TX descriptor */
1504 txd = &tx_ring->txds[wr_idx]; 1496 txd = &tx_ring->txds[wr_idx];
1505 txd->offset_eop = PCIE_DESC_TX_EOP; 1497 txd->offset_eop = PCIE_DESC_TX_EOP;
1506 txd->dma_len = cpu_to_le16(pkt_len); 1498 txd->dma_len = cpu_to_le16(pkt_len);
1507 nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + pkt_off); 1499 nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
1508 txd->data_len = cpu_to_le16(pkt_len); 1500 txd->data_len = cpu_to_le16(pkt_len);
1509 1501
1510 txd->flags = 0; 1502 txd->flags = 0;
@@ -1516,14 +1508,24 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
1516 return true; 1508 return true;
1517} 1509}
1518 1510
1519static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len) 1511static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, void *hard_start,
1512 unsigned int *off, unsigned int *len)
1520{ 1513{
1521 struct xdp_buff xdp; 1514 struct xdp_buff xdp;
1515 void *orig_data;
1516 int ret;
1517
1518 xdp.data_hard_start = hard_start;
1519 xdp.data = data + *off;
1520 xdp.data_end = data + *off + *len;
1522 1521
1523 xdp.data = data; 1522 orig_data = xdp.data;
1524 xdp.data_end = data + len; 1523 ret = bpf_prog_run_xdp(prog, &xdp);
1525 1524
1526 return bpf_prog_run_xdp(prog, &xdp); 1525 *len -= xdp.data - orig_data;
1526 *off += xdp.data - orig_data;
1527
1528 return ret;
1527} 1529}
1528 1530
1529/** 1531/**
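nfp_net_run_xdp() above now carries data_hard_start and reports back how far the program moved the packet start, which is what makes bpf_xdp_adjust_head() usable with this driver. The sketch below restates the bookkeeping with the offset measured from hard_start (the driver measures it from the frag base; the delta arithmetic is identical) and is otherwise illustrative.

#include <linux/bpf.h>
#include <linux/filter.h>

static u32 demo_run_xdp(struct bpf_prog *prog, void *hard_start,
			unsigned int *off, unsigned int *len)
{
	struct xdp_buff xdp;
	void *orig_data;
	u32 act;

	xdp.data_hard_start = hard_start;	/* headroom starts here */
	xdp.data = hard_start + *off;
	xdp.data_end = xdp.data + *len;
	orig_data = xdp.data;

	act = bpf_prog_run_xdp(prog, &xdp);

	/* A positive delta means the program consumed bytes at the front
	 * (e.g. popped a header); a negative one means it grew the packet
	 * into the headroom.  Reflect that in the caller's view. */
	*len -= xdp.data - orig_data;
	*off += xdp.data - orig_data;

	return act;
}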
@@ -1540,27 +1542,27 @@ static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
1540static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) 1542static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1541{ 1543{
1542 struct nfp_net_r_vector *r_vec = rx_ring->r_vec; 1544 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1543 struct nfp_net *nn = r_vec->nfp_net; 1545 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
1544 struct nfp_net_tx_ring *tx_ring; 1546 struct nfp_net_tx_ring *tx_ring;
1545 struct bpf_prog *xdp_prog; 1547 struct bpf_prog *xdp_prog;
1546 unsigned int true_bufsz; 1548 unsigned int true_bufsz;
1547 struct sk_buff *skb; 1549 struct sk_buff *skb;
1548 int pkts_polled = 0; 1550 int pkts_polled = 0;
1549 int rx_dma_map_dir;
1550 int idx; 1551 int idx;
1551 1552
1552 rcu_read_lock(); 1553 rcu_read_lock();
1553 xdp_prog = READ_ONCE(nn->xdp_prog); 1554 xdp_prog = READ_ONCE(dp->xdp_prog);
1554 rx_dma_map_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; 1555 true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
1555 true_bufsz = xdp_prog ? PAGE_SIZE : nn->fl_bufsz;
1556 tx_ring = r_vec->xdp_ring; 1556 tx_ring = r_vec->xdp_ring;
1557 1557
1558 while (pkts_polled < budget) { 1558 while (pkts_polled < budget) {
1559 unsigned int meta_len, data_len, data_off, pkt_len, pkt_off; 1559 unsigned int meta_len, data_len, data_off, pkt_len;
1560 u8 meta_prepend[NFP_NET_MAX_PREPEND];
1560 struct nfp_net_rx_buf *rxbuf; 1561 struct nfp_net_rx_buf *rxbuf;
1561 struct nfp_net_rx_desc *rxd; 1562 struct nfp_net_rx_desc *rxd;
1562 dma_addr_t new_dma_addr; 1563 dma_addr_t new_dma_addr;
1563 void *new_frag; 1564 void *new_frag;
1565 u8 *meta;
1564 1566
1565 idx = rx_ring->rd_p & (rx_ring->cnt - 1); 1567 idx = rx_ring->rd_p & (rx_ring->cnt - 1);
1566 1568
@@ -1593,11 +1595,11 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1593 data_len = le16_to_cpu(rxd->rxd.data_len); 1595 data_len = le16_to_cpu(rxd->rxd.data_len);
1594 pkt_len = data_len - meta_len; 1596 pkt_len = data_len - meta_len;
1595 1597
1596 if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) 1598 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1597 pkt_off = meta_len; 1599 data_off = NFP_NET_RX_BUF_HEADROOM + meta_len;
1598 else 1600 else
1599 pkt_off = nn->rx_offset; 1601 data_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_offset;
1600 data_off = NFP_NET_RX_BUF_HEADROOM + pkt_off; 1602 data_off += dp->rx_dma_off;
1601 1603
1602 /* Stats update */ 1604 /* Stats update */
1603 u64_stats_update_begin(&r_vec->rx_sync); 1605 u64_stats_update_begin(&r_vec->rx_sync);
@@ -1605,30 +1607,55 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1605 r_vec->rx_bytes += pkt_len; 1607 r_vec->rx_bytes += pkt_len;
1606 u64_stats_update_end(&r_vec->rx_sync); 1608 u64_stats_update_end(&r_vec->rx_sync);
1607 1609
1610 /* Pointer to start of metadata */
1611 meta = rxbuf->frag + data_off - meta_len;
1612
1613 if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
1614 (dp->rx_offset && meta_len > dp->rx_offset))) {
1615 nn_dp_warn(dp, "oversized RX packet metadata %u\n",
1616 meta_len);
1617 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
1618 continue;
1619 }
1620
1608 if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF && 1621 if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
1609 nn->bpf_offload_xdp)) { 1622 dp->bpf_offload_xdp)) {
1623 unsigned int dma_off;
1624 void *hard_start;
1610 int act; 1625 int act;
1611 1626
1612 dma_sync_single_for_cpu(&nn->pdev->dev, 1627 hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
1613 rxbuf->dma_addr + pkt_off, 1628 dma_off = data_off - NFP_NET_RX_BUF_HEADROOM;
1614 pkt_len, DMA_BIDIRECTIONAL); 1629 dma_sync_single_for_cpu(dp->dev, rxbuf->dma_addr,
1615 act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off, 1630 dma_off + pkt_len,
1616 pkt_len); 1631 DMA_BIDIRECTIONAL);
1632
1633 /* Move prepend out of the way */
1634 if (xdp_prog->xdp_adjust_head) {
1635 memcpy(meta_prepend, meta, meta_len);
1636 meta = meta_prepend;
1637 }
1638
1639 act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start,
1640 &data_off, &pkt_len);
1617 switch (act) { 1641 switch (act) {
1618 case XDP_PASS: 1642 case XDP_PASS:
1619 break; 1643 break;
1620 case XDP_TX: 1644 case XDP_TX:
1621 if (unlikely(!nfp_net_tx_xdp_buf(nn, rx_ring, 1645 dma_off = data_off - NFP_NET_RX_BUF_HEADROOM;
1646 if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
1622 tx_ring, rxbuf, 1647 tx_ring, rxbuf,
1623 pkt_off, pkt_len))) 1648 dma_off,
1624 trace_xdp_exception(nn->netdev, xdp_prog, act); 1649 pkt_len)))
1650 trace_xdp_exception(dp->netdev,
1651 xdp_prog, act);
1625 continue; 1652 continue;
1626 default: 1653 default:
1627 bpf_warn_invalid_xdp_action(act); 1654 bpf_warn_invalid_xdp_action(act);
1628 case XDP_ABORTED: 1655 case XDP_ABORTED:
1629 trace_xdp_exception(nn->netdev, xdp_prog, act); 1656 trace_xdp_exception(dp->netdev, xdp_prog, act);
1630 case XDP_DROP: 1657 case XDP_DROP:
1631 nfp_net_rx_give_one(rx_ring, rxbuf->frag, 1658 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
1632 rxbuf->dma_addr); 1659 rxbuf->dma_addr);
1633 continue; 1660 continue;
1634 } 1661 }
@@ -1636,41 +1663,40 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1636 1663
1637 skb = build_skb(rxbuf->frag, true_bufsz); 1664 skb = build_skb(rxbuf->frag, true_bufsz);
1638 if (unlikely(!skb)) { 1665 if (unlikely(!skb)) {
1639 nfp_net_rx_drop(r_vec, rx_ring, rxbuf, NULL); 1666 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
1640 continue; 1667 continue;
1641 } 1668 }
1642 new_frag = nfp_net_napi_alloc_one(nn, rx_dma_map_dir, 1669 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
1643 &new_dma_addr);
1644 if (unlikely(!new_frag)) { 1670 if (unlikely(!new_frag)) {
1645 nfp_net_rx_drop(r_vec, rx_ring, rxbuf, skb); 1671 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
1646 continue; 1672 continue;
1647 } 1673 }
1648 1674
1649 nfp_net_dma_unmap_rx(nn, rxbuf->dma_addr, nn->fl_bufsz, 1675 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
1650 rx_dma_map_dir);
1651 1676
1652 nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr); 1677 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
1653 1678
1654 skb_reserve(skb, data_off); 1679 skb_reserve(skb, data_off);
1655 skb_put(skb, pkt_len); 1680 skb_put(skb, pkt_len);
1656 1681
1657 if (nn->fw_ver.major <= 3) { 1682 if (!dp->chained_metadata_format) {
1658 nfp_net_set_hash_desc(nn->netdev, skb, rxd); 1683 nfp_net_set_hash_desc(dp->netdev, skb, meta, rxd);
1659 } else if (meta_len) { 1684 } else if (meta_len) {
1660 void *end; 1685 void *end;
1661 1686
1662 end = nfp_net_parse_meta(nn->netdev, skb, meta_len); 1687 end = nfp_net_parse_meta(dp->netdev, skb, meta,
1663 if (unlikely(end != skb->data)) { 1688 meta_len);
1664 nn_warn_ratelimit(nn, "invalid RX packet metadata\n"); 1689 if (unlikely(end != meta + meta_len)) {
1665 nfp_net_rx_drop(r_vec, rx_ring, NULL, skb); 1690 nn_dp_warn(dp, "invalid RX packet metadata\n");
1691 nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
1666 continue; 1692 continue;
1667 } 1693 }
1668 } 1694 }
1669 1695
1670 skb_record_rx_queue(skb, rx_ring->idx); 1696 skb_record_rx_queue(skb, rx_ring->idx);
1671 skb->protocol = eth_type_trans(skb, nn->netdev); 1697 skb->protocol = eth_type_trans(skb, dp->netdev);
1672 1698
1673 nfp_net_rx_csum(nn, r_vec, rxd, skb); 1699 nfp_net_rx_csum(dp, r_vec, rxd, skb);
1674 1700
1675 if (rxd->rxd.flags & PCIE_DESC_RX_VLAN) 1701 if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
1676 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 1702 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
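The RX hunk above computes the packet start as follows (a restatement of the code shown, for easier reading; not part of the patch):

/*
 * data_off = NFP_NET_RX_BUF_HEADROOM
 *          + (rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC ? meta_len : rx_offset)
 *          + rx_dma_off               (extra headroom reserved for XDP)
 * meta     = frag + data_off - meta_len   (the prepend ends where the packet data starts)
 * packet   = frag + data_off, pkt_len bytes long
 */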
@@ -1707,10 +1733,9 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
1707 nfp_net_xdp_complete(r_vec->xdp_ring); 1733 nfp_net_xdp_complete(r_vec->xdp_ring);
1708 } 1734 }
1709 1735
1710 if (pkts_polled < budget) { 1736 if (pkts_polled < budget)
1711 napi_complete_done(napi, pkts_polled); 1737 if (napi_complete_done(napi, pkts_polled))
1712 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); 1738 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
1713 }
1714 1739
1715 return pkts_polled; 1740 return pkts_polled;
1716} 1741}
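The nfp_net_poll() hunk above adopts the idiom of unmasking the device interrupt only when napi_complete_done() reports that the NAPI instance is really done (it returns false when, for example, busy polling keeps it scheduled). A minimal sketch of that idiom, with hypothetical example_ring, example_clean_ring() and example_irq_unmask() helpers standing in for driver specifics:

#include <linux/netdevice.h>

struct example_ring {
	struct napi_struct napi;
	/* hypothetical ring state */
};

static int example_clean_ring(struct example_ring *ring, int budget)
{
	/* hypothetical: process up to @budget completed descriptors */
	return 0;
}

static void example_irq_unmask(struct example_ring *ring)
{
	/* hypothetical: re-enable this ring's interrupt */
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_ring *ring = container_of(napi, struct example_ring, napi);
	int done = example_clean_ring(ring, budget);

	/* Unmask the IRQ only when NAPI has truly finished; if
	 * napi_complete_done() returns false the poll function will be
	 * invoked again, so the interrupt must stay masked.
	 */
	if (done < budget)
		if (napi_complete_done(napi, done))
			example_irq_unmask(ring);

	return done;
}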
@@ -1725,13 +1750,12 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
1725static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring) 1750static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
1726{ 1751{
1727 struct nfp_net_r_vector *r_vec = tx_ring->r_vec; 1752 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1728 struct nfp_net *nn = r_vec->nfp_net; 1753 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
1729 struct pci_dev *pdev = nn->pdev;
1730 1754
1731 kfree(tx_ring->txbufs); 1755 kfree(tx_ring->txbufs);
1732 1756
1733 if (tx_ring->txds) 1757 if (tx_ring->txds)
1734 dma_free_coherent(&pdev->dev, tx_ring->size, 1758 dma_free_coherent(dp->dev, tx_ring->size,
1735 tx_ring->txds, tx_ring->dma); 1759 tx_ring->txds, tx_ring->dma);
1736 1760
1737 tx_ring->cnt = 0; 1761 tx_ring->cnt = 0;
@@ -1743,24 +1767,23 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
1743 1767
1744/** 1768/**
1745 * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring 1769 * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
1770 * @dp: NFP Net data path struct
1746 * @tx_ring: TX Ring structure to allocate 1771 * @tx_ring: TX Ring structure to allocate
1747 * @cnt: Ring buffer count
1748 * @is_xdp: True if ring will be used for XDP 1772 * @is_xdp: True if ring will be used for XDP
1749 * 1773 *
1750 * Return: 0 on success, negative errno otherwise. 1774 * Return: 0 on success, negative errno otherwise.
1751 */ 1775 */
1752static int 1776static int
1753nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp) 1777nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring,
1778 bool is_xdp)
1754{ 1779{
1755 struct nfp_net_r_vector *r_vec = tx_ring->r_vec; 1780 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1756 struct nfp_net *nn = r_vec->nfp_net;
1757 struct pci_dev *pdev = nn->pdev;
1758 int sz; 1781 int sz;
1759 1782
1760 tx_ring->cnt = cnt; 1783 tx_ring->cnt = dp->txd_cnt;
1761 1784
1762 tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt; 1785 tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
1763 tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size, 1786 tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
1764 &tx_ring->dma, GFP_KERNEL); 1787 &tx_ring->dma, GFP_KERNEL);
1765 if (!tx_ring->txds) 1788 if (!tx_ring->txds)
1766 goto err_alloc; 1789 goto err_alloc;
@@ -1771,14 +1794,9 @@ nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp)
1771 goto err_alloc; 1794 goto err_alloc;
1772 1795
1773 if (!is_xdp) 1796 if (!is_xdp)
1774 netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, 1797 netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
1775 tx_ring->idx); 1798 tx_ring->idx);
1776 1799
1777 nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p %s\n",
1778 tx_ring->idx, tx_ring->qcidx,
1779 tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds,
1780 is_xdp ? "XDP" : "");
1781
1782 return 0; 1800 return 0;
1783 1801
1784err_alloc: 1802err_alloc:
@@ -1786,62 +1804,45 @@ err_alloc:
1786 return -ENOMEM; 1804 return -ENOMEM;
1787} 1805}
1788 1806
1789static struct nfp_net_tx_ring * 1807static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
1790nfp_net_tx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s,
1791 unsigned int num_stack_tx_rings)
1792{ 1808{
1793 struct nfp_net_tx_ring *rings;
1794 unsigned int r; 1809 unsigned int r;
1795 1810
1796 rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL); 1811 dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
1797 if (!rings) 1812 GFP_KERNEL);
1798 return NULL; 1813 if (!dp->tx_rings)
1814 return -ENOMEM;
1799 1815
1800 for (r = 0; r < s->n_rings; r++) { 1816 for (r = 0; r < dp->num_tx_rings; r++) {
1801 int bias = 0; 1817 int bias = 0;
1802 1818
1803 if (r >= num_stack_tx_rings) 1819 if (r >= dp->num_stack_tx_rings)
1804 bias = num_stack_tx_rings; 1820 bias = dp->num_stack_tx_rings;
1805 1821
1806 nfp_net_tx_ring_init(&rings[r], &nn->r_vecs[r - bias], r); 1822 nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias],
1823 r);
1807 1824
1808 if (nfp_net_tx_ring_alloc(&rings[r], s->dcnt, bias)) 1825 if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r], bias))
1809 goto err_free_prev; 1826 goto err_free_prev;
1810 } 1827 }
1811 1828
1812 return s->rings = rings; 1829 return 0;
1813 1830
1814err_free_prev: 1831err_free_prev:
1815 while (r--) 1832 while (r--)
1816 nfp_net_tx_ring_free(&rings[r]); 1833 nfp_net_tx_ring_free(&dp->tx_rings[r]);
1817 kfree(rings); 1834 kfree(dp->tx_rings);
1818 return NULL; 1835 return -ENOMEM;
1819}
1820
1821static void
1822nfp_net_tx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
1823{
1824 struct nfp_net_ring_set new = *s;
1825
1826 s->dcnt = nn->txd_cnt;
1827 s->rings = nn->tx_rings;
1828 s->n_rings = nn->num_tx_rings;
1829
1830 nn->txd_cnt = new.dcnt;
1831 nn->tx_rings = new.rings;
1832 nn->num_tx_rings = new.n_rings;
1833} 1836}
1834 1837
1835static void 1838static void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
1836nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s)
1837{ 1839{
1838 struct nfp_net_tx_ring *rings = s->rings;
1839 unsigned int r; 1840 unsigned int r;
1840 1841
1841 for (r = 0; r < s->n_rings; r++) 1842 for (r = 0; r < dp->num_tx_rings; r++)
1842 nfp_net_tx_ring_free(&rings[r]); 1843 nfp_net_tx_ring_free(&dp->tx_rings[r]);
1843 1844
1844 kfree(rings); 1845 kfree(dp->tx_rings);
1845} 1846}
1846 1847
1847/** 1848/**
@@ -1851,13 +1852,12 @@ nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s)
1851static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) 1852static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
1852{ 1853{
1853 struct nfp_net_r_vector *r_vec = rx_ring->r_vec; 1854 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1854 struct nfp_net *nn = r_vec->nfp_net; 1855 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
1855 struct pci_dev *pdev = nn->pdev;
1856 1856
1857 kfree(rx_ring->rxbufs); 1857 kfree(rx_ring->rxbufs);
1858 1858
1859 if (rx_ring->rxds) 1859 if (rx_ring->rxds)
1860 dma_free_coherent(&pdev->dev, rx_ring->size, 1860 dma_free_coherent(dp->dev, rx_ring->size,
1861 rx_ring->rxds, rx_ring->dma); 1861 rx_ring->rxds, rx_ring->dma);
1862 1862
1863 rx_ring->cnt = 0; 1863 rx_ring->cnt = 0;
@@ -1869,26 +1869,19 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
1869 1869
1870/** 1870/**
1871 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring 1871 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
1872 * @dp: NFP Net data path struct
1872 * @rx_ring: RX ring to allocate 1873 * @rx_ring: RX ring to allocate
1873 * @fl_bufsz: Size of buffers to allocate
1874 * @cnt: Ring buffer count
1875 * 1874 *
1876 * Return: 0 on success, negative errno otherwise. 1875 * Return: 0 on success, negative errno otherwise.
1877 */ 1876 */
1878static int 1877static int
1879nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz, 1878nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
1880 u32 cnt)
1881{ 1879{
1882 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1883 struct nfp_net *nn = r_vec->nfp_net;
1884 struct pci_dev *pdev = nn->pdev;
1885 int sz; 1880 int sz;
1886 1881
1887 rx_ring->cnt = cnt; 1882 rx_ring->cnt = dp->rxd_cnt;
1888 rx_ring->bufsz = fl_bufsz;
1889
1890 rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt; 1883 rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
1891 rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size, 1884 rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
1892 &rx_ring->dma, GFP_KERNEL); 1885 &rx_ring->dma, GFP_KERNEL);
1893 if (!rx_ring->rxds) 1886 if (!rx_ring->rxds)
1894 goto err_alloc; 1887 goto err_alloc;
@@ -1898,10 +1891,6 @@ nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
1898 if (!rx_ring->rxbufs) 1891 if (!rx_ring->rxbufs)
1899 goto err_alloc; 1892 goto err_alloc;
1900 1893
1901 nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n",
1902 rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx,
1903 rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds);
1904
1905 return 0; 1894 return 0;
1906 1895
1907err_alloc: 1896err_alloc:
@@ -1909,82 +1898,59 @@ err_alloc:
1909 return -ENOMEM; 1898 return -ENOMEM;
1910} 1899}
1911 1900
1912static struct nfp_net_rx_ring * 1901static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
1913nfp_net_rx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s,
1914 bool xdp)
1915{ 1902{
1916 unsigned int fl_bufsz = nfp_net_calc_fl_bufsz(nn, s->mtu);
1917 struct nfp_net_rx_ring *rings;
1918 unsigned int r; 1903 unsigned int r;
1919 1904
1920 rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL); 1905 dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
1921 if (!rings) 1906 GFP_KERNEL);
1922 return NULL; 1907 if (!dp->rx_rings)
1908 return -ENOMEM;
1923 1909
1924 for (r = 0; r < s->n_rings; r++) { 1910 for (r = 0; r < dp->num_rx_rings; r++) {
1925 nfp_net_rx_ring_init(&rings[r], &nn->r_vecs[r], r); 1911 nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);
1926 1912
1927 if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, s->dcnt)) 1913 if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
1928 goto err_free_prev; 1914 goto err_free_prev;
1929 1915
1930 if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r], xdp)) 1916 if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
1931 goto err_free_ring; 1917 goto err_free_ring;
1932 } 1918 }
1933 1919
1934 return s->rings = rings; 1920 return 0;
1935 1921
1936err_free_prev: 1922err_free_prev:
1937 while (r--) { 1923 while (r--) {
1938 nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp); 1924 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
1939err_free_ring: 1925err_free_ring:
1940 nfp_net_rx_ring_free(&rings[r]); 1926 nfp_net_rx_ring_free(&dp->rx_rings[r]);
1941 } 1927 }
1942 kfree(rings); 1928 kfree(dp->rx_rings);
1943 return NULL; 1929 return -ENOMEM;
1944}
1945
1946static void
1947nfp_net_rx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
1948{
1949 struct nfp_net_ring_set new = *s;
1950
1951 s->mtu = nn->netdev->mtu;
1952 s->dcnt = nn->rxd_cnt;
1953 s->rings = nn->rx_rings;
1954 s->n_rings = nn->num_rx_rings;
1955
1956 nn->netdev->mtu = new.mtu;
1957 nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, new.mtu);
1958 nn->rxd_cnt = new.dcnt;
1959 nn->rx_rings = new.rings;
1960 nn->num_rx_rings = new.n_rings;
1961} 1930}
1962 1931
1963static void 1932static void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
1964nfp_net_rx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s,
1965 bool xdp)
1966{ 1933{
1967 struct nfp_net_rx_ring *rings = s->rings;
1968 unsigned int r; 1934 unsigned int r;
1969 1935
1970 for (r = 0; r < s->n_rings; r++) { 1936 for (r = 0; r < dp->num_rx_rings; r++) {
1971 nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp); 1937 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
1972 nfp_net_rx_ring_free(&rings[r]); 1938 nfp_net_rx_ring_free(&dp->rx_rings[r]);
1973 } 1939 }
1974 1940
1975 kfree(rings); 1941 kfree(dp->rx_rings);
1976} 1942}
1977 1943
1978static void 1944static void
1979nfp_net_vector_assign_rings(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, 1945nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
1980 int idx) 1946 struct nfp_net_r_vector *r_vec, int idx)
1981{ 1947{
1982 r_vec->rx_ring = idx < nn->num_rx_rings ? &nn->rx_rings[idx] : NULL; 1948 r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
1983 r_vec->tx_ring = 1949 r_vec->tx_ring =
1984 idx < nn->num_stack_tx_rings ? &nn->tx_rings[idx] : NULL; 1950 idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL;
1985 1951
1986 r_vec->xdp_ring = idx < nn->num_tx_rings - nn->num_stack_tx_rings ? 1952 r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
1987 &nn->tx_rings[nn->num_stack_tx_rings + idx] : NULL; 1953 &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;
1988} 1954}
1989 1955
1990static int 1956static int
@@ -1994,11 +1960,11 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1994 int err; 1960 int err;
1995 1961
1996 /* Setup NAPI */ 1962 /* Setup NAPI */
1997 netif_napi_add(nn->netdev, &r_vec->napi, 1963 netif_napi_add(nn->dp.netdev, &r_vec->napi,
1998 nfp_net_poll, NAPI_POLL_WEIGHT); 1964 nfp_net_poll, NAPI_POLL_WEIGHT);
1999 1965
2000 snprintf(r_vec->name, sizeof(r_vec->name), 1966 snprintf(r_vec->name, sizeof(r_vec->name),
2001 "%s-rxtx-%d", nn->netdev->name, idx); 1967 "%s-rxtx-%d", nn->dp.netdev->name, idx);
2002 err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name, 1968 err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
2003 r_vec); 1969 r_vec);
2004 if (err) { 1970 if (err) {
@@ -2045,7 +2011,7 @@ void nfp_net_rss_write_key(struct nfp_net *nn)
2045{ 2011{
2046 int i; 2012 int i;
2047 2013
2048 for (i = 0; i < NFP_NET_CFG_RSS_KEY_SZ; i += 4) 2014 for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4)
2049 nn_writel(nn, NFP_NET_CFG_RSS_KEY + i, 2015 nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
2050 get_unaligned_le32(nn->rss_key + i)); 2016 get_unaligned_le32(nn->rss_key + i));
2051} 2017}
@@ -2069,13 +2035,13 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
2069 /* copy RX interrupt coalesce parameters */ 2035 /* copy RX interrupt coalesce parameters */
2070 value = (nn->rx_coalesce_max_frames << 16) | 2036 value = (nn->rx_coalesce_max_frames << 16) |
2071 (factor * nn->rx_coalesce_usecs); 2037 (factor * nn->rx_coalesce_usecs);
2072 for (i = 0; i < nn->num_rx_rings; i++) 2038 for (i = 0; i < nn->dp.num_rx_rings; i++)
2073 nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value); 2039 nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
2074 2040
2075 /* copy TX interrupt coalesce parameters */ 2041 /* copy TX interrupt coalesce parameters */
2076 value = (nn->tx_coalesce_max_frames << 16) | 2042 value = (nn->tx_coalesce_max_frames << 16) |
2077 (factor * nn->tx_coalesce_usecs); 2043 (factor * nn->tx_coalesce_usecs);
2078 for (i = 0; i < nn->num_tx_rings; i++) 2044 for (i = 0; i < nn->dp.num_tx_rings; i++)
2079 nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value); 2045 nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
2080} 2046}
2081 2047
@@ -2090,9 +2056,9 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
2090static void nfp_net_write_mac_addr(struct nfp_net *nn) 2056static void nfp_net_write_mac_addr(struct nfp_net *nn)
2091{ 2057{
2092 nn_writel(nn, NFP_NET_CFG_MACADDR + 0, 2058 nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
2093 get_unaligned_be32(nn->netdev->dev_addr)); 2059 get_unaligned_be32(nn->dp.netdev->dev_addr));
2094 nn_writew(nn, NFP_NET_CFG_MACADDR + 6, 2060 nn_writew(nn, NFP_NET_CFG_MACADDR + 6,
2095 get_unaligned_be16(nn->netdev->dev_addr + 4)); 2061 get_unaligned_be16(nn->dp.netdev->dev_addr + 4));
2096} 2062}
2097 2063
2098static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx) 2064static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
@@ -2116,7 +2082,7 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
2116 unsigned int r; 2082 unsigned int r;
2117 int err; 2083 int err;
2118 2084
2119 new_ctrl = nn->ctrl; 2085 new_ctrl = nn->dp.ctrl;
2120 new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE; 2086 new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
2121 update = NFP_NET_CFG_UPDATE_GEN; 2087 update = NFP_NET_CFG_UPDATE_GEN;
2122 update |= NFP_NET_CFG_UPDATE_MSIX; 2088 update |= NFP_NET_CFG_UPDATE_MSIX;
@@ -2133,14 +2099,14 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
2133 if (err) 2099 if (err)
2134 nn_err(nn, "Could not disable device: %d\n", err); 2100 nn_err(nn, "Could not disable device: %d\n", err);
2135 2101
2136 for (r = 0; r < nn->num_rx_rings; r++) 2102 for (r = 0; r < nn->dp.num_rx_rings; r++)
2137 nfp_net_rx_ring_reset(&nn->rx_rings[r]); 2103 nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
2138 for (r = 0; r < nn->num_tx_rings; r++) 2104 for (r = 0; r < nn->dp.num_tx_rings; r++)
2139 nfp_net_tx_ring_reset(nn, &nn->tx_rings[r]); 2105 nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
2140 for (r = 0; r < nn->num_r_vecs; r++) 2106 for (r = 0; r < nn->dp.num_r_vecs; r++)
2141 nfp_net_vec_clear_ring_data(nn, r); 2107 nfp_net_vec_clear_ring_data(nn, r);
2142 2108
2143 nn->ctrl = new_ctrl; 2109 nn->dp.ctrl = new_ctrl;
2144} 2110}
2145 2111
2146static void 2112static void
@@ -2168,7 +2134,7 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
2168 unsigned int r; 2134 unsigned int r;
2169 int err; 2135 int err;
2170 2136
2171 new_ctrl = nn->ctrl; 2137 new_ctrl = nn->dp.ctrl;
2172 2138
2173 if (nn->cap & NFP_NET_CFG_CTRL_RSS) { 2139 if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
2174 nfp_net_rss_write_key(nn); 2140 nfp_net_rss_write_key(nn);
@@ -2184,22 +2150,22 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
2184 update |= NFP_NET_CFG_UPDATE_IRQMOD; 2150 update |= NFP_NET_CFG_UPDATE_IRQMOD;
2185 } 2151 }
2186 2152
2187 for (r = 0; r < nn->num_tx_rings; r++) 2153 for (r = 0; r < nn->dp.num_tx_rings; r++)
2188 nfp_net_tx_ring_hw_cfg_write(nn, &nn->tx_rings[r], r); 2154 nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r);
2189 for (r = 0; r < nn->num_rx_rings; r++) 2155 for (r = 0; r < nn->dp.num_rx_rings; r++)
2190 nfp_net_rx_ring_hw_cfg_write(nn, &nn->rx_rings[r], r); 2156 nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);
2191 2157
2192 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ? 2158 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ?
2193 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1); 2159 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1);
2194 2160
2195 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ? 2161 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
2196 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1); 2162 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);
2197 2163
2198 nfp_net_write_mac_addr(nn); 2164 nfp_net_write_mac_addr(nn);
2199 2165
2200 nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu); 2166 nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.netdev->mtu);
2201 nn_writel(nn, NFP_NET_CFG_FLBUFSZ, 2167 nn_writel(nn, NFP_NET_CFG_FLBUFSZ,
2202 nn->fl_bufsz - NFP_NET_RX_BUF_NON_DATA); 2168 nn->dp.fl_bufsz - NFP_NET_RX_BUF_NON_DATA);
2203 2169
2204 /* Enable device */ 2170 /* Enable device */
2205 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; 2171 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
@@ -2212,18 +2178,18 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
2212 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); 2178 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2213 err = nfp_net_reconfig(nn, update); 2179 err = nfp_net_reconfig(nn, update);
2214 2180
2215 nn->ctrl = new_ctrl; 2181 nn->dp.ctrl = new_ctrl;
2216 2182
2217 for (r = 0; r < nn->num_rx_rings; r++) 2183 for (r = 0; r < nn->dp.num_rx_rings; r++)
2218 nfp_net_rx_ring_fill_freelist(&nn->rx_rings[r]); 2184 nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);
2219 2185
2220 /* Since reconfiguration requests while NFP is down are ignored we 2186 /* Since reconfiguration requests while NFP is down are ignored we
2221 * have to wipe the entire VXLAN configuration and reinitialize it. 2187 * have to wipe the entire VXLAN configuration and reinitialize it.
2222 */ 2188 */
2223 if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) { 2189 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) {
2224 memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports)); 2190 memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
2225 memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt)); 2191 memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
2226 udp_tunnel_get_rx_info(nn->netdev); 2192 udp_tunnel_get_rx_info(nn->dp.netdev);
2227 } 2193 }
2228 2194
2229 return err; 2195 return err;
@@ -2252,12 +2218,12 @@ static void nfp_net_open_stack(struct nfp_net *nn)
2252{ 2218{
2253 unsigned int r; 2219 unsigned int r;
2254 2220
2255 for (r = 0; r < nn->num_r_vecs; r++) { 2221 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2256 napi_enable(&nn->r_vecs[r].napi); 2222 napi_enable(&nn->r_vecs[r].napi);
2257 enable_irq(nn->r_vecs[r].irq_vector); 2223 enable_irq(nn->r_vecs[r].irq_vector);
2258 } 2224 }
2259 2225
2260 netif_tx_wake_all_queues(nn->netdev); 2226 netif_tx_wake_all_queues(nn->dp.netdev);
2261 2227
2262 enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); 2228 enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2263 nfp_net_read_link_status(nn); 2229 nfp_net_read_link_status(nn);
@@ -2266,19 +2232,10 @@ static void nfp_net_open_stack(struct nfp_net *nn)
2266static int nfp_net_netdev_open(struct net_device *netdev) 2232static int nfp_net_netdev_open(struct net_device *netdev)
2267{ 2233{
2268 struct nfp_net *nn = netdev_priv(netdev); 2234 struct nfp_net *nn = netdev_priv(netdev);
2269 struct nfp_net_ring_set rx = {
2270 .n_rings = nn->num_rx_rings,
2271 .mtu = nn->netdev->mtu,
2272 .dcnt = nn->rxd_cnt,
2273 };
2274 struct nfp_net_ring_set tx = {
2275 .n_rings = nn->num_tx_rings,
2276 .dcnt = nn->txd_cnt,
2277 };
2278 int err, r; 2235 int err, r;
2279 2236
2280 if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) { 2237 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE) {
2281 nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl); 2238 nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->dp.ctrl);
2282 return -EBUSY; 2239 return -EBUSY;
2283 } 2240 }
2284 2241
@@ -2299,33 +2256,28 @@ static int nfp_net_netdev_open(struct net_device *netdev)
2299 goto err_free_exn; 2256 goto err_free_exn;
2300 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); 2257 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2301 2258
2302 for (r = 0; r < nn->num_r_vecs; r++) { 2259 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2303 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); 2260 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
2304 if (err) 2261 if (err)
2305 goto err_cleanup_vec_p; 2262 goto err_cleanup_vec_p;
2306 } 2263 }
2307 2264
2308 nn->rx_rings = nfp_net_rx_ring_set_prepare(nn, &rx, nn->xdp_prog); 2265 err = nfp_net_rx_rings_prepare(nn, &nn->dp);
2309 if (!nn->rx_rings) { 2266 if (err)
2310 err = -ENOMEM;
2311 goto err_cleanup_vec; 2267 goto err_cleanup_vec;
2312 }
2313 2268
2314 nn->tx_rings = nfp_net_tx_ring_set_prepare(nn, &tx, 2269 err = nfp_net_tx_rings_prepare(nn, &nn->dp);
2315 nn->num_stack_tx_rings); 2270 if (err)
2316 if (!nn->tx_rings) {
2317 err = -ENOMEM;
2318 goto err_free_rx_rings; 2271 goto err_free_rx_rings;
2319 }
2320 2272
2321 for (r = 0; r < nn->max_r_vecs; r++) 2273 for (r = 0; r < nn->max_r_vecs; r++)
2322 nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r); 2274 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
2323 2275
2324 err = netif_set_real_num_tx_queues(netdev, nn->num_stack_tx_rings); 2276 err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
2325 if (err) 2277 if (err)
2326 goto err_free_rings; 2278 goto err_free_rings;
2327 2279
2328 err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings); 2280 err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
2329 if (err) 2281 if (err)
2330 goto err_free_rings; 2282 goto err_free_rings;
2331 2283
@@ -2351,11 +2303,11 @@ static int nfp_net_netdev_open(struct net_device *netdev)
2351 return 0; 2303 return 0;
2352 2304
2353err_free_rings: 2305err_free_rings:
2354 nfp_net_tx_ring_set_free(nn, &tx); 2306 nfp_net_tx_rings_free(&nn->dp);
2355err_free_rx_rings: 2307err_free_rx_rings:
2356 nfp_net_rx_ring_set_free(nn, &rx, nn->xdp_prog); 2308 nfp_net_rx_rings_free(&nn->dp);
2357err_cleanup_vec: 2309err_cleanup_vec:
2358 r = nn->num_r_vecs; 2310 r = nn->dp.num_r_vecs;
2359err_cleanup_vec_p: 2311err_cleanup_vec_p:
2360 while (r--) 2312 while (r--)
2361 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); 2313 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
@@ -2374,15 +2326,15 @@ static void nfp_net_close_stack(struct nfp_net *nn)
2374 unsigned int r; 2326 unsigned int r;
2375 2327
2376 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); 2328 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2377 netif_carrier_off(nn->netdev); 2329 netif_carrier_off(nn->dp.netdev);
2378 nn->link_up = false; 2330 nn->link_up = false;
2379 2331
2380 for (r = 0; r < nn->num_r_vecs; r++) { 2332 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2381 disable_irq(nn->r_vecs[r].irq_vector); 2333 disable_irq(nn->r_vecs[r].irq_vector);
2382 napi_disable(&nn->r_vecs[r].napi); 2334 napi_disable(&nn->r_vecs[r].napi);
2383 } 2335 }
2384 2336
2385 netif_tx_disable(nn->netdev); 2337 netif_tx_disable(nn->dp.netdev);
2386} 2338}
2387 2339
2388/** 2340/**
@@ -2393,17 +2345,17 @@ static void nfp_net_close_free_all(struct nfp_net *nn)
2393{ 2345{
2394 unsigned int r; 2346 unsigned int r;
2395 2347
2396 for (r = 0; r < nn->num_rx_rings; r++) { 2348 for (r = 0; r < nn->dp.num_rx_rings; r++) {
2397 nfp_net_rx_ring_bufs_free(nn, &nn->rx_rings[r], nn->xdp_prog); 2349 nfp_net_rx_ring_bufs_free(&nn->dp, &nn->dp.rx_rings[r]);
2398 nfp_net_rx_ring_free(&nn->rx_rings[r]); 2350 nfp_net_rx_ring_free(&nn->dp.rx_rings[r]);
2399 } 2351 }
2400 for (r = 0; r < nn->num_tx_rings; r++) 2352 for (r = 0; r < nn->dp.num_tx_rings; r++)
2401 nfp_net_tx_ring_free(&nn->tx_rings[r]); 2353 nfp_net_tx_ring_free(&nn->dp.tx_rings[r]);
2402 for (r = 0; r < nn->num_r_vecs; r++) 2354 for (r = 0; r < nn->dp.num_r_vecs; r++)
2403 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); 2355 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2404 2356
2405 kfree(nn->rx_rings); 2357 kfree(nn->dp.rx_rings);
2406 kfree(nn->tx_rings); 2358 kfree(nn->dp.tx_rings);
2407 2359
2408 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); 2360 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2409 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); 2361 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
@@ -2417,8 +2369,8 @@ static int nfp_net_netdev_close(struct net_device *netdev)
2417{ 2369{
2418 struct nfp_net *nn = netdev_priv(netdev); 2370 struct nfp_net *nn = netdev_priv(netdev);
2419 2371
2420 if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) { 2372 if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE)) {
2421 nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl); 2373 nn_err(nn, "Dev is not up: 0x%08x\n", nn->dp.ctrl);
2422 return 0; 2374 return 0;
2423 } 2375 }
2424 2376
@@ -2443,7 +2395,7 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
2443 struct nfp_net *nn = netdev_priv(netdev); 2395 struct nfp_net *nn = netdev_priv(netdev);
2444 u32 new_ctrl; 2396 u32 new_ctrl;
2445 2397
2446 new_ctrl = nn->ctrl; 2398 new_ctrl = nn->dp.ctrl;
2447 2399
2448 if (netdev->flags & IFF_PROMISC) { 2400 if (netdev->flags & IFF_PROMISC) {
2449 if (nn->cap & NFP_NET_CFG_CTRL_PROMISC) 2401 if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
@@ -2454,13 +2406,13 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
2454 new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC; 2406 new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
2455 } 2407 }
2456 2408
2457 if (new_ctrl == nn->ctrl) 2409 if (new_ctrl == nn->dp.ctrl)
2458 return; 2410 return;
2459 2411
2460 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); 2412 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2461 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN); 2413 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
2462 2414
2463 nn->ctrl = new_ctrl; 2415 nn->dp.ctrl = new_ctrl;
2464} 2416}
2465 2417
2466static void nfp_net_rss_init_itbl(struct nfp_net *nn) 2418static void nfp_net_rss_init_itbl(struct nfp_net *nn)
@@ -2469,42 +2421,39 @@ static void nfp_net_rss_init_itbl(struct nfp_net *nn)
2469 2421
2470 for (i = 0; i < sizeof(nn->rss_itbl); i++) 2422 for (i = 0; i < sizeof(nn->rss_itbl); i++)
2471 nn->rss_itbl[i] = 2423 nn->rss_itbl[i] =
2472 ethtool_rxfh_indir_default(i, nn->num_rx_rings); 2424 ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings);
2473} 2425}
2474 2426
2475static int 2427static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
2476nfp_net_ring_swap_enable(struct nfp_net *nn, unsigned int *num_vecs, 2428{
2477 unsigned int *stack_tx_rings, 2429 struct nfp_net_dp new_dp = *dp;
2478 struct bpf_prog **xdp_prog, 2430
2479 struct nfp_net_ring_set *rx, 2431 *dp = nn->dp;
2480 struct nfp_net_ring_set *tx) 2432 nn->dp = new_dp;
2433
2434 nn->dp.netdev->mtu = new_dp.mtu;
2435
2436 if (!netif_is_rxfh_configured(nn->dp.netdev))
2437 nfp_net_rss_init_itbl(nn);
2438}
2439
2440static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
2481{ 2441{
2482 unsigned int r; 2442 unsigned int r;
2483 int err; 2443 int err;
2484 2444
2485 if (rx) 2445 nfp_net_dp_swap(nn, dp);
2486 nfp_net_rx_ring_set_swap(nn, rx);
2487 if (tx)
2488 nfp_net_tx_ring_set_swap(nn, tx);
2489
2490 swap(*num_vecs, nn->num_r_vecs);
2491 swap(*stack_tx_rings, nn->num_stack_tx_rings);
2492 *xdp_prog = xchg(&nn->xdp_prog, *xdp_prog);
2493 2446
2494 for (r = 0; r < nn->max_r_vecs; r++) 2447 for (r = 0; r < nn->max_r_vecs; r++)
2495 nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r); 2448 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
2496 2449
2497 if (!netif_is_rxfh_configured(nn->netdev)) 2450 err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings);
2498 nfp_net_rss_init_itbl(nn);
2499
2500 err = netif_set_real_num_rx_queues(nn->netdev,
2501 nn->num_rx_rings);
2502 if (err) 2451 if (err)
2503 return err; 2452 return err;
2504 2453
2505 if (nn->netdev->real_num_tx_queues != nn->num_stack_tx_rings) { 2454 if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) {
2506 err = netif_set_real_num_tx_queues(nn->netdev, 2455 err = netif_set_real_num_tx_queues(nn->dp.netdev,
2507 nn->num_stack_tx_rings); 2456 nn->dp.num_stack_tx_rings);
2508 if (err) 2457 if (err)
2509 return err; 2458 return err;
2510 } 2459 }
@@ -2512,18 +2461,36 @@ nfp_net_ring_swap_enable(struct nfp_net *nn, unsigned int *num_vecs,
2512 return __nfp_net_set_config_and_enable(nn); 2461 return __nfp_net_set_config_and_enable(nn);
2513} 2462}
2514 2463
2515static int 2464struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
2516nfp_net_check_config(struct nfp_net *nn, struct bpf_prog *xdp_prog, 2465{
2517 struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx) 2466 struct nfp_net_dp *new;
2467
2468 new = kmalloc(sizeof(*new), GFP_KERNEL);
2469 if (!new)
2470 return NULL;
2471
2472 *new = nn->dp;
2473
2474 /* Clear things which need to be recomputed */
2475 new->fl_bufsz = 0;
2476 new->tx_rings = NULL;
2477 new->rx_rings = NULL;
2478 new->num_r_vecs = 0;
2479 new->num_stack_tx_rings = 0;
2480
2481 return new;
2482}
2483
2484static int nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp)
2518{ 2485{
2519 /* XDP-enabled tests */ 2486 /* XDP-enabled tests */
2520 if (!xdp_prog) 2487 if (!dp->xdp_prog)
2521 return 0; 2488 return 0;
2522 if (rx && nfp_net_calc_fl_bufsz(nn, rx->mtu) > PAGE_SIZE) { 2489 if (dp->fl_bufsz > PAGE_SIZE) {
2523 nn_warn(nn, "MTU too large w/ XDP enabled\n"); 2490 nn_warn(nn, "MTU too large w/ XDP enabled\n");
2524 return -EINVAL; 2491 return -EINVAL;
2525 } 2492 }
2526 if (tx && tx->n_rings > nn->max_tx_rings) { 2493 if (dp->num_tx_rings > nn->max_tx_rings) {
2527 nn_warn(nn, "Insufficient number of TX rings w/ XDP enabled\n"); 2494 nn_warn(nn, "Insufficient number of TX rings w/ XDP enabled\n");
2528 return -EINVAL; 2495 return -EINVAL;
2529 } 2496 }
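The hunk above introduces the clone-and-reconfigure pattern that the rest of this patch converts ring reconfiguration to: callers take a private copy of the data path, change only the fields they care about, and hand the copy to nfp_net_ring_reconfig(), which validates it, swaps it in (or rolls back) and frees it. A sketch of such a caller, modelled on the nfp_net_change_mtu() conversion later in this diff; example_set_ring_size() is a hypothetical name:

static int example_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
{
	struct nfp_net_dp *dp;

	dp = nfp_net_clone_dp(nn);	/* private copy of the data path */
	if (!dp)
		return -ENOMEM;

	dp->rxd_cnt = rxd_cnt;		/* tweak only what changes */
	dp->txd_cnt = txd_cnt;

	/* Validates, swaps the new rings in (or restores the old ones) and frees @dp */
	return nfp_net_ring_reconfig(nn, dp);
}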
@@ -2531,119 +2498,94 @@ nfp_net_check_config(struct nfp_net *nn, struct bpf_prog *xdp_prog,
2531 return 0; 2498 return 0;
2532} 2499}
2533 2500
2534static void 2501int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp)
2535nfp_net_ring_reconfig_down(struct nfp_net *nn, struct bpf_prog **xdp_prog,
2536 struct nfp_net_ring_set *rx,
2537 struct nfp_net_ring_set *tx,
2538 unsigned int stack_tx_rings, unsigned int num_vecs)
2539{
2540 nn->netdev->mtu = rx ? rx->mtu : nn->netdev->mtu;
2541 nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, nn->netdev->mtu);
2542 nn->rxd_cnt = rx ? rx->dcnt : nn->rxd_cnt;
2543 nn->txd_cnt = tx ? tx->dcnt : nn->txd_cnt;
2544 nn->num_rx_rings = rx ? rx->n_rings : nn->num_rx_rings;
2545 nn->num_tx_rings = tx ? tx->n_rings : nn->num_tx_rings;
2546 nn->num_stack_tx_rings = stack_tx_rings;
2547 nn->num_r_vecs = num_vecs;
2548 *xdp_prog = xchg(&nn->xdp_prog, *xdp_prog);
2549
2550 if (!netif_is_rxfh_configured(nn->netdev))
2551 nfp_net_rss_init_itbl(nn);
2552}
2553
2554int
2555nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog,
2556 struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx)
2557{ 2502{
2558 unsigned int stack_tx_rings, num_vecs, r; 2503 int r, err;
2559 int err; 2504
2505 dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp);
2560 2506
2561 stack_tx_rings = tx ? tx->n_rings : nn->num_tx_rings; 2507 dp->num_stack_tx_rings = dp->num_tx_rings;
2562 if (*xdp_prog) 2508 if (dp->xdp_prog)
2563 stack_tx_rings -= rx ? rx->n_rings : nn->num_rx_rings; 2509 dp->num_stack_tx_rings -= dp->num_rx_rings;
2564 2510
2565 num_vecs = max(rx ? rx->n_rings : nn->num_rx_rings, stack_tx_rings); 2511 dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings);
2566 2512
2567 err = nfp_net_check_config(nn, *xdp_prog, rx, tx); 2513 err = nfp_net_check_config(nn, dp);
2568 if (err) 2514 if (err)
2569 return err; 2515 goto exit_free_dp;
2570 2516
2571 if (!netif_running(nn->netdev)) { 2517 if (!netif_running(dp->netdev)) {
2572 nfp_net_ring_reconfig_down(nn, xdp_prog, rx, tx, 2518 nfp_net_dp_swap(nn, dp);
2573 stack_tx_rings, num_vecs); 2519 err = 0;
2574 return 0; 2520 goto exit_free_dp;
2575 } 2521 }
2576 2522
2577 /* Prepare new rings */ 2523 /* Prepare new rings */
2578 for (r = nn->num_r_vecs; r < num_vecs; r++) { 2524 for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
2579 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); 2525 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
2580 if (err) { 2526 if (err) {
2581 num_vecs = r; 2527 dp->num_r_vecs = r;
2582 goto err_cleanup_vecs; 2528 goto err_cleanup_vecs;
2583 } 2529 }
2584 } 2530 }
2585 if (rx) { 2531
2586 if (!nfp_net_rx_ring_set_prepare(nn, rx, *xdp_prog)) { 2532 err = nfp_net_rx_rings_prepare(nn, dp);
2587 err = -ENOMEM; 2533 if (err)
2588 goto err_cleanup_vecs; 2534 goto err_cleanup_vecs;
2589 } 2535
2590 } 2536 err = nfp_net_tx_rings_prepare(nn, dp);
2591 if (tx) { 2537 if (err)
2592 if (!nfp_net_tx_ring_set_prepare(nn, tx, stack_tx_rings)) { 2538 goto err_free_rx;
2593 err = -ENOMEM;
2594 goto err_free_rx;
2595 }
2596 }
2597 2539
2598 /* Stop device, swap in new rings, try to start the firmware */ 2540 /* Stop device, swap in new rings, try to start the firmware */
2599 nfp_net_close_stack(nn); 2541 nfp_net_close_stack(nn);
2600 nfp_net_clear_config_and_disable(nn); 2542 nfp_net_clear_config_and_disable(nn);
2601 2543
2602 err = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings, 2544 err = nfp_net_dp_swap_enable(nn, dp);
2603 xdp_prog, rx, tx);
2604 if (err) { 2545 if (err) {
2605 int err2; 2546 int err2;
2606 2547
2607 nfp_net_clear_config_and_disable(nn); 2548 nfp_net_clear_config_and_disable(nn);
2608 2549
2609 /* Try with old configuration and old rings */ 2550 /* Try with old configuration and old rings */
2610 err2 = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings, 2551 err2 = nfp_net_dp_swap_enable(nn, dp);
2611 xdp_prog, rx, tx);
2612 if (err2) 2552 if (err2)
2613 nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n", 2553 nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
2614 err, err2); 2554 err, err2);
2615 } 2555 }
2616 for (r = num_vecs - 1; r >= nn->num_r_vecs; r--) 2556 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
2617 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); 2557 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2618 2558
2619 if (rx) 2559 nfp_net_rx_rings_free(dp);
2620 nfp_net_rx_ring_set_free(nn, rx, *xdp_prog); 2560 nfp_net_tx_rings_free(dp);
2621 if (tx)
2622 nfp_net_tx_ring_set_free(nn, tx);
2623 2561
2624 nfp_net_open_stack(nn); 2562 nfp_net_open_stack(nn);
2563exit_free_dp:
2564 kfree(dp);
2625 2565
2626 return err; 2566 return err;
2627 2567
2628err_free_rx: 2568err_free_rx:
2629 if (rx) 2569 nfp_net_rx_rings_free(dp);
2630 nfp_net_rx_ring_set_free(nn, rx, *xdp_prog);
2631err_cleanup_vecs: 2570err_cleanup_vecs:
2632 for (r = num_vecs - 1; r >= nn->num_r_vecs; r--) 2571 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
2633 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); 2572 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2573 kfree(dp);
2634 return err; 2574 return err;
2635} 2575}
2636 2576
2637static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) 2577static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
2638{ 2578{
2639 struct nfp_net *nn = netdev_priv(netdev); 2579 struct nfp_net *nn = netdev_priv(netdev);
2640 struct nfp_net_ring_set rx = { 2580 struct nfp_net_dp *dp;
2641 .n_rings = nn->num_rx_rings, 2581
2642 .mtu = new_mtu, 2582 dp = nfp_net_clone_dp(nn);
2643 .dcnt = nn->rxd_cnt, 2583 if (!dp)
2644 }; 2584 return -ENOMEM;
2645 2585
2646 return nfp_net_ring_reconfig(nn, &nn->xdp_prog, &rx, NULL); 2586 dp->mtu = new_mtu;
2587
2588 return nfp_net_ring_reconfig(nn, dp);
2647} 2589}
2648 2590
2649static void nfp_net_stat64(struct net_device *netdev, 2591static void nfp_net_stat64(struct net_device *netdev,
@@ -2652,7 +2594,7 @@ static void nfp_net_stat64(struct net_device *netdev,
2652 struct nfp_net *nn = netdev_priv(netdev); 2594 struct nfp_net *nn = netdev_priv(netdev);
2653 int r; 2595 int r;
2654 2596
2655 for (r = 0; r < nn->num_r_vecs; r++) { 2597 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2656 struct nfp_net_r_vector *r_vec = &nn->r_vecs[r]; 2598 struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
2657 u64 data[3]; 2599 u64 data[3];
2658 unsigned int start; 2600 unsigned int start;
@@ -2699,7 +2641,7 @@ nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
2699 return -ENOTSUPP; 2641 return -ENOTSUPP;
2700 2642
2701 if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) { 2643 if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) {
2702 if (!nn->bpf_offload_xdp) 2644 if (!nn->dp.bpf_offload_xdp)
2703 return nfp_net_bpf_offload(nn, tc->cls_bpf); 2645 return nfp_net_bpf_offload(nn, tc->cls_bpf);
2704 else 2646 else
2705 return -EBUSY; 2647 return -EBUSY;
@@ -2718,7 +2660,7 @@ static int nfp_net_set_features(struct net_device *netdev,
2718 2660
2719 /* Assume this is not called with features we have not advertised */ 2661 /* Assume this is not called with features we have not advertised */
2720 2662
2721 new_ctrl = nn->ctrl; 2663 new_ctrl = nn->dp.ctrl;
2722 2664
2723 if (changed & NETIF_F_RXCSUM) { 2665 if (changed & NETIF_F_RXCSUM) {
2724 if (features & NETIF_F_RXCSUM) 2666 if (features & NETIF_F_RXCSUM)
@@ -2762,7 +2704,7 @@ static int nfp_net_set_features(struct net_device *netdev,
2762 new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER; 2704 new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
2763 } 2705 }
2764 2706
2765 if (changed & NETIF_F_HW_TC && nn->ctrl & NFP_NET_CFG_CTRL_BPF) { 2707 if (changed & NETIF_F_HW_TC && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
2766 nn_err(nn, "Cannot disable HW TC offload while in use\n"); 2708 nn_err(nn, "Cannot disable HW TC offload while in use\n");
2767 return -EBUSY; 2709 return -EBUSY;
2768 } 2710 }
@@ -2770,16 +2712,16 @@ static int nfp_net_set_features(struct net_device *netdev,
2770 nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n", 2712 nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
2771 netdev->features, features, changed); 2713 netdev->features, features, changed);
2772 2714
2773 if (new_ctrl == nn->ctrl) 2715 if (new_ctrl == nn->dp.ctrl)
2774 return 0; 2716 return 0;
2775 2717
2776 nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->ctrl, new_ctrl); 2718 nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl);
2777 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); 2719 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2778 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); 2720 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
2779 if (err) 2721 if (err)
2780 return err; 2722 return err;
2781 2723
2782 nn->ctrl = new_ctrl; 2724 nn->dp.ctrl = new_ctrl;
2783 2725
2784 return 0; 2726 return 0;
2785} 2727}
@@ -2830,6 +2772,26 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
2830 return features; 2772 return features;
2831} 2773}
2832 2774
2775static int
2776nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
2777{
2778 struct nfp_net *nn = netdev_priv(netdev);
2779 int err;
2780
2781 if (!nn->eth_port)
2782 return -EOPNOTSUPP;
2783
2784 if (!nn->eth_port->is_split)
2785 err = snprintf(name, len, "p%d", nn->eth_port->label_port);
2786 else
2787 err = snprintf(name, len, "p%ds%d", nn->eth_port->label_port,
2788 nn->eth_port->label_subport);
2789 if (err >= len)
2790 return -EINVAL;
2791
2792 return 0;
2793}
2794
2833/** 2795/**
2834 * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW 2796 * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
2835 * @nn: NFP Net device to reconfigure 2797 * @nn: NFP Net device to reconfigure
@@ -2842,7 +2804,7 @@ static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
2842 2804
2843 nn->vxlan_ports[idx] = port; 2805 nn->vxlan_ports[idx] = port;
2844 2806
2845 if (!(nn->ctrl & NFP_NET_CFG_CTRL_VXLAN)) 2807 if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN))
2846 return; 2808 return;
2847 2809
2848 BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1); 2810 BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
@@ -2921,8 +2883,8 @@ static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog)
2921 if (!nfp_net_ebpf_capable(nn)) 2883 if (!nfp_net_ebpf_capable(nn))
2922 return -EINVAL; 2884 return -EINVAL;
2923 2885
2924 if (nn->ctrl & NFP_NET_CFG_CTRL_BPF) { 2886 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
2925 if (!nn->bpf_offload_xdp) 2887 if (!nn->dp.bpf_offload_xdp)
2926 return prog ? -EBUSY : 0; 2888 return prog ? -EBUSY : 0;
2927 cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY; 2889 cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY;
2928 } else { 2890 } else {
@@ -2935,48 +2897,47 @@ static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog)
2935 /* Stop offload if replace not possible */ 2897 /* Stop offload if replace not possible */
2936 if (ret && cmd.command == TC_CLSBPF_REPLACE) 2898 if (ret && cmd.command == TC_CLSBPF_REPLACE)
2937 nfp_net_xdp_offload(nn, NULL); 2899 nfp_net_xdp_offload(nn, NULL);
2938 nn->bpf_offload_xdp = prog && !ret; 2900 nn->dp.bpf_offload_xdp = prog && !ret;
2939 return ret; 2901 return ret;
2940} 2902}
2941 2903
2942static int nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog) 2904static int nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog)
2943{ 2905{
2944 struct nfp_net_ring_set rx = { 2906 struct bpf_prog *old_prog = nn->dp.xdp_prog;
2945 .n_rings = nn->num_rx_rings, 2907 struct nfp_net_dp *dp;
2946 .mtu = nn->netdev->mtu,
2947 .dcnt = nn->rxd_cnt,
2948 };
2949 struct nfp_net_ring_set tx = {
2950 .n_rings = nn->num_tx_rings,
2951 .dcnt = nn->txd_cnt,
2952 };
2953 int err; 2908 int err;
2954 2909
2955 if (prog && prog->xdp_adjust_head) { 2910 if (!prog && !nn->dp.xdp_prog)
2956 nn_err(nn, "Does not support bpf_xdp_adjust_head()\n");
2957 return -EOPNOTSUPP;
2958 }
2959 if (!prog && !nn->xdp_prog)
2960 return 0; 2911 return 0;
2961 if (prog && nn->xdp_prog) { 2912 if (prog && nn->dp.xdp_prog) {
2962 prog = xchg(&nn->xdp_prog, prog); 2913 prog = xchg(&nn->dp.xdp_prog, prog);
2963 bpf_prog_put(prog); 2914 bpf_prog_put(prog);
2964 nfp_net_xdp_offload(nn, nn->xdp_prog); 2915 nfp_net_xdp_offload(nn, nn->dp.xdp_prog);
2965 return 0; 2916 return 0;
2966 } 2917 }
2967 2918
2968 tx.n_rings += prog ? nn->num_rx_rings : -nn->num_rx_rings; 2919 dp = nfp_net_clone_dp(nn);
2920 if (!dp)
2921 return -ENOMEM;
2922
2923 dp->xdp_prog = prog;
2924 dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
2925 dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2926 if (prog)
2927 dp->rx_dma_off = XDP_PACKET_HEADROOM -
2928 (nn->dp.rx_offset ?: NFP_NET_MAX_PREPEND);
2929 else
2930 dp->rx_dma_off = 0;
2969 2931
2970 /* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */ 2932 /* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
2971 err = nfp_net_ring_reconfig(nn, &prog, &rx, &tx); 2933 err = nfp_net_ring_reconfig(nn, dp);
2972 if (err) 2934 if (err)
2973 return err; 2935 return err;
2974 2936
2975 /* @prog got swapped and is now the old one */ 2937 if (old_prog)
2976 if (prog) 2938 bpf_prog_put(old_prog);
2977 bpf_prog_put(prog);
2978 2939
2979 nfp_net_xdp_offload(nn, nn->xdp_prog); 2940 nfp_net_xdp_offload(nn, nn->dp.xdp_prog);
2980 2941
2981 return 0; 2942 return 0;
2982} 2943}
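The rx_dma_off computation in nfp_net_xdp_setup() above tops the firmware's prepend space up to the full XDP headroom. As a worked example, assuming the kernel's 256-byte XDP_PACKET_HEADROOM and a hypothetical 64-byte rx_offset:

/*
 * rx_dma_off = XDP_PACKET_HEADROOM - (rx_offset ?: NFP_NET_MAX_PREPEND)
 *            = 256 - 64
 *            = 192
 * so rx_offset + rx_dma_off == 256 bytes of headroom sit in front of every
 * RX buffer, which is what an attached XDP program may claim through
 * bpf_xdp_adjust_head().
 */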
@@ -2989,7 +2950,7 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
2989 case XDP_SETUP_PROG: 2950 case XDP_SETUP_PROG:
2990 return nfp_net_xdp_setup(nn, xdp->prog); 2951 return nfp_net_xdp_setup(nn, xdp->prog);
2991 case XDP_QUERY_PROG: 2952 case XDP_QUERY_PROG:
2992 xdp->prog_attached = !!nn->xdp_prog; 2953 xdp->prog_attached = !!nn->dp.xdp_prog;
2993 return 0; 2954 return 0;
2994 default: 2955 default:
2995 return -EINVAL; 2956 return -EINVAL;
@@ -3008,6 +2969,7 @@ static const struct net_device_ops nfp_net_netdev_ops = {
3008 .ndo_set_mac_address = eth_mac_addr, 2969 .ndo_set_mac_address = eth_mac_addr,
3009 .ndo_set_features = nfp_net_set_features, 2970 .ndo_set_features = nfp_net_set_features,
3010 .ndo_features_check = nfp_net_features_check, 2971 .ndo_features_check = nfp_net_features_check,
2972 .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
3011 .ndo_udp_tunnel_add = nfp_net_add_vxlan_port, 2973 .ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
3012 .ndo_udp_tunnel_del = nfp_net_del_vxlan_port, 2974 .ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
3013 .ndo_xdp = nfp_net_xdp, 2975 .ndo_xdp = nfp_net_xdp,
@@ -3020,9 +2982,9 @@ static const struct net_device_ops nfp_net_netdev_ops = {
3020void nfp_net_info(struct nfp_net *nn) 2982void nfp_net_info(struct nfp_net *nn)
3021{ 2983{
3022 nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n", 2984 nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
3023 nn->is_vf ? "VF " : "", 2985 nn->dp.is_vf ? "VF " : "",
3024 nn->num_tx_rings, nn->max_tx_rings, 2986 nn->dp.num_tx_rings, nn->max_tx_rings,
3025 nn->num_rx_rings, nn->max_rx_rings); 2987 nn->dp.num_rx_rings, nn->max_rx_rings);
3026 nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n", 2988 nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
3027 nn->fw_ver.resv, nn->fw_ver.class, 2989 nn->fw_ver.resv, nn->fw_ver.class,
3028 nn->fw_ver.major, nn->fw_ver.minor, 2990 nn->fw_ver.major, nn->fw_ver.minor,
@@ -3074,21 +3036,24 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
3074 SET_NETDEV_DEV(netdev, &pdev->dev); 3036 SET_NETDEV_DEV(netdev, &pdev->dev);
3075 nn = netdev_priv(netdev); 3037 nn = netdev_priv(netdev);
3076 3038
3077 nn->netdev = netdev; 3039 nn->dp.netdev = netdev;
3040 nn->dp.dev = &pdev->dev;
3078 nn->pdev = pdev; 3041 nn->pdev = pdev;
3079 3042
3080 nn->max_tx_rings = max_tx_rings; 3043 nn->max_tx_rings = max_tx_rings;
3081 nn->max_rx_rings = max_rx_rings; 3044 nn->max_rx_rings = max_rx_rings;
3082 3045
3083 nn->num_tx_rings = min_t(unsigned int, max_tx_rings, num_online_cpus()); 3046 nn->dp.num_tx_rings = min_t(unsigned int,
3084 nn->num_rx_rings = min_t(unsigned int, max_rx_rings, 3047 max_tx_rings, num_online_cpus());
3048 nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings,
3085 netif_get_num_default_rss_queues()); 3049 netif_get_num_default_rss_queues());
3086 3050
3087 nn->num_r_vecs = max(nn->num_tx_rings, nn->num_rx_rings); 3051 nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
3088 nn->num_r_vecs = min_t(unsigned int, nn->num_r_vecs, num_online_cpus()); 3052 nn->dp.num_r_vecs = min_t(unsigned int,
3053 nn->dp.num_r_vecs, num_online_cpus());
3089 3054
3090 nn->txd_cnt = NFP_NET_TX_DESCS_DEFAULT; 3055 nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
3091 nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT; 3056 nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
3092 3057
3093 spin_lock_init(&nn->reconfig_lock); 3058 spin_lock_init(&nn->reconfig_lock);
3094 spin_lock_init(&nn->rx_filter_lock); 3059 spin_lock_init(&nn->rx_filter_lock);
@@ -3108,7 +3073,28 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
3108 */ 3073 */
3109void nfp_net_netdev_free(struct nfp_net *nn) 3074void nfp_net_netdev_free(struct nfp_net *nn)
3110{ 3075{
3111 free_netdev(nn->netdev); 3076 free_netdev(nn->dp.netdev);
3077}
3078
3079/**
3080 * nfp_net_rss_key_sz() - Get current size of the RSS key
3081 * @nn: NFP Net device instance
3082 *
3083 * Return: size of the RSS key for currently selected hash function.
3084 */
3085unsigned int nfp_net_rss_key_sz(struct nfp_net *nn)
3086{
3087 switch (nn->rss_hfunc) {
3088 case ETH_RSS_HASH_TOP:
3089 return NFP_NET_CFG_RSS_KEY_SZ;
3090 case ETH_RSS_HASH_XOR:
3091 return 0;
3092 case ETH_RSS_HASH_CRC32:
3093 return 4;
3094 }
3095
3096 nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc);
3097 return 0;
3112} 3098}
3113 3099
3114/** 3100/**
@@ -3117,14 +3103,32 @@ void nfp_net_netdev_free(struct nfp_net *nn)
3117 */ 3103 */
3118static void nfp_net_rss_init(struct nfp_net *nn) 3104static void nfp_net_rss_init(struct nfp_net *nn)
3119{ 3105{
3120 netdev_rss_key_fill(nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ); 3106 unsigned long func_bit, rss_cap_hfunc;
3107 u32 reg;
3108
3109 /* Read the RSS function capability and select first supported func */
3110 reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP);
3111 rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg);
3112 if (!rss_cap_hfunc)
3113 rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC,
3114 NFP_NET_CFG_RSS_TOEPLITZ);
3115
3116 func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
3117 if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
3118 dev_warn(nn->dp.dev,
3119 "Bad RSS config, defaulting to Toeplitz hash\n");
3120 func_bit = ETH_RSS_HASH_TOP_BIT;
3121 }
3122 nn->rss_hfunc = 1 << func_bit;
3123
3124 netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn));
3121 3125
3122 nfp_net_rss_init_itbl(nn); 3126 nfp_net_rss_init_itbl(nn);
3123 3127
3124 /* Enable IPv4/IPv6 TCP by default */ 3128 /* Enable IPv4/IPv6 TCP by default */
3125 nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP | 3129 nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
3126 NFP_NET_CFG_RSS_IPV6_TCP | 3130 NFP_NET_CFG_RSS_IPV6_TCP |
3127 NFP_NET_CFG_RSS_TOEPLITZ | 3131 FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) |
3128 NFP_NET_CFG_RSS_MASK; 3132 NFP_NET_CFG_RSS_MASK;
3129} 3133}
3130 3134
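The hash-function plumbing above uses the FIELD_GET()/FIELD_PREP() helpers from <linux/bitfield.h>, which extract and insert values according to a constant bitmask. A self-contained illustration with a made-up EXAMPLE_HFUNC_MASK (not an NFP register layout):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_HFUNC_MASK	GENMASK(27, 24)	/* hypothetical 4-bit field */

static u32 example_pack_hfunc(u32 reg, unsigned int hfunc)
{
	reg &= ~EXAMPLE_HFUNC_MASK;
	return reg | FIELD_PREP(EXAMPLE_HFUNC_MASK, hfunc);	/* shift value into the field */
}

static unsigned int example_unpack_hfunc(u32 reg)
{
	return FIELD_GET(EXAMPLE_HFUNC_MASK, reg);		/* shift the field back out */
}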
@@ -3151,6 +3155,17 @@ int nfp_net_netdev_init(struct net_device *netdev)
3151 struct nfp_net *nn = netdev_priv(netdev); 3155 struct nfp_net *nn = netdev_priv(netdev);
3152 int err; 3156 int err;
3153 3157
3158 /* XDP calls for 256 byte packet headroom which wouldn't fit in a u8.
3159 * We, however, reuse the metadata prepend space for XDP buffers which
3160 * is at least 1 byte long and as long as XDP headroom doesn't increase
3161 * above 256 the *extra* XDP headroom will fit on 8 bits.
3162 */
3163 BUILD_BUG_ON(XDP_PACKET_HEADROOM > 256);
3164
3165 nn->dp.chained_metadata_format = nn->fw_ver.major > 3;
3166
3167 nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
3168
3154 /* Get some of the read-only fields from the BAR */ 3169 /* Get some of the read-only fields from the BAR */
3155 nn->cap = nn_readl(nn, NFP_NET_CFG_CAP); 3170 nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
3156 nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU); 3171 nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
@@ -3158,17 +3173,26 @@ int nfp_net_netdev_init(struct net_device *netdev)
3158 nfp_net_write_mac_addr(nn); 3173 nfp_net_write_mac_addr(nn);
3159 3174
3160 /* Determine RX packet/metadata boundary offset */ 3175 /* Determine RX packet/metadata boundary offset */
3161 if (nn->fw_ver.major >= 2) 3176 if (nn->fw_ver.major >= 2) {
3162 nn->rx_offset = nn_readl(nn, NFP_NET_CFG_RX_OFFSET); 3177 u32 reg;
3163 else 3178
3164 nn->rx_offset = NFP_NET_RX_OFFSET; 3179 reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
3180 if (reg > NFP_NET_MAX_PREPEND) {
3181 nn_err(nn, "Invalid rx offset: %d\n", reg);
3182 return -EINVAL;
3183 }
3184 nn->dp.rx_offset = reg;
3185 } else {
3186 nn->dp.rx_offset = NFP_NET_RX_OFFSET;
3187 }
3165 3188
3166 /* Set default MTU and Freelist buffer size */ 3189 /* Set default MTU and Freelist buffer size */
3167 if (nn->max_mtu < NFP_NET_DEFAULT_MTU) 3190 if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
3168 netdev->mtu = nn->max_mtu; 3191 netdev->mtu = nn->max_mtu;
3169 else 3192 else
3170 netdev->mtu = NFP_NET_DEFAULT_MTU; 3193 netdev->mtu = NFP_NET_DEFAULT_MTU;
3171 nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, netdev->mtu); 3194 nn->dp.mtu = netdev->mtu;
3195 nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
3172 3196
3173 /* Advertise/enable offloads based on capabilities 3197 /* Advertise/enable offloads based on capabilities
3174 * 3198 *
@@ -3179,31 +3203,31 @@ int nfp_net_netdev_init(struct net_device *netdev)
3179 netdev->hw_features = NETIF_F_HIGHDMA; 3203 netdev->hw_features = NETIF_F_HIGHDMA;
3180 if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) { 3204 if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) {
3181 netdev->hw_features |= NETIF_F_RXCSUM; 3205 netdev->hw_features |= NETIF_F_RXCSUM;
3182 nn->ctrl |= NFP_NET_CFG_CTRL_RXCSUM; 3206 nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
3183 } 3207 }
3184 if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) { 3208 if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
3185 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 3209 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3186 nn->ctrl |= NFP_NET_CFG_CTRL_TXCSUM; 3210 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
3187 } 3211 }
3188 if (nn->cap & NFP_NET_CFG_CTRL_GATHER) { 3212 if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
3189 netdev->hw_features |= NETIF_F_SG; 3213 netdev->hw_features |= NETIF_F_SG;
3190 nn->ctrl |= NFP_NET_CFG_CTRL_GATHER; 3214 nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
3191 } 3215 }
3192 if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) { 3216 if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) {
3193 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 3217 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
3194 nn->ctrl |= NFP_NET_CFG_CTRL_LSO; 3218 nn->dp.ctrl |= NFP_NET_CFG_CTRL_LSO;
3195 } 3219 }
3196 if (nn->cap & NFP_NET_CFG_CTRL_RSS) { 3220 if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
3197 netdev->hw_features |= NETIF_F_RXHASH; 3221 netdev->hw_features |= NETIF_F_RXHASH;
3198 nfp_net_rss_init(nn); 3222 nfp_net_rss_init(nn);
3199 nn->ctrl |= NFP_NET_CFG_CTRL_RSS; 3223 nn->dp.ctrl |= NFP_NET_CFG_CTRL_RSS;
3200 } 3224 }
3201 if (nn->cap & NFP_NET_CFG_CTRL_VXLAN && 3225 if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
3202 nn->cap & NFP_NET_CFG_CTRL_NVGRE) { 3226 nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
3203 if (nn->cap & NFP_NET_CFG_CTRL_LSO) 3227 if (nn->cap & NFP_NET_CFG_CTRL_LSO)
3204 netdev->hw_features |= NETIF_F_GSO_GRE | 3228 netdev->hw_features |= NETIF_F_GSO_GRE |
3205 NETIF_F_GSO_UDP_TUNNEL; 3229 NETIF_F_GSO_UDP_TUNNEL;
3206 nn->ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE; 3230 nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;
3207 3231
3208 netdev->hw_enc_features = netdev->hw_features; 3232 netdev->hw_enc_features = netdev->hw_features;
3209 } 3233 }
@@ -3212,11 +3236,11 @@ int nfp_net_netdev_init(struct net_device *netdev)
3212 3236
3213 if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) { 3237 if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
3214 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; 3238 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
3215 nn->ctrl |= NFP_NET_CFG_CTRL_RXVLAN; 3239 nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
3216 } 3240 }
3217 if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) { 3241 if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
3218 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; 3242 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
3219 nn->ctrl |= NFP_NET_CFG_CTRL_TXVLAN; 3243 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
3220 } 3244 }
3221 3245
3222 netdev->features = netdev->hw_features; 3246 netdev->features = netdev->hw_features;
@@ -3229,14 +3253,14 @@ int nfp_net_netdev_init(struct net_device *netdev)
3229 3253
3230 /* Allow L2 Broadcast and Multicast through by default, if supported */ 3254 /* Allow L2 Broadcast and Multicast through by default, if supported */
3231 if (nn->cap & NFP_NET_CFG_CTRL_L2BC) 3255 if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
3232 nn->ctrl |= NFP_NET_CFG_CTRL_L2BC; 3256 nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
3233 if (nn->cap & NFP_NET_CFG_CTRL_L2MC) 3257 if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
3234 nn->ctrl |= NFP_NET_CFG_CTRL_L2MC; 3258 nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC;
3235 3259
3236 /* Allow IRQ moderation, if supported */ 3260 /* Allow IRQ moderation, if supported */
3237 if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) { 3261 if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
3238 nfp_net_irqmod_init(nn); 3262 nfp_net_irqmod_init(nn);
3239 nn->ctrl |= NFP_NET_CFG_CTRL_IRQMOD; 3263 nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
3240 } 3264 }
3241 3265
3242 /* Stash the re-configuration queue away. First odd queue in TX Bar */ 3266 /* Stash the re-configuration queue away. First odd queue in TX Bar */
@@ -3275,9 +3299,9 @@ void nfp_net_netdev_clean(struct net_device *netdev)
3275{ 3299{
3276 struct nfp_net *nn = netdev_priv(netdev); 3300 struct nfp_net *nn = netdev_priv(netdev);
3277 3301
3278 if (nn->xdp_prog) 3302 if (nn->dp.xdp_prog)
3279 bpf_prog_put(nn->xdp_prog); 3303 bpf_prog_put(nn->dp.xdp_prog);
3280 if (nn->bpf_offload_xdp) 3304 if (nn->dp.bpf_offload_xdp)
3281 nfp_net_xdp_offload(nn, NULL); 3305 nfp_net_xdp_offload(nn, NULL);
3282 unregister_netdev(nn->netdev); 3306 unregister_netdev(nn->dp.netdev);
3283} 3307}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 385ba355c965..71d86171b4ee 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2015 Netronome Systems, Inc. 2 * Copyright (C) 2015-2017 Netronome Systems, Inc.
3 * 3 *
4 * This software is dual licensed under the GNU General License Version 2, 4 * This software is dual licensed under the GNU General License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this 5 * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -192,6 +192,14 @@
192#define NFP_NET_CFG_RX_OFFSET_DYNAMIC 0 /* Prepend mode */ 192#define NFP_NET_CFG_RX_OFFSET_DYNAMIC 0 /* Prepend mode */
193 193
194/** 194/**
195 * RSS capabilities
196 * @NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as
197 * @NFP_NET_CFG_RSS_HFUNC)
198 */
199#define NFP_NET_CFG_RSS_CAP 0x0054
200#define NFP_NET_CFG_RSS_CAP_HFUNC 0xff000000
201
202/**
195 * VXLAN/UDP encap configuration 203 * VXLAN/UDP encap configuration
196 * @NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports 204 * @NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports
197 * @NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes 205 * @NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes
@@ -249,7 +257,11 @@
249#define NFP_NET_CFG_RSS_IPV4_UDP (1 << 11) /* RSS for IPv4/UDP */ 257#define NFP_NET_CFG_RSS_IPV4_UDP (1 << 11) /* RSS for IPv4/UDP */
250#define NFP_NET_CFG_RSS_IPV6_TCP (1 << 12) /* RSS for IPv6/TCP */ 258#define NFP_NET_CFG_RSS_IPV6_TCP (1 << 12) /* RSS for IPv6/TCP */
251#define NFP_NET_CFG_RSS_IPV6_UDP (1 << 13) /* RSS for IPv6/UDP */ 259#define NFP_NET_CFG_RSS_IPV6_UDP (1 << 13) /* RSS for IPv6/UDP */
260#define NFP_NET_CFG_RSS_HFUNC 0xff000000
252#define NFP_NET_CFG_RSS_TOEPLITZ (1 << 24) /* Use Toeplitz hash */ 261#define NFP_NET_CFG_RSS_TOEPLITZ (1 << 24) /* Use Toeplitz hash */
262#define NFP_NET_CFG_RSS_XOR (1 << 25) /* Use XOR as hash */
263#define NFP_NET_CFG_RSS_CRC32 (1 << 26) /* Use CRC32 as hash */
264#define NFP_NET_CFG_RSS_HFUNCS 3
253#define NFP_NET_CFG_RSS_KEY (NFP_NET_CFG_RSS_BASE + 0x4) 265#define NFP_NET_CFG_RSS_KEY (NFP_NET_CFG_RSS_BASE + 0x4)
254#define NFP_NET_CFG_RSS_KEY_SZ 0x28 266#define NFP_NET_CFG_RSS_KEY_SZ 0x28
255#define NFP_NET_CFG_RSS_ITBL (NFP_NET_CFG_RSS_BASE + 0x4 + \ 267#define NFP_NET_CFG_RSS_ITBL (NFP_NET_CFG_RSS_BASE + 0x4 + \
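
Annotation (not part of the patch): the new NFP_NET_CFG_RSS_HFUNC and NFP_NET_CFG_RSS_CAP_HFUNC fields above occupy bits 31:24 of their respective words, and the driver manipulates them with the linux/bitfield.h helpers instead of open-coded shifts. A minimal sketch of packing and unpacking such a field; the EXAMPLE_* names are made up for illustration and simply mirror the mask defined above:

#include <linux/types.h>
#include <linux/bitfield.h>

/* Mirrors NFP_NET_CFG_RSS_HFUNC / NFP_NET_CFG_RSS_CAP_HFUNC: bits 31:24. */
#define EXAMPLE_RSS_HFUNC_MASK	0xff000000

static u32 example_pack_rss_ctrl(u32 flow_bits, u8 hfunc_bit)
{
	/* FIELD_PREP(mask, val) == (val << 24) & 0xff000000 for this mask */
	return flow_bits | FIELD_PREP(EXAMPLE_RSS_HFUNC_MASK, hfunc_bit);
}

static u8 example_unpack_rss_cap(u32 rss_cap)
{
	/* FIELD_GET(mask, reg) == (reg & 0xff000000) >> 24 for this mask */
	return FIELD_GET(EXAMPLE_RSS_HFUNC_MASK, rss_cap);
}
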
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 6e9372a18375..74125584260b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -54,7 +54,7 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
54 goto out; 54 goto out;
55 nn = r_vec->nfp_net; 55 nn = r_vec->nfp_net;
56 rx_ring = r_vec->rx_ring; 56 rx_ring = r_vec->rx_ring;
57 if (!netif_running(nn->netdev)) 57 if (!netif_running(nn->dp.netdev))
58 goto out; 58 goto out;
59 59
60 rxd_cnt = rx_ring->cnt; 60 rxd_cnt = rx_ring->cnt;
@@ -64,8 +64,10 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
64 rx_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_rx); 64 rx_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_rx);
65 rx_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx); 65 rx_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx);
66 66
67 seq_printf(file, "RX[%02d]: H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d RX_RD=%d RX_WR=%d\n", 67 seq_printf(file, "RX[%02d,%02d,%02d]: cnt=%d dma=%pad host=%p H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d RX_RD=%d RX_WR=%d\n",
68 rx_ring->idx, rx_ring->rd_p, rx_ring->wr_p, 68 rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx,
69 rx_ring->cnt, &rx_ring->dma, rx_ring->rxds,
70 rx_ring->rd_p, rx_ring->wr_p,
69 fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p); 71 fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p);
70 72
71 for (i = 0; i < rxd_cnt; i++) { 73 for (i = 0; i < rxd_cnt; i++) {
@@ -143,7 +145,7 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
143 if (!r_vec->nfp_net || !tx_ring) 145 if (!r_vec->nfp_net || !tx_ring)
144 goto out; 146 goto out;
145 nn = r_vec->nfp_net; 147 nn = r_vec->nfp_net;
146 if (!netif_running(nn->netdev)) 148 if (!netif_running(nn->dp.netdev))
147 goto out; 149 goto out;
148 150
149 txd_cnt = tx_ring->cnt; 151 txd_cnt = tx_ring->cnt;
@@ -151,8 +153,11 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
151 d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q); 153 d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
152 d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q); 154 d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q);
153 155
154 seq_printf(file, "TX[%02d]: H_RD=%d H_WR=%d D_RD=%d D_WR=%d\n", 156 seq_printf(file, "TX[%02d,%02d%s]: cnt=%d dma=%pad host=%p H_RD=%d H_WR=%d D_RD=%d D_WR=%d\n",
155 tx_ring->idx, tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p); 157 tx_ring->idx, tx_ring->qcidx,
158 tx_ring == r_vec->tx_ring ? "" : "xdp",
159 tx_ring->cnt, &tx_ring->dma, tx_ring->txds,
160 tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);
156 161
157 for (i = 0; i < txd_cnt; i++) { 162 for (i = 0; i < txd_cnt; i++) {
158 txd = &tx_ring->txds[i]; 163 txd = &tx_ring->txds[i];
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 2649f7523c81..ed22a813e579 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -40,6 +40,7 @@
40 * Brad Petrus <brad.petrus@netronome.com> 40 * Brad Petrus <brad.petrus@netronome.com>
41 */ 41 */
42 42
43#include <linux/bitfield.h>
43#include <linux/kernel.h> 44#include <linux/kernel.h>
44#include <linux/netdevice.h> 45#include <linux/netdevice.h>
45#include <linux/etherdevice.h> 46#include <linux/etherdevice.h>
@@ -126,9 +127,9 @@ static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
126}; 127};
127 128
128#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats) 129#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
129#define NN_ET_RVEC_STATS_LEN (nn->num_r_vecs * 3) 130#define NN_ET_RVEC_STATS_LEN (nn->dp.num_r_vecs * 3)
130#define NN_ET_RVEC_GATHER_STATS 7 131#define NN_ET_RVEC_GATHER_STATS 7
131#define NN_ET_QUEUE_STATS_LEN ((nn->num_tx_rings + nn->num_rx_rings) * 2) 132#define NN_ET_QUEUE_STATS_LEN ((nn->dp.num_tx_rings + nn->dp.num_rx_rings) * 2)
132#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \ 133#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \
133 NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN) 134 NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN)
134 135
@@ -179,30 +180,22 @@ static void nfp_net_get_ringparam(struct net_device *netdev,
179 180
180 ring->rx_max_pending = NFP_NET_MAX_RX_DESCS; 181 ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
181 ring->tx_max_pending = NFP_NET_MAX_TX_DESCS; 182 ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
182 ring->rx_pending = nn->rxd_cnt; 183 ring->rx_pending = nn->dp.rxd_cnt;
183 ring->tx_pending = nn->txd_cnt; 184 ring->tx_pending = nn->dp.txd_cnt;
184} 185}
185 186
186static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt) 187static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
187{ 188{
188 struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL; 189 struct nfp_net_dp *dp;
189 struct nfp_net_ring_set rx = { 190
190 .n_rings = nn->num_rx_rings, 191 dp = nfp_net_clone_dp(nn);
191 .mtu = nn->netdev->mtu, 192 if (!dp)
192 .dcnt = rxd_cnt, 193 return -ENOMEM;
193 };
194 struct nfp_net_ring_set tx = {
195 .n_rings = nn->num_tx_rings,
196 .dcnt = txd_cnt,
197 };
198 194
199 if (nn->rxd_cnt != rxd_cnt) 195 dp->rxd_cnt = rxd_cnt;
200 reconfig_rx = &rx; 196 dp->txd_cnt = txd_cnt;
201 if (nn->txd_cnt != txd_cnt)
202 reconfig_tx = &tx;
203 197
204 return nfp_net_ring_reconfig(nn, &nn->xdp_prog, 198 return nfp_net_ring_reconfig(nn, dp);
205 reconfig_rx, reconfig_tx);
206} 199}
207 200
208static int nfp_net_set_ringparam(struct net_device *netdev, 201static int nfp_net_set_ringparam(struct net_device *netdev,
@@ -223,11 +216,11 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
223 txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS) 216 txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
224 return -EINVAL; 217 return -EINVAL;
225 218
226 if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt) 219 if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
227 return 0; 220 return 0;
228 221
229 nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n", 222 nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
230 nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt); 223 nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt);
231 224
232 return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt); 225 return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
233} 226}
@@ -245,7 +238,7 @@ static void nfp_net_get_strings(struct net_device *netdev,
245 memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN); 238 memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN);
246 p += ETH_GSTRING_LEN; 239 p += ETH_GSTRING_LEN;
247 } 240 }
248 for (i = 0; i < nn->num_r_vecs; i++) { 241 for (i = 0; i < nn->dp.num_r_vecs; i++) {
249 sprintf(p, "rvec_%u_rx_pkts", i); 242 sprintf(p, "rvec_%u_rx_pkts", i);
250 p += ETH_GSTRING_LEN; 243 p += ETH_GSTRING_LEN;
251 sprintf(p, "rvec_%u_tx_pkts", i); 244 sprintf(p, "rvec_%u_tx_pkts", i);
@@ -267,13 +260,13 @@ static void nfp_net_get_strings(struct net_device *netdev,
267 p += ETH_GSTRING_LEN; 260 p += ETH_GSTRING_LEN;
268 strncpy(p, "tx_lso", ETH_GSTRING_LEN); 261 strncpy(p, "tx_lso", ETH_GSTRING_LEN);
269 p += ETH_GSTRING_LEN; 262 p += ETH_GSTRING_LEN;
270 for (i = 0; i < nn->num_tx_rings; i++) { 263 for (i = 0; i < nn->dp.num_tx_rings; i++) {
271 sprintf(p, "txq_%u_pkts", i); 264 sprintf(p, "txq_%u_pkts", i);
272 p += ETH_GSTRING_LEN; 265 p += ETH_GSTRING_LEN;
273 sprintf(p, "txq_%u_bytes", i); 266 sprintf(p, "txq_%u_bytes", i);
274 p += ETH_GSTRING_LEN; 267 p += ETH_GSTRING_LEN;
275 } 268 }
276 for (i = 0; i < nn->num_rx_rings; i++) { 269 for (i = 0; i < nn->dp.num_rx_rings; i++) {
277 sprintf(p, "rxq_%u_pkts", i); 270 sprintf(p, "rxq_%u_pkts", i);
278 p += ETH_GSTRING_LEN; 271 p += ETH_GSTRING_LEN;
279 sprintf(p, "rxq_%u_bytes", i); 272 sprintf(p, "rxq_%u_bytes", i);
@@ -306,12 +299,12 @@ static void nfp_net_get_stats(struct net_device *netdev,
306 break; 299 break;
307 300
308 case NFP_NET_DEV_ET_STATS: 301 case NFP_NET_DEV_ET_STATS:
309 io_p = nn->ctrl_bar + nfp_net_et_stats[i].off; 302 io_p = nn->dp.ctrl_bar + nfp_net_et_stats[i].off;
310 data[i] = readq(io_p); 303 data[i] = readq(io_p);
311 break; 304 break;
312 } 305 }
313 } 306 }
314 for (j = 0; j < nn->num_r_vecs; j++) { 307 for (j = 0; j < nn->dp.num_r_vecs; j++) {
315 unsigned int start; 308 unsigned int start;
316 309
317 do { 310 do {
@@ -337,16 +330,16 @@ static void nfp_net_get_stats(struct net_device *netdev,
337 } 330 }
338 for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) 331 for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
339 data[i++] = gathered_stats[j]; 332 data[i++] = gathered_stats[j];
340 for (j = 0; j < nn->num_tx_rings; j++) { 333 for (j = 0; j < nn->dp.num_tx_rings; j++) {
341 io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j); 334 io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
342 data[i++] = readq(io_p); 335 data[i++] = readq(io_p);
343 io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8; 336 io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
344 data[i++] = readq(io_p); 337 data[i++] = readq(io_p);
345 } 338 }
346 for (j = 0; j < nn->num_rx_rings; j++) { 339 for (j = 0; j < nn->dp.num_rx_rings; j++) {
347 io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j); 340 io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
348 data[i++] = readq(io_p); 341 data[i++] = readq(io_p);
349 io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8; 342 io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
350 data[i++] = readq(io_p); 343 data[i++] = readq(io_p);
351 } 344 }
352} 345}
@@ -410,7 +403,7 @@ static int nfp_net_get_rxnfc(struct net_device *netdev,
410 403
411 switch (cmd->cmd) { 404 switch (cmd->cmd) {
412 case ETHTOOL_GRXRINGS: 405 case ETHTOOL_GRXRINGS:
413 cmd->data = nn->num_rx_rings; 406 cmd->data = nn->dp.num_rx_rings;
414 return 0; 407 return 0;
415 case ETHTOOL_GRXFH: 408 case ETHTOOL_GRXFH:
416 return nfp_net_get_rss_hash_opts(nn, cmd); 409 return nfp_net_get_rss_hash_opts(nn, cmd);
@@ -454,13 +447,13 @@ static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
454 return -EINVAL; 447 return -EINVAL;
455 } 448 }
456 449
457 new_rss_cfg |= NFP_NET_CFG_RSS_TOEPLITZ; 450 new_rss_cfg |= FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc);
458 new_rss_cfg |= NFP_NET_CFG_RSS_MASK; 451 new_rss_cfg |= NFP_NET_CFG_RSS_MASK;
459 452
460 if (new_rss_cfg == nn->rss_cfg) 453 if (new_rss_cfg == nn->rss_cfg)
461 return 0; 454 return 0;
462 455
463 writel(new_rss_cfg, nn->ctrl_bar + NFP_NET_CFG_RSS_CTRL); 456 writel(new_rss_cfg, nn->dp.ctrl_bar + NFP_NET_CFG_RSS_CTRL);
464 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS); 457 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
465 if (err) 458 if (err)
466 return err; 459 return err;
@@ -496,7 +489,12 @@ static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
496 489
497static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev) 490static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
498{ 491{
499 return NFP_NET_CFG_RSS_KEY_SZ; 492 struct nfp_net *nn = netdev_priv(netdev);
493
494 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
495 return -EOPNOTSUPP;
496
497 return nfp_net_rss_key_sz(nn);
500} 498}
501 499
502static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, 500static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
@@ -512,9 +510,12 @@ static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
512 for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++) 510 for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
513 indir[i] = nn->rss_itbl[i]; 511 indir[i] = nn->rss_itbl[i];
514 if (key) 512 if (key)
515 memcpy(key, nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ); 513 memcpy(key, nn->rss_key, nfp_net_rss_key_sz(nn));
516 if (hfunc) 514 if (hfunc) {
517 *hfunc = ETH_RSS_HASH_TOP; 515 *hfunc = nn->rss_hfunc;
516 if (*hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
517 *hfunc = ETH_RSS_HASH_UNKNOWN;
518 }
518 519
519 return 0; 520 return 0;
520} 521}
@@ -527,14 +528,14 @@ static int nfp_net_set_rxfh(struct net_device *netdev,
527 int i; 528 int i;
528 529
529 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS) || 530 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
530 !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == ETH_RSS_HASH_TOP)) 531 !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc))
531 return -EOPNOTSUPP; 532 return -EOPNOTSUPP;
532 533
533 if (!key && !indir) 534 if (!key && !indir)
534 return 0; 535 return 0;
535 536
536 if (key) { 537 if (key) {
537 memcpy(nn->rss_key, key, NFP_NET_CFG_RSS_KEY_SZ); 538 memcpy(nn->rss_key, key, nfp_net_rss_key_sz(nn));
538 nfp_net_rss_write_key(nn); 539 nfp_net_rss_write_key(nn);
539 } 540 }
540 if (indir) { 541 if (indir) {
@@ -564,7 +565,7 @@ static void nfp_net_get_regs(struct net_device *netdev,
564 regs->version = nn_readl(nn, NFP_NET_CFG_VERSION); 565 regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);
565 566
566 for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++) 567 for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
567 regs_buf[i] = readl(nn->ctrl_bar + (i * sizeof(u32))); 568 regs_buf[i] = readl(nn->dp.ctrl_bar + (i * sizeof(u32)));
568} 569}
569 570
570static int nfp_net_get_coalesce(struct net_device *netdev, 571static int nfp_net_get_coalesce(struct net_device *netdev,
@@ -736,16 +737,16 @@ static void nfp_net_get_channels(struct net_device *netdev,
736 struct nfp_net *nn = netdev_priv(netdev); 737 struct nfp_net *nn = netdev_priv(netdev);
737 unsigned int num_tx_rings; 738 unsigned int num_tx_rings;
738 739
739 num_tx_rings = nn->num_tx_rings; 740 num_tx_rings = nn->dp.num_tx_rings;
740 if (nn->xdp_prog) 741 if (nn->dp.xdp_prog)
741 num_tx_rings -= nn->num_rx_rings; 742 num_tx_rings -= nn->dp.num_rx_rings;
742 743
743 channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs); 744 channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
744 channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs); 745 channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
745 channel->max_combined = min(channel->max_rx, channel->max_tx); 746 channel->max_combined = min(channel->max_rx, channel->max_tx);
746 channel->max_other = NFP_NET_NON_Q_VECTORS; 747 channel->max_other = NFP_NET_NON_Q_VECTORS;
747 channel->combined_count = min(nn->num_rx_rings, num_tx_rings); 748 channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings);
748 channel->rx_count = nn->num_rx_rings - channel->combined_count; 749 channel->rx_count = nn->dp.num_rx_rings - channel->combined_count;
749 channel->tx_count = num_tx_rings - channel->combined_count; 750 channel->tx_count = num_tx_rings - channel->combined_count;
750 channel->other_count = NFP_NET_NON_Q_VECTORS; 751 channel->other_count = NFP_NET_NON_Q_VECTORS;
751} 752}
@@ -753,29 +754,19 @@ static void nfp_net_get_channels(struct net_device *netdev,
753static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx, 754static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
754 unsigned int total_tx) 755 unsigned int total_tx)
755{ 756{
756 struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL; 757 struct nfp_net_dp *dp;
757 struct nfp_net_ring_set rx = {
758 .n_rings = total_rx,
759 .mtu = nn->netdev->mtu,
760 .dcnt = nn->rxd_cnt,
761 };
762 struct nfp_net_ring_set tx = {
763 .n_rings = total_tx,
764 .dcnt = nn->txd_cnt,
765 };
766 758
767 if (nn->num_rx_rings != total_rx) 759 dp = nfp_net_clone_dp(nn);
768 reconfig_rx = &rx; 760 if (!dp)
769 if (nn->num_stack_tx_rings != total_tx || 761 return -ENOMEM;
770 (nn->xdp_prog && reconfig_rx))
771 reconfig_tx = &tx;
772 762
773 /* nfp_net_check_config() will catch tx.n_rings > nn->max_tx_rings */ 763 dp->num_rx_rings = total_rx;
774 if (nn->xdp_prog) 764 dp->num_tx_rings = total_tx;
775 tx.n_rings += total_rx; 765 /* nfp_net_check_config() will catch num_tx_rings > nn->max_tx_rings */
766 if (dp->xdp_prog)
767 dp->num_tx_rings += total_rx;
776 768
777 return nfp_net_ring_reconfig(nn, &nn->xdp_prog, 769 return nfp_net_ring_reconfig(nn, dp);
778 reconfig_rx, reconfig_tx);
779} 770}
780 771
781static int nfp_net_set_channels(struct net_device *netdev, 772static int nfp_net_set_channels(struct net_device *netdev,
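
Annotation (not part of the patch): both ethtool paths above now follow the same pattern — clone the live datapath, edit the copy, and hand it to nfp_net_ring_reconfig(), which applies or discards it. A hedged sketch of that flow using a hypothetical MTU change as the example; the function name is invented, and how the copy is disposed of on failure is an assumption:

#include "nfp_net.h"	/* driver-internal header for struct nfp_net / nfp_net_dp */

static int example_change_mtu(struct nfp_net *nn, unsigned int new_mtu)
{
	struct nfp_net_dp *dp;

	dp = nfp_net_clone_dp(nn);	/* snapshot of the current settings */
	if (!dp)
		return -ENOMEM;

	dp->mtu = new_mtu;		/* touch only what changes */

	/* Validates the new configuration, allocates fresh rings if needed
	 * and applies the copy; the copy is consumed either way (assumed).
	 */
	return nfp_net_ring_reconfig(nn, dp);
}
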
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 3afcdc11480c..2025cb7c6d90 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -130,7 +130,7 @@ err_area:
130} 130}
131 131
132static void 132static void
133nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp, 133nfp_net_get_mac_addr_hwinfo(struct nfp_net_dp *dp, struct nfp_cpp *cpp,
134 unsigned int id) 134 unsigned int id)
135{ 135{
136 u8 mac_addr[ETH_ALEN]; 136 u8 mac_addr[ETH_ALEN];
@@ -141,23 +141,22 @@ nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp,
141 141
142 mac_str = nfp_hwinfo_lookup(cpp, name); 142 mac_str = nfp_hwinfo_lookup(cpp, name);
143 if (!mac_str) { 143 if (!mac_str) {
144 dev_warn(&nn->pdev->dev, 144 dev_warn(dp->dev, "Can't lookup MAC address. Generate\n");
145 "Can't lookup MAC address. Generate\n"); 145 eth_hw_addr_random(dp->netdev);
146 eth_hw_addr_random(nn->netdev);
147 return; 146 return;
148 } 147 }
149 148
150 if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx", 149 if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
151 &mac_addr[0], &mac_addr[1], &mac_addr[2], 150 &mac_addr[0], &mac_addr[1], &mac_addr[2],
152 &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) { 151 &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
153 dev_warn(&nn->pdev->dev, 152 dev_warn(dp->dev,
154 "Can't parse MAC address (%s). Generate.\n", mac_str); 153 "Can't parse MAC address (%s). Generate.\n", mac_str);
155 eth_hw_addr_random(nn->netdev); 154 eth_hw_addr_random(dp->netdev);
156 return; 155 return;
157 } 156 }
158 157
159 ether_addr_copy(nn->netdev->dev_addr, mac_addr); 158 ether_addr_copy(dp->netdev->dev_addr, mac_addr);
160 ether_addr_copy(nn->netdev->perm_addr, mac_addr); 159 ether_addr_copy(dp->netdev->perm_addr, mac_addr);
161} 160}
162 161
163/** 162/**
@@ -178,12 +177,14 @@ nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_pf *pf, unsigned int id)
178 if (pf->eth_tbl->ports[i].eth_index == id) { 177 if (pf->eth_tbl->ports[i].eth_index == id) {
179 const u8 *mac_addr = pf->eth_tbl->ports[i].mac_addr; 178 const u8 *mac_addr = pf->eth_tbl->ports[i].mac_addr;
180 179
181 ether_addr_copy(nn->netdev->dev_addr, mac_addr); 180 nn->eth_port = &pf->eth_tbl->ports[i];
182 ether_addr_copy(nn->netdev->perm_addr, mac_addr); 181
182 ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
183 ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
183 return; 184 return;
184 } 185 }
185 186
186 nfp_net_get_mac_addr_hwinfo(nn, pf->cpp, id); 187 nfp_net_get_mac_addr_hwinfo(&nn->dp, pf->cpp, id);
187} 188}
188 189
189static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf) 190static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
@@ -305,10 +306,10 @@ nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
305 306
306 nn->cpp = pf->cpp; 307 nn->cpp = pf->cpp;
307 nn->fw_ver = *fw_ver; 308 nn->fw_ver = *fw_ver;
308 nn->ctrl_bar = ctrl_bar; 309 nn->dp.ctrl_bar = ctrl_bar;
309 nn->tx_bar = tx_bar; 310 nn->tx_bar = tx_bar;
310 nn->rx_bar = rx_bar; 311 nn->rx_bar = rx_bar;
311 nn->is_vf = 0; 312 nn->dp.is_vf = 0;
312 nn->stride_rx = stride; 313 nn->stride_rx = stride;
313 nn->stride_tx = stride; 314 nn->stride_tx = stride;
314 315
@@ -330,7 +331,7 @@ nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
330 */ 331 */
331 nn->me_freq_mhz = 1200; 332 nn->me_freq_mhz = 1200;
332 333
333 err = nfp_net_netdev_init(nn->netdev); 334 err = nfp_net_netdev_init(nn->dp.netdev);
334 if (err) 335 if (err)
335 return err; 336 return err;
336 337
@@ -399,7 +400,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
399 /* Get MSI-X vectors */ 400 /* Get MSI-X vectors */
400 wanted_irqs = 0; 401 wanted_irqs = 0;
401 list_for_each_entry(nn, &pf->ports, port_list) 402 list_for_each_entry(nn, &pf->ports, port_list)
402 wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->num_r_vecs; 403 wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
403 pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries), 404 pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
404 GFP_KERNEL); 405 GFP_KERNEL);
405 if (!pf->irq_entries) { 406 if (!pf->irq_entries) {
@@ -444,7 +445,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
444err_prev_deinit: 445err_prev_deinit:
445 list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) { 446 list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
446 nfp_net_debugfs_dir_clean(&nn->debugfs_dir); 447 nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
447 nfp_net_netdev_clean(nn->netdev); 448 nfp_net_netdev_clean(nn->dp.netdev);
448 } 449 }
449 nfp_net_irqs_disable(pf->pdev); 450 nfp_net_irqs_disable(pf->pdev);
450err_vec_free: 451err_vec_free:
@@ -570,7 +571,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
570 list_for_each_entry(nn, &pf->ports, port_list) { 571 list_for_each_entry(nn, &pf->ports, port_list) {
571 nfp_net_debugfs_dir_clean(&nn->debugfs_dir); 572 nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
572 573
573 nfp_net_netdev_clean(nn->netdev); 574 nfp_net_netdev_clean(nn->dp.netdev);
574 } 575 }
575 576
576 nfp_net_pf_free_netdevs(pf); 577 nfp_net_pf_free_netdevs(pf);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
index 18a851eb3508..b5b6f69d1e0f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
@@ -58,7 +58,7 @@ void nfp_net_filter_stats_timer(unsigned long data)
58 58
59 spin_lock_bh(&nn->rx_filter_lock); 59 spin_lock_bh(&nn->rx_filter_lock);
60 60
61 if (nn->ctrl & NFP_NET_CFG_CTRL_BPF) 61 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
62 mod_timer(&nn->rx_filter_stats_timer, 62 mod_timer(&nn->rx_filter_stats_timer,
63 jiffies + NFP_NET_STAT_POLL_IVL); 63 jiffies + NFP_NET_STAT_POLL_IVL);
64 64
@@ -132,7 +132,7 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
132 return NN_ACT_TC_DROP; 132 return NN_ACT_TC_DROP;
133 133
134 if (is_tcf_mirred_egress_redirect(a) && 134 if (is_tcf_mirred_egress_redirect(a) &&
135 tcf_mirred_ifindex(a) == nn->netdev->ifindex) 135 tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex)
136 return NN_ACT_TC_REDIR; 136 return NN_ACT_TC_REDIR;
137 } 137 }
138 138
@@ -160,7 +160,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
160 act = ret; 160 act = ret;
161 161
162 max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; 162 max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
163 if (max_mtu < nn->netdev->mtu) { 163 if (max_mtu < nn->dp.netdev->mtu) {
164 nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n"); 164 nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
165 return -ENOTSUPP; 165 return -ENOTSUPP;
166 } 166 }
@@ -168,8 +168,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
168 start_off = nn_readw(nn, NFP_NET_CFG_BPF_START); 168 start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
169 done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE); 169 done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
170 170
171 *code = dma_zalloc_coherent(&nn->pdev->dev, code_sz, dma_addr, 171 *code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
172 GFP_KERNEL);
173 if (!*code) 172 if (!*code)
174 return -ENOMEM; 173 return -ENOMEM;
175 174
@@ -181,7 +180,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
181 return 0; 180 return 0;
182 181
183out: 182out:
184 dma_free_coherent(&nn->pdev->dev, code_sz, *code, *dma_addr); 183 dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr);
185 return ret; 184 return ret;
186} 185}
187 186
@@ -194,7 +193,7 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
194 u64 bpf_addr = dma_addr; 193 u64 bpf_addr = dma_addr;
195 int err; 194 int err;
196 195
197 nn->bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW); 196 nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
198 197
199 if (dense_mode) 198 if (dense_mode)
200 bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX; 199 bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;
@@ -208,13 +207,13 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
208 nn_err(nn, "FW command error while loading BPF: %d\n", err); 207 nn_err(nn, "FW command error while loading BPF: %d\n", err);
209 208
210 /* Enable passing packets through BPF function */ 209 /* Enable passing packets through BPF function */
211 nn->ctrl |= NFP_NET_CFG_CTRL_BPF; 210 nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
212 nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl); 211 nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
213 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); 212 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
214 if (err) 213 if (err)
215 nn_err(nn, "FW command error while enabling BPF: %d\n", err); 214 nn_err(nn, "FW command error while enabling BPF: %d\n", err);
216 215
217 dma_free_coherent(&nn->pdev->dev, code_sz, code, dma_addr); 216 dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);
218 217
219 nfp_net_bpf_stats_reset(nn); 218 nfp_net_bpf_stats_reset(nn);
220 mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL); 219 mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL);
@@ -222,16 +221,16 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
222 221
223static int nfp_net_bpf_stop(struct nfp_net *nn) 222static int nfp_net_bpf_stop(struct nfp_net *nn)
224{ 223{
225 if (!(nn->ctrl & NFP_NET_CFG_CTRL_BPF)) 224 if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
226 return 0; 225 return 0;
227 226
228 spin_lock_bh(&nn->rx_filter_lock); 227 spin_lock_bh(&nn->rx_filter_lock);
229 nn->ctrl &= ~NFP_NET_CFG_CTRL_BPF; 228 nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
230 spin_unlock_bh(&nn->rx_filter_lock); 229 spin_unlock_bh(&nn->rx_filter_lock);
231 nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl); 230 nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
232 231
233 del_timer_sync(&nn->rx_filter_stats_timer); 232 del_timer_sync(&nn->rx_filter_stats_timer);
234 nn->bpf_offload_skip_sw = 0; 233 nn->dp.bpf_offload_skip_sw = 0;
235 234
236 return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); 235 return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
237} 236}
@@ -255,7 +254,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
255 * frames which didn't have BPF applied in the hardware should 254 * frames which didn't have BPF applied in the hardware should
256 * be fine if software fallback is available, though. 255 * be fine if software fallback is available, though.
257 */ 256 */
258 if (nn->bpf_offload_skip_sw) 257 if (nn->dp.bpf_offload_skip_sw)
259 return -EBUSY; 258 return -EBUSY;
260 259
261 err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code, 260 err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
@@ -270,7 +269,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
270 return 0; 269 return 0;
271 270
272 case TC_CLSBPF_ADD: 271 case TC_CLSBPF_ADD:
273 if (nn->ctrl & NFP_NET_CFG_CTRL_BPF) 272 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
274 return -EBUSY; 273 return -EBUSY;
275 274
276 err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code, 275 err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 39407f7cc586..86e61be6f35c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -84,12 +84,12 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
84 put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]); 84 put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]);
85 85
86 if (!is_valid_ether_addr(mac_addr)) { 86 if (!is_valid_ether_addr(mac_addr)) {
87 eth_hw_addr_random(nn->netdev); 87 eth_hw_addr_random(nn->dp.netdev);
88 return; 88 return;
89 } 89 }
90 90
91 ether_addr_copy(nn->netdev->dev_addr, mac_addr); 91 ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
92 ether_addr_copy(nn->netdev->perm_addr, mac_addr); 92 ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
93} 93}
94 94
95static int nfp_netvf_pci_probe(struct pci_dev *pdev, 95static int nfp_netvf_pci_probe(struct pci_dev *pdev,
@@ -210,8 +210,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
210 vf->nn = nn; 210 vf->nn = nn;
211 211
212 nn->fw_ver = fw_ver; 212 nn->fw_ver = fw_ver;
213 nn->ctrl_bar = ctrl_bar; 213 nn->dp.ctrl_bar = ctrl_bar;
214 nn->is_vf = 1; 214 nn->dp.is_vf = 1;
215 nn->stride_tx = stride; 215 nn->stride_tx = stride;
216 nn->stride_rx = stride; 216 nn->stride_rx = stride;
217 217
@@ -268,7 +268,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
268 268
269 num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries, 269 num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
270 NFP_NET_MIN_PORT_IRQS, 270 NFP_NET_MIN_PORT_IRQS,
271 NFP_NET_NON_Q_VECTORS + nn->num_r_vecs); 271 NFP_NET_NON_Q_VECTORS +
272 nn->dp.num_r_vecs);
272 if (!num_irqs) { 273 if (!num_irqs) {
273 nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n"); 274 nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
274 err = -EIO; 275 err = -EIO;
@@ -282,7 +283,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
282 */ 283 */
283 nn->me_freq_mhz = 1200; 284 nn->me_freq_mhz = 1200;
284 285
285 err = nfp_net_netdev_init(nn->netdev); 286 err = nfp_net_netdev_init(nn->dp.netdev);
286 if (err) 287 if (err)
287 goto err_irqs_disable; 288 goto err_irqs_disable;
288 289
@@ -327,7 +328,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev)
327 nfp_net_debugfs_dir_clean(&nn->debugfs_dir); 328 nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
328 nfp_net_debugfs_dir_clean(&vf->ddir); 329 nfp_net_debugfs_dir_clean(&vf->ddir);
329 330
330 nfp_net_netdev_clean(nn->netdev); 331 nfp_net_netdev_clean(nn->dp.netdev);
331 332
332 nfp_net_irqs_disable(pdev); 333 nfp_net_irqs_disable(pdev);
333 334
@@ -337,7 +338,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev)
337 } else { 338 } else {
338 iounmap(vf->q_bar); 339 iounmap(vf->q_bar);
339 } 340 }
340 iounmap(nn->ctrl_bar); 341 iounmap(nn->dp.ctrl_bar);
341 342
342 nfp_net_netdev_free(nn); 343 nfp_net_netdev_free(nn);
343 344
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index 1ece1f8ae4b3..38bd80077e33 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -134,9 +134,32 @@ nfp_eth_port_translate(const struct eth_table_entry *src, unsigned int index,
134 134
135 nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr); 135 nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr);
136 136
137 snprintf(dst->label, sizeof(dst->label) - 1, "%llu.%llu", 137 dst->label_port = FIELD_GET(NSP_ETH_PORT_PHYLABEL, port);
138 FIELD_GET(NSP_ETH_PORT_PHYLABEL, port), 138 dst->label_subport = FIELD_GET(NSP_ETH_PORT_LABEL, port);
139 FIELD_GET(NSP_ETH_PORT_LABEL, port)); 139}
140
141static void
142nfp_eth_mark_split_ports(struct nfp_cpp *cpp, struct nfp_eth_table *table)
143{
144 unsigned int i, j;
145
146 for (i = 0; i < table->count; i++)
147 for (j = 0; j < table->count; j++) {
148 if (i == j)
149 continue;
150 if (table->ports[i].label_port !=
151 table->ports[j].label_port)
152 continue;
153 if (table->ports[i].label_subport ==
154 table->ports[j].label_subport)
155 nfp_warn(cpp,
156 "Port %d subport %d is a duplicate\n",
157 table->ports[i].label_port,
158 table->ports[i].label_subport);
159
160 table->ports[i].is_split = true;
161 break;
162 }
140} 163}
141 164
142/** 165/**
@@ -168,8 +191,7 @@ __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp)
168{ 191{
169 struct eth_table_entry *entries; 192 struct eth_table_entry *entries;
170 struct nfp_eth_table *table; 193 struct nfp_eth_table *table;
171 unsigned int cnt; 194 int i, j, ret, cnt = 0;
172 int i, j, ret;
173 195
174 entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL); 196 entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL);
175 if (!entries) 197 if (!entries)
@@ -178,24 +200,27 @@ __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp)
178 ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); 200 ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
179 if (ret < 0) { 201 if (ret < 0) {
180 nfp_err(cpp, "reading port table failed %d\n", ret); 202 nfp_err(cpp, "reading port table failed %d\n", ret);
181 kfree(entries); 203 goto err;
182 return NULL;
183 } 204 }
184 205
185 /* Some versions of flash will give us 0 instead of port count */ 206 for (i = 0; i < NSP_ETH_MAX_COUNT; i++)
186 cnt = ret; 207 if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
187 if (!cnt) { 208 cnt++;
188 for (i = 0; i < NSP_ETH_MAX_COUNT; i++) 209
189 if (entries[i].port & NSP_ETH_PORT_LANES_MASK) 210 /* Some versions of flash will give us 0 instead of port count.
190 cnt++; 211 * For those that give a port count, verify it against the value
212 * calculated above.
213 */
214 if (ret && ret != cnt) {
215 nfp_err(cpp, "table entry count reported (%d) does not match entries present (%d)\n",
216 ret, cnt);
217 goto err;
191 } 218 }
192 219
193 table = kzalloc(sizeof(*table) + 220 table = kzalloc(sizeof(*table) +
194 sizeof(struct nfp_eth_table_port) * cnt, GFP_KERNEL); 221 sizeof(struct nfp_eth_table_port) * cnt, GFP_KERNEL);
195 if (!table) { 222 if (!table)
196 kfree(entries); 223 goto err;
197 return NULL;
198 }
199 224
200 table->count = cnt; 225 table->count = cnt;
201 for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++) 226 for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++)
@@ -203,9 +228,15 @@ __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp)
203 nfp_eth_port_translate(&entries[i], i, 228 nfp_eth_port_translate(&entries[i], i,
204 &table->ports[j++]); 229 &table->ports[j++]);
205 230
231 nfp_eth_mark_split_ports(cpp, table);
232
206 kfree(entries); 233 kfree(entries);
207 234
208 return table; 235 return table;
236
237err:
238 kfree(entries);
239 return NULL;
209} 240}
210 241
211/** 242/**
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h
index edf703d319c8..325e841ca90a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h
@@ -49,10 +49,13 @@
49 * @lanes: number of channels 49 * @lanes: number of channels
50 * @speed: interface speed (in Mbps) 50 * @speed: interface speed (in Mbps)
51 * @mac_addr: interface MAC address 51 * @mac_addr: interface MAC address
52 * @label: interface id string 52 * @label_port: port id
53 * @label_subport: id of interface within port (for split ports)
53 * @enabled: is enabled? 54 * @enabled: is enabled?
54 * @tx_enabled: is TX enabled? 55 * @tx_enabled: is TX enabled?
55 * @rx_enabled: is RX enabled? 56 * @rx_enabled: is RX enabled?
57 *
58 * @is_split: is interface part of a split port
56 */ 59 */
57struct nfp_eth_table { 60struct nfp_eth_table {
58 unsigned int count; 61 unsigned int count;
@@ -65,14 +68,22 @@ struct nfp_eth_table {
65 unsigned int speed; 68 unsigned int speed;
66 69
67 u8 mac_addr[ETH_ALEN]; 70 u8 mac_addr[ETH_ALEN];
68 char label[8]; 71
72 u8 label_port;
73 u8 label_subport;
69 74
70 bool enabled; 75 bool enabled;
71 bool tx_enabled; 76 bool tx_enabled;
72 bool rx_enabled; 77 bool rx_enabled;
78
79 /* Computed fields */
80 bool is_split;
73 } ports[0]; 81 } ports[0];
74}; 82};
75 83
84struct nfp_cpp;
85struct nfp_nsp;
86
76struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp); 87struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp);
77struct nfp_eth_table * 88struct nfp_eth_table *
78__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp); 89__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp);
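
Annotation (not part of the patch): the char label[8] string is removed in favour of the two numeric fields, so a consumer that still wants the old "port.subport" form, e.g. for a log message, rebuilds it itself. A self-contained sketch under that assumption; the struct and function names are illustrative only:

#include <stdio.h>

struct example_eth_port {
	unsigned char label_port;
	unsigned char label_subport;
	int is_split;
};

static void example_print_label(const struct example_eth_port *p)
{
	char label[8];

	/* Same "%port.%subport" formatting the old translate helper produced. */
	snprintf(label, sizeof(label), "%u.%u",
		 (unsigned int)p->label_port, (unsigned int)p->label_subport);
	printf("%s%s\n", label, p->is_split ? " (split)" : "");
}

int main(void)
{
	struct example_eth_port p = { .label_port = 1, .label_subport = 0 };

	example_print_label(&p);	/* prints "1.0" */
	return 0;
}
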
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 7b43a3b4abdc..3dd973475125 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1375,13 +1375,8 @@ netxen_receive_peg_ready(struct netxen_adapter *adapter)
1375 1375
1376 } while (--retries); 1376 } while (--retries);
1377 1377
1378 if (!retries) { 1378 pr_err("Receive Peg initialization not complete, state: 0x%x.\n", val);
1379 printk(KERN_ERR "Receive Peg initialization not " 1379 return -EIO;
1380 "complete, state: 0x%x.\n", val);
1381 return -EIO;
1382 }
1383
1384 return 0;
1385} 1380}
1386 1381
1387int netxen_init_firmware(struct netxen_adapter *adapter) 1382int netxen_init_firmware(struct netxen_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 00c17fa6545b..ca30a27df035 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -51,7 +51,7 @@
51#include "qed_hsi.h" 51#include "qed_hsi.h"
52 52
53extern const struct qed_common_ops qed_common_ops_pass; 53extern const struct qed_common_ops qed_common_ops_pass;
54#define DRV_MODULE_VERSION "8.10.10.20" 54#define DRV_MODULE_VERSION "8.10.10.21"
55 55
56#define MAX_HWFNS_PER_DEVICE (4) 56#define MAX_HWFNS_PER_DEVICE (4)
57#define NAME_SIZE 16 57#define NAME_SIZE 16
@@ -219,7 +219,9 @@ enum QED_PORT_MODE {
219 QED_PORT_MODE_DE_4X20G, 219 QED_PORT_MODE_DE_4X20G,
220 QED_PORT_MODE_DE_1X40G, 220 QED_PORT_MODE_DE_1X40G,
221 QED_PORT_MODE_DE_2X25G, 221 QED_PORT_MODE_DE_2X25G,
222 QED_PORT_MODE_DE_1X25G 222 QED_PORT_MODE_DE_1X25G,
223 QED_PORT_MODE_DE_4X25G,
224 QED_PORT_MODE_DE_2X10G,
223}; 225};
224 226
225enum qed_dev_cap { 227enum qed_dev_cap {
@@ -364,7 +366,8 @@ struct qed_hwfn {
364#define IS_LEAD_HWFN(edev) (!((edev)->my_id)) 366#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
365 u8 rel_pf_id; /* Relative to engine*/ 367 u8 rel_pf_id; /* Relative to engine*/
366 u8 abs_pf_id; 368 u8 abs_pf_id;
367#define QED_PATH_ID(_p_hwfn) ((_p_hwfn)->abs_pf_id & 1) 369#define QED_PATH_ID(_p_hwfn) \
370 (QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
368 u8 port_id; 371 u8 port_id;
369 bool b_active; 372 bool b_active;
370 373
@@ -523,9 +526,7 @@ struct qed_dev {
523 u8 dp_level; 526 u8 dp_level;
524 char name[NAME_SIZE]; 527 char name[NAME_SIZE];
525 528
526 u8 type; 529 enum qed_dev_type type;
527#define QED_DEV_TYPE_BB (0 << 0)
528#define QED_DEV_TYPE_AH BIT(0)
529/* Translate type/revision combo into the proper conditions */ 530/* Translate type/revision combo into the proper conditions */
530#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB) 531#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB)
531#define QED_IS_BB_A0(dev) (QED_IS_BB(dev) && \ 532#define QED_IS_BB_A0(dev) (QED_IS_BB(dev) && \
@@ -540,6 +541,9 @@ struct qed_dev {
540 541
541 u16 vendor_id; 542 u16 vendor_id;
542 u16 device_id; 543 u16 device_id;
544#define QED_DEV_ID_MASK 0xff00
545#define QED_DEV_ID_MASK_BB 0x1600
546#define QED_DEV_ID_MASK_AH 0x8000
543 547
544 u16 chip_num; 548 u16 chip_num;
545#define CHIP_NUM_MASK 0xffff 549#define CHIP_NUM_MASK 0xffff
@@ -654,10 +658,16 @@ struct qed_dev {
654 u32 rdma_max_srq_sge; 658 u32 rdma_max_srq_sge;
655}; 659};
656 660
657#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB 661#define NUM_OF_VFS(dev) (QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
658#define NUM_OF_L2_QUEUES(dev) MAX_NUM_L2_QUEUES_BB 662 : MAX_NUM_VFS_K2)
659#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB 663#define NUM_OF_L2_QUEUES(dev) (QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
660#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB 664 : MAX_NUM_L2_QUEUES_K2)
665#define NUM_OF_PORTS(dev) (QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
666 : MAX_NUM_PORTS_K2)
667#define NUM_OF_SBS(dev) (QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
668 : MAX_SB_PER_PATH_K2)
669#define NUM_OF_ENG_PFS(dev) (QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
670 : MAX_NUM_PFS_K2)
661 671
662/** 672/**
663 * @brief qed_concrete_to_sw_fid - get the sw function id from 673 * @brief qed_concrete_to_sw_fid - get the sw function id from
@@ -694,6 +704,7 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
694 704
695void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); 705void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
696#define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) 706#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
707int qed_device_num_engines(struct qed_dev *cdev);
697 708
698/* Other Linux specific common definitions */ 709/* Other Linux specific common definitions */
699#define DP_NAME(cdev) ((cdev)->name) 710#define DP_NAME(cdev) ((cdev)->name)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 7e3a6fed3da6..9ff62cc5723d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -71,8 +71,7 @@
71#define TM_ALIGN BIT(TM_SHIFT) 71#define TM_ALIGN BIT(TM_SHIFT)
72#define TM_ELEM_SIZE 4 72#define TM_ELEM_SIZE 4
73 73
74/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */ 74#define ILT_DEFAULT_HW_P_SIZE 4
75#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
76 75
77#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) 76#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
78#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET 77#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -1127,7 +1126,7 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
1127 clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT); 1126 clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
1128 clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT); 1127 clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
1129 clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE); 1128 clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
1130 /* default ILT page size for all clients is 32K */ 1129 /* default ILT page size for all clients is 64K */
1131 for (i = 0; i < ILT_CLI_MAX; i++) 1130 for (i = 0; i < ILT_CLI_MAX; i++)
1132 p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE; 1131 p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
1133 1132
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 68f19ca57f96..483241b4b05d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -17,7 +17,6 @@
17 17
18/* Chip IDs enum */ 18/* Chip IDs enum */
19enum chip_ids { 19enum chip_ids {
20 CHIP_RESERVED,
21 CHIP_BB_B0, 20 CHIP_BB_B0,
22 CHIP_K2, 21 CHIP_K2,
23 MAX_CHIP_IDS 22 MAX_CHIP_IDS
@@ -40,6 +39,7 @@ enum mem_groups {
40 MEM_GROUP_BTB_RAM, 39 MEM_GROUP_BTB_RAM,
41 MEM_GROUP_RDIF_CTX, 40 MEM_GROUP_RDIF_CTX,
42 MEM_GROUP_TDIF_CTX, 41 MEM_GROUP_TDIF_CTX,
42 MEM_GROUP_CFC_MEM,
43 MEM_GROUP_CONN_CFC_MEM, 43 MEM_GROUP_CONN_CFC_MEM,
44 MEM_GROUP_TASK_CFC_MEM, 44 MEM_GROUP_TASK_CFC_MEM,
45 MEM_GROUP_CAU_PI, 45 MEM_GROUP_CAU_PI,
@@ -72,6 +72,7 @@ static const char * const s_mem_group_names[] = {
72 "BTB_RAM", 72 "BTB_RAM",
73 "RDIF_CTX", 73 "RDIF_CTX",
74 "TDIF_CTX", 74 "TDIF_CTX",
75 "CFC_MEM",
75 "CONN_CFC_MEM", 76 "CONN_CFC_MEM",
76 "TASK_CFC_MEM", 77 "TASK_CFC_MEM",
77 "CAU_PI", 78 "CAU_PI",
@@ -185,13 +186,16 @@ struct dbg_array {
185 u32 size_in_dwords; 186 u32 size_in_dwords;
186}; 187};
187 188
189struct chip_platform_defs {
190 u8 num_ports;
191 u8 num_pfs;
192 u8 num_vfs;
193};
194
188/* Chip constant definitions */ 195/* Chip constant definitions */
189struct chip_defs { 196struct chip_defs {
190 const char *name; 197 const char *name;
191 struct { 198 struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
192 u8 num_ports;
193 u8 num_pfs;
194 } per_platform[MAX_PLATFORM_IDS];
195}; 199};
196 200
197/* Platform constant definitions */ 201/* Platform constant definitions */
@@ -405,22 +409,23 @@ struct phy_defs {
405/***************************** Constant Arrays *******************************/ 409/***************************** Constant Arrays *******************************/
406 410
407/* Debug arrays */ 411/* Debug arrays */
408static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} }; 412static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
409 413
410/* Chip constant definitions array */ 414/* Chip constant definitions array */
411static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { 415static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
412 { "reserved", { {0, 0}, {0, 0}, {0, 0}, {0, 0} } },
413 { "bb_b0", 416 { "bb_b0",
414 { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB}, {0, 0}, {0, 0}, {0, 0} } }, 417 { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, {0, 0, 0},
415 { "k2", { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2}, {0, 0}, {0, 0}, {0, 0} } } 418 {0, 0, 0}, {0, 0, 0} } },
419 { "k2",
420 { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, {0, 0, 0},
421 {0, 0, 0}, {0, 0, 0} } }
416}; 422};
417 423
418/* Storm constant definitions array */ 424/* Storm constant definitions array */
419static struct storm_defs s_storm_defs[] = { 425static struct storm_defs s_storm_defs[] = {
420 /* Tstorm */ 426 /* Tstorm */
421 {'T', BLOCK_TSEM, 427 {'T', BLOCK_TSEM,
422 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, 428 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
423 DBG_BUS_CLIENT_RBCT}, true,
424 TSEM_REG_FAST_MEMORY, 429 TSEM_REG_FAST_MEMORY,
425 TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE, 430 TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
426 TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG, 431 TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
@@ -432,8 +437,7 @@ static struct storm_defs s_storm_defs[] = {
432 4, TCM_REG_SM_TASK_CTX}, 437 4, TCM_REG_SM_TASK_CTX},
433 /* Mstorm */ 438 /* Mstorm */
434 {'M', BLOCK_MSEM, 439 {'M', BLOCK_MSEM,
435 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, 440 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
436 DBG_BUS_CLIENT_RBCM}, false,
437 MSEM_REG_FAST_MEMORY, 441 MSEM_REG_FAST_MEMORY,
438 MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE, 442 MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
439 MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG, 443 MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
@@ -445,8 +449,7 @@ static struct storm_defs s_storm_defs[] = {
445 7, MCM_REG_SM_TASK_CTX}, 449 7, MCM_REG_SM_TASK_CTX},
446 /* Ustorm */ 450 /* Ustorm */
447 {'U', BLOCK_USEM, 451 {'U', BLOCK_USEM,
448 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, 452 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
449 DBG_BUS_CLIENT_RBCU}, false,
450 USEM_REG_FAST_MEMORY, 453 USEM_REG_FAST_MEMORY,
451 USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE, 454 USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
452 USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG, 455 USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
@@ -458,8 +461,7 @@ static struct storm_defs s_storm_defs[] = {
458 3, UCM_REG_SM_TASK_CTX}, 461 3, UCM_REG_SM_TASK_CTX},
459 /* Xstorm */ 462 /* Xstorm */
460 {'X', BLOCK_XSEM, 463 {'X', BLOCK_XSEM,
461 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, 464 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
462 DBG_BUS_CLIENT_RBCX}, false,
463 XSEM_REG_FAST_MEMORY, 465 XSEM_REG_FAST_MEMORY,
464 XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE, 466 XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
465 XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG, 467 XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
@@ -471,8 +473,7 @@ static struct storm_defs s_storm_defs[] = {
471 0, 0}, 473 0, 0},
472 /* Ystorm */ 474 /* Ystorm */
473 {'Y', BLOCK_YSEM, 475 {'Y', BLOCK_YSEM,
474 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, 476 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
475 DBG_BUS_CLIENT_RBCY}, false,
476 YSEM_REG_FAST_MEMORY, 477 YSEM_REG_FAST_MEMORY,
477 YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE, 478 YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
478 YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG, 479 YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
@@ -484,8 +485,7 @@ static struct storm_defs s_storm_defs[] = {
484 12, YCM_REG_SM_TASK_CTX}, 485 12, YCM_REG_SM_TASK_CTX},
485 /* Pstorm */ 486 /* Pstorm */
486 {'P', BLOCK_PSEM, 487 {'P', BLOCK_PSEM,
487 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, 488 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
488 DBG_BUS_CLIENT_RBCS}, true,
489 PSEM_REG_FAST_MEMORY, 489 PSEM_REG_FAST_MEMORY,
490 PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE, 490 PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
491 PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG, 491 PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
@@ -499,8 +499,9 @@ static struct storm_defs s_storm_defs[] = {
499 499
500/* Block definitions array */ 500/* Block definitions array */
501static struct block_defs block_grc_defs = { 501static struct block_defs block_grc_defs = {
502 "grc", {true, true, true}, false, 0, 502 "grc",
503 {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN}, 503 {true, true}, false, 0,
504 {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
504 GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE, 505 GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
505 GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID, 506 GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
506 GRC_REG_DBG_FORCE_FRAME, 507 GRC_REG_DBG_FORCE_FRAME,
@@ -508,29 +509,30 @@ static struct block_defs block_grc_defs = {
508}; 509};
509 510
510static struct block_defs block_miscs_defs = { 511static struct block_defs block_miscs_defs = {
511 "miscs", {false, false, false}, false, 0, 512 "miscs", {false, false}, false, 0,
512 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 513 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
513 0, 0, 0, 0, 0, 514 0, 0, 0, 0, 0,
514 false, false, MAX_DBG_RESET_REGS, 0 515 false, false, MAX_DBG_RESET_REGS, 0
515}; 516};
516 517
517static struct block_defs block_misc_defs = { 518static struct block_defs block_misc_defs = {
518 "misc", {false, false, false}, false, 0, 519 "misc", {false, false}, false, 0,
519 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 520 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
520 0, 0, 0, 0, 0, 521 0, 0, 0, 0, 0,
521 false, false, MAX_DBG_RESET_REGS, 0 522 false, false, MAX_DBG_RESET_REGS, 0
522}; 523};
523 524
524static struct block_defs block_dbu_defs = { 525static struct block_defs block_dbu_defs = {
525 "dbu", {false, false, false}, false, 0, 526 "dbu", {false, false}, false, 0,
526 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 527 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
527 0, 0, 0, 0, 0, 528 0, 0, 0, 0, 0,
528 false, false, MAX_DBG_RESET_REGS, 0 529 false, false, MAX_DBG_RESET_REGS, 0
529}; 530};
530 531
531static struct block_defs block_pglue_b_defs = { 532static struct block_defs block_pglue_b_defs = {
532 "pglue_b", {true, true, true}, false, 0, 533 "pglue_b",
533 {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH}, 534 {true, true}, false, 0,
535 {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
534 PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE, 536 PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
535 PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID, 537 PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
536 PGLUE_B_REG_DBG_FORCE_FRAME, 538 PGLUE_B_REG_DBG_FORCE_FRAME,
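The block initializers above and below all follow one field order: name, per-chip debug-bus presence (now two chips instead of three), storm association, storm id, per-chip debug-bus client id, the five debug registers (select, dword enable, shift, force valid, force frame), and the reset description. A rough reconstruction of that layout is sketched here; the struct and field names are inferred from the initializers, not taken from this hunk.

struct block_defs {
	const char *name;
	bool has_dbg_bus[MAX_CHIP_IDS];		/* per-chip, reduced to two entries */
	bool associated_to_storm;
	u32 storm_id;				/* valid only when associated_to_storm */
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
	u32 dbg_select_addr;
	u32 dbg_cycle_enable_addr;
	u32 dbg_shift_addr;
	u32 dbg_force_valid_addr;
	u32 dbg_force_frame_addr;
	bool has_reset_bit;
	bool unreset;				/* take the block out of reset before dumping */
	enum dbg_reset_regs reset_reg;
	u8 reset_bit_offset;			/* bit offset within reset_reg */
};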
@@ -538,8 +540,9 @@ static struct block_defs block_pglue_b_defs = {
538}; 540};
539 541
540static struct block_defs block_cnig_defs = { 542static struct block_defs block_cnig_defs = {
541 "cnig", {false, false, true}, false, 0, 543 "cnig",
542 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW}, 544 {false, true}, false, 0,
545 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
543 CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2, 546 CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
544 CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2, 547 CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
545 CNIG_REG_DBG_FORCE_FRAME_K2, 548 CNIG_REG_DBG_FORCE_FRAME_K2,
@@ -547,15 +550,16 @@ static struct block_defs block_cnig_defs = {
547}; 550};
548 551
549static struct block_defs block_cpmu_defs = { 552static struct block_defs block_cpmu_defs = {
550 "cpmu", {false, false, false}, false, 0, 553 "cpmu", {false, false}, false, 0,
551 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 554 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
552 0, 0, 0, 0, 0, 555 0, 0, 0, 0, 0,
553 true, false, DBG_RESET_REG_MISCS_PL_HV, 8 556 true, false, DBG_RESET_REG_MISCS_PL_HV, 8
554}; 557};
555 558
556static struct block_defs block_ncsi_defs = { 559static struct block_defs block_ncsi_defs = {
557 "ncsi", {true, true, true}, false, 0, 560 "ncsi",
558 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ}, 561 {true, true}, false, 0,
562 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
559 NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE, 563 NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
560 NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID, 564 NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
561 NCSI_REG_DBG_FORCE_FRAME, 565 NCSI_REG_DBG_FORCE_FRAME,
@@ -563,15 +567,16 @@ static struct block_defs block_ncsi_defs = {
563}; 567};
564 568
565static struct block_defs block_opte_defs = { 569static struct block_defs block_opte_defs = {
566 "opte", {false, false, false}, false, 0, 570 "opte", {false, false}, false, 0,
567 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 571 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
568 0, 0, 0, 0, 0, 572 0, 0, 0, 0, 0,
569 true, false, DBG_RESET_REG_MISCS_PL_HV, 4 573 true, false, DBG_RESET_REG_MISCS_PL_HV, 4
570}; 574};
571 575
572static struct block_defs block_bmb_defs = { 576static struct block_defs block_bmb_defs = {
573 "bmb", {true, true, true}, false, 0, 577 "bmb",
574 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB}, 578 {true, true}, false, 0,
579 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
575 BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE, 580 BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
576 BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID, 581 BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
577 BMB_REG_DBG_FORCE_FRAME, 582 BMB_REG_DBG_FORCE_FRAME,
@@ -579,8 +584,9 @@ static struct block_defs block_bmb_defs = {
579}; 584};
580 585
581static struct block_defs block_pcie_defs = { 586static struct block_defs block_pcie_defs = {
582 "pcie", {false, false, true}, false, 0, 587 "pcie",
583 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, 588 {false, true}, false, 0,
589 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
584 PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE, 590 PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
585 PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID, 591 PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
586 PCIE_REG_DBG_COMMON_FORCE_FRAME, 592 PCIE_REG_DBG_COMMON_FORCE_FRAME,
@@ -588,15 +594,16 @@ static struct block_defs block_pcie_defs = {
588}; 594};
589 595
590static struct block_defs block_mcp_defs = { 596static struct block_defs block_mcp_defs = {
591 "mcp", {false, false, false}, false, 0, 597 "mcp", {false, false}, false, 0,
592 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 598 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
593 0, 0, 0, 0, 0, 599 0, 0, 0, 0, 0,
594 false, false, MAX_DBG_RESET_REGS, 0 600 false, false, MAX_DBG_RESET_REGS, 0
595}; 601};
596 602
597static struct block_defs block_mcp2_defs = { 603static struct block_defs block_mcp2_defs = {
598 "mcp2", {true, true, true}, false, 0, 604 "mcp2",
599 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ}, 605 {true, true}, false, 0,
606 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
600 MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE, 607 MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
601 MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID, 608 MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
602 MCP2_REG_DBG_FORCE_FRAME, 609 MCP2_REG_DBG_FORCE_FRAME,
@@ -604,8 +611,9 @@ static struct block_defs block_mcp2_defs = {
604}; 611};
605 612
606static struct block_defs block_pswhst_defs = { 613static struct block_defs block_pswhst_defs = {
607 "pswhst", {true, true, true}, false, 0, 614 "pswhst",
608 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, 615 {true, true}, false, 0,
616 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
609 PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE, 617 PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
610 PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID, 618 PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
611 PSWHST_REG_DBG_FORCE_FRAME, 619 PSWHST_REG_DBG_FORCE_FRAME,
@@ -613,8 +621,9 @@ static struct block_defs block_pswhst_defs = {
613}; 621};
614 622
615static struct block_defs block_pswhst2_defs = { 623static struct block_defs block_pswhst2_defs = {
616 "pswhst2", {true, true, true}, false, 0, 624 "pswhst2",
617 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, 625 {true, true}, false, 0,
626 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
618 PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE, 627 PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
619 PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID, 628 PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
620 PSWHST2_REG_DBG_FORCE_FRAME, 629 PSWHST2_REG_DBG_FORCE_FRAME,
@@ -622,8 +631,9 @@ static struct block_defs block_pswhst2_defs = {
622}; 631};
623 632
624static struct block_defs block_pswrd_defs = { 633static struct block_defs block_pswrd_defs = {
625 "pswrd", {true, true, true}, false, 0, 634 "pswrd",
626 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, 635 {true, true}, false, 0,
636 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
627 PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE, 637 PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
628 PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID, 638 PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
629 PSWRD_REG_DBG_FORCE_FRAME, 639 PSWRD_REG_DBG_FORCE_FRAME,
@@ -631,8 +641,9 @@ static struct block_defs block_pswrd_defs = {
631}; 641};
632 642
633static struct block_defs block_pswrd2_defs = { 643static struct block_defs block_pswrd2_defs = {
634 "pswrd2", {true, true, true}, false, 0, 644 "pswrd2",
635 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, 645 {true, true}, false, 0,
646 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
636 PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE, 647 PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
637 PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID, 648 PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
638 PSWRD2_REG_DBG_FORCE_FRAME, 649 PSWRD2_REG_DBG_FORCE_FRAME,
@@ -640,8 +651,9 @@ static struct block_defs block_pswrd2_defs = {
640}; 651};
641 652
642static struct block_defs block_pswwr_defs = { 653static struct block_defs block_pswwr_defs = {
643 "pswwr", {true, true, true}, false, 0, 654 "pswwr",
644 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, 655 {true, true}, false, 0,
656 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
645 PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE, 657 PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
646 PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID, 658 PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
647 PSWWR_REG_DBG_FORCE_FRAME, 659 PSWWR_REG_DBG_FORCE_FRAME,
@@ -649,15 +661,16 @@ static struct block_defs block_pswwr_defs = {
649}; 661};
650 662
651static struct block_defs block_pswwr2_defs = { 663static struct block_defs block_pswwr2_defs = {
652 "pswwr2", {false, false, false}, false, 0, 664 "pswwr2", {false, false}, false, 0,
653 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 665 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
654 0, 0, 0, 0, 0, 666 0, 0, 0, 0, 0,
655 true, false, DBG_RESET_REG_MISC_PL_HV, 3 667 true, false, DBG_RESET_REG_MISC_PL_HV, 3
656}; 668};
657 669
658static struct block_defs block_pswrq_defs = { 670static struct block_defs block_pswrq_defs = {
659 "pswrq", {true, true, true}, false, 0, 671 "pswrq",
660 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, 672 {true, true}, false, 0,
673 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
661 PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE, 674 PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
662 PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID, 675 PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
663 PSWRQ_REG_DBG_FORCE_FRAME, 676 PSWRQ_REG_DBG_FORCE_FRAME,
@@ -665,8 +678,9 @@ static struct block_defs block_pswrq_defs = {
665}; 678};
666 679
667static struct block_defs block_pswrq2_defs = { 680static struct block_defs block_pswrq2_defs = {
668 "pswrq2", {true, true, true}, false, 0, 681 "pswrq2",
669 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, 682 {true, true}, false, 0,
683 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
670 PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE, 684 PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
671 PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID, 685 PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
672 PSWRQ2_REG_DBG_FORCE_FRAME, 686 PSWRQ2_REG_DBG_FORCE_FRAME,
@@ -674,8 +688,9 @@ static struct block_defs block_pswrq2_defs = {
674}; 688};
675 689
676static struct block_defs block_pglcs_defs = { 690static struct block_defs block_pglcs_defs = {
677 "pglcs", {false, false, true}, false, 0, 691 "pglcs",
678 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, 692 {false, true}, false, 0,
693 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
679 PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE, 694 PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
680 PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID, 695 PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
681 PGLCS_REG_DBG_FORCE_FRAME, 696 PGLCS_REG_DBG_FORCE_FRAME,
@@ -683,8 +698,9 @@ static struct block_defs block_pglcs_defs = {
683}; 698};
684 699
685static struct block_defs block_ptu_defs = { 700static struct block_defs block_ptu_defs = {
686 "ptu", {true, true, true}, false, 0, 701 "ptu",
687 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, 702 {true, true}, false, 0,
703 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
688 PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE, 704 PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
689 PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID, 705 PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
690 PTU_REG_DBG_FORCE_FRAME, 706 PTU_REG_DBG_FORCE_FRAME,
@@ -692,8 +708,9 @@ static struct block_defs block_ptu_defs = {
692}; 708};
693 709
694static struct block_defs block_dmae_defs = { 710static struct block_defs block_dmae_defs = {
695 "dmae", {true, true, true}, false, 0, 711 "dmae",
696 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, 712 {true, true}, false, 0,
713 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
697 DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE, 714 DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
698 DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID, 715 DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
699 DMAE_REG_DBG_FORCE_FRAME, 716 DMAE_REG_DBG_FORCE_FRAME,
@@ -701,8 +718,9 @@ static struct block_defs block_dmae_defs = {
701}; 718};
702 719
703static struct block_defs block_tcm_defs = { 720static struct block_defs block_tcm_defs = {
704 "tcm", {true, true, true}, true, DBG_TSTORM_ID, 721 "tcm",
705 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, 722 {true, true}, true, DBG_TSTORM_ID,
723 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
706 TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE, 724 TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
707 TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID, 725 TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
708 TCM_REG_DBG_FORCE_FRAME, 726 TCM_REG_DBG_FORCE_FRAME,
@@ -710,8 +728,9 @@ static struct block_defs block_tcm_defs = {
710}; 728};
711 729
712static struct block_defs block_mcm_defs = { 730static struct block_defs block_mcm_defs = {
713 "mcm", {true, true, true}, true, DBG_MSTORM_ID, 731 "mcm",
714 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, 732 {true, true}, true, DBG_MSTORM_ID,
733 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
715 MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE, 734 MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
716 MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID, 735 MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
717 MCM_REG_DBG_FORCE_FRAME, 736 MCM_REG_DBG_FORCE_FRAME,
@@ -719,8 +738,9 @@ static struct block_defs block_mcm_defs = {
719}; 738};
720 739
721static struct block_defs block_ucm_defs = { 740static struct block_defs block_ucm_defs = {
722 "ucm", {true, true, true}, true, DBG_USTORM_ID, 741 "ucm",
723 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, 742 {true, true}, true, DBG_USTORM_ID,
743 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
724 UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE, 744 UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
725 UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID, 745 UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
726 UCM_REG_DBG_FORCE_FRAME, 746 UCM_REG_DBG_FORCE_FRAME,
@@ -728,8 +748,9 @@ static struct block_defs block_ucm_defs = {
728}; 748};
729 749
730static struct block_defs block_xcm_defs = { 750static struct block_defs block_xcm_defs = {
731 "xcm", {true, true, true}, true, DBG_XSTORM_ID, 751 "xcm",
732 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, 752 {true, true}, true, DBG_XSTORM_ID,
753 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
733 XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE, 754 XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
734 XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID, 755 XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
735 XCM_REG_DBG_FORCE_FRAME, 756 XCM_REG_DBG_FORCE_FRAME,
@@ -737,8 +758,9 @@ static struct block_defs block_xcm_defs = {
737}; 758};
738 759
739static struct block_defs block_ycm_defs = { 760static struct block_defs block_ycm_defs = {
740 "ycm", {true, true, true}, true, DBG_YSTORM_ID, 761 "ycm",
741 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, 762 {true, true}, true, DBG_YSTORM_ID,
763 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
742 YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE, 764 YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
743 YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID, 765 YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
744 YCM_REG_DBG_FORCE_FRAME, 766 YCM_REG_DBG_FORCE_FRAME,
@@ -746,8 +768,9 @@ static struct block_defs block_ycm_defs = {
746}; 768};
747 769
748static struct block_defs block_pcm_defs = { 770static struct block_defs block_pcm_defs = {
749 "pcm", {true, true, true}, true, DBG_PSTORM_ID, 771 "pcm",
750 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, 772 {true, true}, true, DBG_PSTORM_ID,
773 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
751 PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE, 774 PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
752 PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID, 775 PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
753 PCM_REG_DBG_FORCE_FRAME, 776 PCM_REG_DBG_FORCE_FRAME,
@@ -755,8 +778,9 @@ static struct block_defs block_pcm_defs = {
755}; 778};
756 779
757static struct block_defs block_qm_defs = { 780static struct block_defs block_qm_defs = {
758 "qm", {true, true, true}, false, 0, 781 "qm",
759 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ}, 782 {true, true}, false, 0,
783 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
760 QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE, 784 QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
761 QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID, 785 QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
762 QM_REG_DBG_FORCE_FRAME, 786 QM_REG_DBG_FORCE_FRAME,
@@ -764,8 +788,9 @@ static struct block_defs block_qm_defs = {
764}; 788};
765 789
766static struct block_defs block_tm_defs = { 790static struct block_defs block_tm_defs = {
767 "tm", {true, true, true}, false, 0, 791 "tm",
768 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, 792 {true, true}, false, 0,
793 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
769 TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE, 794 TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
770 TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID, 795 TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
771 TM_REG_DBG_FORCE_FRAME, 796 TM_REG_DBG_FORCE_FRAME,
@@ -773,8 +798,9 @@ static struct block_defs block_tm_defs = {
773}; 798};
774 799
775static struct block_defs block_dorq_defs = { 800static struct block_defs block_dorq_defs = {
776 "dorq", {true, true, true}, false, 0, 801 "dorq",
777 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, 802 {true, true}, false, 0,
803 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
778 DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE, 804 DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
779 DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID, 805 DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
780 DORQ_REG_DBG_FORCE_FRAME, 806 DORQ_REG_DBG_FORCE_FRAME,
@@ -782,8 +808,9 @@ static struct block_defs block_dorq_defs = {
782}; 808};
783 809
784static struct block_defs block_brb_defs = { 810static struct block_defs block_brb_defs = {
785 "brb", {true, true, true}, false, 0, 811 "brb",
786 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR}, 812 {true, true}, false, 0,
813 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
787 BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE, 814 BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
788 BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID, 815 BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
789 BRB_REG_DBG_FORCE_FRAME, 816 BRB_REG_DBG_FORCE_FRAME,
@@ -791,8 +818,9 @@ static struct block_defs block_brb_defs = {
791}; 818};
792 819
793static struct block_defs block_src_defs = { 820static struct block_defs block_src_defs = {
794 "src", {true, true, true}, false, 0, 821 "src",
795 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, 822 {true, true}, false, 0,
823 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
796 SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE, 824 SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
797 SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID, 825 SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
798 SRC_REG_DBG_FORCE_FRAME, 826 SRC_REG_DBG_FORCE_FRAME,
@@ -800,8 +828,9 @@ static struct block_defs block_src_defs = {
800}; 828};
801 829
802static struct block_defs block_prs_defs = { 830static struct block_defs block_prs_defs = {
803 "prs", {true, true, true}, false, 0, 831 "prs",
804 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR}, 832 {true, true}, false, 0,
833 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
805 PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE, 834 PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
806 PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID, 835 PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
807 PRS_REG_DBG_FORCE_FRAME, 836 PRS_REG_DBG_FORCE_FRAME,
@@ -809,8 +838,9 @@ static struct block_defs block_prs_defs = {
809}; 838};
810 839
811static struct block_defs block_tsdm_defs = { 840static struct block_defs block_tsdm_defs = {
812 "tsdm", {true, true, true}, true, DBG_TSTORM_ID, 841 "tsdm",
813 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, 842 {true, true}, true, DBG_TSTORM_ID,
843 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
814 TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE, 844 TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
815 TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID, 845 TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
816 TSDM_REG_DBG_FORCE_FRAME, 846 TSDM_REG_DBG_FORCE_FRAME,
@@ -818,8 +848,9 @@ static struct block_defs block_tsdm_defs = {
818}; 848};
819 849
820static struct block_defs block_msdm_defs = { 850static struct block_defs block_msdm_defs = {
821 "msdm", {true, true, true}, true, DBG_MSTORM_ID, 851 "msdm",
822 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, 852 {true, true}, true, DBG_MSTORM_ID,
853 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
823 MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE, 854 MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
824 MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID, 855 MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
825 MSDM_REG_DBG_FORCE_FRAME, 856 MSDM_REG_DBG_FORCE_FRAME,
@@ -827,8 +858,9 @@ static struct block_defs block_msdm_defs = {
827}; 858};
828 859
829static struct block_defs block_usdm_defs = { 860static struct block_defs block_usdm_defs = {
830 "usdm", {true, true, true}, true, DBG_USTORM_ID, 861 "usdm",
831 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, 862 {true, true}, true, DBG_USTORM_ID,
863 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
832 USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE, 864 USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
833 USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID, 865 USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
834 USDM_REG_DBG_FORCE_FRAME, 866 USDM_REG_DBG_FORCE_FRAME,
@@ -836,8 +868,9 @@ static struct block_defs block_usdm_defs = {
836}; 868};
837 869
838static struct block_defs block_xsdm_defs = { 870static struct block_defs block_xsdm_defs = {
839 "xsdm", {true, true, true}, true, DBG_XSTORM_ID, 871 "xsdm",
840 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, 872 {true, true}, true, DBG_XSTORM_ID,
873 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
841 XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE, 874 XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
842 XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID, 875 XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
843 XSDM_REG_DBG_FORCE_FRAME, 876 XSDM_REG_DBG_FORCE_FRAME,
@@ -845,8 +878,9 @@ static struct block_defs block_xsdm_defs = {
845}; 878};
846 879
847static struct block_defs block_ysdm_defs = { 880static struct block_defs block_ysdm_defs = {
848 "ysdm", {true, true, true}, true, DBG_YSTORM_ID, 881 "ysdm",
849 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, 882 {true, true}, true, DBG_YSTORM_ID,
883 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
850 YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE, 884 YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
851 YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID, 885 YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
852 YSDM_REG_DBG_FORCE_FRAME, 886 YSDM_REG_DBG_FORCE_FRAME,
@@ -854,8 +888,9 @@ static struct block_defs block_ysdm_defs = {
854}; 888};
855 889
856static struct block_defs block_psdm_defs = { 890static struct block_defs block_psdm_defs = {
857 "psdm", {true, true, true}, true, DBG_PSTORM_ID, 891 "psdm",
858 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, 892 {true, true}, true, DBG_PSTORM_ID,
893 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
859 PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE, 894 PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
860 PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID, 895 PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
861 PSDM_REG_DBG_FORCE_FRAME, 896 PSDM_REG_DBG_FORCE_FRAME,
@@ -863,8 +898,9 @@ static struct block_defs block_psdm_defs = {
863}; 898};
864 899
865static struct block_defs block_tsem_defs = { 900static struct block_defs block_tsem_defs = {
866 "tsem", {true, true, true}, true, DBG_TSTORM_ID, 901 "tsem",
867 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, 902 {true, true}, true, DBG_TSTORM_ID,
903 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
868 TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE, 904 TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
869 TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID, 905 TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
870 TSEM_REG_DBG_FORCE_FRAME, 906 TSEM_REG_DBG_FORCE_FRAME,
@@ -872,8 +908,9 @@ static struct block_defs block_tsem_defs = {
872}; 908};
873 909
874static struct block_defs block_msem_defs = { 910static struct block_defs block_msem_defs = {
875 "msem", {true, true, true}, true, DBG_MSTORM_ID, 911 "msem",
876 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, 912 {true, true}, true, DBG_MSTORM_ID,
913 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
877 MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE, 914 MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
878 MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID, 915 MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
879 MSEM_REG_DBG_FORCE_FRAME, 916 MSEM_REG_DBG_FORCE_FRAME,
@@ -881,8 +918,9 @@ static struct block_defs block_msem_defs = {
881}; 918};
882 919
883static struct block_defs block_usem_defs = { 920static struct block_defs block_usem_defs = {
884 "usem", {true, true, true}, true, DBG_USTORM_ID, 921 "usem",
885 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, 922 {true, true}, true, DBG_USTORM_ID,
923 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
886 USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE, 924 USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
887 USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID, 925 USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
888 USEM_REG_DBG_FORCE_FRAME, 926 USEM_REG_DBG_FORCE_FRAME,
@@ -890,8 +928,9 @@ static struct block_defs block_usem_defs = {
890}; 928};
891 929
892static struct block_defs block_xsem_defs = { 930static struct block_defs block_xsem_defs = {
893 "xsem", {true, true, true}, true, DBG_XSTORM_ID, 931 "xsem",
894 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, 932 {true, true}, true, DBG_XSTORM_ID,
933 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
895 XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE, 934 XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
896 XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID, 935 XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
897 XSEM_REG_DBG_FORCE_FRAME, 936 XSEM_REG_DBG_FORCE_FRAME,
@@ -899,8 +938,9 @@ static struct block_defs block_xsem_defs = {
899}; 938};
900 939
901static struct block_defs block_ysem_defs = { 940static struct block_defs block_ysem_defs = {
902 "ysem", {true, true, true}, true, DBG_YSTORM_ID, 941 "ysem",
903 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, 942 {true, true}, true, DBG_YSTORM_ID,
943 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
904 YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE, 944 YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
905 YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID, 945 YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
906 YSEM_REG_DBG_FORCE_FRAME, 946 YSEM_REG_DBG_FORCE_FRAME,
@@ -908,8 +948,9 @@ static struct block_defs block_ysem_defs = {
908}; 948};
909 949
910static struct block_defs block_psem_defs = { 950static struct block_defs block_psem_defs = {
911 "psem", {true, true, true}, true, DBG_PSTORM_ID, 951 "psem",
912 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, 952 {true, true}, true, DBG_PSTORM_ID,
953 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
913 PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE, 954 PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
914 PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID, 955 PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
915 PSEM_REG_DBG_FORCE_FRAME, 956 PSEM_REG_DBG_FORCE_FRAME,
@@ -917,8 +958,9 @@ static struct block_defs block_psem_defs = {
917}; 958};
918 959
919static struct block_defs block_rss_defs = { 960static struct block_defs block_rss_defs = {
920 "rss", {true, true, true}, false, 0, 961 "rss",
921 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, 962 {true, true}, false, 0,
963 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
922 RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE, 964 RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
923 RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID, 965 RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
924 RSS_REG_DBG_FORCE_FRAME, 966 RSS_REG_DBG_FORCE_FRAME,
@@ -926,8 +968,9 @@ static struct block_defs block_rss_defs = {
926}; 968};
927 969
928static struct block_defs block_tmld_defs = { 970static struct block_defs block_tmld_defs = {
929 "tmld", {true, true, true}, false, 0, 971 "tmld",
930 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, 972 {true, true}, false, 0,
973 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
931 TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE, 974 TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
932 TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID, 975 TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
933 TMLD_REG_DBG_FORCE_FRAME, 976 TMLD_REG_DBG_FORCE_FRAME,
@@ -935,8 +978,9 @@ static struct block_defs block_tmld_defs = {
935}; 978};
936 979
937static struct block_defs block_muld_defs = { 980static struct block_defs block_muld_defs = {
938 "muld", {true, true, true}, false, 0, 981 "muld",
939 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, 982 {true, true}, false, 0,
983 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
940 MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE, 984 MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
941 MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID, 985 MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
942 MULD_REG_DBG_FORCE_FRAME, 986 MULD_REG_DBG_FORCE_FRAME,
@@ -944,8 +988,9 @@ static struct block_defs block_muld_defs = {
944}; 988};
945 989
946static struct block_defs block_yuld_defs = { 990static struct block_defs block_yuld_defs = {
947 "yuld", {true, true, true}, false, 0, 991 "yuld",
948 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, 992 {true, true}, false, 0,
993 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
949 YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE, 994 YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
950 YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID, 995 YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
951 YULD_REG_DBG_FORCE_FRAME, 996 YULD_REG_DBG_FORCE_FRAME,
@@ -953,8 +998,9 @@ static struct block_defs block_yuld_defs = {
953}; 998};
954 999
955static struct block_defs block_xyld_defs = { 1000static struct block_defs block_xyld_defs = {
956 "xyld", {true, true, true}, false, 0, 1001 "xyld",
957 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, 1002 {true, true}, false, 0,
1003 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
958 XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE, 1004 XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
959 XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID, 1005 XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
960 XYLD_REG_DBG_FORCE_FRAME, 1006 XYLD_REG_DBG_FORCE_FRAME,
@@ -962,8 +1008,9 @@ static struct block_defs block_xyld_defs = {
962}; 1008};
963 1009
964static struct block_defs block_prm_defs = { 1010static struct block_defs block_prm_defs = {
965 "prm", {true, true, true}, false, 0, 1011 "prm",
966 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, 1012 {true, true}, false, 0,
1013 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
967 PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE, 1014 PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
968 PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID, 1015 PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
969 PRM_REG_DBG_FORCE_FRAME, 1016 PRM_REG_DBG_FORCE_FRAME,
@@ -971,8 +1018,9 @@ static struct block_defs block_prm_defs = {
971}; 1018};
972 1019
973static struct block_defs block_pbf_pb1_defs = { 1020static struct block_defs block_pbf_pb1_defs = {
974 "pbf_pb1", {true, true, true}, false, 0, 1021 "pbf_pb1",
975 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV}, 1022 {true, true}, false, 0,
1023 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
976 PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE, 1024 PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
977 PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID, 1025 PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
978 PBF_PB1_REG_DBG_FORCE_FRAME, 1026 PBF_PB1_REG_DBG_FORCE_FRAME,
@@ -981,8 +1029,9 @@ static struct block_defs block_pbf_pb1_defs = {
981}; 1029};
982 1030
983static struct block_defs block_pbf_pb2_defs = { 1031static struct block_defs block_pbf_pb2_defs = {
984 "pbf_pb2", {true, true, true}, false, 0, 1032 "pbf_pb2",
985 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV}, 1033 {true, true}, false, 0,
1034 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
986 PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE, 1035 PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
987 PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID, 1036 PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
988 PBF_PB2_REG_DBG_FORCE_FRAME, 1037 PBF_PB2_REG_DBG_FORCE_FRAME,
@@ -991,8 +1040,9 @@ static struct block_defs block_pbf_pb2_defs = {
991}; 1040};
992 1041
993static struct block_defs block_rpb_defs = { 1042static struct block_defs block_rpb_defs = {
994 "rpb", {true, true, true}, false, 0, 1043 "rpb",
995 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, 1044 {true, true}, false, 0,
1045 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
996 RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE, 1046 RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
997 RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID, 1047 RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
998 RPB_REG_DBG_FORCE_FRAME, 1048 RPB_REG_DBG_FORCE_FRAME,
@@ -1000,8 +1050,9 @@ static struct block_defs block_rpb_defs = {
1000}; 1050};
1001 1051
1002static struct block_defs block_btb_defs = { 1052static struct block_defs block_btb_defs = {
1003 "btb", {true, true, true}, false, 0, 1053 "btb",
1004 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV}, 1054 {true, true}, false, 0,
1055 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
1005 BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE, 1056 BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1006 BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID, 1057 BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1007 BTB_REG_DBG_FORCE_FRAME, 1058 BTB_REG_DBG_FORCE_FRAME,
@@ -1009,8 +1060,9 @@ static struct block_defs block_btb_defs = {
1009}; 1060};
1010 1061
1011static struct block_defs block_pbf_defs = { 1062static struct block_defs block_pbf_defs = {
1012 "pbf", {true, true, true}, false, 0, 1063 "pbf",
1013 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV}, 1064 {true, true}, false, 0,
1065 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
1014 PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE, 1066 PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1015 PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID, 1067 PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1016 PBF_REG_DBG_FORCE_FRAME, 1068 PBF_REG_DBG_FORCE_FRAME,
@@ -1018,8 +1070,9 @@ static struct block_defs block_pbf_defs = {
1018}; 1070};
1019 1071
1020static struct block_defs block_rdif_defs = { 1072static struct block_defs block_rdif_defs = {
1021 "rdif", {true, true, true}, false, 0, 1073 "rdif",
1022 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, 1074 {true, true}, false, 0,
1075 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
1023 RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE, 1076 RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1024 RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID, 1077 RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1025 RDIF_REG_DBG_FORCE_FRAME, 1078 RDIF_REG_DBG_FORCE_FRAME,
@@ -1027,8 +1080,9 @@ static struct block_defs block_rdif_defs = {
1027}; 1080};
1028 1081
1029static struct block_defs block_tdif_defs = { 1082static struct block_defs block_tdif_defs = {
1030 "tdif", {true, true, true}, false, 0, 1083 "tdif",
1031 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, 1084 {true, true}, false, 0,
1085 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1032 TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE, 1086 TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1033 TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID, 1087 TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1034 TDIF_REG_DBG_FORCE_FRAME, 1088 TDIF_REG_DBG_FORCE_FRAME,
@@ -1036,8 +1090,9 @@ static struct block_defs block_tdif_defs = {
1036}; 1090};
1037 1091
1038static struct block_defs block_cdu_defs = { 1092static struct block_defs block_cdu_defs = {
1039 "cdu", {true, true, true}, false, 0, 1093 "cdu",
1040 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, 1094 {true, true}, false, 0,
1095 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1041 CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE, 1096 CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1042 CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID, 1097 CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1043 CDU_REG_DBG_FORCE_FRAME, 1098 CDU_REG_DBG_FORCE_FRAME,
@@ -1045,8 +1100,9 @@ static struct block_defs block_cdu_defs = {
1045}; 1100};
1046 1101
1047static struct block_defs block_ccfc_defs = { 1102static struct block_defs block_ccfc_defs = {
1048 "ccfc", {true, true, true}, false, 0, 1103 "ccfc",
1049 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, 1104 {true, true}, false, 0,
1105 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1050 CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE, 1106 CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1051 CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID, 1107 CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1052 CCFC_REG_DBG_FORCE_FRAME, 1108 CCFC_REG_DBG_FORCE_FRAME,
@@ -1054,8 +1110,9 @@ static struct block_defs block_ccfc_defs = {
1054}; 1110};
1055 1111
1056static struct block_defs block_tcfc_defs = { 1112static struct block_defs block_tcfc_defs = {
1057 "tcfc", {true, true, true}, false, 0, 1113 "tcfc",
1058 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, 1114 {true, true}, false, 0,
1115 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1059 TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE, 1116 TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1060 TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID, 1117 TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1061 TCFC_REG_DBG_FORCE_FRAME, 1118 TCFC_REG_DBG_FORCE_FRAME,
@@ -1063,8 +1120,9 @@ static struct block_defs block_tcfc_defs = {
1063}; 1120};
1064 1121
1065static struct block_defs block_igu_defs = { 1122static struct block_defs block_igu_defs = {
1066 "igu", {true, true, true}, false, 0, 1123 "igu",
1067 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, 1124 {true, true}, false, 0,
1125 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1068 IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE, 1126 IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1069 IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID, 1127 IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1070 IGU_REG_DBG_FORCE_FRAME, 1128 IGU_REG_DBG_FORCE_FRAME,
@@ -1072,8 +1130,9 @@ static struct block_defs block_igu_defs = {
1072}; 1130};
1073 1131
1074static struct block_defs block_cau_defs = { 1132static struct block_defs block_cau_defs = {
1075 "cau", {true, true, true}, false, 0, 1133 "cau",
1076 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, 1134 {true, true}, false, 0,
1135 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1077 CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE, 1136 CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1078 CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID, 1137 CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1079 CAU_REG_DBG_FORCE_FRAME, 1138 CAU_REG_DBG_FORCE_FRAME,
@@ -1081,8 +1140,9 @@ static struct block_defs block_cau_defs = {
1081}; 1140};
1082 1141
1083static struct block_defs block_umac_defs = { 1142static struct block_defs block_umac_defs = {
1084 "umac", {false, false, true}, false, 0, 1143 "umac",
1085 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ}, 1144 {false, true}, false, 0,
1145 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
1086 UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE, 1146 UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
1087 UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID, 1147 UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
1088 UMAC_REG_DBG_FORCE_FRAME, 1148 UMAC_REG_DBG_FORCE_FRAME,
@@ -1090,22 +1150,23 @@ static struct block_defs block_umac_defs = {
1090}; 1150};
1091 1151
1092static struct block_defs block_xmac_defs = { 1152static struct block_defs block_xmac_defs = {
1093 "xmac", {false, false, false}, false, 0, 1153 "xmac", {false, false}, false, 0,
1094 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 1154 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1095 0, 0, 0, 0, 0, 1155 0, 0, 0, 0, 0,
1096 false, false, MAX_DBG_RESET_REGS, 0 1156 false, false, MAX_DBG_RESET_REGS, 0
1097}; 1157};
1098 1158
1099static struct block_defs block_dbg_defs = { 1159static struct block_defs block_dbg_defs = {
1100 "dbg", {false, false, false}, false, 0, 1160 "dbg", {false, false}, false, 0,
1101 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 1161 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1102 0, 0, 0, 0, 0, 1162 0, 0, 0, 0, 0,
1103 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3 1163 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
1104}; 1164};
1105 1165
1106static struct block_defs block_nig_defs = { 1166static struct block_defs block_nig_defs = {
1107 "nig", {true, true, true}, false, 0, 1167 "nig",
1108 {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN}, 1168 {true, true}, false, 0,
1169 {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
1109 NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE, 1170 NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1110 NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID, 1171 NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1111 NIG_REG_DBG_FORCE_FRAME, 1172 NIG_REG_DBG_FORCE_FRAME,
@@ -1113,8 +1174,9 @@ static struct block_defs block_nig_defs = {
1113}; 1174};
1114 1175
1115static struct block_defs block_wol_defs = { 1176static struct block_defs block_wol_defs = {
1116 "wol", {false, false, true}, false, 0, 1177 "wol",
1117 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ}, 1178 {false, true}, false, 0,
1179 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
1118 WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE, 1180 WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
1119 WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID, 1181 WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
1120 WOL_REG_DBG_FORCE_FRAME, 1182 WOL_REG_DBG_FORCE_FRAME,
@@ -1122,8 +1184,9 @@ static struct block_defs block_wol_defs = {
1122}; 1184};
1123 1185
1124static struct block_defs block_bmbn_defs = { 1186static struct block_defs block_bmbn_defs = {
1125 "bmbn", {false, false, true}, false, 0, 1187 "bmbn",
1126 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB}, 1188 {false, true}, false, 0,
1189 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
1127 BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE, 1190 BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
1128 BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID, 1191 BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
1129 BMBN_REG_DBG_FORCE_FRAME, 1192 BMBN_REG_DBG_FORCE_FRAME,
@@ -1131,15 +1194,16 @@ static struct block_defs block_bmbn_defs = {
1131}; 1194};
1132 1195
1133static struct block_defs block_ipc_defs = { 1196static struct block_defs block_ipc_defs = {
1134 "ipc", {false, false, false}, false, 0, 1197 "ipc", {false, false}, false, 0,
1135 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 1198 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1136 0, 0, 0, 0, 0, 1199 0, 0, 0, 0, 0,
1137 true, false, DBG_RESET_REG_MISCS_PL_UA, 8 1200 true, false, DBG_RESET_REG_MISCS_PL_UA, 8
1138}; 1201};
1139 1202
1140static struct block_defs block_nwm_defs = { 1203static struct block_defs block_nwm_defs = {
1141 "nwm", {false, false, true}, false, 0, 1204 "nwm",
1142 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW}, 1205 {false, true}, false, 0,
1206 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
1143 NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE, 1207 NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
1144 NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID, 1208 NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
1145 NWM_REG_DBG_FORCE_FRAME, 1209 NWM_REG_DBG_FORCE_FRAME,
@@ -1147,22 +1211,29 @@ static struct block_defs block_nwm_defs = {
1147}; 1211};
1148 1212
1149static struct block_defs block_nws_defs = { 1213static struct block_defs block_nws_defs = {
1150 "nws", {false, false, false}, false, 0, 1214 "nws",
1151 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 1215 {false, true}, false, 0,
1152 0, 0, 0, 0, 0, 1216 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
1217 NWS_REG_DBG_SELECT, NWS_REG_DBG_DWORD_ENABLE,
1218 NWS_REG_DBG_SHIFT, NWS_REG_DBG_FORCE_VALID,
1219 NWS_REG_DBG_FORCE_FRAME,
1153 true, false, DBG_RESET_REG_MISCS_PL_HV, 12 1220 true, false, DBG_RESET_REG_MISCS_PL_HV, 12
1154}; 1221};
1155 1222
1156static struct block_defs block_ms_defs = { 1223static struct block_defs block_ms_defs = {
1157 "ms", {false, false, false}, false, 0, 1224 "ms",
1158 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 1225 {false, true}, false, 0,
1159 0, 0, 0, 0, 0, 1226 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
1227 MS_REG_DBG_SELECT, MS_REG_DBG_DWORD_ENABLE,
1228 MS_REG_DBG_SHIFT, MS_REG_DBG_FORCE_VALID,
1229 MS_REG_DBG_FORCE_FRAME,
1160 true, false, DBG_RESET_REG_MISCS_PL_HV, 13 1230 true, false, DBG_RESET_REG_MISCS_PL_HV, 13
1161}; 1231};
1162 1232
1163static struct block_defs block_phy_pcie_defs = { 1233static struct block_defs block_phy_pcie_defs = {
1164 "phy_pcie", {false, false, true}, false, 0, 1234 "phy_pcie",
1165 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, 1235 {false, true}, false, 0,
1236 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
1166 PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE, 1237 PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
1167 PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID, 1238 PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
1168 PCIE_REG_DBG_COMMON_FORCE_FRAME, 1239 PCIE_REG_DBG_COMMON_FORCE_FRAME,
@@ -1170,22 +1241,57 @@ static struct block_defs block_phy_pcie_defs = {
1170}; 1241};
1171 1242
1172static struct block_defs block_led_defs = { 1243static struct block_defs block_led_defs = {
1173 "led", {false, false, false}, false, 0, 1244 "led", {false, false}, false, 0,
1174 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 1245 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1246 0, 0, 0, 0, 0,
1247 true, false, DBG_RESET_REG_MISCS_PL_HV, 14
1248};
1249
1250static struct block_defs block_avs_wrap_defs = {
1251 "avs_wrap", {false, false}, false, 0,
1252 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1253 0, 0, 0, 0, 0,
1254 true, false, DBG_RESET_REG_MISCS_PL_UA, 11
1255};
1256
1257static struct block_defs block_rgfs_defs = {
1258 "rgfs", {false, false}, false, 0,
1259 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1175 0, 0, 0, 0, 0, 1260 0, 0, 0, 0, 0,
1176 true, true, DBG_RESET_REG_MISCS_PL_HV, 14 1261 false, false, MAX_DBG_RESET_REGS, 0
1262};
1263
1264static struct block_defs block_tgfs_defs = {
1265 "tgfs", {false, false}, false, 0,
1266 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1267 0, 0, 0, 0, 0,
1268 false, false, MAX_DBG_RESET_REGS, 0
1269};
1270
1271static struct block_defs block_ptld_defs = {
1272 "ptld", {false, false}, false, 0,
1273 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1274 0, 0, 0, 0, 0,
1275 false, false, MAX_DBG_RESET_REGS, 0
1276};
1277
1278static struct block_defs block_ypld_defs = {
1279 "ypld", {false, false}, false, 0,
1280 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1281 0, 0, 0, 0, 0,
1282 false, false, MAX_DBG_RESET_REGS, 0
1177}; 1283};
1178 1284
1179static struct block_defs block_misc_aeu_defs = { 1285static struct block_defs block_misc_aeu_defs = {
1180 "misc_aeu", {false, false, false}, false, 0, 1286 "misc_aeu", {false, false}, false, 0,
1181 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 1287 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1182 0, 0, 0, 0, 0, 1288 0, 0, 0, 0, 0,
1183 false, false, MAX_DBG_RESET_REGS, 0 1289 false, false, MAX_DBG_RESET_REGS, 0
1184}; 1290};
1185 1291
1186static struct block_defs block_bar0_map_defs = { 1292static struct block_defs block_bar0_map_defs = {
1187 "bar0_map", {false, false, false}, false, 0, 1293 "bar0_map", {false, false}, false, 0,
1188 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 1294 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1189 0, 0, 0, 0, 0, 1295 0, 0, 0, 0, 0,
1190 false, false, MAX_DBG_RESET_REGS, 0 1296 false, false, MAX_DBG_RESET_REGS, 0
1191}; 1297};
@@ -1269,6 +1375,11 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
1269 &block_ms_defs, 1375 &block_ms_defs,
1270 &block_phy_pcie_defs, 1376 &block_phy_pcie_defs,
1271 &block_led_defs, 1377 &block_led_defs,
1378 &block_avs_wrap_defs,
1379 &block_rgfs_defs,
1380 &block_tgfs_defs,
1381 &block_ptld_defs,
1382 &block_ypld_defs,
1272 &block_misc_aeu_defs, 1383 &block_misc_aeu_defs,
1273 &block_bar0_map_defs, 1384 &block_bar0_map_defs,
1274}; 1385};
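As a hedged illustration of how s_block_defs is typically consumed once each per-chip array holds only two entries, a lookup would index the detected chip into the per-chip fields; the dev_data and chip_id plumbing below is assumed from context and is not part of this patch.

static bool block_has_dbg_bus(struct qed_hwfn *p_hwfn, enum block_id id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct block_defs *block = s_block_defs[id];

	/* Pick the column for the chip this function runs on */
	return block->has_dbg_bus[dev_data->chip_id];
}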
@@ -1281,65 +1392,67 @@ static struct platform_defs s_platform_defs[] = {
1281}; 1392};
1282 1393
1283static struct grc_param_defs s_grc_param_defs[] = { 1394static struct grc_param_defs s_grc_param_defs[] = {
1284 {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */ 1395 {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
1285 {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */ 1396 {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
1286 {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */ 1397 {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
1287 {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */ 1398 {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
1288 {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */ 1399 {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
1289 {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */ 1400 {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
1290 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */ 1401 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
1291 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */ 1402 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
1292 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */ 1403 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
1293 {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */ 1404 {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
1294 {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */ 1405 {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
1295 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */ 1406 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
1296 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */ 1407 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
1297 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */ 1408 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
1298 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */ 1409 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
1299 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */ 1410 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
1300 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */ 1411 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
1301 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */ 1412 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
1302 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */ 1413 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
1303 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */ 1414 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
1304 {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */ 1415 {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
1305 {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */ 1416 {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
1306 {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */ 1417 {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
1307 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */ 1418 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
1308 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */ 1419 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
1309 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */ 1420 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
1310 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */ 1421 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
1311 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */ 1422 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
1312 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */ 1423 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
1313 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */ 1424 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
1314 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */ 1425 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
1315 {{0, 0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */ 1426 {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
1316 {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS, 1427 {{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
1317 MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LCIDS */ 1428 MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LCIDS */
1318 {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS, 1429 {{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
1319 MAX_LTIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */ 1430 MAX_LTIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */
1320 {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */ 1431 {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */
1321 {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */ 1432 {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */
1322 {{0, 0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */ 1433 {{0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
1323 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */ 1434 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
1324 {{1, 1, 1}, 0, 1, false, 0, 1} /* DBG_GRC_PARAM_DUMP_PHY */ 1435 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PHY */
1436 {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_NO_MCP */
1437 {{0, 0}, 0, 1, false, 0, 0} /* DBG_GRC_PARAM_NO_FW_VER */
1325}; 1438};
1326 1439
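Each s_grc_param_defs row reads as: per-chip default value (two columns after this change), minimum, maximum, is-preset flag, exclude-all preset value, crash preset value. Below is a minimal sketch of how those defaults would be applied, assuming a chip_id field and a MAX_DBG_GRC_PARAMS bound that are not shown in this diff.

static void qed_dbg_grc_set_params_default_sketch(struct qed_hwfn *p_hwfn)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 i;

	/* Take the default value of the detected chip for every GRC param */
	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
		dev_data->grc.param_val[i] =
		    s_grc_param_defs[i].default_val[dev_data->chip_id];
}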
1327static struct rss_mem_defs s_rss_mem_defs[] = { 1440static struct rss_mem_defs s_rss_mem_defs[] = {
1328 { "rss_mem_cid", "rss_cid", 0, 1441 { "rss_mem_cid", "rss_cid", 0,
1329 {256, 256, 320}, 1442 {256, 320},
1330 {32, 32, 32} }, 1443 {32, 32} },
1331 { "rss_mem_key_msb", "rss_key", 1024, 1444 { "rss_mem_key_msb", "rss_key", 1024,
1332 {128, 128, 208}, 1445 {128, 208},
1333 {256, 256, 256} }, 1446 {256, 256} },
1334 { "rss_mem_key_lsb", "rss_key", 2048, 1447 { "rss_mem_key_lsb", "rss_key", 2048,
1335 {128, 128, 208}, 1448 {128, 208},
1336 {64, 64, 64} }, 1449 {64, 64} },
1337 { "rss_mem_info", "rss_info", 3072, 1450 { "rss_mem_info", "rss_info", 3072,
1338 {128, 128, 208}, 1451 {128, 208},
1339 {16, 16, 16} }, 1452 {16, 16} },
1340 { "rss_mem_ind", "rss_ind", 4096, 1453 { "rss_mem_ind", "rss_ind", 4096,
1341 {(128 * 128), (128 * 128), (128 * 208)}, 1454 {(128 * 128), (128 * 208)},
1342 {16, 16, 16} } 1455 {16, 16} }
1343}; 1456};
1344 1457
1345static struct vfc_ram_defs s_vfc_ram_defs[] = { 1458static struct vfc_ram_defs s_vfc_ram_defs[] = {
@@ -1352,32 +1465,32 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = {
1352static struct big_ram_defs s_big_ram_defs[] = { 1465static struct big_ram_defs s_big_ram_defs[] = {
1353 { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB, 1466 { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1354 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA, 1467 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
1355 {4800, 4800, 5632} }, 1468 {4800, 5632} },
1356 { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB, 1469 { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1357 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA, 1470 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
1358 {2880, 2880, 3680} }, 1471 {2880, 3680} },
1359 { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB, 1472 { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1360 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA, 1473 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
1361 {1152, 1152, 1152} } 1474 {1152, 1152} }
1362}; 1475};
1363 1476
1364static struct reset_reg_defs s_reset_regs_defs[] = { 1477static struct reset_reg_defs s_reset_regs_defs[] = {
1365 { MISCS_REG_RESET_PL_UA, 0x0, 1478 { MISCS_REG_RESET_PL_UA, 0x0,
1366 {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */ 1479 {true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */
1367 { MISCS_REG_RESET_PL_HV, 0x0, 1480 { MISCS_REG_RESET_PL_HV, 0x0,
1368 {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */ 1481 {true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */
1369 { MISCS_REG_RESET_PL_HV_2, 0x0, 1482 { MISCS_REG_RESET_PL_HV_2, 0x0,
1370 {false, false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */ 1483 {false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */
1371 { MISC_REG_RESET_PL_UA, 0x0, 1484 { MISC_REG_RESET_PL_UA, 0x0,
1372 {true, true, true} }, /* DBG_RESET_REG_MISC_PL_UA */ 1485 {true, true} }, /* DBG_RESET_REG_MISC_PL_UA */
1373 { MISC_REG_RESET_PL_HV, 0x0, 1486 { MISC_REG_RESET_PL_HV, 0x0,
1374 {true, true, true} }, /* DBG_RESET_REG_MISC_PL_HV */ 1487 {true, true} }, /* DBG_RESET_REG_MISC_PL_HV */
1375 { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040, 1488 { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
1376 {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */ 1489 {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1377 { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007, 1490 { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
1378 {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */ 1491 {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1379 { MISC_REG_RESET_PL_PDA_VAUX, 0x2, 1492 { MISC_REG_RESET_PL_PDA_VAUX, 0x2,
1380 {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */ 1493 {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1381}; 1494};
1382 1495
1383static struct phy_defs s_phy_defs[] = { 1496static struct phy_defs s_phy_defs[] = {
@@ -1410,6 +1523,26 @@ static u32 qed_read_unaligned_dword(u8 *buf)
1410 return dword; 1523 return dword;
1411} 1524}
1412 1525
1526/* Returns the value of the specified GRC param */
1527static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
1528 enum dbg_grc_params grc_param)
1529{
1530 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1531
1532 return dev_data->grc.param_val[grc_param];
1533}
1534
1535/* Initializes the GRC parameters */
1536static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
1537{
1538 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1539
1540 if (!dev_data->grc.params_initialized) {
1541 qed_dbg_grc_set_params_default(p_hwfn);
1542 dev_data->grc.params_initialized = 1;
1543 }
1544}
1545
1413/* Initializes debug data for the specified device */ 1546/* Initializes debug data for the specified device */
1414static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn, 1547static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
1415 struct qed_ptt *p_ptt) 1548 struct qed_ptt *p_ptt)
@@ -1424,13 +1557,17 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
1424 dev_data->mode_enable[MODE_K2] = 1; 1557 dev_data->mode_enable[MODE_K2] = 1;
1425 } else if (QED_IS_BB_B0(p_hwfn->cdev)) { 1558 } else if (QED_IS_BB_B0(p_hwfn->cdev)) {
1426 dev_data->chip_id = CHIP_BB_B0; 1559 dev_data->chip_id = CHIP_BB_B0;
1427 dev_data->mode_enable[MODE_BB_B0] = 1; 1560 dev_data->mode_enable[MODE_BB] = 1;
1428 } else { 1561 } else {
1429 return DBG_STATUS_UNKNOWN_CHIP; 1562 return DBG_STATUS_UNKNOWN_CHIP;
1430 } 1563 }
1431 1564
1432 dev_data->platform_id = PLATFORM_ASIC; 1565 dev_data->platform_id = PLATFORM_ASIC;
1433 dev_data->mode_enable[MODE_ASIC] = 1; 1566 dev_data->mode_enable[MODE_ASIC] = 1;
1567
1568 /* Initializes the GRC parameters */
1569 qed_dbg_grc_init_params(p_hwfn);
1570
1434 dev_data->initialized = true; 1571 dev_data->initialized = true;
1435 return DBG_STATUS_OK; 1572 return DBG_STATUS_OK;
1436} 1573}
@@ -1561,7 +1698,7 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1561 int printed_chars; 1698 int printed_chars;
1562 u32 offset = 0; 1699 u32 offset = 0;
1563 1700
1564 if (dump) { 1701 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1565 /* Read FW image/version from PRAM in a non-reset SEMI */ 1702 /* Read FW image/version from PRAM in a non-reset SEMI */
1566 bool found = false; 1703 bool found = false;
1567 u8 storm_id; 1704 u8 storm_id;
@@ -1622,7 +1759,7 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
1622{ 1759{
1623 char mfw_ver_str[16] = EMPTY_FW_VERSION_STR; 1760 char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
1624 1761
1625 if (dump) { 1762 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1626 u32 global_section_offsize, global_section_addr, mfw_ver; 1763 u32 global_section_offsize, global_section_addr, mfw_ver;
1627 u32 public_data_addr, global_section_offsize_addr; 1764 u32 public_data_addr, global_section_offsize_addr;
1628 int printed_chars; 1765 int printed_chars;
@@ -1683,15 +1820,13 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
1683 bool dump, 1820 bool dump,
1684 u8 num_specific_global_params) 1821 u8 num_specific_global_params)
1685{ 1822{
1823 u8 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
1686 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 1824 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1687 u32 offset = 0; 1825 u32 offset = 0;
1688 1826
1689 /* Find platform string and dump global params section header */ 1827 /* Find platform string and dump global params section header */
1690 offset += qed_dump_section_hdr(dump_buf + offset, 1828 offset += qed_dump_section_hdr(dump_buf + offset,
1691 dump, 1829 dump, "global_params", num_params);
1692 "global_params",
1693 NUM_COMMON_GLOBAL_PARAMS +
1694 num_specific_global_params);
1695 1830
1696 /* Store params */ 1831 /* Store params */
1697 offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump); 1832 offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
@@ -1815,37 +1950,6 @@ static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
1815 } 1950 }
1816} 1951}
1817 1952
1818/* Returns the value of the specified GRC param */
1819static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
1820 enum dbg_grc_params grc_param)
1821{
1822 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1823
1824 return dev_data->grc.param_val[grc_param];
1825}
1826
1827/* Clear all GRC params */
1828static void qed_dbg_grc_clear_params(struct qed_hwfn *p_hwfn)
1829{
1830 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1831 u32 i;
1832
1833 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
1834 dev_data->grc.param_set_by_user[i] = 0;
1835}
1836
1837/* Assign default GRC param values */
1838static void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
1839{
1840 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1841 u32 i;
1842
1843 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
1844 if (!dev_data->grc.param_set_by_user[i])
1845 dev_data->grc.param_val[i] =
1846 s_grc_param_defs[i].default_val[dev_data->chip_id];
1847}
1848
1849/* Returns true if the specified entity (indicated by GRC param) should be 1953/* Returns true if the specified entity (indicated by GRC param) should be
1850 * included in the dump, false otherwise. 1954 * included in the dump, false otherwise.
1851 */ 1955 */
@@ -1971,7 +2075,7 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
1971 } 2075 }
1972} 2076}
1973 2077
1974/* Returns the attention name offsets of the specified block */ 2078/* Returns the attention block data of the specified block */
1975static const struct dbg_attn_block_type_data * 2079static const struct dbg_attn_block_type_data *
1976qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type) 2080qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
1977{ 2081{
@@ -2040,7 +2144,7 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
2040 * The following parameters are dumped: 2144 * The following parameters are dumped:
2041 * - 'count' = num_dumped_entries 2145 * - 'count' = num_dumped_entries
2042 * - 'split' = split_type 2146 * - 'split' = split_type
2043 * - 'id'i = split_id (dumped only if split_id >= 0) 2147 * - 'id' = split_id (dumped only if split_id >= 0)
2044 * - 'param_name' = param_val (user param, dumped only if param_name != NULL and 2148 * - 'param_name' = param_val (user param, dumped only if param_name != NULL and
2045 * param_val != NULL) 2149 * param_val != NULL)
2046 */ 2150 */
@@ -2069,21 +2173,81 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
2069 return offset; 2173 return offset;
2070} 2174}
2071 2175
2072/* Dumps GRC register/memory. Returns the dumped size in dwords. */ 2176/* Dumps the GRC registers in the specified address range.
2177 * Returns the dumped size in dwords.
2178 */
2179static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
2180 struct qed_ptt *p_ptt, u32 *dump_buf,
2181 bool dump, u32 addr, u32 len)
2182{
2183 u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
2184
2185 if (dump)
2186 for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
2187 *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
2188 else
2189 offset += len;
2190 return offset;
2191}
2192
2193/* Dumps GRC registers sequence header. Returns the dumped size in dwords. */
2194static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, bool dump, u32 addr,
2195 u32 len)
2196{
2197 if (dump)
2198 *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2199 return 1;
2200}
2201
2202/* Dumps GRC registers sequence. Returns the dumped size in dwords. */
2073static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn, 2203static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2074 struct qed_ptt *p_ptt, u32 *dump_buf, 2204 struct qed_ptt *p_ptt, u32 *dump_buf,
2075 bool dump, u32 addr, u32 len) 2205 bool dump, u32 addr, u32 len)
2076{ 2206{
2077 u32 offset = 0, i; 2207 u32 offset = 0;
2208
2209 offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2210 offset += qed_grc_dump_addr_range(p_hwfn,
2211 p_ptt,
2212 dump_buf + offset, dump, addr, len);
2213 return offset;
2214}
2215
2216/* Dumps GRC registers sequence with skip cycle.
2217 * Returns the dumped size in dwords.
2218 */
2219static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2220 struct qed_ptt *p_ptt, u32 *dump_buf,
2221 bool dump, u32 addr, u32 total_len,
2222 u32 read_len, u32 skip_len)
2223{
2224 u32 offset = 0, reg_offset = 0;
2078 2225
2226 offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
2079 if (dump) { 2227 if (dump) {
2080 *(dump_buf + offset++) = addr | (len << REG_DUMP_LEN_SHIFT); 2228 while (reg_offset < total_len) {
2081 for (i = 0; i < len; i++, addr++, offset++) 2229 u32 curr_len = min_t(u32,
2082 *(dump_buf + offset) = qed_rd(p_hwfn, 2230 read_len,
2083 p_ptt, 2231 total_len - reg_offset);
2084 DWORDS_TO_BYTES(addr)); 2232 offset += qed_grc_dump_addr_range(p_hwfn,
2233 p_ptt,
2234 dump_buf + offset,
2235 dump, addr, curr_len);
2236 reg_offset += curr_len;
2237 addr += curr_len;
2238 if (reg_offset < total_len) {
2239 curr_len = min_t(u32,
2240 skip_len,
2241 total_len - skip_len);
2242 memset(dump_buf + offset, 0,
2243 DWORDS_TO_BYTES(curr_len));
2244 offset += curr_len;
2245 reg_offset += curr_len;
2246 addr += curr_len;
2247 }
2248 }
2085 } else { 2249 } else {
2086 offset += len + 1; 2250 offset += total_len;
2087 } 2251 }
2088 2252
2089 return offset; 2253 return offset;
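
For reference, the header-plus-data layout produced by qed_grc_dump_reg_entry_hdr() and qed_grc_dump_addr_range() in the hunk above can be sketched as standalone user-space C. Here reg_read() and REG_DUMP_LEN_SHIFT are stand-ins for the driver's qed_rd() and its real shift value, not the actual definitions:

	#include <stdio.h>
	#include <stdint.h>

	#define REG_DUMP_LEN_SHIFT 24		/* placeholder bit layout */

	static uint32_t reg_read(uint32_t dword_addr)
	{
		return 0xa0000000u | dword_addr;	/* fake register contents */
	}

	/* Copy 'len' dwords starting at dword address 'addr' into 'buf';
	 * when 'dump' is false only the size is accounted for.
	 */
	static uint32_t dump_addr_range(uint32_t *buf, int dump, uint32_t addr,
					uint32_t len)
	{
		uint32_t i;

		if (!dump)
			return len;
		for (i = 0; i < len; i++)
			buf[i] = reg_read(addr + i);
		return len;
	}

	static uint32_t dump_reg_entry(uint32_t *buf, int dump, uint32_t addr,
				       uint32_t len)
	{
		uint32_t offset = 0;

		if (dump)
			buf[offset] = addr | (len << REG_DUMP_LEN_SHIFT);
		offset++;			/* header always takes one dword */
		offset += dump_addr_range(buf + offset, dump, addr, len);
		return offset;
	}

	int main(void)
	{
		uint32_t buf[16];
		uint32_t used = dump_reg_entry(buf, 1, 0x1000, 4);

		printf("dumped %u dwords, header=0x%08x\n",
		       (unsigned)used, (unsigned)buf[0]);
		return 0;
	}

The size-only path (dump == false) is what lets callers compute the required buffer size without touching the hardware.
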
@@ -2124,14 +2288,17 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2124 const struct dbg_dump_reg *reg = 2288 const struct dbg_dump_reg *reg =
2125 (const struct dbg_dump_reg *) 2289 (const struct dbg_dump_reg *)
2126 &input_regs_arr.ptr[input_offset]; 2290 &input_regs_arr.ptr[input_offset];
2291 u32 addr, len;
2127 2292
2293 addr = GET_FIELD(reg->data,
2294 DBG_DUMP_REG_ADDRESS);
2295 len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2128 offset += 2296 offset +=
2129 qed_grc_dump_reg_entry(p_hwfn, p_ptt, 2297 qed_grc_dump_reg_entry(p_hwfn, p_ptt,
2130 dump_buf + offset, dump, 2298 dump_buf + offset,
2131 GET_FIELD(reg->data, 2299 dump,
2132 DBG_DUMP_REG_ADDRESS), 2300 addr,
2133 GET_FIELD(reg->data, 2301 len);
2134 DBG_DUMP_REG_LENGTH));
2135 (*num_dumped_reg_entries)++; 2302 (*num_dumped_reg_entries)++;
2136 } 2303 }
2137 } else { 2304 } else {
@@ -2194,8 +2361,14 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2194 const char *param_name, const char *param_val) 2361 const char *param_name, const char *param_val)
2195{ 2362{
2196 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 2363 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2364 struct chip_platform_defs *p_platform_defs;
2197 u32 offset = 0, input_offset = 0; 2365 u32 offset = 0, input_offset = 0;
2198 u8 port_id, pf_id; 2366 struct chip_defs *p_chip_defs;
2367 u8 port_id, pf_id, vf_id;
2368 u16 fid;
2369
2370 p_chip_defs = &s_chip_defs[dev_data->chip_id];
2371 p_platform_defs = &p_chip_defs->per_platform[dev_data->platform_id];
2199 2372
2200 if (dump) 2373 if (dump)
2201 DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n"); 2374 DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
@@ -2214,7 +2387,6 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2214 2387
2215 switch (split_type_id) { 2388 switch (split_type_id) {
2216 case SPLIT_TYPE_NONE: 2389 case SPLIT_TYPE_NONE:
2217 case SPLIT_TYPE_VF:
2218 offset += qed_grc_dump_split_data(p_hwfn, 2390 offset += qed_grc_dump_split_data(p_hwfn,
2219 p_ptt, 2391 p_ptt,
2220 curr_input_regs_arr, 2392 curr_input_regs_arr,
@@ -2227,10 +2399,7 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2227 param_val); 2399 param_val);
2228 break; 2400 break;
2229 case SPLIT_TYPE_PORT: 2401 case SPLIT_TYPE_PORT:
2230 for (port_id = 0; 2402 for (port_id = 0; port_id < p_platform_defs->num_ports;
2231 port_id <
2232 s_chip_defs[dev_data->chip_id].
2233 per_platform[dev_data->platform_id].num_ports;
2234 port_id++) { 2403 port_id++) {
2235 if (dump) 2404 if (dump)
2236 qed_port_pretend(p_hwfn, p_ptt, 2405 qed_port_pretend(p_hwfn, p_ptt,
@@ -2247,20 +2416,48 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2247 break; 2416 break;
2248 case SPLIT_TYPE_PF: 2417 case SPLIT_TYPE_PF:
2249 case SPLIT_TYPE_PORT_PF: 2418 case SPLIT_TYPE_PORT_PF:
2250 for (pf_id = 0; 2419 for (pf_id = 0; pf_id < p_platform_defs->num_pfs;
2251 pf_id <
2252 s_chip_defs[dev_data->chip_id].
2253 per_platform[dev_data->platform_id].num_pfs;
2254 pf_id++) { 2420 pf_id++) {
2255 if (dump) 2421 u8 pfid_shift =
2256 qed_fid_pretend(p_hwfn, p_ptt, pf_id); 2422 PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2257 offset += qed_grc_dump_split_data(p_hwfn, 2423
2258 p_ptt, 2424 if (dump) {
2259 curr_input_regs_arr, 2425 fid = pf_id << pfid_shift;
2260 dump_buf + offset, 2426 qed_fid_pretend(p_hwfn, p_ptt, fid);
2261 dump, block_enable, 2427 }
2262 "pf", pf_id, param_name, 2428
2263 param_val); 2429 offset +=
2430 qed_grc_dump_split_data(p_hwfn, p_ptt,
2431 curr_input_regs_arr,
2432 dump_buf + offset,
2433 dump, block_enable,
2434 "pf", pf_id,
2435 param_name,
2436 param_val);
2437 }
2438 break;
2439 case SPLIT_TYPE_VF:
2440 for (vf_id = 0; vf_id < p_platform_defs->num_vfs;
2441 vf_id++) {
2442 u8 vfvalid_shift =
2443 PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
2444 u8 vfid_shift =
2445 PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
2446
2447 if (dump) {
2448 fid = BIT(vfvalid_shift) |
2449 (vf_id << vfid_shift);
2450 qed_fid_pretend(p_hwfn, p_ptt, fid);
2451 }
2452
2453 offset +=
2454 qed_grc_dump_split_data(p_hwfn, p_ptt,
2455 curr_input_regs_arr,
2456 dump_buf + offset,
2457 dump, block_enable,
2458 "vf", vf_id,
2459 param_name,
2460 param_val);
2264 } 2461 }
2265 break; 2462 break;
2266 default: 2463 default:
@@ -2271,8 +2468,11 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2271 } 2468 }
2272 2469
2273 /* Pretend to original PF */ 2470 /* Pretend to original PF */
2274 if (dump) 2471 if (dump) {
2275 qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 2472 fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2473 qed_fid_pretend(p_hwfn, p_ptt, fid);
2474 }
2475
2276 return offset; 2476 return offset;
2277} 2477}
2278 2478
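
A rough sketch of how the "pretend" concrete FID is composed for the PF and VF splits in the hunk above, as standalone C; the shift positions below are invented placeholders, not the real PXP_PRETEND_CONCRETE_FID_* values:

	#include <stdio.h>
	#include <stdint.h>

	#define FID_PFID_SHIFT		0
	#define FID_VFID_SHIFT		4
	#define FID_VFVALID_SHIFT	12

	static uint16_t pf_fid(uint8_t pf_id)
	{
		return (uint16_t)(pf_id << FID_PFID_SHIFT);
	}

	static uint16_t vf_fid(uint8_t vf_id)
	{
		/* VF entries set the vfvalid bit in addition to the VF index */
		return (uint16_t)((1u << FID_VFVALID_SHIFT) |
				  (vf_id << FID_VFID_SHIFT));
	}

	int main(void)
	{
		printf("pf 3 -> fid 0x%04x\n", (unsigned)pf_fid(3));
		printf("vf 5 -> fid 0x%04x\n", (unsigned)vf_fid(5));
		return 0;
	}
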
@@ -2291,13 +2491,14 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2291 /* Write reset registers */ 2491 /* Write reset registers */
2292 for (i = 0; i < MAX_DBG_RESET_REGS; i++) { 2492 for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2293 if (s_reset_regs_defs[i].exists[dev_data->chip_id]) { 2493 if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
2494 u32 addr = BYTES_TO_DWORDS(s_reset_regs_defs[i].addr);
2495
2294 offset += qed_grc_dump_reg_entry(p_hwfn, 2496 offset += qed_grc_dump_reg_entry(p_hwfn,
2295 p_ptt, 2497 p_ptt,
2296 dump_buf + offset, 2498 dump_buf + offset,
2297 dump, 2499 dump,
2298 BYTES_TO_DWORDS 2500 addr,
2299 (s_reset_regs_defs 2501 1);
2300 [i].addr), 1);
2301 num_regs++; 2502 num_regs++;
2302 } 2503 }
2303 } 2504 }
@@ -2339,6 +2540,7 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2339 &attn_reg_arr[reg_idx]; 2540 &attn_reg_arr[reg_idx];
2340 u16 modes_buf_offset; 2541 u16 modes_buf_offset;
2341 bool eval_mode; 2542 bool eval_mode;
2543 u32 addr;
2342 2544
2343 /* Check mode */ 2545 /* Check mode */
2344 eval_mode = GET_FIELD(reg_data->mode.data, 2546 eval_mode = GET_FIELD(reg_data->mode.data,
@@ -2349,19 +2551,23 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2349 if (!eval_mode || 2551 if (!eval_mode ||
2350 qed_is_mode_match(p_hwfn, &modes_buf_offset)) { 2552 qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
2351 /* Mode match - read and dump registers */ 2553 /* Mode match - read and dump registers */
2352 offset += qed_grc_dump_reg_entry(p_hwfn, 2554 addr = reg_data->mask_address;
2353 p_ptt, 2555 offset +=
2354 dump_buf + offset, 2556 qed_grc_dump_reg_entry(p_hwfn,
2355 dump, 2557 p_ptt,
2356 reg_data->mask_address, 2558 dump_buf + offset,
2357 1); 2559 dump,
2358 offset += qed_grc_dump_reg_entry(p_hwfn, 2560 addr,
2359 p_ptt, 2561 1);
2360 dump_buf + offset, 2562 addr = GET_FIELD(reg_data->data,
2361 dump, 2563 DBG_ATTN_REG_STS_ADDRESS);
2362 GET_FIELD(reg_data->data, 2564 offset +=
2363 DBG_ATTN_REG_STS_ADDRESS), 2565 qed_grc_dump_reg_entry(p_hwfn,
2364 1); 2566 p_ptt,
2567 dump_buf + offset,
2568 dump,
2569 addr,
2570 1);
2365 num_reg_entries += 2; 2571 num_reg_entries += 2;
2366 } 2572 }
2367 } 2573 }
@@ -2369,18 +2575,21 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2369 2575
2370 /* Write storm stall status registers */ 2576 /* Write storm stall status registers */
2371 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { 2577 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2578 u32 addr;
2579
2372 if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] && 2580 if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
2373 dump) 2581 dump)
2374 continue; 2582 continue;
2375 2583
2584 addr =
2585 BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
2586 SEM_FAST_REG_STALLED);
2376 offset += qed_grc_dump_reg_entry(p_hwfn, 2587 offset += qed_grc_dump_reg_entry(p_hwfn,
2377 p_ptt, 2588 p_ptt,
2378 dump_buf + offset, 2589 dump_buf + offset,
2379 dump, 2590 dump,
2380 BYTES_TO_DWORDS(s_storm_defs[storm_id]. 2591 addr,
2381 sem_fast_mem_addr + 2592 1);
2382 SEM_FAST_REG_STALLED),
2383 1);
2384 num_reg_entries++; 2593 num_reg_entries++;
2385 } 2594 }
2386 2595
@@ -2392,11 +2601,47 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2392 return offset; 2601 return offset;
2393} 2602}
2394 2603
2604/* Dumps registers that can't be represented in the debug arrays */
2605static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2606 struct qed_ptt *p_ptt,
2607 u32 *dump_buf, bool dump)
2608{
2609 u32 offset = 0, addr;
2610
2611 offset += qed_grc_dump_regs_hdr(dump_buf,
2612 dump, 2, "eng", -1, NULL, NULL);
2613
2614 /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
2615 * skipped).
2616 */
2617 addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
2618 offset += qed_grc_dump_reg_entry_skip(p_hwfn,
2619 p_ptt,
2620 dump_buf + offset,
2621 dump,
2622 addr,
2623 RDIF_REG_DEBUG_ERROR_INFO_SIZE,
2624 7,
2625 1);
2626 addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
2627 offset +=
2628 qed_grc_dump_reg_entry_skip(p_hwfn,
2629 p_ptt,
2630 dump_buf + offset,
2631 dump,
2632 addr,
2633 TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2634 7,
2635 1);
2636
2637 return offset;
2638}
2639
2395/* Dumps a GRC memory header (section and params). 2640/* Dumps a GRC memory header (section and params).
2396 * The following parameters are dumped: 2641 * The following parameters are dumped:
2397 * name - name is dumped only if it's not NULL. 2642 * name - name is dumped only if it's not NULL.
2398 * addr - byte_addr is dumped only if name is NULL. 2643 * addr - addr is dumped only if name is NULL.
2399 * len - dword_len is always dumped. 2644 * len - len is always dumped.
2400 * width - bit_width is dumped if it's not zero. 2645 * width - bit_width is dumped if it's not zero.
2401 * packed - packed=1 is dumped if it's not false. 2646 * packed - packed=1 is dumped if it's not false.
2402 * mem_group - mem_group is always dumped. 2647 * mem_group - mem_group is always dumped.
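
The R/TDIF error-info dump above relies on qed_grc_dump_reg_entry_skip(), which alternates between reading a run of registers and zero-filling the ones that must be skipped. A minimal standalone sketch of that read/skip cycle (reg_read() and all sizes here are made up):

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	static uint32_t reg_read(uint32_t dword_addr)
	{
		return dword_addr;		/* fake register contents */
	}

	static uint32_t min_u32(uint32_t a, uint32_t b)
	{
		return a < b ? a : b;
	}

	static uint32_t dump_range_skip(uint32_t *buf, uint32_t addr,
					uint32_t total, uint32_t read,
					uint32_t skip)
	{
		uint32_t done = 0, i;

		while (done < total) {
			uint32_t n = min_u32(read, total - done);

			for (i = 0; i < n; i++)
				buf[done + i] = reg_read(addr + done + i);
			done += n;

			if (done < total) {
				n = min_u32(skip, total - done);
				memset(buf + done, 0, n * sizeof(uint32_t));
				done += n;
			}
		}
		return done;
	}

	int main(void)
	{
		uint32_t buf[32];
		uint32_t n = dump_range_skip(buf, 0x100, 24, 7, 1);

		printf("dumped %u dwords, buf[7]=%u (skipped slot)\n",
		       (unsigned)n, (unsigned)buf[7]);
		return 0;
	}
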
@@ -2408,8 +2653,8 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2408 u32 *dump_buf, 2653 u32 *dump_buf,
2409 bool dump, 2654 bool dump,
2410 const char *name, 2655 const char *name,
2411 u32 byte_addr, 2656 u32 addr,
2412 u32 dword_len, 2657 u32 len,
2413 u32 bit_width, 2658 u32 bit_width,
2414 bool packed, 2659 bool packed,
2415 const char *mem_group, 2660 const char *mem_group,
@@ -2419,7 +2664,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2419 u32 offset = 0; 2664 u32 offset = 0;
2420 char buf[64]; 2665 char buf[64];
2421 2666
2422 if (!dword_len) 2667 if (!len)
2423 DP_NOTICE(p_hwfn, 2668 DP_NOTICE(p_hwfn,
2424 "Unexpected GRC Dump error: dumped memory size must be non-zero\n"); 2669 "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
2425 if (bit_width) 2670 if (bit_width)
@@ -2446,20 +2691,21 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2446 DP_VERBOSE(p_hwfn, 2691 DP_VERBOSE(p_hwfn,
2447 QED_MSG_DEBUG, 2692 QED_MSG_DEBUG,
2448 "Dumping %d registers from %s...\n", 2693 "Dumping %d registers from %s...\n",
2449 dword_len, buf); 2694 len, buf);
2450 } else { 2695 } else {
2451 /* Dump address */ 2696 /* Dump address */
2452 offset += qed_dump_num_param(dump_buf + offset, 2697 offset += qed_dump_num_param(dump_buf + offset,
2453 dump, "addr", byte_addr); 2698 dump, "addr",
2454 if (dump && dword_len > 64) 2699 DWORDS_TO_BYTES(addr));
2700 if (dump && len > 64)
2455 DP_VERBOSE(p_hwfn, 2701 DP_VERBOSE(p_hwfn,
2456 QED_MSG_DEBUG, 2702 QED_MSG_DEBUG,
2457 "Dumping %d registers from address 0x%x...\n", 2703 "Dumping %d registers from address 0x%x...\n",
2458 dword_len, byte_addr); 2704 len, (u32)DWORDS_TO_BYTES(addr));
2459 } 2705 }
2460 2706
2461 /* Dump len */ 2707 /* Dump len */
2462 offset += qed_dump_num_param(dump_buf + offset, dump, "len", dword_len); 2708 offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
2463 2709
2464 /* Dump bit width */ 2710 /* Dump bit width */
2465 if (bit_width) 2711 if (bit_width)
@@ -2492,8 +2738,8 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
2492 u32 *dump_buf, 2738 u32 *dump_buf,
2493 bool dump, 2739 bool dump,
2494 const char *name, 2740 const char *name,
2495 u32 byte_addr, 2741 u32 addr,
2496 u32 dword_len, 2742 u32 len,
2497 u32 bit_width, 2743 u32 bit_width,
2498 bool packed, 2744 bool packed,
2499 const char *mem_group, 2745 const char *mem_group,
@@ -2505,21 +2751,14 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
2505 dump_buf + offset, 2751 dump_buf + offset,
2506 dump, 2752 dump,
2507 name, 2753 name,
2508 byte_addr, 2754 addr,
2509 dword_len, 2755 len,
2510 bit_width, 2756 bit_width,
2511 packed, 2757 packed,
2512 mem_group, is_storm, storm_letter); 2758 mem_group, is_storm, storm_letter);
2513 if (dump) { 2759 offset += qed_grc_dump_addr_range(p_hwfn,
2514 u32 i; 2760 p_ptt,
2515 2761 dump_buf + offset, dump, addr, len);
2516 for (i = 0; i < dword_len;
2517 i++, byte_addr += BYTES_IN_DWORD, offset++)
2518 *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
2519 } else {
2520 offset += dword_len;
2521 }
2522
2523 return offset; 2762 return offset;
2524} 2763}
2525 2764
@@ -2575,25 +2814,41 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
2575 if (qed_grc_is_mem_included(p_hwfn, 2814 if (qed_grc_is_mem_included(p_hwfn,
2576 (enum block_id)cond_hdr->block_id, 2815 (enum block_id)cond_hdr->block_id,
2577 mem_group_id)) { 2816 mem_group_id)) {
2578 u32 mem_byte_addr = 2817 u32 mem_addr = GET_FIELD(mem->dword0,
2579 DWORDS_TO_BYTES(GET_FIELD(mem->dword0, 2818 DBG_DUMP_MEM_ADDRESS);
2580 DBG_DUMP_MEM_ADDRESS));
2581 u32 mem_len = GET_FIELD(mem->dword1, 2819 u32 mem_len = GET_FIELD(mem->dword1,
2582 DBG_DUMP_MEM_LENGTH); 2820 DBG_DUMP_MEM_LENGTH);
2821 enum dbg_grc_params grc_param;
2583 char storm_letter = 'a'; 2822 char storm_letter = 'a';
2584 bool is_storm = false; 2823 bool is_storm = false;
2585 2824
2586 /* Update memory length for CCFC/TCFC memories 2825 /* Update memory length for CCFC/TCFC memories
2587 * according to number of LCIDs/LTIDs. 2826 * according to number of LCIDs/LTIDs.
2588 */ 2827 */
2589 if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) 2828 if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
2829 if (mem_len % MAX_LCIDS != 0) {
2830 DP_NOTICE(p_hwfn,
2831 "Invalid CCFC connection memory size\n");
2832 return 0;
2833 }
2834
2835 grc_param = DBG_GRC_PARAM_NUM_LCIDS;
2590 mem_len = qed_grc_get_param(p_hwfn, 2836 mem_len = qed_grc_get_param(p_hwfn,
2591 DBG_GRC_PARAM_NUM_LCIDS) 2837 grc_param) *
2592 * (mem_len / MAX_LCIDS); 2838 (mem_len / MAX_LCIDS);
2593 else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) 2839 } else if (mem_group_id ==
2840 MEM_GROUP_TASK_CFC_MEM) {
2841 if (mem_len % MAX_LTIDS != 0) {
2842 DP_NOTICE(p_hwfn,
2843 "Invalid TCFC task memory size\n");
2844 return 0;
2845 }
2846
2847 grc_param = DBG_GRC_PARAM_NUM_LTIDS;
2594 mem_len = qed_grc_get_param(p_hwfn, 2848 mem_len = qed_grc_get_param(p_hwfn,
2595 DBG_GRC_PARAM_NUM_LTIDS) 2849 grc_param) *
2596 * (mem_len / MAX_LTIDS); 2850 (mem_len / MAX_LTIDS);
2851 }
2597 2852
2598 /* If memory is associated with Storm, update 2853 /* If memory is associated with Storm, update
2599 * Storm details. 2854 * Storm details.
@@ -2610,7 +2865,7 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
2610 /* Dump memory */ 2865 /* Dump memory */
2611 offset += qed_grc_dump_mem(p_hwfn, p_ptt, 2866 offset += qed_grc_dump_mem(p_hwfn, p_ptt,
2612 dump_buf + offset, dump, NULL, 2867 dump_buf + offset, dump, NULL,
2613 mem_byte_addr, mem_len, 0, 2868 mem_addr, mem_len, 0,
2614 false, 2869 false,
2615 s_mem_group_names[mem_group_id], 2870 s_mem_group_names[mem_group_id],
2616 is_storm, storm_letter); 2871 is_storm, storm_letter);
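
The CCFC/TCFC adjustment in the hunk above scales the dumped length by the configured number of LCIDs/LTIDs after checking that the memory size divides evenly. A small standalone sketch of that arithmetic (the MAX_LCIDS value is a placeholder):

	#include <stdio.h>
	#include <stdint.h>

	#define MAX_LCIDS 320			/* placeholder maximum */

	static int scaled_len(uint32_t mem_len, uint32_t num_lcids, uint32_t *out)
	{
		if (mem_len % MAX_LCIDS != 0)
			return -1;		/* invalid memory size */
		*out = num_lcids * (mem_len / MAX_LCIDS);
		return 0;
	}

	int main(void)
	{
		uint32_t len;

		if (scaled_len(320 * 8, 64, &len) == 0)
			printf("dumping %u dwords (64 of %d LCIDs)\n",
			       (unsigned)len, MAX_LCIDS);
		return 0;
	}
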
@@ -2799,29 +3054,31 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
2799 u32 offset = 0; 3054 u32 offset = 0;
2800 3055
2801 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { 3056 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2802 if (qed_grc_is_storm_included(p_hwfn, 3057 struct storm_defs *storm = &s_storm_defs[storm_id];
2803 (enum dbg_storms)storm_id)) {
2804 for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
2805 u32 addr =
2806 s_storm_defs[storm_id].sem_fast_mem_addr +
2807 SEM_FAST_REG_STORM_REG_FILE +
2808 DWORDS_TO_BYTES(IOR_SET_OFFSET(set_id));
2809 3058
2810 buf[strlen(buf) - 1] = '0' + set_id; 3059 if (!qed_grc_is_storm_included(p_hwfn,
2811 offset += qed_grc_dump_mem(p_hwfn, 3060 (enum dbg_storms)storm_id))
2812 p_ptt, 3061 continue;
2813 dump_buf + offset, 3062
2814 dump, 3063 for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
2815 buf, 3064 u32 dwords, addr;
2816 addr, 3065
2817 IORS_PER_SET, 3066 dwords = storm->sem_fast_mem_addr +
2818 32, 3067 SEM_FAST_REG_STORM_REG_FILE;
2819 false, 3068 addr = BYTES_TO_DWORDS(dwords) + IOR_SET_OFFSET(set_id);
2820 "ior", 3069 buf[strlen(buf) - 1] = '0' + set_id;
2821 true, 3070 offset += qed_grc_dump_mem(p_hwfn,
2822 s_storm_defs 3071 p_ptt,
2823 [storm_id].letter); 3072 dump_buf + offset,
2824 } 3073 dump,
3074 buf,
3075 addr,
3076 IORS_PER_SET,
3077 32,
3078 false,
3079 "ior",
3080 true,
3081 storm->letter);
2825 } 3082 }
2826 } 3083 }
2827 3084
@@ -2990,34 +3247,39 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
2990 struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id]; 3247 struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id];
2991 u32 num_entries = rss_defs->num_entries[dev_data->chip_id]; 3248 u32 num_entries = rss_defs->num_entries[dev_data->chip_id];
2992 u32 entry_width = rss_defs->entry_width[dev_data->chip_id]; 3249 u32 entry_width = rss_defs->entry_width[dev_data->chip_id];
2993 u32 total_size = (num_entries * entry_width) / 32; 3250 u32 total_dwords = (num_entries * entry_width) / 32;
3251 u32 size = RSS_REG_RSS_RAM_DATA_SIZE;
2994 bool packed = (entry_width == 16); 3252 bool packed = (entry_width == 16);
2995 u32 addr = rss_defs->addr; 3253 u32 rss_addr = rss_defs->addr;
2996 u32 i, j; 3254 u32 i, addr;
2997 3255
2998 offset += qed_grc_dump_mem_hdr(p_hwfn, 3256 offset += qed_grc_dump_mem_hdr(p_hwfn,
2999 dump_buf + offset, 3257 dump_buf + offset,
3000 dump, 3258 dump,
3001 rss_defs->mem_name, 3259 rss_defs->mem_name,
3002 addr, 3260 0,
3003 total_size, 3261 total_dwords,
3004 entry_width, 3262 entry_width,
3005 packed, 3263 packed,
3006 rss_defs->type_name, false, 0); 3264 rss_defs->type_name, false, 0);
3007 3265
3008 if (!dump) { 3266 if (!dump) {
3009 offset += total_size; 3267 offset += total_dwords;
3010 continue; 3268 continue;
3011 } 3269 }
3012 3270
3013 /* Dump RSS data */ 3271 /* Dump RSS data */
3014 for (i = 0; i < BYTES_TO_DWORDS(total_size); i++, addr++) { 3272 for (i = 0; i < total_dwords;
3015 qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, addr); 3273 i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) {
3016 for (j = 0; j < BYTES_IN_DWORD; j++, offset++) 3274 addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
3017 *(dump_buf + offset) = 3275 qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3018 qed_rd(p_hwfn, p_ptt, 3276 offset += qed_grc_dump_addr_range(p_hwfn,
3019 RSS_REG_RSS_RAM_DATA + 3277 p_ptt,
3020 DWORDS_TO_BYTES(j)); 3278 dump_buf +
3279 offset,
3280 dump,
3281 addr,
3282 size);
3021 } 3283 }
3022 } 3284 }
3023 3285
@@ -3030,19 +3292,19 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3030 u32 *dump_buf, bool dump, u8 big_ram_id) 3292 u32 *dump_buf, bool dump, u8 big_ram_id)
3031{ 3293{
3032 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 3294 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3295 u32 total_blocks, ram_size, offset = 0, i;
3033 char mem_name[12] = "???_BIG_RAM"; 3296 char mem_name[12] = "???_BIG_RAM";
3034 char type_name[8] = "???_RAM"; 3297 char type_name[8] = "???_RAM";
3035 u32 ram_size, total_blocks; 3298 struct big_ram_defs *big_ram;
3036 u32 offset = 0, i, j;
3037 3299
3038 total_blocks = 3300 big_ram = &s_big_ram_defs[big_ram_id];
3039 s_big_ram_defs[big_ram_id].num_of_blocks[dev_data->chip_id]; 3301 total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
3040 ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS; 3302 ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
3041 3303
3042 strncpy(type_name, s_big_ram_defs[big_ram_id].instance_name, 3304 strncpy(type_name, big_ram->instance_name,
3043 strlen(s_big_ram_defs[big_ram_id].instance_name)); 3305 strlen(big_ram->instance_name));
3044 strncpy(mem_name, s_big_ram_defs[big_ram_id].instance_name, 3306 strncpy(mem_name, big_ram->instance_name,
3045 strlen(s_big_ram_defs[big_ram_id].instance_name)); 3307 strlen(big_ram->instance_name));
3046 3308
3047 /* Dump memory header */ 3309 /* Dump memory header */
3048 offset += qed_grc_dump_mem_hdr(p_hwfn, 3310 offset += qed_grc_dump_mem_hdr(p_hwfn,
@@ -3059,13 +3321,17 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3059 3321
3060 /* Read and dump Big RAM data */ 3322 /* Read and dump Big RAM data */
3061 for (i = 0; i < total_blocks / 2; i++) { 3323 for (i = 0; i < total_blocks / 2; i++) {
3062 qed_wr(p_hwfn, p_ptt, s_big_ram_defs[big_ram_id].addr_reg_addr, 3324 u32 addr, len;
3063 i); 3325
3064 for (j = 0; j < 2 * BIG_RAM_BLOCK_SIZE_DWORDS; j++, offset++) 3326 qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3065 *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, 3327 addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3066 s_big_ram_defs[big_ram_id]. 3328 len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
3067 data_reg_addr + 3329 offset += qed_grc_dump_addr_range(p_hwfn,
3068 DWORDS_TO_BYTES(j)); 3330 p_ptt,
3331 dump_buf + offset,
3332 dump,
3333 addr,
3334 len);
3069 } 3335 }
3070 3336
3071 return offset; 3337 return offset;
@@ -3075,11 +3341,11 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3075 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) 3341 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3076{ 3342{
3077 bool block_enable[MAX_BLOCK_ID] = { 0 }; 3343 bool block_enable[MAX_BLOCK_ID] = { 0 };
3344 u32 offset = 0, addr;
3078 bool halted = false; 3345 bool halted = false;
3079 u32 offset = 0;
3080 3346
3081 /* Halt MCP */ 3347 /* Halt MCP */
3082 if (dump) { 3348 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3083 halted = !qed_mcp_halt(p_hwfn, p_ptt); 3349 halted = !qed_mcp_halt(p_hwfn, p_ptt);
3084 if (!halted) 3350 if (!halted)
3085 DP_NOTICE(p_hwfn, "MCP halt failed!\n"); 3351 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
@@ -3091,7 +3357,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3091 dump_buf + offset, 3357 dump_buf + offset,
3092 dump, 3358 dump,
3093 NULL, 3359 NULL,
3094 MCP_REG_SCRATCH, 3360 BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3095 MCP_REG_SCRATCH_SIZE, 3361 MCP_REG_SCRATCH_SIZE,
3096 0, false, "MCP", false, 0); 3362 0, false, "MCP", false, 0);
3097 3363
@@ -3101,7 +3367,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3101 dump_buf + offset, 3367 dump_buf + offset,
3102 dump, 3368 dump,
3103 NULL, 3369 NULL,
3104 MCP_REG_CPU_REG_FILE, 3370 BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3105 MCP_REG_CPU_REG_FILE_SIZE, 3371 MCP_REG_CPU_REG_FILE_SIZE,
3106 0, false, "MCP", false, 0); 3372 0, false, "MCP", false, 0);
3107 3373
@@ -3115,12 +3381,13 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3115 /* Dump required non-MCP registers */ 3381 /* Dump required non-MCP registers */
3116 offset += qed_grc_dump_regs_hdr(dump_buf + offset, 3382 offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3117 dump, 1, "eng", -1, "block", "MCP"); 3383 dump, 1, "eng", -1, "block", "MCP");
3384 addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3118 offset += qed_grc_dump_reg_entry(p_hwfn, 3385 offset += qed_grc_dump_reg_entry(p_hwfn,
3119 p_ptt, 3386 p_ptt,
3120 dump_buf + offset, 3387 dump_buf + offset,
3121 dump, 3388 dump,
3122 BYTES_TO_DWORDS 3389 addr,
3123 (MISC_REG_SHARED_MEM_ADDR), 1); 3390 1);
3124 3391
3125 /* Release MCP */ 3392 /* Release MCP */
3126 if (halted && qed_mcp_resume(p_hwfn, p_ptt)) 3393 if (halted && qed_mcp_resume(p_hwfn, p_ptt))
@@ -3212,7 +3479,7 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3212{ 3479{
3213 u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS; 3480 u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS;
3214 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 3481 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3215 u32 offset = 0, block_id, line_id, addr, i; 3482 u32 offset = 0, block_id, line_id;
3216 struct block_defs *p_block_defs; 3483 struct block_defs *p_block_defs;
3217 3484
3218 if (dump) { 3485 if (dump) {
@@ -3255,6 +3522,8 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3255 if (dump && !dev_data->block_in_reset[block_id]) { 3522 if (dump && !dev_data->block_in_reset[block_id]) {
3256 u8 dbg_client_id = 3523 u8 dbg_client_id =
3257 p_block_defs->dbg_client_id[dev_data->chip_id]; 3524 p_block_defs->dbg_client_id[dev_data->chip_id];
3525 u32 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3526 u32 len = STATIC_DEBUG_LINE_DWORDS;
3258 3527
3259 /* Enable block's client */ 3528 /* Enable block's client */
3260 qed_bus_enable_clients(p_hwfn, p_ptt, 3529 qed_bus_enable_clients(p_hwfn, p_ptt,
@@ -3270,11 +3539,13 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3270 0xf, 0, 0, 0); 3539 0xf, 0, 0, 0);
3271 3540
3272 /* Read debug line info */ 3541 /* Read debug line info */
3273 for (i = 0, addr = DBG_REG_CALENDAR_OUT_DATA; 3542 offset +=
3274 i < STATIC_DEBUG_LINE_DWORDS; 3543 qed_grc_dump_addr_range(p_hwfn,
3275 i++, offset++, addr += BYTES_IN_DWORD) 3544 p_ptt,
3276 dump_buf[offset] = qed_rd(p_hwfn, p_ptt, 3545 dump_buf + offset,
3277 addr); 3546 dump,
3547 addr,
3548 len);
3278 } 3549 }
3279 3550
3280 /* Disable block's client and debug output */ 3551 /* Disable block's client and debug output */
@@ -3311,14 +3582,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3311 u8 i, port_mode = 0; 3582 u8 i, port_mode = 0;
3312 u32 offset = 0; 3583 u32 offset = 0;
3313 3584
3314 /* Check if emulation platform */
3315 *num_dumped_dwords = 0; 3585 *num_dumped_dwords = 0;
3316 3586
3317 /* Fill GRC parameters that were not set by the user with their default
3318 * value.
3319 */
3320 qed_dbg_grc_set_params_default(p_hwfn);
3321
3322 /* Find port mode */ 3587 /* Find port mode */
3323 if (dump) { 3588 if (dump) {
3324 switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) { 3589 switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
@@ -3370,15 +3635,14 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3370 } 3635 }
3371 3636
3372 /* Disable all parities using MFW command */ 3637 /* Disable all parities using MFW command */
3373 if (dump) { 3638 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3374 parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1); 3639 parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
3375 if (!parities_masked) { 3640 if (!parities_masked) {
3641 DP_NOTICE(p_hwfn,
3642 "Failed to mask parities using MFW\n");
3376 if (qed_grc_get_param 3643 if (qed_grc_get_param
3377 (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE)) 3644 (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
3378 return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY; 3645 return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
3379 else
3380 DP_NOTICE(p_hwfn,
3381 "Failed to mask parities using MFW\n");
3382 } 3646 }
3383 } 3647 }
3384 3648
@@ -3409,6 +3673,11 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3409 offset, 3673 offset,
3410 dump, 3674 dump,
3411 block_enable, NULL, NULL); 3675 block_enable, NULL, NULL);
3676
3677 /* Dump special registers */
3678 offset += qed_grc_dump_special_regs(p_hwfn,
3679 p_ptt,
3680 dump_buf + offset, dump);
3412 } 3681 }
3413 3682
3414 /* Dump memories */ 3683 /* Dump memories */
@@ -3583,9 +3852,9 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
3583 } 3852 }
3584 3853
3585 if (mode_match) { 3854 if (mode_match) {
3586 u32 grc_addr = 3855 u32 addr =
3587 DWORDS_TO_BYTES(GET_FIELD(reg->data, 3856 GET_FIELD(reg->data,
3588 DBG_IDLE_CHK_INFO_REG_ADDRESS)); 3857 DBG_IDLE_CHK_INFO_REG_ADDRESS);
3589 3858
3590 /* Write register header */ 3859 /* Write register header */
3591 struct dbg_idle_chk_result_reg_hdr *reg_hdr = 3860 struct dbg_idle_chk_result_reg_hdr *reg_hdr =
@@ -3597,16 +3866,19 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
3597 memset(reg_hdr, 0, sizeof(*reg_hdr)); 3866 memset(reg_hdr, 0, sizeof(*reg_hdr));
3598 reg_hdr->size = reg->size; 3867 reg_hdr->size = reg->size;
3599 SET_FIELD(reg_hdr->data, 3868 SET_FIELD(reg_hdr->data,
3600 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, 3869 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
3601 rule->num_cond_regs + reg_id); 3870 rule->num_cond_regs + reg_id);
3602 3871
3603 /* Write register values */ 3872 /* Write register values */
3604 for (i = 0; i < reg->size; 3873 offset +=
3605 i++, offset++, grc_addr += 4) 3874 qed_grc_dump_addr_range(p_hwfn,
3606 dump_buf[offset] = 3875 p_ptt,
3607 qed_rd(p_hwfn, p_ptt, grc_addr); 3876 dump_buf + offset,
3608 } 3877 dump,
3878 addr,
3879 reg->size);
3609 } 3880 }
3881 }
3610 } 3882 }
3611 3883
3612 return offset; 3884 return offset;
@@ -3621,7 +3893,7 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3621{ 3893{
3622 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 3894 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3623 u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE]; 3895 u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
3624 u32 i, j, offset = 0; 3896 u32 i, offset = 0;
3625 u16 entry_id; 3897 u16 entry_id;
3626 u8 reg_id; 3898 u8 reg_id;
3627 3899
@@ -3664,73 +3936,83 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3664 if (!check_rule && dump) 3936 if (!check_rule && dump)
3665 continue; 3937 continue;
3666 3938
3939 if (!dump) {
3940 u32 entry_dump_size =
3941 qed_idle_chk_dump_failure(p_hwfn,
3942 p_ptt,
3943 dump_buf + offset,
3944 false,
3945 rule->rule_id,
3946 rule,
3947 0,
3948 NULL);
3949
3950 offset += num_reg_entries * entry_dump_size;
3951 (*num_failing_rules) += num_reg_entries;
3952 continue;
3953 }
3954
3667 /* Go over all register entries (number of entries is the same 3955 /* Go over all register entries (number of entries is the same
3668 * for all condition registers). 3956 * for all condition registers).
3669 */ 3957 */
3670 for (entry_id = 0; entry_id < num_reg_entries; entry_id++) { 3958 for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
3671 /* Read current entry of all condition registers */ 3959 /* Read current entry of all condition registers */
3672 if (dump) { 3960 u32 next_reg_offset = 0;
3673 u32 next_reg_offset = 0;
3674
3675 for (reg_id = 0;
3676 reg_id < rule->num_cond_regs;
3677 reg_id++) {
3678 const struct dbg_idle_chk_cond_reg
3679 *reg = &cond_regs[reg_id];
3680
3681 /* Find GRC address (if it's a memory,
3682 * the address of the specific entry is
3683 * calculated).
3684 */
3685 u32 grc_addr =
3686 DWORDS_TO_BYTES(
3687 GET_FIELD(reg->data,
3688 DBG_IDLE_CHK_COND_REG_ADDRESS));
3689
3690 if (reg->num_entries > 1 ||
3691 reg->start_entry > 0) {
3692 u32 padded_entry_size =
3693 reg->entry_size > 1 ?
3694 roundup_pow_of_two
3695 (reg->entry_size) : 1;
3696
3697 grc_addr +=
3698 DWORDS_TO_BYTES(
3699 (reg->start_entry +
3700 entry_id)
3701 * padded_entry_size);
3702 }
3703 3961
3704 /* Read registers */ 3962 for (reg_id = 0; reg_id < rule->num_cond_regs;
3705 if (next_reg_offset + reg->entry_size >= 3963 reg_id++) {
3706 IDLE_CHK_MAX_ENTRIES_SIZE) { 3964 const struct dbg_idle_chk_cond_reg *reg =
3707 DP_NOTICE(p_hwfn, 3965 &cond_regs[reg_id];
3708 "idle check registers entry is too large\n");
3709 return 0;
3710 }
3711 3966
3712 for (j = 0; j < reg->entry_size; 3967 /* Find GRC address (if it's a memory,the
3713 j++, next_reg_offset++, 3968 * address of the specific entry is calculated).
3714 grc_addr += 4) 3969 */
3715 cond_reg_values[next_reg_offset] = 3970 u32 addr =
3716 qed_rd(p_hwfn, p_ptt, grc_addr); 3971 GET_FIELD(reg->data,
3972 DBG_IDLE_CHK_COND_REG_ADDRESS);
3973
3974 if (reg->num_entries > 1 ||
3975 reg->start_entry > 0) {
3976 u32 padded_entry_size =
3977 reg->entry_size > 1 ?
3978 roundup_pow_of_two(reg->entry_size) :
3979 1;
3980
3981 addr += (reg->start_entry + entry_id) *
3982 padded_entry_size;
3717 } 3983 }
3984
3985 /* Read registers */
3986 if (next_reg_offset + reg->entry_size >=
3987 IDLE_CHK_MAX_ENTRIES_SIZE) {
3988 DP_NOTICE(p_hwfn,
3989 "idle check registers entry is too large\n");
3990 return 0;
3991 }
3992
3993 next_reg_offset +=
3994 qed_grc_dump_addr_range(p_hwfn,
3995 p_ptt,
3996 cond_reg_values +
3997 next_reg_offset,
3998 dump, addr,
3999 reg->entry_size);
3718 } 4000 }
3719 4001
3720 /* Call rule's condition function - a return value of 4002 /* Call rule's condition function - a return value of
3721 * true indicates failure. 4003 * true indicates failure.
3722 */ 4004 */
3723 if ((*cond_arr[rule->cond_id])(cond_reg_values, 4005 if ((*cond_arr[rule->cond_id])(cond_reg_values,
3724 imm_values) || !dump) { 4006 imm_values)) {
3725 offset += 4007 offset +=
3726 qed_idle_chk_dump_failure(p_hwfn, 4008 qed_idle_chk_dump_failure(p_hwfn,
3727 p_ptt, 4009 p_ptt,
3728 dump_buf + offset, 4010 dump_buf + offset,
3729 dump, 4011 dump,
3730 rule->rule_id, 4012 rule->rule_id,
3731 rule, 4013 rule,
3732 entry_id, 4014 entry_id,
3733 cond_reg_values); 4015 cond_reg_values);
3734 (*num_failing_rules)++; 4016 (*num_failing_rules)++;
3735 break; 4017 break;
3736 } 4018 }
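
The new !dump branch above sizes the idle-check buffer by charging every register entry of every rule as if it failed, so the later real dump can never overflow the buffer. A standalone sketch of that worst-case sizing pass (struct fields and numbers are invented):

	#include <stdio.h>
	#include <stdint.h>

	struct rule {
		uint32_t num_entries;		/* entries per condition register */
		uint32_t failure_dwords;	/* dwords dumped per failing entry */
	};

	static uint32_t idle_chk_size(const struct rule *rules, unsigned int n)
	{
		uint32_t dwords = 0;
		unsigned int i;

		for (i = 0; i < n; i++)
			dwords += rules[i].num_entries * rules[i].failure_dwords;
		return dwords;
	}

	int main(void)
	{
		struct rule rules[] = { { 4, 6 }, { 1, 3 } };

		printf("worst-case idle-check size: %u dwords\n",
		       (unsigned)idle_chk_size(rules, 2));
		return 0;
	}
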
@@ -3818,13 +4100,18 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
3818 struct mcp_file_att file_att; 4100 struct mcp_file_att file_att;
3819 4101
3820 /* Call NVRAM get file command */ 4102 /* Call NVRAM get file command */
3821 if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT, 4103 int nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
3822 image_type, &ret_mcp_resp, &ret_mcp_param, 4104 p_ptt,
3823 &ret_txn_size, (u32 *)&file_att) != 0) 4105 DRV_MSG_CODE_NVM_GET_FILE_ATT,
3824 return DBG_STATUS_NVRAM_GET_IMAGE_FAILED; 4106 image_type,
4107 &ret_mcp_resp,
4108 &ret_mcp_param,
4109 &ret_txn_size,
4110 (u32 *)&file_att);
3825 4111
3826 /* Check response */ 4112 /* Check response */
3827 if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) 4113 if (nvm_result ||
4114 (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3828 return DBG_STATUS_NVRAM_GET_IMAGE_FAILED; 4115 return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
3829 4116
3830 /* Update return values */ 4117 /* Update return values */
@@ -3944,7 +4231,6 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
3944 u32 running_mfw_addr = 4231 u32 running_mfw_addr =
3945 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) + 4232 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
3946 QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes; 4233 QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
3947 enum dbg_status status;
3948 u32 nvram_image_type; 4234 u32 nvram_image_type;
3949 4235
3950 *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr); 4236 *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
@@ -3955,30 +4241,12 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
3955 nvram_image_type = 4241 nvram_image_type =
3956 (*running_bundle_id == 4242 (*running_bundle_id ==
3957 DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2; 4243 DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
3958 status = qed_find_nvram_image(p_hwfn,
3959 p_ptt,
3960 nvram_image_type,
3961 trace_meta_offset_bytes,
3962 trace_meta_size_bytes);
3963
3964 return status;
3965}
3966
3967/* Reads the MCP Trace data from the specified GRC address into the specified
3968 * buffer.
3969 */
3970static void qed_mcp_trace_read_data(struct qed_hwfn *p_hwfn,
3971 struct qed_ptt *p_ptt,
3972 u32 grc_addr, u32 size_in_dwords, u32 *buf)
3973{
3974 u32 i;
3975 4244
3976 DP_VERBOSE(p_hwfn, 4245 return qed_find_nvram_image(p_hwfn,
3977 QED_MSG_DEBUG, 4246 p_ptt,
3978 "mcp_trace_read_data: reading trace data of size %d dwords from GRC address 0x%x\n", 4247 nvram_image_type,
3979 size_in_dwords, grc_addr); 4248 trace_meta_offset_bytes,
3980 for (i = 0; i < size_in_dwords; i++, grc_addr += BYTES_IN_DWORD) 4249 trace_meta_size_bytes);
3981 buf[i] = qed_rd(p_hwfn, p_ptt, grc_addr);
3982} 4250}
3983 4251
3984/* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified 4252/* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified
@@ -4034,11 +4302,14 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4034 bool dump, u32 *num_dumped_dwords) 4302 bool dump, u32 *num_dumped_dwords)
4035{ 4303{
4036 u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords; 4304 u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4037 u32 trace_meta_size_dwords, running_bundle_id, offset = 0; 4305 u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4038 u32 trace_meta_offset_bytes, trace_meta_size_bytes; 4306 u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4039 enum dbg_status status; 4307 enum dbg_status status;
4308 bool mcp_access;
4040 int halted = 0; 4309 int halted = 0;
4041 4310
4311 mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4312
4042 *num_dumped_dwords = 0; 4313 *num_dumped_dwords = 0;
4043 4314
4044 /* Get trace data info */ 4315 /* Get trace data info */
@@ -4060,7 +4331,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4060 * consistent if halt fails, MCP trace is taken anyway, with a small 4331 * consistent if halt fails, MCP trace is taken anyway, with a small
4061 * risk that it may be corrupt. 4332 * risk that it may be corrupt.
4062 */ 4333 */
4063 if (dump) { 4334 if (dump && mcp_access) {
4064 halted = !qed_mcp_halt(p_hwfn, p_ptt); 4335 halted = !qed_mcp_halt(p_hwfn, p_ptt);
4065 if (!halted) 4336 if (!halted)
4066 DP_NOTICE(p_hwfn, "MCP halt failed!\n"); 4337 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
@@ -4078,13 +4349,12 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4078 dump, "size", trace_data_size_dwords); 4349 dump, "size", trace_data_size_dwords);
4079 4350
4080 /* Read trace data from scratchpad into dump buffer */ 4351 /* Read trace data from scratchpad into dump buffer */
4081 if (dump) 4352 offset += qed_grc_dump_addr_range(p_hwfn,
4082 qed_mcp_trace_read_data(p_hwfn, 4353 p_ptt,
4083 p_ptt, 4354 dump_buf + offset,
4084 trace_data_grc_addr, 4355 dump,
4085 trace_data_size_dwords, 4356 BYTES_TO_DWORDS(trace_data_grc_addr),
4086 dump_buf + offset); 4357 trace_data_size_dwords);
4087 offset += trace_data_size_dwords;
4088 4358
4089 /* Resume MCP (only if halt succeeded) */ 4359 /* Resume MCP (only if halt succeeded) */
4090 if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0) 4360 if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
@@ -4095,38 +4365,38 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4095 dump, "mcp_trace_meta", 1); 4365 dump, "mcp_trace_meta", 1);
4096 4366
4097 /* Read trace meta info */ 4367 /* Read trace meta info */
4098 status = qed_mcp_trace_get_meta_info(p_hwfn, 4368 if (mcp_access) {
4099 p_ptt, 4369 status = qed_mcp_trace_get_meta_info(p_hwfn,
4100 trace_data_size_bytes, 4370 p_ptt,
4101 &running_bundle_id, 4371 trace_data_size_bytes,
4102 &trace_meta_offset_bytes, 4372 &running_bundle_id,
4103 &trace_meta_size_bytes); 4373 &trace_meta_offset_bytes,
4104 if (status != DBG_STATUS_OK) 4374 &trace_meta_size_bytes);
4105 return status; 4375 if (status == DBG_STATUS_OK)
4376 trace_meta_size_dwords =
4377 BYTES_TO_DWORDS(trace_meta_size_bytes);
4378 }
4106 4379
4107 /* Dump trace meta size param (trace_meta_size_bytes is always 4380 /* Dump trace meta size param */
4108 * dword-aligned). 4381 offset += qed_dump_num_param(dump_buf + offset,
4109 */ 4382 dump, "size", trace_meta_size_dwords);
4110 trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
4111 offset += qed_dump_num_param(dump_buf + offset, dump, "size",
4112 trace_meta_size_dwords);
4113 4383
4114 /* Read trace meta image into dump buffer */ 4384 /* Read trace meta image into dump buffer */
4115 if (dump) { 4385 if (dump && trace_meta_size_dwords)
4116 status = qed_mcp_trace_read_meta(p_hwfn, 4386 status = qed_mcp_trace_read_meta(p_hwfn,
4117 p_ptt, 4387 p_ptt,
4118 trace_meta_offset_bytes, 4388 trace_meta_offset_bytes,
4119 trace_meta_size_bytes, 4389 trace_meta_size_bytes,
4120 dump_buf + offset); 4390 dump_buf + offset);
4121 if (status != DBG_STATUS_OK) 4391 if (status == DBG_STATUS_OK)
4122 return status; 4392 offset += trace_meta_size_dwords;
4123 }
4124
4125 offset += trace_meta_size_dwords;
4126 4393
4127 *num_dumped_dwords = offset; 4394 *num_dumped_dwords = offset;
4128 4395
4129 return DBG_STATUS_OK; 4396 /* If no mcp access, indicate that the dump doesn't contain the meta
4397 * data from NVRAM.
4398 */
4399 return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4130} 4400}
4131 4401
4132/* Dump GRC FIFO */ 4402/* Dump GRC FIFO */
@@ -4311,9 +4581,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4311 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) 4581 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4312{ 4582{
4313 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 4583 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4584 struct fw_asserts_ram_section *asserts;
4314 char storm_letter_str[2] = "?"; 4585 char storm_letter_str[2] = "?";
4315 struct fw_info fw_info; 4586 struct fw_info fw_info;
4316 u32 offset = 0, i; 4587 u32 offset = 0;
4317 u8 storm_id; 4588 u8 storm_id;
4318 4589
4319 /* Dump global params */ 4590 /* Dump global params */
@@ -4323,8 +4594,8 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4323 offset += qed_dump_str_param(dump_buf + offset, 4594 offset += qed_dump_str_param(dump_buf + offset,
4324 dump, "dump-type", "fw-asserts"); 4595 dump, "dump-type", "fw-asserts");
4325 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { 4596 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4326 u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx, 4597 u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
4327 last_list_idx, element_addr; 4598 u32 last_list_idx, addr;
4328 4599
4329 if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id]) 4600 if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
4330 continue; 4601 continue;
@@ -4332,6 +4603,8 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4332 /* Read FW info for the current Storm */ 4603 /* Read FW info for the current Storm */
4333 qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info); 4604 qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
4334 4605
4606 asserts = &fw_info.fw_asserts_section;
4607
4335 /* Dump FW Asserts section header and params */ 4608 /* Dump FW Asserts section header and params */
4336 storm_letter_str[0] = s_storm_defs[storm_id].letter; 4609 storm_letter_str[0] = s_storm_defs[storm_id].letter;
4337 offset += qed_dump_section_hdr(dump_buf + offset, dump, 4610 offset += qed_dump_section_hdr(dump_buf + offset, dump,
@@ -4339,12 +4612,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4339 offset += qed_dump_str_param(dump_buf + offset, dump, "storm", 4612 offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
4340 storm_letter_str); 4613 storm_letter_str);
4341 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 4614 offset += qed_dump_num_param(dump_buf + offset, dump, "size",
4342 fw_info.fw_asserts_section. 4615 asserts->list_element_dword_size);
4343 list_element_dword_size);
4344 4616
4345 if (!dump) { 4617 if (!dump) {
4346 offset += fw_info.fw_asserts_section. 4618 offset += asserts->list_element_dword_size;
4347 list_element_dword_size;
4348 continue; 4619 continue;
4349 } 4620 }
4350 4621
@@ -4352,28 +4623,22 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4352 fw_asserts_section_addr = 4623 fw_asserts_section_addr =
4353 s_storm_defs[storm_id].sem_fast_mem_addr + 4624 s_storm_defs[storm_id].sem_fast_mem_addr +
4354 SEM_FAST_REG_INT_RAM + 4625 SEM_FAST_REG_INT_RAM +
4355 RAM_LINES_TO_BYTES(fw_info.fw_asserts_section. 4626 RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
4356 section_ram_line_offset);
4357 next_list_idx_addr = 4627 next_list_idx_addr =
4358 fw_asserts_section_addr + 4628 fw_asserts_section_addr +
4359 DWORDS_TO_BYTES(fw_info.fw_asserts_section. 4629 DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
4360 list_next_index_dword_offset);
4361 next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr); 4630 next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
4362 last_list_idx = (next_list_idx > 0 4631 last_list_idx = (next_list_idx > 0
4363 ? next_list_idx 4632 ? next_list_idx
4364 : fw_info.fw_asserts_section.list_num_elements) 4633 : asserts->list_num_elements) - 1;
4365 - 1; 4634 addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
4366 element_addr = 4635 asserts->list_dword_offset +
4367 fw_asserts_section_addr + 4636 last_list_idx * asserts->list_element_dword_size;
4368 DWORDS_TO_BYTES(fw_info.fw_asserts_section. 4637 offset +=
4369 list_dword_offset) + 4638 qed_grc_dump_addr_range(p_hwfn, p_ptt,
4370 last_list_idx * 4639 dump_buf + offset,
4371 DWORDS_TO_BYTES(fw_info.fw_asserts_section. 4640 dump, addr,
4372 list_element_dword_size); 4641 asserts->list_element_dword_size);
4373 for (i = 0;
4374 i < fw_info.fw_asserts_section.list_element_dword_size;
4375 i++, offset++, element_addr += BYTES_IN_DWORD)
4376 dump_buf[offset] = qed_rd(p_hwfn, p_ptt, element_addr);
4377 } 4642 }
4378 4643
4379 /* Dump last section */ 4644 /* Dump last section */
@@ -4386,13 +4651,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4386enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr) 4651enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
4387{ 4652{
4388 /* Convert binary data to debug arrays */ 4653 /* Convert binary data to debug arrays */
4389 u32 num_of_buffers = *(u32 *)bin_ptr; 4654 struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
4390 struct bin_buffer_hdr *buf_array;
4391 u8 buf_id; 4655 u8 buf_id;
4392 4656
4393 buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1); 4657 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
4394
4395 for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
4396 s_dbg_arrays[buf_id].ptr = 4658 s_dbg_arrays[buf_id].ptr =
4397 (u32 *)(bin_ptr + buf_array[buf_id].offset); 4659 (u32 *)(bin_ptr + buf_array[buf_id].offset);
4398 s_dbg_arrays[buf_id].size_in_dwords = 4660 s_dbg_arrays[buf_id].size_in_dwords =
@@ -4402,6 +4664,17 @@ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
4402 return DBG_STATUS_OK; 4664 return DBG_STATUS_OK;
4403} 4665}
4404 4666
4667/* Assign default GRC param values */
4668void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
4669{
4670 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4671 u32 i;
4672
4673 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
4674 dev_data->grc.param_val[i] =
4675 s_grc_param_defs[i].default_val[dev_data->chip_id];
4676}
4677
4405enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, 4678enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4406 struct qed_ptt *p_ptt, 4679 struct qed_ptt *p_ptt,
4407 u32 *buf_size) 4680 u32 *buf_size)
@@ -4441,8 +4714,9 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
4441 /* GRC Dump */ 4714 /* GRC Dump */
4442 status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords); 4715 status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
4443 4716
4444 /* Clear all GRC params */ 4717 /* Revert GRC params to their default */
4445 qed_dbg_grc_clear_params(p_hwfn); 4718 qed_dbg_grc_set_params_default(p_hwfn);
4719
4446 return status; 4720 return status;
4447} 4721}
4448 4722
@@ -4495,6 +4769,10 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
4495 4769
4496 /* Idle Check Dump */ 4770 /* Idle Check Dump */
4497 *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true); 4771 *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
4772
4773 /* Revert GRC params to their default */
4774 qed_dbg_grc_set_params_default(p_hwfn);
4775
4498 return DBG_STATUS_OK; 4776 return DBG_STATUS_OK;
4499} 4777}
4500 4778
@@ -4519,11 +4797,15 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4519 u32 needed_buf_size_in_dwords; 4797 u32 needed_buf_size_in_dwords;
4520 enum dbg_status status; 4798 enum dbg_status status;
4521 4799
4522 status = qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt, 4800 /* validate buffer size */
4801 status =
4802 qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
4523 &needed_buf_size_in_dwords); 4803 &needed_buf_size_in_dwords);
4524 4804
4525 if (status != DBG_STATUS_OK) 4805 if (status != DBG_STATUS_OK &&
4806 status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
4526 return status; 4807 return status;
4808
4527 if (buf_size_in_dwords < needed_buf_size_in_dwords) 4809 if (buf_size_in_dwords < needed_buf_size_in_dwords)
4528 return DBG_STATUS_DUMP_BUF_TOO_SMALL; 4810 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4529 4811
@@ -4531,8 +4813,13 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4531 qed_update_blocks_reset_state(p_hwfn, p_ptt); 4813 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4532 4814
4533 /* Perform dump */ 4815 /* Perform dump */
4534 return qed_mcp_trace_dump(p_hwfn, 4816 status = qed_mcp_trace_dump(p_hwfn,
4535 p_ptt, dump_buf, true, num_dumped_dwords); 4817 p_ptt, dump_buf, true, num_dumped_dwords);
4818
4819 /* Revert GRC params to their default */
4820 qed_dbg_grc_set_params_default(p_hwfn);
4821
4822 return status;
4536} 4823}
4537 4824
4538enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, 4825enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -4567,8 +4854,14 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4567 4854
4568 /* Update reset state */ 4855 /* Update reset state */
4569 qed_update_blocks_reset_state(p_hwfn, p_ptt); 4856 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4570 return qed_reg_fifo_dump(p_hwfn, 4857
4571 p_ptt, dump_buf, true, num_dumped_dwords); 4858 status = qed_reg_fifo_dump(p_hwfn,
4859 p_ptt, dump_buf, true, num_dumped_dwords);
4860
4861 /* Revert GRC params to their default */
4862 qed_dbg_grc_set_params_default(p_hwfn);
4863
4864 return status;
4572} 4865}
4573 4866
4574enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, 4867enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -4603,8 +4896,13 @@ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4603 4896
4604 /* Update reset state */ 4897 /* Update reset state */
4605 qed_update_blocks_reset_state(p_hwfn, p_ptt); 4898 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4606 return qed_igu_fifo_dump(p_hwfn, 4899
4607 p_ptt, dump_buf, true, num_dumped_dwords); 4900 status = qed_igu_fifo_dump(p_hwfn,
4901 p_ptt, dump_buf, true, num_dumped_dwords);
4902 /* Revert GRC params to their default */
4903 qed_dbg_grc_set_params_default(p_hwfn);
4904
4905 return status;
4608} 4906}
4609 4907
4610enum dbg_status 4908enum dbg_status
@@ -4641,9 +4939,16 @@ enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
4641 4939
4642 /* Update reset state */ 4940 /* Update reset state */
4643 qed_update_blocks_reset_state(p_hwfn, p_ptt); 4941 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4644 return qed_protection_override_dump(p_hwfn, 4942
4645 p_ptt, 4943 status = qed_protection_override_dump(p_hwfn,
4646 dump_buf, true, num_dumped_dwords); 4944 p_ptt,
4945 dump_buf,
4946 true, num_dumped_dwords);
4947
4948 /* Revert GRC params to their default */
4949 qed_dbg_grc_set_params_default(p_hwfn);
4950
4951 return status;
4647} 4952}
4648 4953
4649enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn, 4954enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -5045,13 +5350,10 @@ static char s_temp_buf[MAX_MSG_LEN];
5045enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr) 5350enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
5046{ 5351{
5047 /* Convert binary data to debug arrays */ 5352 /* Convert binary data to debug arrays */
5048 u32 num_of_buffers = *(u32 *)bin_ptr; 5353 struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
5049 struct bin_buffer_hdr *buf_array;
5050 u8 buf_id; 5354 u8 buf_id;
5051 5355
5052 buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1); 5356 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
5053
5054 for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
5055 s_dbg_arrays[buf_id].ptr = 5357 s_dbg_arrays[buf_id].ptr =
5056 (u32 *)(bin_ptr + buf_array[buf_id].offset); 5358 (u32 *)(bin_ptr + buf_array[buf_id].offset);
5057 s_dbg_arrays[buf_id].size_in_dwords = 5359 s_dbg_arrays[buf_id].size_in_dwords =
@@ -5874,16 +6176,16 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5874 results_offset += 6176 results_offset +=
5875 sprintf(qed_get_buf_ptr(results_buf, 6177 sprintf(qed_get_buf_ptr(results_buf,
5876 results_offset), 6178 results_offset),
5877 "raw: 0x%016llx, address: 0x%07llx, access: %-5s, pf: %2lld, vf: %s, port: %lld, privilege: %-3s, protection: %-12s, master: %-4s, errors: ", 6179 "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
5878 elements[i].data, 6180 elements[i].data,
5879 GET_FIELD(elements[i].data, 6181 (u32)GET_FIELD(elements[i].data,
5880 REG_FIFO_ELEMENT_ADDRESS) * 6182 REG_FIFO_ELEMENT_ADDRESS) *
5881 REG_FIFO_ELEMENT_ADDR_FACTOR, 6183 REG_FIFO_ELEMENT_ADDR_FACTOR,
5882 s_access_strs[GET_FIELD(elements[i].data, 6184 s_access_strs[GET_FIELD(elements[i].data,
5883 REG_FIFO_ELEMENT_ACCESS)], 6185 REG_FIFO_ELEMENT_ACCESS)],
5884 GET_FIELD(elements[i].data, 6186 (u32)GET_FIELD(elements[i].data,
5885 REG_FIFO_ELEMENT_PF), vf_str, 6187 REG_FIFO_ELEMENT_PF), vf_str,
5886 GET_FIELD(elements[i].data, 6188 (u32)GET_FIELD(elements[i].data,
5887 REG_FIFO_ELEMENT_PORT), 6189 REG_FIFO_ELEMENT_PORT),
5888 s_privilege_strs[GET_FIELD(elements[i]. 6190 s_privilege_strs[GET_FIELD(elements[i].
5889 data, 6191 data,
@@ -6189,13 +6491,13 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
6189 results_offset += 6491 results_offset +=
6190 sprintf(qed_get_buf_ptr(results_buf, 6492 sprintf(qed_get_buf_ptr(results_buf,
6191 results_offset), 6493 results_offset),
6192 "window %2d, address: 0x%07x, size: %7lld regs, read: %lld, write: %lld, read protection: %-12s, write protection: %-12s\n", 6494 "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
6193 i, address, 6495 i, address,
6194 GET_FIELD(elements[i].data, 6496 (u32)GET_FIELD(elements[i].data,
6195 PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE), 6497 PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
6196 GET_FIELD(elements[i].data, 6498 (u32)GET_FIELD(elements[i].data,
6197 PROTECTION_OVERRIDE_ELEMENT_READ), 6499 PROTECTION_OVERRIDE_ELEMENT_READ),
6198 GET_FIELD(elements[i].data, 6500 (u32)GET_FIELD(elements[i].data,
6199 PROTECTION_OVERRIDE_ELEMENT_WRITE), 6501 PROTECTION_OVERRIDE_ELEMENT_WRITE),
6200 s_protection_strs[GET_FIELD(elements[i].data, 6502 s_protection_strs[GET_FIELD(elements[i].data,
6201 PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)], 6503 PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
@@ -6508,7 +6810,7 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
6508 */ 6810 */
6509 rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt, 6811 rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
6510 &buf_size_dwords); 6812 &buf_size_dwords);
6511 if (rc != DBG_STATUS_OK) 6813 if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
6512 return rc; 6814 return rc;
6513 feature->buf_size = buf_size_dwords * sizeof(u32); 6815 feature->buf_size = buf_size_dwords * sizeof(u32);
6514 feature->dump_buf = vmalloc(feature->buf_size); 6816 feature->dump_buf = vmalloc(feature->buf_size);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index e518f914eab1..8b5df71aa3c1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -674,11 +674,19 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
674 return rc; 674 return rc;
675} 675}
676 676
677static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn) 677static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
678{ 678{
679 int hw_mode = 0; 679 int hw_mode = 0;
680 680
681 hw_mode = (1 << MODE_BB_B0); 681 if (QED_IS_BB_B0(p_hwfn->cdev)) {
682 hw_mode |= 1 << MODE_BB;
683 } else if (QED_IS_AH(p_hwfn->cdev)) {
684 hw_mode |= 1 << MODE_K2;
685 } else {
686 DP_NOTICE(p_hwfn, "Unknown chip type %#x\n",
687 p_hwfn->cdev->type);
688 return -EINVAL;
689 }
682 690
683 switch (p_hwfn->cdev->num_ports_in_engines) { 691 switch (p_hwfn->cdev->num_ports_in_engines) {
684 case 1: 692 case 1:
@@ -693,7 +701,7 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
693 default: 701 default:
694 DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n", 702 DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
695 p_hwfn->cdev->num_ports_in_engines); 703 p_hwfn->cdev->num_ports_in_engines);
696 return; 704 return -EINVAL;
697 } 705 }
698 706
699 switch (p_hwfn->cdev->mf_mode) { 707 switch (p_hwfn->cdev->mf_mode) {
@@ -719,6 +727,8 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
719 DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP), 727 DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
720 "Configuring function for hw_mode: 0x%08x\n", 728 "Configuring function for hw_mode: 0x%08x\n",
721 p_hwfn->hw_info.hw_mode); 729 p_hwfn->hw_info.hw_mode);
730
731 return 0;
722} 732}
723 733
724/* Init run time data for all PFs on an engine. */ 734/* Init run time data for all PFs on an engine. */
@@ -754,10 +764,10 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
754 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 764 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
755 struct qed_qm_common_rt_init_params params; 765 struct qed_qm_common_rt_init_params params;
756 struct qed_dev *cdev = p_hwfn->cdev; 766 struct qed_dev *cdev = p_hwfn->cdev;
767 u8 vf_id, max_num_vfs;
757 u16 num_pfs, pf_id; 768 u16 num_pfs, pf_id;
758 u32 concrete_fid; 769 u32 concrete_fid;
759 int rc = 0; 770 int rc = 0;
760 u8 vf_id;
761 771
762 qed_init_cau_rt_data(cdev); 772 qed_init_cau_rt_data(cdev);
763 773
@@ -814,7 +824,8 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
814 qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 824 qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
815 } 825 }
816 826
817 for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) { 827 max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
828 for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
818 concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); 829 concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
819 qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid); 830 qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
820 qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); 831 qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
@@ -1135,7 +1146,9 @@ int qed_hw_init(struct qed_dev *cdev,
1135 /* Enable DMAE in PXP */ 1146 /* Enable DMAE in PXP */
1136 rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); 1147 rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
1137 1148
1138 qed_calc_hw_mode(p_hwfn); 1149 rc = qed_calc_hw_mode(p_hwfn);
1150 if (rc)
1151 return rc;
1139 1152
1140 rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code); 1153 rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
1141 if (rc) { 1154 if (rc) {
@@ -1485,10 +1498,25 @@ static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
1485static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) 1498static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
1486{ 1499{
1487 /* clear indirect access */ 1500 /* clear indirect access */
1488 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0); 1501 if (QED_IS_AH(p_hwfn->cdev)) {
1489 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0); 1502 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1490 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0); 1503 PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0);
1491 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0); 1504 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1505 PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0);
1506 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1507 PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0);
1508 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1509 PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0);
1510 } else {
1511 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1512 PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
1513 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1514 PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
1515 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1516 PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
1517 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1518 PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
1519 }
1492 1520
1493 /* Clean Previous errors if such exist */ 1521 /* Clean Previous errors if such exist */
1494 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 1522 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -1610,6 +1638,7 @@ static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
1610 enum qed_resources res_id) 1638 enum qed_resources res_id)
1611{ 1639{
1612 u8 num_funcs = p_hwfn->num_funcs_on_engine; 1640 u8 num_funcs = p_hwfn->num_funcs_on_engine;
1641 bool b_ah = QED_IS_AH(p_hwfn->cdev);
1613 struct qed_sb_cnt_info sb_cnt_info; 1642 struct qed_sb_cnt_info sb_cnt_info;
1614 u32 dflt_resc_num = 0; 1643 u32 dflt_resc_num = 0;
1615 1644
@@ -1620,17 +1649,22 @@ static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
1620 dflt_resc_num = sb_cnt_info.sb_cnt; 1649 dflt_resc_num = sb_cnt_info.sb_cnt;
1621 break; 1650 break;
1622 case QED_L2_QUEUE: 1651 case QED_L2_QUEUE:
1623 dflt_resc_num = MAX_NUM_L2_QUEUES_BB / num_funcs; 1652 dflt_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2
1653 : MAX_NUM_L2_QUEUES_BB) / num_funcs;
1624 break; 1654 break;
1625 case QED_VPORT: 1655 case QED_VPORT:
1626 dflt_resc_num = MAX_NUM_VPORTS_BB / num_funcs; 1656 dflt_resc_num = MAX_NUM_VPORTS_BB / num_funcs;
1657 dflt_resc_num = (b_ah ? MAX_NUM_VPORTS_K2
1658 : MAX_NUM_VPORTS_BB) / num_funcs;
1627 break; 1659 break;
1628 case QED_RSS_ENG: 1660 case QED_RSS_ENG:
1629 dflt_resc_num = ETH_RSS_ENGINE_NUM_BB / num_funcs; 1661 dflt_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2
1662 : ETH_RSS_ENGINE_NUM_BB) / num_funcs;
1630 break; 1663 break;
1631 case QED_PQ: 1664 case QED_PQ:
1632 /* The granularity of the PQs is 8 */ 1665 /* The granularity of the PQs is 8 */
1633 dflt_resc_num = MAX_QM_TX_QUEUES_BB / num_funcs; 1666 dflt_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2
1667 : MAX_QM_TX_QUEUES_BB) / num_funcs;
1634 dflt_resc_num &= ~0x7; 1668 dflt_resc_num &= ~0x7;
1635 break; 1669 break;
1636 case QED_RL: 1670 case QED_RL:
@@ -1642,7 +1676,8 @@ static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
1642 dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; 1676 dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
1643 break; 1677 break;
1644 case QED_ILT: 1678 case QED_ILT:
1645 dflt_resc_num = PXP_NUM_ILT_RECORDS_BB / num_funcs; 1679 dflt_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2
1680 : PXP_NUM_ILT_RECORDS_BB) / num_funcs;
1646 break; 1681 break;
1647 case QED_LL2_QUEUE: 1682 case QED_LL2_QUEUE:
1648 dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs; 1683 dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
@@ -1653,7 +1688,10 @@ static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
1653 dflt_resc_num = NUM_OF_CMDQS_CQS / num_funcs; 1688 dflt_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
1654 break; 1689 break;
1655 case QED_RDMA_STATS_QUEUE: 1690 case QED_RDMA_STATS_QUEUE:
1656 dflt_resc_num = RDMA_NUM_STATISTIC_COUNTERS_BB / num_funcs; 1691 dflt_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
1692 : RDMA_NUM_STATISTIC_COUNTERS_BB) /
1693 num_funcs;
1694
1657 break; 1695 break;
1658 default: 1696 default:
1659 break; 1697 break;
@@ -1780,6 +1818,7 @@ out:
1780 1818
1781static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) 1819static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
1782{ 1820{
1821 bool b_ah = QED_IS_AH(p_hwfn->cdev);
1783 u8 res_id; 1822 u8 res_id;
1784 int rc; 1823 int rc;
1785 1824
@@ -1790,7 +1829,8 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
1790 } 1829 }
1791 1830
1792 /* Sanity for ILT */ 1831 /* Sanity for ILT */
1793 if ((RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB)) { 1832 if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
1833 (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
1794 DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n", 1834 DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
1795 RESC_START(p_hwfn, QED_ILT), 1835 RESC_START(p_hwfn, QED_ILT),
1796 RESC_END(p_hwfn, QED_ILT) - 1); 1836 RESC_END(p_hwfn, QED_ILT) - 1);
@@ -1860,9 +1900,15 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1860 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: 1900 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
1861 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G; 1901 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
1862 break; 1902 break;
1903 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
1904 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G;
1905 break;
1863 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: 1906 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
1864 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G; 1907 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
1865 break; 1908 break;
1909 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
1910 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G;
1911 break;
1866 default: 1912 default:
1867 DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg); 1913 DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
1868 break; 1914 break;
@@ -1976,8 +2022,9 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1976{ 2022{
1977 u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id; 2023 u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
1978 u32 reg_function_hide, tmp, eng_mask, low_pfs_mask; 2024 u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
2025 struct qed_dev *cdev = p_hwfn->cdev;
1979 2026
1980 num_funcs = MAX_NUM_PFS_BB; 2027 num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
1981 2028
1982 /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values 2029 /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
1983 * in the other bits are selected. 2030 * in the other bits are selected.
@@ -1990,12 +2037,17 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1990 reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE); 2037 reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
1991 2038
1992 if (reg_function_hide & 0x1) { 2039 if (reg_function_hide & 0x1) {
1993 if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) { 2040 if (QED_IS_BB(cdev)) {
1994 num_funcs = 0; 2041 if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) {
1995 eng_mask = 0xaaaa; 2042 num_funcs = 0;
2043 eng_mask = 0xaaaa;
2044 } else {
2045 num_funcs = 1;
2046 eng_mask = 0x5554;
2047 }
1996 } else { 2048 } else {
1997 num_funcs = 1; 2049 num_funcs = 1;
1998 eng_mask = 0x5554; 2050 eng_mask = 0xfffe;
1999 } 2051 }
2000 2052
2001 /* Get the number of the enabled functions on the engine */ 2053 /* Get the number of the enabled functions on the engine */
@@ -2027,24 +2079,12 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2027 p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); 2079 p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
2028} 2080}
2029 2081
2030static int 2082static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
2031qed_get_hw_info(struct qed_hwfn *p_hwfn, 2083 struct qed_ptt *p_ptt)
2032 struct qed_ptt *p_ptt,
2033 enum qed_pci_personality personality)
2034{ 2084{
2035 u32 port_mode; 2085 u32 port_mode;
2036 int rc;
2037 2086
2038 /* Since all information is common, only first hwfns should do this */ 2087 port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);
2039 if (IS_LEAD_HWFN(p_hwfn)) {
2040 rc = qed_iov_hw_info(p_hwfn);
2041 if (rc)
2042 return rc;
2043 }
2044
2045 /* Read the port mode */
2046 port_mode = qed_rd(p_hwfn, p_ptt,
2047 CNIG_REG_NW_PORT_MODE_BB_B0);
2048 2088
2049 if (port_mode < 3) { 2089 if (port_mode < 3) {
2050 p_hwfn->cdev->num_ports_in_engines = 1; 2090 p_hwfn->cdev->num_ports_in_engines = 1;
@@ -2057,6 +2097,54 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
2057 /* Default num_ports_in_engines to something */ 2097 /* Default num_ports_in_engines to something */
2058 p_hwfn->cdev->num_ports_in_engines = 1; 2098 p_hwfn->cdev->num_ports_in_engines = 1;
2059 } 2099 }
2100}
2101
2102static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn,
2103 struct qed_ptt *p_ptt)
2104{
2105 u32 port;
2106 int i;
2107
2108 p_hwfn->cdev->num_ports_in_engines = 0;
2109
2110 for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
2111 port = qed_rd(p_hwfn, p_ptt,
2112 CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
2113 if (port & 1)
2114 p_hwfn->cdev->num_ports_in_engines++;
2115 }
2116
2117 if (!p_hwfn->cdev->num_ports_in_engines) {
2118 DP_NOTICE(p_hwfn, "All NIG ports are inactive\n");
2119
2120 /* Default num_ports_in_engine to something */
2121 p_hwfn->cdev->num_ports_in_engines = 1;
2122 }
2123}
2124
2125static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2126{
2127 if (QED_IS_BB(p_hwfn->cdev))
2128 qed_hw_info_port_num_bb(p_hwfn, p_ptt);
2129 else
2130 qed_hw_info_port_num_ah(p_hwfn, p_ptt);
2131}
2132
2133static int
2134qed_get_hw_info(struct qed_hwfn *p_hwfn,
2135 struct qed_ptt *p_ptt,
2136 enum qed_pci_personality personality)
2137{
2138 int rc;
2139
2140 /* Since all information is common, only first hwfns should do this */
2141 if (IS_LEAD_HWFN(p_hwfn)) {
2142 rc = qed_iov_hw_info(p_hwfn);
2143 if (rc)
2144 return rc;
2145 }
2146
2147 qed_hw_info_port_num(p_hwfn, p_ptt);
2060 2148
2061 qed_hw_get_nvm_info(p_hwfn, p_ptt); 2149 qed_hw_get_nvm_info(p_hwfn, p_ptt);
2062 2150
@@ -2096,19 +2184,33 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
2096static int qed_get_dev_info(struct qed_dev *cdev) 2184static int qed_get_dev_info(struct qed_dev *cdev)
2097{ 2185{
2098 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 2186 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2187 u16 device_id_mask;
2099 u32 tmp; 2188 u32 tmp;
2100 2189
2101 /* Read Vendor Id / Device Id */ 2190 /* Read Vendor Id / Device Id */
2102 pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id); 2191 pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
2103 pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id); 2192 pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
2104 2193
2194 /* Determine type */
2195 device_id_mask = cdev->device_id & QED_DEV_ID_MASK;
2196 switch (device_id_mask) {
2197 case QED_DEV_ID_MASK_BB:
2198 cdev->type = QED_DEV_TYPE_BB;
2199 break;
2200 case QED_DEV_ID_MASK_AH:
2201 cdev->type = QED_DEV_TYPE_AH;
2202 break;
2203 default:
2204 DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id);
2205 return -EBUSY;
2206 }
2207
2105 cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, 2208 cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
2106 MISCS_REG_CHIP_NUM); 2209 MISCS_REG_CHIP_NUM);
2107 cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, 2210 cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
2108 MISCS_REG_CHIP_REV); 2211 MISCS_REG_CHIP_REV);
2109 MASK_FIELD(CHIP_REV, cdev->chip_rev); 2212 MASK_FIELD(CHIP_REV, cdev->chip_rev);
2110 2213
2111 cdev->type = QED_DEV_TYPE_BB;
2112 /* Learn number of HW-functions */ 2214 /* Learn number of HW-functions */
2113 tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt, 2215 tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
2114 MISCS_REG_CMT_ENABLED_FOR_PAIR); 2216 MISCS_REG_CMT_ENABLED_FOR_PAIR);
@@ -2128,7 +2230,10 @@ static int qed_get_dev_info(struct qed_dev *cdev)
2128 MASK_FIELD(CHIP_METAL, cdev->chip_metal); 2230 MASK_FIELD(CHIP_METAL, cdev->chip_metal);
2129 2231
2130 DP_INFO(cdev->hwfns, 2232 DP_INFO(cdev->hwfns,
2131 "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", 2233 "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
2234 QED_IS_BB(cdev) ? "BB" : "AH",
2235 'A' + cdev->chip_rev,
2236 (int)cdev->chip_metal,
2132 cdev->chip_num, cdev->chip_rev, 2237 cdev->chip_num, cdev->chip_rev,
2133 cdev->chip_bond_id, cdev->chip_metal); 2238 cdev->chip_bond_id, cdev->chip_metal);
2134 2239
@@ -3363,3 +3468,8 @@ void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3363 memset(p_hwfn->qm_info.wfq_data, 0, 3468 memset(p_hwfn->qm_info.wfq_data, 0,
3364 sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports); 3469 sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
3365} 3470}
3471
3472int qed_device_num_engines(struct qed_dev *cdev)
3473{
3474 return QED_IS_BB(cdev) ? 2 : 1;
3475}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 37c2bfb663bb..e9acdc96ba84 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -574,6 +574,7 @@ enum core_event_opcode {
574 CORE_EVENT_TX_QUEUE_STOP, 574 CORE_EVENT_TX_QUEUE_STOP,
575 CORE_EVENT_RX_QUEUE_START, 575 CORE_EVENT_RX_QUEUE_START,
576 CORE_EVENT_RX_QUEUE_STOP, 576 CORE_EVENT_RX_QUEUE_STOP,
577 CORE_EVENT_RX_QUEUE_FLUSH,
577 MAX_CORE_EVENT_OPCODE 578 MAX_CORE_EVENT_OPCODE
578}; 579};
579 580
@@ -625,6 +626,7 @@ enum core_ramrod_cmd_id {
625 CORE_RAMROD_TX_QUEUE_START, 626 CORE_RAMROD_TX_QUEUE_START,
626 CORE_RAMROD_RX_QUEUE_STOP, 627 CORE_RAMROD_RX_QUEUE_STOP,
627 CORE_RAMROD_TX_QUEUE_STOP, 628 CORE_RAMROD_TX_QUEUE_STOP,
629 CORE_RAMROD_RX_QUEUE_FLUSH,
628 MAX_CORE_RAMROD_CMD_ID 630 MAX_CORE_RAMROD_CMD_ID
629}; 631};
630 632
@@ -698,7 +700,8 @@ struct core_rx_slow_path_cqe {
698 u8 type; 700 u8 type;
699 u8 ramrod_cmd_id; 701 u8 ramrod_cmd_id;
700 __le16 echo; 702 __le16 echo;
701 __le32 reserved1[7]; 703 struct core_rx_cqe_opaque_data opaque_data;
704 __le32 reserved1[5];
702}; 705};
703 706
704union core_rx_cqe_union { 707union core_rx_cqe_union {
@@ -735,45 +738,46 @@ struct core_rx_stop_ramrod_data {
735 __le16 reserved2[2]; 738 __le16 reserved2[2];
736}; 739};
737 740
738struct core_tx_bd_flags { 741struct core_tx_bd_data {
739 u8 as_bitfield; 742 __le16 as_bitfield;
740#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 743#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
741#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0 744#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
742#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK 0x1 745#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
743#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT 1 746#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
744#define CORE_TX_BD_FLAGS_START_BD_MASK 0x1 747#define CORE_TX_BD_DATA_START_BD_MASK 0x1
745#define CORE_TX_BD_FLAGS_START_BD_SHIFT 2 748#define CORE_TX_BD_DATA_START_BD_SHIFT 2
746#define CORE_TX_BD_FLAGS_IP_CSUM_MASK 0x1 749#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
747#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT 3 750#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
748#define CORE_TX_BD_FLAGS_L4_CSUM_MASK 0x1 751#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
749#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT 4 752#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
750#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK 0x1 753#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
751#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT 5 754#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
752#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK 0x1 755#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
753#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6 756#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
754#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1 757#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
755#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7 758#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
759#define CORE_TX_BD_DATA_NBDS_MASK 0xF
760#define CORE_TX_BD_DATA_NBDS_SHIFT 8
761#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
762#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
763#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
764#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
765#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3
766#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14
756}; 767};
757 768
758struct core_tx_bd { 769struct core_tx_bd {
759 struct regpair addr; 770 struct regpair addr;
760 __le16 nbytes; 771 __le16 nbytes;
761 __le16 nw_vlan_or_lb_echo; 772 __le16 nw_vlan_or_lb_echo;
762 u8 bitfield0; 773 struct core_tx_bd_data bd_data;
763#define CORE_TX_BD_NBDS_MASK 0xF
764#define CORE_TX_BD_NBDS_SHIFT 0
765#define CORE_TX_BD_ROCE_FLAV_MASK 0x1
766#define CORE_TX_BD_ROCE_FLAV_SHIFT 4
767#define CORE_TX_BD_RESERVED0_MASK 0x7
768#define CORE_TX_BD_RESERVED0_SHIFT 5
769 struct core_tx_bd_flags bd_flags;
770 __le16 bitfield1; 774 __le16 bitfield1;
771#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF 775#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
772#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0 776#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
773#define CORE_TX_BD_TX_DST_MASK 0x1 777#define CORE_TX_BD_TX_DST_MASK 0x1
774#define CORE_TX_BD_TX_DST_SHIFT 14 778#define CORE_TX_BD_TX_DST_SHIFT 14
775#define CORE_TX_BD_RESERVED1_MASK 0x1 779#define CORE_TX_BD_RESERVED_MASK 0x1
776#define CORE_TX_BD_RESERVED1_SHIFT 15 780#define CORE_TX_BD_RESERVED_SHIFT 15
777}; 781};
778 782
779enum core_tx_dest { 783enum core_tx_dest {
@@ -800,6 +804,14 @@ struct core_tx_stop_ramrod_data {
800 __le32 reserved0[2]; 804 __le32 reserved0[2];
801}; 805};
802 806
807enum dcb_dhcp_update_flag {
808 DONT_UPDATE_DCB_DHCP,
809 UPDATE_DCB,
810 UPDATE_DSCP,
811 UPDATE_DCB_DSCP,
812 MAX_DCB_DHCP_UPDATE_FLAG
813};
814
803struct eth_mstorm_per_pf_stat { 815struct eth_mstorm_per_pf_stat {
804 struct regpair gre_discard_pkts; 816 struct regpair gre_discard_pkts;
805 struct regpair vxlan_discard_pkts; 817 struct regpair vxlan_discard_pkts;
@@ -893,6 +905,12 @@ union event_ring_element {
893 struct event_ring_next_addr next_addr; 905 struct event_ring_next_addr next_addr;
894}; 906};
895 907
908enum fw_flow_ctrl_mode {
909 flow_ctrl_pause,
910 flow_ctrl_pfc,
911 MAX_FW_FLOW_CTRL_MODE
912};
913
896/* Major and Minor hsi Versions */ 914/* Major and Minor hsi Versions */
897struct hsi_fp_ver_struct { 915struct hsi_fp_ver_struct {
898 u8 minor_ver_arr[2]; 916 u8 minor_ver_arr[2];
@@ -921,6 +939,7 @@ enum malicious_vf_error_id {
921 ETH_EDPM_OUT_OF_SYNC, 939 ETH_EDPM_OUT_OF_SYNC,
922 ETH_TUNN_IPV6_EXT_NBD_ERR, 940 ETH_TUNN_IPV6_EXT_NBD_ERR,
923 ETH_CONTROL_PACKET_VIOLATION, 941 ETH_CONTROL_PACKET_VIOLATION,
942 ETH_ANTI_SPOOFING_ERR,
924 MAX_MALICIOUS_VF_ERROR_ID 943 MAX_MALICIOUS_VF_ERROR_ID
925}; 944};
926 945
@@ -1106,8 +1125,9 @@ struct tstorm_per_port_stat {
1106 struct regpair ll2_mac_filter_discard; 1125 struct regpair ll2_mac_filter_discard;
1107 struct regpair ll2_conn_disabled_discard; 1126 struct regpair ll2_conn_disabled_discard;
1108 struct regpair iscsi_irregular_pkt; 1127 struct regpair iscsi_irregular_pkt;
1109 struct regpair reserved; 1128 struct regpair fcoe_irregular_pkt;
1110 struct regpair roce_irregular_pkt; 1129 struct regpair roce_irregular_pkt;
1130 struct regpair reserved;
1111 struct regpair eth_irregular_pkt; 1131 struct regpair eth_irregular_pkt;
1112 struct regpair reserved1; 1132 struct regpair reserved1;
1113 struct regpair preroce_irregular_pkt; 1133 struct regpair preroce_irregular_pkt;
@@ -1648,6 +1668,11 @@ enum block_addr {
1648 GRCBASE_MS = 0x6a0000, 1668 GRCBASE_MS = 0x6a0000,
1649 GRCBASE_PHY_PCIE = 0x620000, 1669 GRCBASE_PHY_PCIE = 0x620000,
1650 GRCBASE_LED = 0x6b8000, 1670 GRCBASE_LED = 0x6b8000,
1671 GRCBASE_AVS_WRAP = 0x6b0000,
1672 GRCBASE_RGFS = 0x19d0000,
1673 GRCBASE_TGFS = 0x19e0000,
1674 GRCBASE_PTLD = 0x19f0000,
1675 GRCBASE_YPLD = 0x1a10000,
1651 GRCBASE_MISC_AEU = 0x8000, 1676 GRCBASE_MISC_AEU = 0x8000,
1652 GRCBASE_BAR0_MAP = 0x1c00000, 1677 GRCBASE_BAR0_MAP = 0x1c00000,
1653 MAX_BLOCK_ADDR 1678 MAX_BLOCK_ADDR
@@ -1732,6 +1757,11 @@ enum block_id {
1732 BLOCK_MS, 1757 BLOCK_MS,
1733 BLOCK_PHY_PCIE, 1758 BLOCK_PHY_PCIE,
1734 BLOCK_LED, 1759 BLOCK_LED,
1760 BLOCK_AVS_WRAP,
1761 BLOCK_RGFS,
1762 BLOCK_TGFS,
1763 BLOCK_PTLD,
1764 BLOCK_YPLD,
1735 BLOCK_MISC_AEU, 1765 BLOCK_MISC_AEU,
1736 BLOCK_BAR0_MAP, 1766 BLOCK_BAR0_MAP,
1737 MAX_BLOCK_ID 1767 MAX_BLOCK_ID
@@ -1783,9 +1813,9 @@ struct dbg_attn_reg_result {
1783 __le32 data; 1813 __le32 data;
1784#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF 1814#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
1785#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0 1815#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
1786#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK 0xFF 1816#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
1787#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT 24 1817#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
1788 __le16 attn_idx_offset; 1818 __le16 block_attn_offset;
1789 __le16 reserved; 1819 __le16 reserved;
1790 __le32 sts_val; 1820 __le32 sts_val;
1791 __le32 mask_val; 1821 __le32 mask_val;
@@ -1815,12 +1845,12 @@ struct dbg_mode_hdr {
1815/* Attention register */ 1845/* Attention register */
1816struct dbg_attn_reg { 1846struct dbg_attn_reg {
1817 struct dbg_mode_hdr mode; 1847 struct dbg_mode_hdr mode;
1818 __le16 attn_idx_offset; 1848 __le16 block_attn_offset;
1819 __le32 data; 1849 __le32 data;
1820#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF 1850#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
1821#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0 1851#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
1822#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK 0xFF 1852#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
1823#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT 24 1853#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
1824 __le32 sts_clr_address; 1854 __le32 sts_clr_address;
1825 __le32 mask_address; 1855 __le32 mask_address;
1826}; 1856};
@@ -2001,6 +2031,20 @@ enum dbg_bus_clients {
2001 MAX_DBG_BUS_CLIENTS 2031 MAX_DBG_BUS_CLIENTS
2002}; 2032};
2003 2033
2034enum dbg_bus_constraint_ops {
2035 DBG_BUS_CONSTRAINT_OP_EQ,
2036 DBG_BUS_CONSTRAINT_OP_NE,
2037 DBG_BUS_CONSTRAINT_OP_LT,
2038 DBG_BUS_CONSTRAINT_OP_LTC,
2039 DBG_BUS_CONSTRAINT_OP_LE,
2040 DBG_BUS_CONSTRAINT_OP_LEC,
2041 DBG_BUS_CONSTRAINT_OP_GT,
2042 DBG_BUS_CONSTRAINT_OP_GTC,
2043 DBG_BUS_CONSTRAINT_OP_GE,
2044 DBG_BUS_CONSTRAINT_OP_GEC,
2045 MAX_DBG_BUS_CONSTRAINT_OPS
2046};
2047
2004/* Debug Bus memory address */ 2048/* Debug Bus memory address */
2005struct dbg_bus_mem_addr { 2049struct dbg_bus_mem_addr {
2006 __le32 lo; 2050 __le32 lo;
@@ -2092,10 +2136,18 @@ struct dbg_bus_data {
2092 * DBG_BUS_TARGET_ID_PCI. 2136 * DBG_BUS_TARGET_ID_PCI.
2093 */ 2137 */
2094 __le16 reserved; 2138 __le16 reserved;
2095 struct dbg_bus_block_data blocks[80];/* Debug Bus data for each block */ 2139 struct dbg_bus_block_data blocks[88];/* Debug Bus data for each block */
2096 struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each block */ 2140 struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each block */
2097}; 2141};
2098 2142
2143enum dbg_bus_filter_types {
2144 DBG_BUS_FILTER_TYPE_OFF,
2145 DBG_BUS_FILTER_TYPE_PRE,
2146 DBG_BUS_FILTER_TYPE_POST,
2147 DBG_BUS_FILTER_TYPE_ON,
2148 MAX_DBG_BUS_FILTER_TYPES
2149};
2150
2099/* Debug bus frame modes */ 2151/* Debug bus frame modes */
2100enum dbg_bus_frame_modes { 2152enum dbg_bus_frame_modes {
2101 DBG_BUS_FRAME_MODE_0HW_4ST = 0, /* 0 HW dwords, 4 Storm dwords */ 2153 DBG_BUS_FRAME_MODE_0HW_4ST = 0, /* 0 HW dwords, 4 Storm dwords */
@@ -2104,6 +2156,40 @@ enum dbg_bus_frame_modes {
2104 MAX_DBG_BUS_FRAME_MODES 2156 MAX_DBG_BUS_FRAME_MODES
2105}; 2157};
2106 2158
2159enum dbg_bus_input_types {
2160 DBG_BUS_INPUT_TYPE_STORM,
2161 DBG_BUS_INPUT_TYPE_BLOCK,
2162 MAX_DBG_BUS_INPUT_TYPES
2163};
2164
2165enum dbg_bus_other_engine_modes {
2166 DBG_BUS_OTHER_ENGINE_MODE_NONE,
2167 DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
2168 DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
2169 DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
2170 DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
2171 MAX_DBG_BUS_OTHER_ENGINE_MODES
2172};
2173
2174enum dbg_bus_post_trigger_types {
2175 DBG_BUS_POST_TRIGGER_RECORD,
2176 DBG_BUS_POST_TRIGGER_DROP,
2177 MAX_DBG_BUS_POST_TRIGGER_TYPES
2178};
2179
2180enum dbg_bus_pre_trigger_types {
2181 DBG_BUS_PRE_TRIGGER_START_FROM_ZERO,
2182 DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
2183 DBG_BUS_PRE_TRIGGER_DROP,
2184 MAX_DBG_BUS_PRE_TRIGGER_TYPES
2185};
2186
2187enum dbg_bus_semi_frame_modes {
2188 DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
2189 DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
2190 MAX_DBG_BUS_SEMI_FRAME_MODES
2191};
2192
2107/* Debug bus states */ 2193/* Debug bus states */
2108enum dbg_bus_states { 2194enum dbg_bus_states {
2109 DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */ 2195 DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */
@@ -2115,6 +2201,19 @@ enum dbg_bus_states {
2115 MAX_DBG_BUS_STATES 2201 MAX_DBG_BUS_STATES
2116}; 2202};
2117 2203
2204enum dbg_bus_storm_modes {
2205 DBG_BUS_STORM_MODE_PRINTF,
2206 DBG_BUS_STORM_MODE_PRAM_ADDR,
2207 DBG_BUS_STORM_MODE_DRA_RW,
2208 DBG_BUS_STORM_MODE_DRA_W,
2209 DBG_BUS_STORM_MODE_LD_ST_ADDR,
2210 DBG_BUS_STORM_MODE_DRA_FSM,
2211 DBG_BUS_STORM_MODE_RH,
2212 DBG_BUS_STORM_MODE_FOC,
2213 DBG_BUS_STORM_MODE_EXT_STORE,
2214 MAX_DBG_BUS_STORM_MODES
2215};
2216
2118/* Debug bus target IDs */ 2217/* Debug bus target IDs */
2119enum dbg_bus_targets { 2218enum dbg_bus_targets {
2120 /* records debug bus to DBG block internal buffer */ 2219 /* records debug bus to DBG block internal buffer */
@@ -2128,13 +2227,10 @@ enum dbg_bus_targets {
2128 2227
2129/* GRC Dump data */ 2228/* GRC Dump data */
2130struct dbg_grc_data { 2229struct dbg_grc_data {
2131 __le32 param_val[40]; /* Value of each GRC parameter. Array size must 2230 u8 params_initialized;
2132 * match the enum dbg_grc_params. 2231 u8 reserved1;
2133 */ 2232 __le16 reserved2;
2134 u8 param_set_by_user[40]; /* Indicates for each GRC parameter if it was 2233 __le32 param_val[48];
2135 * set by the user (0/1). Array size must
2136 * match the enum dbg_grc_params.
2137 */
2138}; 2234};
2139 2235
2140/* Debug GRC params */ 2236/* Debug GRC params */
@@ -2181,6 +2277,8 @@ enum dbg_grc_params {
2181 DBG_GRC_PARAM_PARITY_SAFE, 2277 DBG_GRC_PARAM_PARITY_SAFE,
2182 DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */ 2278 DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */
2183 DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */ 2279 DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */
2280 DBG_GRC_PARAM_NO_MCP,
2281 DBG_GRC_PARAM_NO_FW_VER,
2184 MAX_DBG_GRC_PARAMS 2282 MAX_DBG_GRC_PARAMS
2185}; 2283};
2186 2284
@@ -2280,7 +2378,7 @@ struct dbg_tools_data {
2280 struct dbg_bus_data bus; /* Debug Bus data */ 2378 struct dbg_bus_data bus; /* Debug Bus data */
2281 struct idle_chk_data idle_chk; /* Idle Check data */ 2379 struct idle_chk_data idle_chk; /* Idle Check data */
2282 u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */ 2380 u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */
2283 u8 block_in_reset[80]; /* Indicates if a block is in reset state (0/1). 2381 u8 block_in_reset[88]; /* Indicates if a block is in reset state (0/1).
2284 */ 2382 */
2285 u8 chip_id; /* Chip ID (from enum chip_ids) */ 2383 u8 chip_id; /* Chip ID (from enum chip_ids) */
2286 u8 platform_id; /* Platform ID (from enum platform_ids) */ 2384 u8 platform_id; /* Platform ID (from enum platform_ids) */
@@ -2404,7 +2502,7 @@ struct fw_info_location {
2404 2502
2405enum init_modes { 2503enum init_modes {
2406 MODE_RESERVED, 2504 MODE_RESERVED,
2407 MODE_BB_B0, 2505 MODE_BB,
2408 MODE_K2, 2506 MODE_K2,
2409 MODE_ASIC, 2507 MODE_ASIC,
2410 MODE_RESERVED2, 2508 MODE_RESERVED2,
@@ -2418,7 +2516,6 @@ enum init_modes {
2418 MODE_PORTS_PER_ENG_2, 2516 MODE_PORTS_PER_ENG_2,
2419 MODE_PORTS_PER_ENG_4, 2517 MODE_PORTS_PER_ENG_4,
2420 MODE_100G, 2518 MODE_100G,
2421 MODE_40G,
2422 MODE_RESERVED6, 2519 MODE_RESERVED6,
2423 MAX_INIT_MODES 2520 MAX_INIT_MODES
2424}; 2521};
@@ -2686,6 +2783,13 @@ struct iro {
2686 */ 2783 */
2687enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr); 2784enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
2688/** 2785/**
2786 * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
2787 * default value.
2788 *
2789 * @param p_hwfn - HW device data
2790 */
2791void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
2792/**
2689 * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for 2793 * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
2690 * GRC Dump. 2794 * GRC Dump.
2691 * 2795 *
@@ -3418,7 +3522,7 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
3418#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size) 3522#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
3419#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \ 3523#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
3420 (IRO[22].base + ((pf_id) * IRO[22].m1)) 3524 (IRO[22].base + ((pf_id) * IRO[22].m1))
3421#define MSTORM_ETH_PF_STAT_SIZE (IRO[21].size) 3525#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
3422#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ 3526#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
3423 (IRO[23].base + ((stat_counter_id) * IRO[23].m1)) 3527 (IRO[23].base + ((stat_counter_id) * IRO[23].m1))
3424#define USTORM_QUEUE_STAT_SIZE (IRO[23].size) 3528#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
@@ -3482,7 +3586,7 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
3482 3586
3483static const struct iro iro_arr[47] = { 3587static const struct iro iro_arr[47] = {
3484 {0x0, 0x0, 0x0, 0x0, 0x8}, 3588 {0x0, 0x0, 0x0, 0x0, 0x8},
3485 {0x4cb0, 0x78, 0x0, 0x0, 0x78}, 3589 {0x4cb0, 0x80, 0x0, 0x0, 0x80},
3486 {0x6318, 0x20, 0x0, 0x0, 0x20}, 3590 {0x6318, 0x20, 0x0, 0x0, 0x20},
3487 {0xb00, 0x8, 0x0, 0x0, 0x4}, 3591 {0xb00, 0x8, 0x0, 0x0, 0x4},
3488 {0xa80, 0x8, 0x0, 0x0, 0x4}, 3592 {0xa80, 0x8, 0x0, 0x0, 0x4},
@@ -3521,13 +3625,13 @@ static const struct iro iro_arr[47] = {
3521 {0xd888, 0x38, 0x0, 0x0, 0x24}, 3625 {0xd888, 0x38, 0x0, 0x0, 0x24},
3522 {0x12c38, 0x10, 0x0, 0x0, 0x8}, 3626 {0x12c38, 0x10, 0x0, 0x0, 0x8},
3523 {0x11aa0, 0x38, 0x0, 0x0, 0x18}, 3627 {0x11aa0, 0x38, 0x0, 0x0, 0x18},
3524 {0xa8c0, 0x30, 0x0, 0x0, 0x10}, 3628 {0xa8c0, 0x38, 0x0, 0x0, 0x10},
3525 {0x86f8, 0x28, 0x0, 0x0, 0x18}, 3629 {0x86f8, 0x30, 0x0, 0x0, 0x18},
3526 {0x101f8, 0x10, 0x0, 0x0, 0x10}, 3630 {0x101f8, 0x10, 0x0, 0x0, 0x10},
3527 {0xdd08, 0x48, 0x0, 0x0, 0x38}, 3631 {0xdd08, 0x48, 0x0, 0x0, 0x38},
3528 {0x10660, 0x20, 0x0, 0x0, 0x20}, 3632 {0x10660, 0x20, 0x0, 0x0, 0x20},
3529 {0x2b80, 0x80, 0x0, 0x0, 0x10}, 3633 {0x2b80, 0x80, 0x0, 0x0, 0x10},
3530 {0x5000, 0x10, 0x0, 0x0, 0x10}, 3634 {0x5020, 0x10, 0x0, 0x0, 0x10},
3531}; 3635};
3532 3636
3533/* Runtime array offsets */ 3637/* Runtime array offsets */
@@ -4595,6 +4699,12 @@ enum eth_ipv4_frag_type {
4595 MAX_ETH_IPV4_FRAG_TYPE 4699 MAX_ETH_IPV4_FRAG_TYPE
4596}; 4700};
4597 4701
4702enum eth_ip_type {
4703 ETH_IPV4,
4704 ETH_IPV6,
4705 MAX_ETH_IP_TYPE
4706};
4707
4598enum eth_ramrod_cmd_id { 4708enum eth_ramrod_cmd_id {
4599 ETH_RAMROD_UNUSED, 4709 ETH_RAMROD_UNUSED,
4600 ETH_RAMROD_VPORT_START, 4710 ETH_RAMROD_VPORT_START,
@@ -4944,7 +5054,10 @@ struct vport_update_ramrod_data_cmn {
4944 u8 update_mtu_flg; 5054 u8 update_mtu_flg;
4945 5055
4946 __le16 mtu; 5056 __le16 mtu;
4947 u8 reserved[2]; 5057 u8 update_ctl_frame_checks_en_flg;
5058 u8 ctl_frame_mac_check_en;
5059 u8 ctl_frame_ethtype_check_en;
5060 u8 reserved[15];
4948}; 5061};
4949 5062
4950struct vport_update_ramrod_mcast { 5063struct vport_update_ramrod_mcast {
@@ -4962,6 +5075,492 @@ struct vport_update_ramrod_data {
4962 struct eth_vport_rss_config rss_config; 5075 struct eth_vport_rss_config rss_config;
4963}; 5076};
4964 5077
5078struct mstorm_eth_conn_ag_ctx {
5079 u8 byte0;
5080 u8 byte1;
5081 u8 flags0;
5082#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
5083#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
5084#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
5085#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
5086#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
5087#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
5088#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
5089#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
5090#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
5091#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
5092 u8 flags1;
5093#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
5094#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
5095#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
5096#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
5097#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
5098#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
5099#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
5100#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
5101#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
5102#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
5103#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
5104#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
5105#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
5106#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
5107#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
5108#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
5109 __le16 word0;
5110 __le16 word1;
5111 __le32 reg0;
5112 __le32 reg1;
5113};
5114
5115struct xstorm_eth_conn_agctxdq_ext_ldpart {
5116 u8 reserved0;
5117 u8 eth_state;
5118 u8 flags0;
5119#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
5120#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
5121#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
5122#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
5123#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
5124#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
5125#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
5126#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
5127#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
5128#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
5129#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
5130#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
5131#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
5132#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
5133#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
5134#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
5135 u8 flags1;
5136#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
5137#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
5138#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
5139#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
5140#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
5141#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
5142#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
5143#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
5144#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
5145#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
5146#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
5147#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
5148#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
5149#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
5150#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
5151#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
5152 u8 flags2;
5153#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
5154#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
5155#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
5156#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
5157#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
5158#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
5159#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
5160#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
5161 u8 flags3;
5162#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
5163#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
5164#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
5165#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
5166#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
5167#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
5168#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
5169#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
5170 u8 flags4;
5171#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
5172#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
5173#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
5174#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
5175#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
5176#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
5177#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
5178#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
5179 u8 flags5;
5180#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
5181#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
5182#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
5183#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
5184#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
5185#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
5186#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
5187#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
5188 u8 flags6;
5189#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
5190#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
5191#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
5192#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
5193#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
5194#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
5195#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
5196#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
5197 u8 flags7;
5198#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
5199#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
5200#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
5201#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
5202#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
5203#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
5204#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
5205#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
5206#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
5207#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
5208 u8 flags8;
5209#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
5210#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
5211#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
5212#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
5213#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
5214#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
5215#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
5216#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
5217#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
5218#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
5219#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
5220#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
5221#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
5222#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
5223#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
5224#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
5225 u8 flags9;
5226#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
5227#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
5228#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
5229#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
5230#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
5231#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
5232#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
5233#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
5234#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
5235#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
5236#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
5237#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
5238#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
5239#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
5240#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
5241#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
5242 u8 flags10;
5243#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
5244#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
5245#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
5246#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
5247#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
5248#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
5249#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
5250#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
5251#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
5252#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
5253#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
5254#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
5255#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
5256#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
5257#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
5258#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
5259 u8 flags11;
5260#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
5261#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
5262#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
5263#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
5264#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
5265#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
5266#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
5267#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
5268#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
5269#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
5270#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
5271#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
5272#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
5273#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
5274#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
5275#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
5276 u8 flags12;
5277#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
5278#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
5279#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
5280#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
5281#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
5282#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
5283#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
5284#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
5285#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
5286#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
5287#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
5288#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
5289#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
5290#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
5291#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
5292#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
5293 u8 flags13;
5294#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
5295#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
5296#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
5297#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
5298#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
5299#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
5300#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
5301#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
5302#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
5303#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
5304#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
5305#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
5306#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
5307#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
5308#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
5309#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
5310 u8 flags14;
5311#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
5312#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
5313#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
5314#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
5315#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
5316#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
5317#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
5318#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
5319#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
5320#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
5321#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
5322#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
5323#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
5324#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
5325 u8 edpm_event_id;
5326 __le16 physical_q0;
5327 __le16 quota;
5328 __le16 edpm_num_bds;
5329 __le16 tx_bd_cons;
5330 __le16 tx_bd_prod;
5331 __le16 tx_class;
5332 __le16 conn_dpi;
5333 u8 byte3;
5334 u8 byte4;
5335 u8 byte5;
5336 u8 byte6;
5337 __le32 reg0;
5338 __le32 reg1;
5339 __le32 reg2;
5340 __le32 reg3;
5341 __le32 reg4;
5342};
5343
5344struct xstorm_eth_hw_conn_ag_ctx {
5345 u8 reserved0;
5346 u8 eth_state;
5347 u8 flags0;
5348#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
5349#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
5350#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
5351#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
5352#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
5353#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
5354#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
5355#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
5356#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
5357#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
5358#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
5359#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
5360#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
5361#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
5362#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
5363#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
5364 u8 flags1;
5365#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
5366#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
5367#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
5368#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
5369#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
5370#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
5371#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
5372#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
5373#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
5374#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
5375#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
5376#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
5377#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
5378#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
5379#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
5380#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
5381 u8 flags2;
5382#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
5383#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
5384#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
5385#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
5386#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
5387#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
5388#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
5389#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
5390 u8 flags3;
5391#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
5392#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
5393#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
5394#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
5395#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
5396#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
5397#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
5398#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
5399 u8 flags4;
5400#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
5401#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
5402#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
5403#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
5404#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
5405#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
5406#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
5407#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
5408 u8 flags5;
5409#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
5410#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
5411#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
5412#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
5413#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
5414#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
5415#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
5416#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
5417 u8 flags6;
5418#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
5419#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
5420#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
5421#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
5422#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
5423#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
5424#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
5425#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
5426 u8 flags7;
5427#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
5428#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
5429#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
5430#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
5431#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
5432#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
5433#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
5434#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
5435#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
5436#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
5437 u8 flags8;
5438#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
5439#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
5440#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
5441#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
5442#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
5443#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
5444#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
5445#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
5446#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
5447#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
5448#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
5449#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
5450#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
5451#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
5452#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
5453#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
5454 u8 flags9;
5455#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
5456#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
5457#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
5458#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
5459#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
5460#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
5461#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
5462#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
5463#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
5464#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
5465#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
5466#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
5467#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
5468#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
5469#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
5470#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
5471 u8 flags10;
5472#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
5473#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
5474#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
5475#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
5476#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
5477#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
5478#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
5479#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
5480#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
5481#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
5482#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
5483#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
5484#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
5485#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
5486#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
5487#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
5488 u8 flags11;
5489#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
5490#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
5491#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
5492#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
5493#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
5494#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
5495#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
5496#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
5497#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
5498#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
5499#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
5500#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
5501#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
5502#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
5503#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
5504#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
5505 u8 flags12;
5506#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
5507#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
5508#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
5509#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
5510#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
5511#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
5512#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
5513#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
5514#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
5515#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
5516#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
5517#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
5518#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
5519#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
5520#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
5521#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
5522 u8 flags13;
5523#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
5524#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
5525#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
5526#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
5527#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
5528#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
5529#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
5530#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
5531#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
5532#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
5533#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
5534#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
5535#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
5536#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
5537#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
5538#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
5539 u8 flags14;
5540#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
5541#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
5542#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
5543#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
5544#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
5545#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
5546#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
5547#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
5548#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
5549#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
5550#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
5551#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
5552#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
5553#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
5554 u8 edpm_event_id;
5555 __le16 physical_q0;
5556 __le16 quota;
5557 __le16 edpm_num_bds;
5558 __le16 tx_bd_cons;
5559 __le16 tx_bd_prod;
5560 __le16 tx_class;
5561 __le16 conn_dpi;
5562};
5563
4965struct mstorm_rdma_task_st_ctx { 5564struct mstorm_rdma_task_st_ctx {
4966 struct regpair temp[4]; 5565 struct regpair temp[4];
4967}; 5566};
@@ -6165,7 +6764,7 @@ struct ystorm_roce_conn_st_ctx {
6165}; 6764};
6166 6765
6167struct xstorm_roce_conn_st_ctx { 6766struct xstorm_roce_conn_st_ctx {
6168 struct regpair temp[22]; 6767 struct regpair temp[24];
6169}; 6768};
6170 6769
6171struct tstorm_roce_conn_st_ctx { 6770struct tstorm_roce_conn_st_ctx {
@@ -6220,7 +6819,7 @@ struct roce_create_qp_req_ramrod_data {
6220 __le16 mtu; 6819 __le16 mtu;
6221 __le16 pd; 6820 __le16 pd;
6222 __le16 sq_num_pages; 6821 __le16 sq_num_pages;
6223 __le16 reseved2; 6822 __le16 low_latency_phy_queue;
6224 struct regpair sq_pbl_addr; 6823 struct regpair sq_pbl_addr;
6225 struct regpair orq_pbl_addr; 6824 struct regpair orq_pbl_addr;
6226 __le16 local_mac_addr[3]; 6825 __le16 local_mac_addr[3];
@@ -6234,7 +6833,7 @@ struct roce_create_qp_req_ramrod_data {
6234 u8 stats_counter_id; 6833 u8 stats_counter_id;
6235 u8 reserved3[7]; 6834 u8 reserved3[7];
6236 __le32 cq_cid; 6835 __le32 cq_cid;
6237 __le16 physical_queue0; 6836 __le16 regular_latency_phy_queue;
6238 __le16 dpi; 6837 __le16 dpi;
6239}; 6838};
6240 6839
@@ -6282,15 +6881,16 @@ struct roce_create_qp_resp_ramrod_data {
6282 __le32 dst_gid[4]; 6881 __le32 dst_gid[4];
6283 struct regpair qp_handle_for_cqe; 6882 struct regpair qp_handle_for_cqe;
6284 struct regpair qp_handle_for_async; 6883 struct regpair qp_handle_for_async;
6285 __le32 reserved2[2]; 6884 __le16 low_latency_phy_queue;
6885 u8 reserved2[6];
6286 __le32 cq_cid; 6886 __le32 cq_cid;
6287 __le16 physical_queue0; 6887 __le16 regular_latency_phy_queue;
6288 __le16 dpi; 6888 __le16 dpi;
6289}; 6889};
6290 6890
6291struct roce_destroy_qp_req_output_params { 6891struct roce_destroy_qp_req_output_params {
6292 __le32 num_bound_mw; 6892 __le32 num_bound_mw;
6293 __le32 reserved; 6893 __le32 cq_prod;
6294}; 6894};
6295 6895
6296struct roce_destroy_qp_req_ramrod_data { 6896struct roce_destroy_qp_req_ramrod_data {
@@ -6299,7 +6899,7 @@ struct roce_destroy_qp_req_ramrod_data {
6299 6899
6300struct roce_destroy_qp_resp_output_params { 6900struct roce_destroy_qp_resp_output_params {
6301 __le32 num_invalidated_mw; 6901 __le32 num_invalidated_mw;
6302 __le32 reserved; 6902 __le32 cq_prod;
6303}; 6903};
6304 6904
6305struct roce_destroy_qp_resp_ramrod_data { 6905struct roce_destroy_qp_resp_ramrod_data {
@@ -7426,6 +8026,7 @@ struct ystorm_fcoe_conn_st_ctx {
7426 u8 fcp_rsp_size; 8026 u8 fcp_rsp_size;
7427 __le16 mss; 8027 __le16 mss;
7428 struct regpair reserved; 8028 struct regpair reserved;
8029 __le16 min_frame_size;
7429 u8 protection_info_flags; 8030 u8 protection_info_flags;
7430#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1 8031#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
7431#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0 8032#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
@@ -7444,7 +8045,6 @@ struct ystorm_fcoe_conn_st_ctx {
7444#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F 8045#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F
7445#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2 8046#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2
7446 u8 fcp_xfer_size; 8047 u8 fcp_xfer_size;
7447 u8 reserved3[2];
7448}; 8048};
7449 8049
7450struct fcoe_vlan_fields { 8050struct fcoe_vlan_fields {
@@ -8273,10 +8873,10 @@ struct xstorm_iscsi_conn_ag_ctx {
8273#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 8873#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
8274#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 8874#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
8275 u8 flags7; 8875 u8 flags7;
8276#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 8876#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
8277#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 8877#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
8278#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 8878#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
8279#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 8879#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
8280#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3 8880#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
8281#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4 8881#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
8282#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 8882#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
@@ -8322,10 +8922,10 @@ struct xstorm_iscsi_conn_ag_ctx {
8322#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0 8922#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
8323#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 8923#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
8324#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 8924#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
8325#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 8925#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
8326#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 8926#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
8327#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 8927#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
8328#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 8928#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
8329#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 8929#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
8330#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 8930#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
8331#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1 8931#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
@@ -8335,8 +8935,8 @@ struct xstorm_iscsi_conn_ag_ctx {
8335#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1 8935#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
8336#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7 8936#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
8337 u8 flags11; 8937 u8 flags11;
8338#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 8938#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
8339#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 0 8939#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
8340#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 8940#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
8341#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1 8941#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
8342#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1 8942#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
@@ -8440,7 +9040,7 @@ struct xstorm_iscsi_conn_ag_ctx {
8440 __le32 reg10; 9040 __le32 reg10;
8441 __le32 reg11; 9041 __le32 reg11;
8442 __le32 exp_stat_sn; 9042 __le32 exp_stat_sn;
8443 __le32 reg13; 9043 __le32 ongoing_fast_rxmit_seq;
8444 __le32 reg14; 9044 __le32 reg14;
8445 __le32 reg15; 9045 __le32 reg15;
8446 __le32 reg16; 9046 __le32 reg16;
@@ -8466,10 +9066,10 @@ struct tstorm_iscsi_conn_ag_ctx {
8466#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 9066#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
8467#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6 9067#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
8468 u8 flags1; 9068 u8 flags1;
8469#define TSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 9069#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
8470#define TSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 0 9070#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
8471#define TSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 9071#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
8472#define TSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 2 9072#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
8473#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 9073#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
8474#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 9074#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
8475#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 9075#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
@@ -8490,10 +9090,10 @@ struct tstorm_iscsi_conn_ag_ctx {
8490#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2 9090#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
8491#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 9091#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
8492#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4 9092#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
8493#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 9093#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
8494#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 5 9094#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
8495#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 9095#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
8496#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 6 9096#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
8497#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 9097#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
8498#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 9098#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
8499 u8 flags4; 9099 u8 flags4;
@@ -8539,7 +9139,7 @@ struct tstorm_iscsi_conn_ag_ctx {
8539 __le32 reg6; 9139 __le32 reg6;
8540 __le32 reg7; 9140 __le32 reg7;
8541 __le32 reg8; 9141 __le32 reg8;
8542 u8 byte2; 9142 u8 cid_offload_cnt;
8543 u8 byte3; 9143 u8 byte3;
8544 __le16 word0; 9144 __le16 word0;
8545}; 9145};
@@ -8831,11 +9431,24 @@ struct eth_stats {
8831 u64 r511; 9431 u64 r511;
8832 u64 r1023; 9432 u64 r1023;
8833 u64 r1518; 9433 u64 r1518;
8834 u64 r1522; 9434
8835 u64 r2047; 9435 union {
8836 u64 r4095; 9436 struct {
8837 u64 r9216; 9437 u64 r1522;
8838 u64 r16383; 9438 u64 r2047;
9439 u64 r4095;
9440 u64 r9216;
9441 u64 r16383;
9442 } bb0;
9443 struct {
9444 u64 unused1;
9445 u64 r1519_to_max;
9446 u64 unused2;
9447 u64 unused3;
9448 u64 unused4;
9449 } ah0;
9450 } u0;
9451
8839 u64 rfcs; 9452 u64 rfcs;
8840 u64 rxcf; 9453 u64 rxcf;
8841 u64 rxpf; 9454 u64 rxpf;
@@ -8852,14 +9465,36 @@ struct eth_stats {
8852 u64 t511; 9465 u64 t511;
8853 u64 t1023; 9466 u64 t1023;
8854 u64 t1518; 9467 u64 t1518;
8855 u64 t2047; 9468
8856 u64 t4095; 9469 union {
8857 u64 t9216; 9470 struct {
8858 u64 t16383; 9471 u64 t2047;
9472 u64 t4095;
9473 u64 t9216;
9474 u64 t16383;
9475 } bb1;
9476 struct {
9477 u64 t1519_to_max;
9478 u64 unused6;
9479 u64 unused7;
9480 u64 unused8;
9481 } ah1;
9482 } u1;
9483
8859 u64 txpf; 9484 u64 txpf;
8860 u64 txpp; 9485 u64 txpp;
8861 u64 tlpiec; 9486
8862 u64 tncl; 9487 union {
9488 struct {
9489 u64 tlpiec;
9490 u64 tncl;
9491 } bb2;
9492 struct {
9493 u64 unused9;
9494 u64 unused10;
9495 } ah2;
9496 } u2;
9497
8863 u64 rbyte; 9498 u64 rbyte;
8864 u64 rxuca; 9499 u64 rxuca;
8865 u64 rxmca; 9500 u64 rxmca;
@@ -9067,6 +9702,10 @@ struct dcb_dscp_map {
9067struct public_global { 9702struct public_global {
9068 u32 max_path; 9703 u32 max_path;
9069 u32 max_ports; 9704 u32 max_ports;
9705#define MODE_1P 1
9706#define MODE_2P 2
9707#define MODE_3P 3
9708#define MODE_4P 4
9070 u32 debug_mb_offset; 9709 u32 debug_mb_offset;
9071 u32 phymod_dbg_mb_offset; 9710 u32 phymod_dbg_mb_offset;
9072 struct couple_mode_teaming cmt; 9711 struct couple_mode_teaming cmt;
@@ -9659,6 +10298,8 @@ struct nvm_cfg1_glob {
9659#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC 10298#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC
9660#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD 10299#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD
9661#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE 10300#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE
10301#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xF
10302
9662 u32 e_lane_cfg1; 10303 u32 e_lane_cfg1;
9663 u32 e_lane_cfg2; 10304 u32 e_lane_cfg2;
9664 u32 f_lane_cfg1; 10305 u32 f_lane_cfg1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index d891a6852695..2a50e2b7568f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -215,13 +215,6 @@ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
215{ 215{
216 u32 qm_line_crd; 216 u32 qm_line_crd;
217 217
218 /* In A0 - Limit the size of pbf queue so that only 511 commands with
219 * the minimum size of 4 (FCoE minimum size)
220 */
221 bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
222
223 if (is_bb_a0)
224 cmdq_lines = min_t(u32, cmdq_lines, 1022);
225 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines); 218 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
226 OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 219 OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
227 (u32)cmdq_lines); 220 (u32)cmdq_lines);
@@ -343,13 +336,11 @@ static void qed_tx_pq_map_rt_init(
343 u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE; 336 u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
344 u16 last_pq_group = (p_params->start_pq + num_pqs - 1) / 337 u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
345 QM_PF_QUEUE_GROUP_SIZE; 338 QM_PF_QUEUE_GROUP_SIZE;
346 bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
347 u16 i, pq_id, pq_group; 339 u16 i, pq_id, pq_group;
348 340
349 /* a bit per Tx PQ indicating if the PQ is associated with a VF */ 341 /* a bit per Tx PQ indicating if the PQ is associated with a VF */
350 u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 }; 342 u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
351 u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE; 343 u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
352 u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
353 u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids); 344 u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
354 u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids); 345 u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
355 u32 mem_addr_4kb = base_mem_addr_4kb; 346 u32 mem_addr_4kb = base_mem_addr_4kb;
@@ -371,6 +362,10 @@ static void qed_tx_pq_map_rt_init(
371 bool is_vf_pq = (i >= p_params->num_pf_pqs); 362 bool is_vf_pq = (i >= p_params->num_pf_pqs);
372 struct qm_rf_pq_map tx_pq_map; 363 struct qm_rf_pq_map tx_pq_map;
373 364
365 bool rl_valid = p_params->pq_params[i].rl_valid &&
366 (p_params->pq_params[i].vport_id <
367 MAX_QM_GLOBAL_RLS);
368
374 /* update first Tx PQ of VPORT/TC */ 369 /* update first Tx PQ of VPORT/TC */
375 u8 vport_id_in_pf = p_params->pq_params[i].vport_id - 370 u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
376 p_params->start_vport; 371 p_params->start_vport;
@@ -389,14 +384,18 @@ static void qed_tx_pq_map_rt_init(
389 (p_params->pf_id << 384 (p_params->pf_id <<
390 QM_WFQ_VP_PQ_PF_SHIFT)); 385 QM_WFQ_VP_PQ_PF_SHIFT));
391 } 386 }
387
388 if (p_params->pq_params[i].rl_valid && !rl_valid)
389 DP_NOTICE(p_hwfn,
390 "Invalid VPORT ID for rate limiter configuration");
392 /* fill PQ map entry */ 391 /* fill PQ map entry */
393 memset(&tx_pq_map, 0, sizeof(tx_pq_map)); 392 memset(&tx_pq_map, 0, sizeof(tx_pq_map));
394 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1); 393 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
395 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID, 394 SET_FIELD(tx_pq_map.reg,
396 p_params->pq_params[i].rl_valid ? 1 : 0); 395 QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
397 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id); 396 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
398 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID, 397 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
399 p_params->pq_params[i].rl_valid ? 398 rl_valid ?
400 p_params->pq_params[i].vport_id : 0); 399 p_params->pq_params[i].vport_id : 0);
401 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq); 400 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
402 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, 401 SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
@@ -413,8 +412,9 @@ static void qed_tx_pq_map_rt_init(
413 /* if PQ is associated with a VF, add indication 412 /* if PQ is associated with a VF, add indication
414 * to PQ VF mask 413 * to PQ VF mask
415 */ 414 */
416 tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |= 415 tx_pq_vf_mask[pq_id /
417 (1 << (pq_id % tx_pq_vf_mask_width)); 416 QM_PF_QUEUE_GROUP_SIZE] |=
417 BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
418 mem_addr_4kb += vport_pq_mem_4kb; 418 mem_addr_4kb += vport_pq_mem_4kb;
419 } else { 419 } else {
420 mem_addr_4kb += pq_mem_4kb; 420 mem_addr_4kb += pq_mem_4kb;
@@ -480,8 +480,8 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
480 if (p_params->pf_id < MAX_NUM_PFS_BB) 480 if (p_params->pf_id < MAX_NUM_PFS_BB)
481 crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET; 481 crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
482 else 482 else
483 crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET + 483 crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
484 (p_params->pf_id % MAX_NUM_PFS_BB); 484 crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;
485 485
486 inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq); 486 inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
487 if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { 487 if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
@@ -498,11 +498,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
498 QM_WFQ_CRD_REG_SIGN_BIT); 498 QM_WFQ_CRD_REG_SIGN_BIT);
499 } 499 }
500 500
501 STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
502 inc_val);
503 STORE_RT_REG(p_hwfn, 501 STORE_RT_REG(p_hwfn,
504 QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id, 502 QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
505 QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT); 503 QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
504 STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
505 inc_val);
506 return 0; 506 return 0;
507} 507}
508 508
@@ -576,6 +576,12 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
576{ 576{
577 u8 i, vport_id; 577 u8 i, vport_id;
578 578
579 if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
580 DP_NOTICE(p_hwfn,
581 "Invalid VPORT ID for rate limiter configuration");
582 return -1;
583 }
584
579 /* go over all PF VPORTs */ 585 /* go over all PF VPORTs */
580 for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) { 586 for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
581 u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl); 587 u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
@@ -785,6 +791,12 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
785{ 791{
786 u32 inc_val = QM_RL_INC_VAL(vport_rl); 792 u32 inc_val = QM_RL_INC_VAL(vport_rl);
787 793
794 if (vport_id >= MAX_QM_GLOBAL_RLS) {
795 DP_NOTICE(p_hwfn,
796 "Invalid VPORT ID for rate limiter configuration");
797 return -1;
798 }
799
788 if (inc_val > QM_RL_MAX_INC_VAL) { 800 if (inc_val > QM_RL_MAX_INC_VAL) {
789 DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration"); 801 DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
790 return -1; 802 return -1;
@@ -940,12 +952,6 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
940 eth_geneve_enable ? 1 : 0); 952 eth_geneve_enable ? 1 : 0);
941 qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0); 953 qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
942 954
943 /* comp ver */
944 reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
945 qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
946 qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
947 qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
948
949 /* EDPM with geneve tunnel not supported in BB_B0 */ 955 /* EDPM with geneve tunnel not supported in BB_B0 */
950 if (QED_IS_BB_B0(p_hwfn->cdev)) 956 if (QED_IS_BB_B0(p_hwfn->cdev))
951 return; 957 return;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 243b64e0d4dc..4a2e7be5bf72 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -554,7 +554,7 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
554 } 554 }
555 555
556 /* First Dword contains metadata and should be skipped */ 556 /* First Dword contains metadata and should be skipped */
557 buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32)); 557 buf_hdr = (struct bin_buffer_hdr *)data;
558 558
559 offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset; 559 offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
560 fw->fw_ver_info = (struct fw_ver_info *)(data + offset); 560 fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index df932be5a4e5..4385ccbb5efb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1470,13 +1470,20 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
1470 memset(&pstats, 0, sizeof(pstats)); 1470 memset(&pstats, 0, sizeof(pstats));
1471 qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len); 1471 qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
1472 1472
1473 p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes); 1473 p_stats->common.tx_ucast_bytes +=
1474 p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes); 1474 HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1475 p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes); 1475 p_stats->common.tx_mcast_bytes +=
1476 p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts); 1476 HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1477 p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts); 1477 p_stats->common.tx_bcast_bytes +=
1478 p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts); 1478 HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1479 p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts); 1479 p_stats->common.tx_ucast_pkts +=
1480 HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1481 p_stats->common.tx_mcast_pkts +=
1482 HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1483 p_stats->common.tx_bcast_pkts +=
1484 HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1485 p_stats->common.tx_err_drop_pkts +=
1486 HILO_64_REGPAIR(pstats.error_drop_pkts);
1480} 1487}
1481 1488
1482static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, 1489static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
@@ -1502,10 +1509,10 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
1502 memset(&tstats, 0, sizeof(tstats)); 1509 memset(&tstats, 0, sizeof(tstats));
1503 qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len); 1510 qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
1504 1511
1505 p_stats->mftag_filter_discards += 1512 p_stats->common.mftag_filter_discards +=
1506 HILO_64_REGPAIR(tstats.mftag_filter_discard); 1513 HILO_64_REGPAIR(tstats.mftag_filter_discard);
1507 p_stats->mac_filter_discards += 1514 p_stats->common.mac_filter_discards +=
1508 HILO_64_REGPAIR(tstats.eth_mac_filter_discard); 1515 HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1509} 1516}
1510 1517
1511static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn, 1518static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
@@ -1539,12 +1546,15 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
1539 memset(&ustats, 0, sizeof(ustats)); 1546 memset(&ustats, 0, sizeof(ustats));
1540 qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len); 1547 qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
1541 1548
1542 p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes); 1549 p_stats->common.rx_ucast_bytes +=
1543 p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes); 1550 HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1544 p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes); 1551 p_stats->common.rx_mcast_bytes +=
1545 p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts); 1552 HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1546 p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts); 1553 p_stats->common.rx_bcast_bytes +=
1547 p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts); 1554 HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1555 p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1556 p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1557 p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1548} 1558}
1549 1559
1550static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn, 1560static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
@@ -1578,23 +1588,26 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
1578 memset(&mstats, 0, sizeof(mstats)); 1588 memset(&mstats, 0, sizeof(mstats));
1579 qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len); 1589 qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
1580 1590
1581 p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard); 1591 p_stats->common.no_buff_discards +=
1582 p_stats->packet_too_big_discard += 1592 HILO_64_REGPAIR(mstats.no_buff_discard);
1583 HILO_64_REGPAIR(mstats.packet_too_big_discard); 1593 p_stats->common.packet_too_big_discard +=
1584 p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard); 1594 HILO_64_REGPAIR(mstats.packet_too_big_discard);
1585 p_stats->tpa_coalesced_pkts += 1595 p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
1586 HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); 1596 p_stats->common.tpa_coalesced_pkts +=
1587 p_stats->tpa_coalesced_events += 1597 HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1588 HILO_64_REGPAIR(mstats.tpa_coalesced_events); 1598 p_stats->common.tpa_coalesced_events +=
1589 p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num); 1599 HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1590 p_stats->tpa_coalesced_bytes += 1600 p_stats->common.tpa_aborts_num +=
1591 HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); 1601 HILO_64_REGPAIR(mstats.tpa_aborts_num);
1602 p_stats->common.tpa_coalesced_bytes +=
1603 HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1592} 1604}
1593 1605
1594static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, 1606static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
1595 struct qed_ptt *p_ptt, 1607 struct qed_ptt *p_ptt,
1596 struct qed_eth_stats *p_stats) 1608 struct qed_eth_stats *p_stats)
1597{ 1609{
1610 struct qed_eth_stats_common *p_common = &p_stats->common;
1598 struct port_stats port_stats; 1611 struct port_stats port_stats;
1599 int j; 1612 int j;
1600 1613
@@ -1605,54 +1618,75 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
1605 offsetof(struct public_port, stats), 1618 offsetof(struct public_port, stats),
1606 sizeof(port_stats)); 1619 sizeof(port_stats));
1607 1620
1608 p_stats->rx_64_byte_packets += port_stats.eth.r64; 1621 p_common->rx_64_byte_packets += port_stats.eth.r64;
1609 p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127; 1622 p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
1610 p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255; 1623 p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
1611 p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511; 1624 p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
1612 p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023; 1625 p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
1613 p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518; 1626 p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
1614 p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522; 1627 p_common->rx_crc_errors += port_stats.eth.rfcs;
1615 p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047; 1628 p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
1616 p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095; 1629 p_common->rx_pause_frames += port_stats.eth.rxpf;
1617 p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216; 1630 p_common->rx_pfc_frames += port_stats.eth.rxpp;
1618 p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383; 1631 p_common->rx_align_errors += port_stats.eth.raln;
1619 p_stats->rx_crc_errors += port_stats.eth.rfcs; 1632 p_common->rx_carrier_errors += port_stats.eth.rfcr;
1620 p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf; 1633 p_common->rx_oversize_packets += port_stats.eth.rovr;
1621 p_stats->rx_pause_frames += port_stats.eth.rxpf; 1634 p_common->rx_jabbers += port_stats.eth.rjbr;
1622 p_stats->rx_pfc_frames += port_stats.eth.rxpp; 1635 p_common->rx_undersize_packets += port_stats.eth.rund;
1623 p_stats->rx_align_errors += port_stats.eth.raln; 1636 p_common->rx_fragments += port_stats.eth.rfrg;
1624 p_stats->rx_carrier_errors += port_stats.eth.rfcr; 1637 p_common->tx_64_byte_packets += port_stats.eth.t64;
1625 p_stats->rx_oversize_packets += port_stats.eth.rovr; 1638 p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
1626 p_stats->rx_jabbers += port_stats.eth.rjbr; 1639 p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
1627 p_stats->rx_undersize_packets += port_stats.eth.rund; 1640 p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
1628 p_stats->rx_fragments += port_stats.eth.rfrg; 1641 p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
1629 p_stats->tx_64_byte_packets += port_stats.eth.t64; 1642 p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
1630 p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127; 1643 p_common->tx_pause_frames += port_stats.eth.txpf;
1631 p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255; 1644 p_common->tx_pfc_frames += port_stats.eth.txpp;
1632 p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511; 1645 p_common->rx_mac_bytes += port_stats.eth.rbyte;
1633 p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023; 1646 p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
1634 p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518; 1647 p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
1635 p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047; 1648 p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
1636 p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095; 1649 p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
1637 p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216; 1650 p_common->tx_mac_bytes += port_stats.eth.tbyte;
1638 p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383; 1651 p_common->tx_mac_uc_packets += port_stats.eth.txuca;
1639 p_stats->tx_pause_frames += port_stats.eth.txpf; 1652 p_common->tx_mac_mc_packets += port_stats.eth.txmca;
1640 p_stats->tx_pfc_frames += port_stats.eth.txpp; 1653 p_common->tx_mac_bc_packets += port_stats.eth.txbca;
1641 p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec; 1654 p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
1642 p_stats->tx_total_collisions += port_stats.eth.tncl;
1643 p_stats->rx_mac_bytes += port_stats.eth.rbyte;
1644 p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
1645 p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
1646 p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
1647 p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
1648 p_stats->tx_mac_bytes += port_stats.eth.tbyte;
1649 p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
1650 p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
1651 p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
1652 p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
1653 for (j = 0; j < 8; j++) { 1655 for (j = 0; j < 8; j++) {
1654 p_stats->brb_truncates += port_stats.brb.brb_truncate[j]; 1656 p_common->brb_truncates += port_stats.brb.brb_truncate[j];
1655 p_stats->brb_discards += port_stats.brb.brb_discard[j]; 1657 p_common->brb_discards += port_stats.brb.brb_discard[j];
1658 }
1659
1660 if (QED_IS_BB(p_hwfn->cdev)) {
1661 struct qed_eth_stats_bb *p_bb = &p_stats->bb;
1662
1663 p_bb->rx_1519_to_1522_byte_packets +=
1664 port_stats.eth.u0.bb0.r1522;
1665 p_bb->rx_1519_to_2047_byte_packets +=
1666 port_stats.eth.u0.bb0.r2047;
1667 p_bb->rx_2048_to_4095_byte_packets +=
1668 port_stats.eth.u0.bb0.r4095;
1669 p_bb->rx_4096_to_9216_byte_packets +=
1670 port_stats.eth.u0.bb0.r9216;
1671 p_bb->rx_9217_to_16383_byte_packets +=
1672 port_stats.eth.u0.bb0.r16383;
1673 p_bb->tx_1519_to_2047_byte_packets +=
1674 port_stats.eth.u1.bb1.t2047;
1675 p_bb->tx_2048_to_4095_byte_packets +=
1676 port_stats.eth.u1.bb1.t4095;
1677 p_bb->tx_4096_to_9216_byte_packets +=
1678 port_stats.eth.u1.bb1.t9216;
1679 p_bb->tx_9217_to_16383_byte_packets +=
1680 port_stats.eth.u1.bb1.t16383;
1681 p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
1682 p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
1683 } else {
1684 struct qed_eth_stats_ah *p_ah = &p_stats->ah;
1685
1686 p_ah->rx_1519_to_max_byte_packets +=
1687 port_stats.eth.u0.ah0.r1519_to_max;
1688 p_ah->tx_1519_to_max_byte_packets =
1689 port_stats.eth.u1.ah1.t1519_to_max;
1656 } 1690 }
1657} 1691}
1658 1692
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 0d3cef409c96..178650aa0c6c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -597,7 +597,7 @@ static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
597 u8 bd_flags = 0; 597 u8 bd_flags = 0;
598 598
599 if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST)) 599 if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
600 SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1); 600 SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);
601 601
602 return bd_flags; 602 return bd_flags;
603} 603}
@@ -758,8 +758,8 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
758 p_buffer->placement_offset; 758 p_buffer->placement_offset;
759 parse_flags = p_buffer->parse_flags; 759 parse_flags = p_buffer->parse_flags;
760 bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags); 760 bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
761 SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1); 761 SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
762 SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1); 762 SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
763 763
764 rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1, 764 rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
765 p_buffer->vlan, bd_flags, 765 p_buffer->vlan, bd_flags,
@@ -1591,33 +1591,34 @@ static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
1591 p_tx->cur_send_frag_num++; 1591 p_tx->cur_send_frag_num++;
1592} 1592}
1593 1593
1594static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, 1594static void
1595 struct qed_ll2_info *p_ll2, 1595qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1596 struct qed_ll2_tx_packet *p_curp, 1596 struct qed_ll2_info *p_ll2,
1597 u8 num_of_bds, 1597 struct qed_ll2_tx_packet *p_curp,
1598 enum core_tx_dest tx_dest, 1598 u8 num_of_bds,
1599 u16 vlan, 1599 enum core_tx_dest tx_dest,
1600 u8 bd_flags, 1600 u16 vlan,
1601 u16 l4_hdr_offset_w, 1601 u8 bd_flags,
1602 enum core_roce_flavor_type type, 1602 u16 l4_hdr_offset_w,
1603 dma_addr_t first_frag, 1603 enum core_roce_flavor_type roce_flavor,
1604 u16 first_frag_len) 1604 dma_addr_t first_frag,
1605 u16 first_frag_len)
1605{ 1606{
1606 struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain; 1607 struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
1607 u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain); 1608 u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
1608 struct core_tx_bd *start_bd = NULL; 1609 struct core_tx_bd *start_bd = NULL;
1609 u16 frag_idx; 1610 u16 bd_data = 0, frag_idx;
1610 1611
1611 start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); 1612 start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1612 start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan); 1613 start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
1613 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, 1614 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
1614 cpu_to_le16(l4_hdr_offset_w)); 1615 cpu_to_le16(l4_hdr_offset_w));
1615 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest); 1616 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
1616 start_bd->bd_flags.as_bitfield = bd_flags; 1617 bd_data |= bd_flags;
1617 start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK << 1618 SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
1618 CORE_TX_BD_FLAGS_START_BD_SHIFT; 1619 SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds);
1619 SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds); 1620 SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
1620 SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type); 1621 start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
1621 DMA_REGPAIR_LE(start_bd->addr, first_frag); 1622 DMA_REGPAIR_LE(start_bd->addr, first_frag);
1622 start_bd->nbytes = cpu_to_le16(first_frag_len); 1623 start_bd->nbytes = cpu_to_le16(first_frag_len);
1623 1624
@@ -1642,9 +1643,8 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1642 struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd; 1643 struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
1643 1644
1644 *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); 1645 *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1645 (*p_bd)->bd_flags.as_bitfield = 0; 1646 (*p_bd)->bd_data.as_bitfield = 0;
1646 (*p_bd)->bitfield1 = 0; 1647 (*p_bd)->bitfield1 = 0;
1647 (*p_bd)->bitfield0 = 0;
1648 p_curp->bds_set[frag_idx].tx_frag = 0; 1648 p_curp->bds_set[frag_idx].tx_frag = 0;
1649 p_curp->bds_set[frag_idx].frag_len = 0; 1649 p_curp->bds_set[frag_idx].frag_len = 0;
1650 } 1650 }
@@ -2241,11 +2241,11 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
2241 /* Request HW to calculate IP csum */ 2241 /* Request HW to calculate IP csum */
2242 if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) && 2242 if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
2243 ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) 2243 ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2244 flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT); 2244 flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
2245 2245
2246 if (skb_vlan_tag_present(skb)) { 2246 if (skb_vlan_tag_present(skb)) {
2247 vlan = skb_vlan_tag_get(skb); 2247 vlan = skb_vlan_tag_get(skb);
2248 flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT); 2248 flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
2249 } 2249 }
2250 2250
2251 rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), 2251 rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index eef30a598b40..766c6f39ea63 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -238,6 +238,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
238 dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality == 238 dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
239 QED_PCI_ETH_ROCE); 239 QED_PCI_ETH_ROCE);
240 dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); 240 dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
241 dev_info->dev_type = cdev->type;
241 ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); 242 ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
242 243
243 if (IS_PF(cdev)) { 244 if (IS_PF(cdev)) {
@@ -1653,8 +1654,10 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
1653 switch (type) { 1654 switch (type) {
1654 case QED_MCP_LAN_STATS: 1655 case QED_MCP_LAN_STATS:
1655 qed_get_vport_stats(cdev, &eth_stats); 1656 qed_get_vport_stats(cdev, &eth_stats);
1656 stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts; 1657 stats->lan_stats.ucast_rx_pkts =
1657 stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts; 1658 eth_stats.common.rx_ucast_pkts;
1659 stats->lan_stats.ucast_tx_pkts =
1660 eth_stats.common.tx_ucast_pkts;
1658 stats->lan_stats.fcs_err = -1; 1661 stats->lan_stats.fcs_err = -1;
1659 break; 1662 break;
1660 case QED_MCP_FCOE_STATS: 1663 case QED_MCP_FCOE_STATS:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 368e88de146c..bdbfd6d4485e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -479,11 +479,10 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
479 rel_pfid) 479 rel_pfid)
480#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id) 480#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
481 481
482/* TODO - this is only correct as long as only BB is supported, and 482#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
483 * no port-swapping is implemented; Afterwards we'll need to fix it. 483 ((_p_hwfn)->cdev->num_ports_in_engines * \
484 */ 484 qed_device_num_engines((_p_hwfn)->cdev)))
485#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ 485
486 ((_p_hwfn)->cdev->num_ports_in_engines * 2))
487struct qed_mcp_info { 486struct qed_mcp_info {
488 /* Spinlock used for protecting the access to the MFW mailbox */ 487 /* Spinlock used for protecting the access to the MFW mailbox */
489 spinlock_t lock; 488 spinlock_t lock;
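A standalone C sketch (illustration only, not driver code) of the port mapping the reworked MFW_PORT macro above computes: the absolute PF id taken modulo the device's total port count (ports per engine times number of engines), replacing the previous hard-coded BB-only "* 2". The struct and helper names below are invented.

#include <stdio.h>

struct fake_dev {
	unsigned int ports_per_engine;	/* invented stand-in for num_ports_in_engines */
	unsigned int num_engines;	/* 1 or 2 depending on the ASIC */
};

/* Mirrors the new macro: PF id modulo the total port count of the device. */
static unsigned int mfw_port(unsigned int abs_pf_id, const struct fake_dev *dev)
{
	return abs_pf_id % (dev->ports_per_engine * dev->num_engines);
}

int main(void)
{
	struct fake_dev dev = { .ports_per_engine = 2, .num_engines = 2 };
	unsigned int pf;

	for (pf = 0; pf < 8; pf++)
		printf("abs_pf_id %u -> mfw port %u\n", pf, mfw_port(pf, &dev));
	return 0;
}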
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
index d27aa85da23c..80c9c0b172dd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -262,12 +262,20 @@ static int qed_ptp_hw_enable(struct qed_dev *cdev)
262 qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1); 262 qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);
263 263
264 /* Pause free running counter */ 264 /* Pause free running counter */
265 qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2); 265 if (QED_IS_BB_B0(p_hwfn->cdev))
266 qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
267 if (QED_IS_AH(p_hwfn->cdev))
268 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2);
266 269
267 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0); 270 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
268 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0); 271 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
269 /* Resume free running counter */ 272 /* Resume free running counter */
270 qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4); 273 if (QED_IS_BB_B0(p_hwfn->cdev))
274 qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
275 if (QED_IS_AH(p_hwfn->cdev)) {
276 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4);
277 qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1);
278 }
271 279
272 /* Disable drift register */ 280 /* Disable drift register */
273 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0); 281 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
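Hedged illustration of the chip-dependent pause/resume sequence introduced above: on BB parts the free-running counter is controlled through NIG_REG_TIMESYNC_GEN_REG_BB, on AH/K2 parts through NIG_REG_TSGEN_FREECNT_UPDATE_K2, with an extra latch write on resume. The userspace sketch below fakes the register write; none of its function names exist in the driver.

#include <stdio.h>

enum chip_type { CHIP_BB_B0, CHIP_AH };

static void fake_wr(const char *reg, unsigned int val)
{
	printf("wr %-32s <- %u\n", reg, val);
}

/* Pause (val 2) or resume (val 4) the free-running counter, picking the
 * register by ASIC generation, as in the qed_ptp_hw_enable() change above.
 */
static void tsgen_free_cnt_ctrl(enum chip_type chip, int pause)
{
	unsigned int val = pause ? 2 : 4;

	if (chip == CHIP_BB_B0) {
		fake_wr("NIG_REG_TIMESYNC_GEN_REG_BB", val);
	} else {
		fake_wr("NIG_REG_TSGEN_FREECNT_UPDATE_K2", val);
		if (!pause)
			fake_wr("NIG_REG_PTP_LATCH_OSTS_PKT_TIME", 1);
	}
}

int main(void)
{
	tsgen_free_cnt_ctrl(CHIP_AH, 1);	/* pause  */
	tsgen_free_cnt_ctrl(CHIP_AH, 0);	/* resume */
	return 0;
}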
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index d59d9df60cd2..6d4ac7e2ee83 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -160,13 +160,13 @@
160 0x2e0704UL 160 0x2e0704UL
161#define CCFC_REG_STRONG_ENABLE_PF \ 161#define CCFC_REG_STRONG_ENABLE_PF \
162 0x2e0708UL 162 0x2e0708UL
163#define PGLUE_B_REG_PGL_ADDR_88_F0 \ 163#define PGLUE_B_REG_PGL_ADDR_88_F0_BB \
164 0x2aa404UL 164 0x2aa404UL
165#define PGLUE_B_REG_PGL_ADDR_8C_F0 \ 165#define PGLUE_B_REG_PGL_ADDR_8C_F0_BB \
166 0x2aa408UL 166 0x2aa408UL
167#define PGLUE_B_REG_PGL_ADDR_90_F0 \ 167#define PGLUE_B_REG_PGL_ADDR_90_F0_BB \
168 0x2aa40cUL 168 0x2aa40cUL
169#define PGLUE_B_REG_PGL_ADDR_94_F0 \ 169#define PGLUE_B_REG_PGL_ADDR_94_F0_BB \
170 0x2aa410UL 170 0x2aa410UL
171#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \ 171#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
172 0x2aa138UL 172 0x2aa138UL
@@ -356,6 +356,10 @@
356 0x238804UL 356 0x238804UL
357#define RDIF_REG_STOP_ON_ERROR \ 357#define RDIF_REG_STOP_ON_ERROR \
358 0x300040UL 358 0x300040UL
359#define RDIF_REG_DEBUG_ERROR_INFO \
360 0x300400UL
361#define RDIF_REG_DEBUG_ERROR_INFO_SIZE \
362 64
359#define SRC_REG_SOFT_RST \ 363#define SRC_REG_SOFT_RST \
360 0x23874cUL 364 0x23874cUL
361#define TCFC_REG_ACTIVITY_COUNTER \ 365#define TCFC_REG_ACTIVITY_COUNTER \
@@ -370,6 +374,10 @@
370 0x1700004UL 374 0x1700004UL
371#define TDIF_REG_STOP_ON_ERROR \ 375#define TDIF_REG_STOP_ON_ERROR \
372 0x310040UL 376 0x310040UL
377#define TDIF_REG_DEBUG_ERROR_INFO \
378 0x310400UL
379#define TDIF_REG_DEBUG_ERROR_INFO_SIZE \
380 64
373#define UCM_REG_INIT \ 381#define UCM_REG_INIT \
374 0x1280000UL 382 0x1280000UL
375#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \ 383#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
@@ -1236,6 +1244,26 @@
1236 0x1901534UL 1244 0x1901534UL
1237#define USEM_REG_DBG_FORCE_FRAME \ 1245#define USEM_REG_DBG_FORCE_FRAME \
1238 0x1901538UL 1246 0x1901538UL
1247#define NWS_REG_DBG_SELECT \
1248 0x700128UL
1249#define NWS_REG_DBG_DWORD_ENABLE \
1250 0x70012cUL
1251#define NWS_REG_DBG_SHIFT \
1252 0x700130UL
1253#define NWS_REG_DBG_FORCE_VALID \
1254 0x700134UL
1255#define NWS_REG_DBG_FORCE_FRAME \
1256 0x700138UL
1257#define MS_REG_DBG_SELECT \
1258 0x6a0228UL
1259#define MS_REG_DBG_DWORD_ENABLE \
1260 0x6a022cUL
1261#define MS_REG_DBG_SHIFT \
1262 0x6a0230UL
1263#define MS_REG_DBG_FORCE_VALID \
1264 0x6a0234UL
1265#define MS_REG_DBG_FORCE_FRAME \
1266 0x6a0238UL
1239#define PCIE_REG_DBG_COMMON_SELECT \ 1267#define PCIE_REG_DBG_COMMON_SELECT \
1240 0x054398UL 1268 0x054398UL
1241#define PCIE_REG_DBG_COMMON_DWORD_ENABLE \ 1269#define PCIE_REG_DBG_COMMON_DWORD_ENABLE \
@@ -1448,6 +1476,8 @@
1448 0x000b48UL 1476 0x000b48UL
1449#define RSS_REG_RSS_RAM_DATA \ 1477#define RSS_REG_RSS_RAM_DATA \
1450 0x238c20UL 1478 0x238c20UL
1479#define RSS_REG_RSS_RAM_DATA_SIZE \
1480 4
1451#define MISC_REG_BLOCK_256B_EN \ 1481#define MISC_REG_BLOCK_256B_EN \
1452 0x008c14UL 1482 0x008c14UL
1453#define NWS_REG_NWS_CMU \ 1483#define NWS_REG_NWS_CMU \
@@ -1520,4 +1550,13 @@
1520#define NIG_REG_TIMESYNC_GEN_REG_BB 0x500d00UL 1550#define NIG_REG_TIMESYNC_GEN_REG_BB 0x500d00UL
1521#define NIG_REG_TSGEN_FREE_CNT_VALUE_LSB 0x5088a8UL 1551#define NIG_REG_TSGEN_FREE_CNT_VALUE_LSB 0x5088a8UL
1522#define NIG_REG_TSGEN_FREE_CNT_VALUE_MSB 0x5088acUL 1552#define NIG_REG_TSGEN_FREE_CNT_VALUE_MSB 0x5088acUL
1553#define NIG_REG_PTP_LATCH_OSTS_PKT_TIME 0x509040UL
1554
1555#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2 0x2aaf98UL
1556#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2 0x2aaf9cUL
1557#define PGLUE_B_REG_PGL_ADDR_F0_F0_K2 0x2aafa0UL
1558#define PGLUE_B_REG_PGL_ADDR_F4_F0_K2 0x2aafa4UL
1559#define NIG_REG_TSGEN_FREECNT_UPDATE_K2 0x509008UL
1560#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL
1561
1523#endif 1562#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index d9ff6b28591c..4bef5c59627c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -66,13 +66,27 @@
66#include "qed_roce.h" 66#include "qed_roce.h"
67#include "qed_ll2.h" 67#include "qed_ll2.h"
68 68
69void qed_async_roce_event(struct qed_hwfn *p_hwfn, 69static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
70 struct event_ring_entry *p_eqe) 70
71void qed_roce_async_event(struct qed_hwfn *p_hwfn,
72 u8 fw_event_code, union rdma_eqe_data *rdma_data)
71{ 73{
72 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; 74 if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
75 u16 icid =
76 (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid);
77
78 /* icid release in this async event can occur only if the icid
79 * was offloaded to the FW. In case it wasn't offloaded this is
80 * handled in qed_roce_sp_destroy_qp.
81 */
82 qed_roce_free_real_icid(p_hwfn, icid);
83 } else {
84 struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events;
73 85
74 p_rdma_info->events.affiliated_event(p_rdma_info->events.context, 86 events->affiliated_event(p_hwfn->p_rdma_info->events.context,
75 p_eqe->opcode, &p_eqe->data); 87 fw_event_code,
88 &rdma_data->async_handle);
89 }
76} 90}
77 91
78static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn, 92static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
@@ -113,6 +127,15 @@ static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
113 return 0; 127 return 0;
114} 128}
115 129
130static void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
131 struct qed_bmap *bmap, u32 id_num)
132{
133 if (id_num >= bmap->max_count)
134 return;
135
136 __set_bit(id_num, bmap->bitmap);
137}
138
116static void qed_bmap_release_id(struct qed_hwfn *p_hwfn, 139static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
117 struct qed_bmap *bmap, u32 id_num) 140 struct qed_bmap *bmap, u32 id_num)
118{ 141{
@@ -129,6 +152,15 @@ static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
129 } 152 }
130} 153}
131 154
155static int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
156 struct qed_bmap *bmap, u32 id_num)
157{
158 if (id_num >= bmap->max_count)
159 return -1;
160
161 return test_bit(id_num, bmap->bitmap);
162}
163
132static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) 164static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
133{ 165{
134 /* First sb id for RoCE is after all the l2 sb */ 166 /* First sb id for RoCE is after all the l2 sb */
@@ -170,7 +202,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
170 /* Queue zone lines are shared between RoCE and L2 in such a way that 202 /* Queue zone lines are shared between RoCE and L2 in such a way that
171 * they can be used by each without obstructing the other. 203 * they can be used by each without obstructing the other.
172 */ 204 */
173 p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE); 205 p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
206 p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
174 207
175 /* Allocate a struct with device params and fill it */ 208 /* Allocate a struct with device params and fill it */
176 p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL); 209 p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
@@ -248,9 +281,18 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
248 goto free_tid_map; 281 goto free_tid_map;
249 } 282 }
250 283
284 /* Allocate bitmap for cids used for responders/requesters. */
285 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons);
286 if (rc) {
287 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
288 "Failed to allocate real cid bitmap, rc = %d\n", rc);
289 goto free_cid_map;
290 }
251 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n"); 291 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
252 return 0; 292 return 0;
253 293
294free_cid_map:
295 kfree(p_rdma_info->cid_map.bitmap);
254free_tid_map: 296free_tid_map:
255 kfree(p_rdma_info->tid_map.bitmap); 297 kfree(p_rdma_info->tid_map.bitmap);
256free_toggle_map: 298free_toggle_map:
@@ -273,7 +315,22 @@ free_rdma_info:
273 315
274static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) 316static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
275{ 317{
318 struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
276 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; 319 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
320 int wait_count = 0;
321
322 /* when destroying a RoCE QP the control is returned to the user after
323 * the synchronous part. The asynchronous part may take a little longer.
324 * We delay for a short while if an async destroy QP is still expected.
325 * Beyond the added delay we clear the bitmap anyway.
326 */
327 while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
328 msleep(100);
329 if (wait_count++ > 20) {
330 DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
331 break;
332 }
333 }
277 334
278 kfree(p_rdma_info->cid_map.bitmap); 335 kfree(p_rdma_info->cid_map.bitmap);
279 kfree(p_rdma_info->tid_map.bitmap); 336 kfree(p_rdma_info->tid_map.bitmap);
@@ -724,6 +781,14 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
724 u32 addr; 781 u32 addr;
725 782
726 p_hwfn = (struct qed_hwfn *)rdma_cxt; 783 p_hwfn = (struct qed_hwfn *)rdma_cxt;
784
785 if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
786 DP_NOTICE(p_hwfn,
787 "queue zone offset %d is too large (max is %d)\n",
788 qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
789 return;
790 }
791
727 qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset; 792 qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
728 addr = GTT_BAR0_MAP_REG_USDM_RAM + 793 addr = GTT_BAR0_MAP_REG_USDM_RAM +
729 USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num); 794 USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
@@ -1080,6 +1145,14 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
1080 return flavor; 1145 return flavor;
1081} 1146}
1082 1147
1148void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
1149{
1150 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1151 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
1152 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
1153 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1154}
1155
1083static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid) 1156static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
1084{ 1157{
1085 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; 1158 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@@ -1139,6 +1212,13 @@ err:
1139 return rc; 1212 return rc;
1140} 1213}
1141 1214
1215static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
1216{
1217 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1218 qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
1219 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1220}
1221
1142static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, 1222static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
1143 struct qed_rdma_qp *qp) 1223 struct qed_rdma_qp *qp)
1144{ 1224{
@@ -1147,7 +1227,8 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
1147 union qed_qm_pq_params qm_params; 1227 union qed_qm_pq_params qm_params;
1148 enum roce_flavor roce_flavor; 1228 enum roce_flavor roce_flavor;
1149 struct qed_spq_entry *p_ent; 1229 struct qed_spq_entry *p_ent;
1150 u16 physical_queue0 = 0; 1230 u16 regular_latency_queue;
1231 enum protocol_type proto;
1151 int rc; 1232 int rc;
1152 1233
1153 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 1234 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -1229,15 +1310,19 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
1229 p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo); 1310 p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
1230 p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); 1311 p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
1231 p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); 1312 p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
1232 p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
1233 p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | 1313 p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
1234 qp->rq_cq_id); 1314 qp->rq_cq_id);
1235 1315
1236 memset(&qm_params, 0, sizeof(qm_params)); 1316 memset(&qm_params, 0, sizeof(qm_params));
1237 qm_params.roce.qpid = qp->icid >> 1; 1317 qm_params.roce.qpid = qp->icid >> 1;
1238 physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params); 1318 regular_latency_queue = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE,
1319 &qm_params);
1320
1321 p_ramrod->regular_latency_phy_queue =
1322 cpu_to_le16(regular_latency_queue);
1323 p_ramrod->low_latency_phy_queue =
1324 cpu_to_le16(regular_latency_queue);
1239 1325
1240 p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
1241 p_ramrod->dpi = cpu_to_le16(qp->dpi); 1326 p_ramrod->dpi = cpu_to_le16(qp->dpi);
1242 1327
1243 qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr); 1328 qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@@ -1253,13 +1338,19 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
1253 1338
1254 rc = qed_spq_post(p_hwfn, p_ent, NULL); 1339 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1255 1340
1256 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n", 1341 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1257 rc, physical_queue0); 1342 "rc = %d regular physical queue = 0x%x\n", rc,
1343 regular_latency_queue);
1258 1344
1259 if (rc) 1345 if (rc)
1260 goto err; 1346 goto err;
1261 1347
1262 qp->resp_offloaded = true; 1348 qp->resp_offloaded = true;
1349 qp->cq_prod = 0;
1350
1351 proto = p_hwfn->p_rdma_info->proto;
1352 qed_roce_set_real_cid(p_hwfn, qp->icid -
1353 qed_cxt_get_proto_cid_start(p_hwfn, proto));
1263 1354
1264 return rc; 1355 return rc;
1265 1356
@@ -1280,7 +1371,8 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
1280 union qed_qm_pq_params qm_params; 1371 union qed_qm_pq_params qm_params;
1281 enum roce_flavor roce_flavor; 1372 enum roce_flavor roce_flavor;
1282 struct qed_spq_entry *p_ent; 1373 struct qed_spq_entry *p_ent;
1283 u16 physical_queue0 = 0; 1374 u16 regular_latency_queue;
1375 enum protocol_type proto;
1284 int rc; 1376 int rc;
1285 1377
1286 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 1378 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -1351,15 +1443,19 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
1351 p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo); 1443 p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
1352 p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); 1444 p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
1353 p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); 1445 p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
1354 p_ramrod->stats_counter_id = p_hwfn->rel_pf_id; 1446 p_ramrod->cq_cid =
1355 p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | 1447 cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
1356 qp->sq_cq_id);
1357 1448
1358 memset(&qm_params, 0, sizeof(qm_params)); 1449 memset(&qm_params, 0, sizeof(qm_params));
1359 qm_params.roce.qpid = qp->icid >> 1; 1450 qm_params.roce.qpid = qp->icid >> 1;
1360 physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params); 1451 regular_latency_queue = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE,
1452 &qm_params);
1453
1454 p_ramrod->regular_latency_phy_queue =
1455 cpu_to_le16(regular_latency_queue);
1456 p_ramrod->low_latency_phy_queue =
1457 cpu_to_le16(regular_latency_queue);
1361 1458
1362 p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
1363 p_ramrod->dpi = cpu_to_le16(qp->dpi); 1459 p_ramrod->dpi = cpu_to_le16(qp->dpi);
1364 1460
1365 qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr); 1461 qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@@ -1378,6 +1474,10 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
1378 goto err; 1474 goto err;
1379 1475
1380 qp->req_offloaded = true; 1476 qp->req_offloaded = true;
1477 proto = p_hwfn->p_rdma_info->proto;
1478 qed_roce_set_real_cid(p_hwfn,
1479 qp->icid + 1 -
1480 qed_cxt_get_proto_cid_start(p_hwfn, proto));
1381 1481
1382 return rc; 1482 return rc;
1383 1483
@@ -1577,7 +1677,8 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
1577 1677
1578static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, 1678static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
1579 struct qed_rdma_qp *qp, 1679 struct qed_rdma_qp *qp,
1580 u32 *num_invalidated_mw) 1680 u32 *num_invalidated_mw,
1681 u32 *cq_prod)
1581{ 1682{
1582 struct roce_destroy_qp_resp_output_params *p_ramrod_res; 1683 struct roce_destroy_qp_resp_output_params *p_ramrod_res;
1583 struct roce_destroy_qp_resp_ramrod_data *p_ramrod; 1684 struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
@@ -1588,8 +1689,22 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
1588 1689
1589 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 1690 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1590 1691
1591 if (!qp->resp_offloaded) 1692 *num_invalidated_mw = 0;
1693 *cq_prod = qp->cq_prod;
1694
1695 if (!qp->resp_offloaded) {
1696 /* If a responder was never offloaded, we need to free the cids
1697 * allocated in create_qp, as a FW async event will never arrive.
1698 */
1699 u32 cid;
1700
1701 cid = qp->icid -
1702 qed_cxt_get_proto_cid_start(p_hwfn,
1703 p_hwfn->p_rdma_info->proto);
1704 qed_roce_free_cid_pair(p_hwfn, (u16)cid);
1705
1592 return 0; 1706 return 0;
1707 }
1593 1708
1594 /* Get SPQ entry */ 1709 /* Get SPQ entry */
1595 memset(&init_data, 0, sizeof(init_data)); 1710 memset(&init_data, 0, sizeof(init_data));
@@ -1624,6 +1739,8 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
1624 goto err; 1739 goto err;
1625 1740
1626 *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw); 1741 *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
1742 *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
1743 qp->cq_prod = *cq_prod;
1627 1744
1628 /* Free IRQ - only if ramrod succeeded, in case FW is still using it */ 1745 /* Free IRQ - only if ramrod succeeded, in case FW is still using it */
1629 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 1746 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
@@ -1827,10 +1944,8 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
1827 1944
1828 out_params->draining = false; 1945 out_params->draining = false;
1829 1946
1830 if (rq_err_state) 1947 if (rq_err_state || sq_err_state)
1831 qp->cur_state = QED_ROCE_QP_STATE_ERR; 1948 qp->cur_state = QED_ROCE_QP_STATE_ERR;
1832 else if (sq_err_state)
1833 qp->cur_state = QED_ROCE_QP_STATE_SQE;
1834 else if (sq_draining) 1949 else if (sq_draining)
1835 out_params->draining = true; 1950 out_params->draining = true;
1836 out_params->state = qp->cur_state; 1951 out_params->state = qp->cur_state;
@@ -1849,10 +1964,9 @@ err_resp:
1849 1964
1850static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) 1965static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
1851{ 1966{
1852 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
1853 u32 num_invalidated_mw = 0; 1967 u32 num_invalidated_mw = 0;
1854 u32 num_bound_mw = 0; 1968 u32 num_bound_mw = 0;
1855 u32 start_cid; 1969 u32 cq_prod;
1856 int rc; 1970 int rc;
1857 1971
1858 /* Destroys the specified QP */ 1972 /* Destroys the specified QP */
@@ -1866,7 +1980,8 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
1866 1980
1867 if (qp->cur_state != QED_ROCE_QP_STATE_RESET) { 1981 if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
1868 rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, 1982 rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
1869 &num_invalidated_mw); 1983 &num_invalidated_mw,
1984 &cq_prod);
1870 if (rc) 1985 if (rc)
1871 return rc; 1986 return rc;
1872 1987
@@ -1881,21 +1996,6 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
1881 "number of invalidate memory windows is different from bounded ones\n"); 1996 "number of invalidate memory windows is different from bounded ones\n");
1882 return -EINVAL; 1997 return -EINVAL;
1883 } 1998 }
1884
1885 spin_lock_bh(&p_rdma_info->lock);
1886
1887 start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
1888 p_rdma_info->proto);
1889
1890 /* Release responder's icid */
1891 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
1892 qp->icid - start_cid);
1893
1894 /* Release requester's icid */
1895 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
1896 qp->icid + 1 - start_cid);
1897
1898 spin_unlock_bh(&p_rdma_info->lock);
1899 } 1999 }
1900 2000
1901 return 0; 2001 return 0;
@@ -2110,12 +2210,19 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
2110 return rc; 2210 return rc;
2111 } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) { 2211 } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
2112 /* Any state -> RESET */ 2212 /* Any state -> RESET */
2213 u32 cq_prod;
2214
2215 /* Send destroy responder ramrod */
2216 rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
2217 qp,
2218 &num_invalidated_mw,
2219 &cq_prod);
2113 2220
2114 rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
2115 &num_invalidated_mw);
2116 if (rc) 2221 if (rc)
2117 return rc; 2222 return rc;
2118 2223
2224 qp->cq_prod = cq_prod;
2225
2119 rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, 2226 rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
2120 &num_bound_mw); 2227 &num_bound_mw);
2121 2228
@@ -2454,6 +2561,31 @@ static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
2454 return rc; 2561 return rc;
2455} 2562}
2456 2563
2564static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
2565{
2566 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
2567 u32 start_cid, cid, xcid;
2568
2569 /* an even icid belongs to a responder while an odd icid belongs to a
2570 * requester. The 'cid' received as an input can be either. We calculate
2571 * the "partner" icid and call it xcid. Only if both are free can the
2572 * "cid" map be cleared.
2573 */
2574 start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
2575 cid = icid - start_cid;
2576 xcid = cid ^ 1;
2577
2578 spin_lock_bh(&p_rdma_info->lock);
2579
2580 qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
2581 if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
2582 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
2583 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
2584 }
2585
2586 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
2587}
2588
2457static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev) 2589static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
2458{ 2590{
2459 return QED_LEADING_HWFN(cdev); 2591 return QED_LEADING_HWFN(cdev);
@@ -2773,7 +2905,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
2773 : QED_LL2_RROCE; 2905 : QED_LL2_RROCE;
2774 2906
2775 if (pkt->roce_mode == ROCE_V2_IPV4) 2907 if (pkt->roce_mode == ROCE_V2_IPV4)
2776 flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT); 2908 flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
2777 2909
2778 /* Tx header */ 2910 /* Tx header */
2779 rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle, 2911 rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
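The hunks above introduce a second bitmap (real_cid_map) so that a responder/requester icid pair is only recycled once the firmware has released both halves. Below is a minimal userspace model of that scheme, assuming a toy boolean bitmap; it is not the driver's qed_bmap API.

#include <stdio.h>
#include <stdbool.h>

#define MAX_CIDS 64

static bool cid_map[MAX_CIDS];      /* allocated responder/requester cid pairs */
static bool real_cid_map[MAX_CIDS]; /* halves currently "offloaded to FW"      */

static void set_real_cid(unsigned int cid)
{
	real_cid_map[cid] = true;
}

/* Same idea as qed_roce_free_real_icid(): release one half, and only free the
 * pair in cid_map once the partner (cid ^ 1) has been released as well.
 */
static void free_real_cid(unsigned int cid)
{
	unsigned int xcid = cid ^ 1;

	real_cid_map[cid] = false;
	if (!real_cid_map[xcid]) {
		cid_map[cid] = false;
		cid_map[xcid] = false;
		printf("pair %u/%u fully released\n", cid & ~1u, cid | 1u);
	} else {
		printf("cid %u released, still waiting for partner %u\n", cid, xcid);
	}
}

int main(void)
{
	/* "create QP": responder takes even cid 10, requester takes odd cid 11 */
	cid_map[10] = cid_map[11] = true;
	set_real_cid(10);
	set_real_cid(11);

	/* destroy-done events may arrive in either order */
	free_real_cid(11);
	free_real_cid(10);
	return 0;
}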
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index 36cf4b2ab7fa..3ccc08a7c995 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -82,6 +82,7 @@ struct qed_rdma_info {
82 struct qed_bmap qp_map; 82 struct qed_bmap qp_map;
83 struct qed_bmap srq_map; 83 struct qed_bmap srq_map;
84 struct qed_bmap cid_map; 84 struct qed_bmap cid_map;
85 struct qed_bmap real_cid_map;
85 struct qed_bmap dpi_map; 86 struct qed_bmap dpi_map;
86 struct qed_bmap toggle_bits; 87 struct qed_bmap toggle_bits;
87 struct qed_rdma_events events; 88 struct qed_rdma_events events;
@@ -92,6 +93,7 @@ struct qed_rdma_info {
92 u32 num_qps; 93 u32 num_qps;
93 u32 num_mrs; 94 u32 num_mrs;
94 u16 queue_zone_base; 95 u16 queue_zone_base;
96 u16 max_queue_zones;
95 enum protocol_type proto; 97 enum protocol_type proto;
96}; 98};
97 99
@@ -153,6 +155,7 @@ struct qed_rdma_qp {
153 dma_addr_t irq_phys_addr; 155 dma_addr_t irq_phys_addr;
154 u8 irq_num_pages; 156 u8 irq_num_pages;
155 bool resp_offloaded; 157 bool resp_offloaded;
158 u32 cq_prod;
156 159
157 u8 remote_mac_addr[6]; 160 u8 remote_mac_addr[6];
158 u8 local_mac_addr[6]; 161 u8 local_mac_addr[6];
@@ -163,8 +166,8 @@ struct qed_rdma_qp {
163 166
164#if IS_ENABLED(CONFIG_QED_RDMA) 167#if IS_ENABLED(CONFIG_QED_RDMA)
165void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); 168void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
166void qed_async_roce_event(struct qed_hwfn *p_hwfn, 169void qed_roce_async_event(struct qed_hwfn *p_hwfn,
167 struct event_ring_entry *p_eqe); 170 u8 fw_event_code, union rdma_eqe_data *rdma_data);
168void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn, 171void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
169 u8 connection_handle, 172 u8 connection_handle,
170 void *cookie, 173 void *cookie,
@@ -187,7 +190,9 @@ void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
187 u16 src_mac_addr_lo, bool b_last_packet); 190 u16 src_mac_addr_lo, bool b_last_packet);
188#else 191#else
189static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} 192static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
190static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {} 193static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn,
194 u8 fw_event_code,
195 union rdma_eqe_data *rdma_data) {}
191static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn, 196static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
192 u8 connection_handle, 197 u8 connection_handle,
193 void *cookie, 198 void *cookie,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 645328a9f0cf..54fbe3789cf3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -296,9 +296,12 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
296 struct event_ring_entry *p_eqe) 296 struct event_ring_entry *p_eqe)
297{ 297{
298 switch (p_eqe->protocol_id) { 298 switch (p_eqe->protocol_id) {
299#if IS_ENABLED(CONFIG_QED_RDMA)
299 case PROTOCOLID_ROCE: 300 case PROTOCOLID_ROCE:
300 qed_async_roce_event(p_hwfn, p_eqe); 301 qed_roce_async_event(p_hwfn, p_eqe->opcode,
302 &p_eqe->data.rdma_data);
301 return 0; 303 return 0;
304#endif
302 case PROTOCOLID_COMMON: 305 case PROTOCOLID_COMMON:
303 return qed_sriov_eqe_event(p_hwfn, 306 return qed_sriov_eqe_event(p_hwfn,
304 p_eqe->opcode, 307 p_eqe->opcode,
@@ -306,14 +309,6 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
306 case PROTOCOLID_ISCSI: 309 case PROTOCOLID_ISCSI:
307 if (!IS_ENABLED(CONFIG_QED_ISCSI)) 310 if (!IS_ENABLED(CONFIG_QED_ISCSI))
308 return -EINVAL; 311 return -EINVAL;
309 if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
310 u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);
311
312 qed_ooo_release_connection_isles(p_hwfn,
313 p_hwfn->p_ooo_info,
314 cid);
315 return 0;
316 }
317 312
318 if (p_hwfn->p_iscsi_info->event_cb) { 313 if (p_hwfn->p_iscsi_info->event_cb) {
319 struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info; 314 struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
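For reference, a compile-time-guarded dispatch in the same shape as the qed_async_event_completion() change above: the RoCE branch only exists when RDMA support is built in. Everything in this sketch (config macro, handlers) is made up for illustration.

#include <stdio.h>

#define DEMO_CONFIG_RDMA 1	/* stands in for CONFIG_QED_RDMA */

enum demo_proto { DEMO_PROTO_ROCE, DEMO_PROTO_COMMON, DEMO_PROTO_ISCSI };

static int demo_roce_event(unsigned char op)   { printf("roce event %d\n", op);   return 0; }
static int demo_common_event(unsigned char op) { printf("common event %d\n", op); return 0; }
static int demo_iscsi_event(unsigned char op)  { printf("iscsi event %d\n", op);  return 0; }

static int demo_async_event(enum demo_proto proto, unsigned char opcode)
{
	switch (proto) {
#if DEMO_CONFIG_RDMA
	case DEMO_PROTO_ROCE:
		return demo_roce_event(opcode);
#endif
	case DEMO_PROTO_COMMON:
		return demo_common_event(opcode);
	case DEMO_PROTO_ISCSI:
		return demo_iscsi_event(opcode);
	default:
		return -1;	/* unknown protocol id */
	}
}

int main(void)
{
	demo_async_event(DEMO_PROTO_ROCE, 3);
	demo_async_event(DEMO_PROTO_ISCSI, 7);
	return 0;
}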
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 253c2bbe1e4e..16f503c9b0af 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -557,14 +557,30 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
557 return 0; 557 return 0;
558 } 558 }
559 559
560 /* Calculate the first VF index - this is a bit tricky; Basically, 560 /* First VF index based on offset is tricky:
561 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin 561 * - If ARI is supported [likely], offset - (16 - pf_id) would
562 * after the first engine's VFs. 562 * provide the number for eng0. 2nd engine VFs would begin
563 * after the first engine's VFs.
564 * - If !ARI, VFs would start on the next device,
565 * so offset - (256 - pf_id) would provide the number.
566 * Utilize the fact that (256 - pf_id) is achieved only by the latter
567 * to differentiate between the two.
563 */ 568 */
564 cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset + 569
565 p_hwfn->abs_pf_id - 16; 570 if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
566 if (QED_PATH_ID(p_hwfn)) 571 u32 first = p_hwfn->cdev->p_iov_info->offset +
567 cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB; 572 p_hwfn->abs_pf_id - 16;
573
574 cdev->p_iov_info->first_vf_in_pf = first;
575
576 if (QED_PATH_ID(p_hwfn))
577 cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
578 } else {
579 u32 first = p_hwfn->cdev->p_iov_info->offset +
580 p_hwfn->abs_pf_id - 256;
581
582 cdev->p_iov_info->first_vf_in_pf = first;
583 }
568 584
569 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 585 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
570 "First VF in hwfn 0x%08x\n", 586 "First VF in hwfn 0x%08x\n",
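A small standalone sketch of the first-VF-index calculation added above, under the assumption that an offset below (256 - pf_id) means ARI is available (VFs sit 16 functions above PF0 on the same device) and anything else means the VFs start on the next device. Constants and the path-id handling are simplified.

#include <stdio.h>

#define MAX_NUM_VFS_BB 240

static unsigned int first_vf_in_pf(unsigned int vf_offset,
				   unsigned int abs_pf_id,
				   unsigned int path_id)
{
	unsigned int first;

	if (vf_offset < 256 - abs_pf_id) {
		first = vf_offset + abs_pf_id - 16;	/* ARI case */
		if (path_id)
			first -= MAX_NUM_VFS_BB;	/* 2nd engine starts after eng0's VFs */
	} else {
		first = vf_offset + abs_pf_id - 256;	/* !ARI: VFs start on next device */
	}

	return first;
}

int main(void)
{
	printf("ARI,  pf 2, offset 16  -> first VF %u\n", first_vf_in_pf(16, 2, 0));
	printf("!ARI, pf 2, offset 254 -> first VF %u\n", first_vf_in_pf(254, 2, 0));
	return 0;
}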
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index f2aaef2cfb86..e73a4a5165ee 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -50,7 +50,7 @@
50#define QEDE_MAJOR_VERSION 8 50#define QEDE_MAJOR_VERSION 8
51#define QEDE_MINOR_VERSION 10 51#define QEDE_MINOR_VERSION 10
52#define QEDE_REVISION_VERSION 10 52#define QEDE_REVISION_VERSION 10
53#define QEDE_ENGINEERING_VERSION 20 53#define QEDE_ENGINEERING_VERSION 21
54#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ 54#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
55 __stringify(QEDE_MINOR_VERSION) "." \ 55 __stringify(QEDE_MINOR_VERSION) "." \
56 __stringify(QEDE_REVISION_VERSION) "." \ 56 __stringify(QEDE_REVISION_VERSION) "." \
@@ -58,7 +58,7 @@
58 58
59#define DRV_MODULE_SYM qede 59#define DRV_MODULE_SYM qede
60 60
61struct qede_stats { 61struct qede_stats_common {
62 u64 no_buff_discards; 62 u64 no_buff_discards;
63 u64 packet_too_big_discard; 63 u64 packet_too_big_discard;
64 u64 ttl0_discard; 64 u64 ttl0_discard;
@@ -90,11 +90,6 @@ struct qede_stats {
90 u64 rx_256_to_511_byte_packets; 90 u64 rx_256_to_511_byte_packets;
91 u64 rx_512_to_1023_byte_packets; 91 u64 rx_512_to_1023_byte_packets;
92 u64 rx_1024_to_1518_byte_packets; 92 u64 rx_1024_to_1518_byte_packets;
93 u64 rx_1519_to_1522_byte_packets;
94 u64 rx_1519_to_2047_byte_packets;
95 u64 rx_2048_to_4095_byte_packets;
96 u64 rx_4096_to_9216_byte_packets;
97 u64 rx_9217_to_16383_byte_packets;
98 u64 rx_crc_errors; 93 u64 rx_crc_errors;
99 u64 rx_mac_crtl_frames; 94 u64 rx_mac_crtl_frames;
100 u64 rx_pause_frames; 95 u64 rx_pause_frames;
@@ -111,17 +106,39 @@ struct qede_stats {
111 u64 tx_256_to_511_byte_packets; 106 u64 tx_256_to_511_byte_packets;
112 u64 tx_512_to_1023_byte_packets; 107 u64 tx_512_to_1023_byte_packets;
113 u64 tx_1024_to_1518_byte_packets; 108 u64 tx_1024_to_1518_byte_packets;
109 u64 tx_pause_frames;
110 u64 tx_pfc_frames;
111 u64 brb_truncates;
112 u64 brb_discards;
113 u64 tx_mac_ctrl_frames;
114};
115
116struct qede_stats_bb {
117 u64 rx_1519_to_1522_byte_packets;
118 u64 rx_1519_to_2047_byte_packets;
119 u64 rx_2048_to_4095_byte_packets;
120 u64 rx_4096_to_9216_byte_packets;
121 u64 rx_9217_to_16383_byte_packets;
114 u64 tx_1519_to_2047_byte_packets; 122 u64 tx_1519_to_2047_byte_packets;
115 u64 tx_2048_to_4095_byte_packets; 123 u64 tx_2048_to_4095_byte_packets;
116 u64 tx_4096_to_9216_byte_packets; 124 u64 tx_4096_to_9216_byte_packets;
117 u64 tx_9217_to_16383_byte_packets; 125 u64 tx_9217_to_16383_byte_packets;
118 u64 tx_pause_frames;
119 u64 tx_pfc_frames;
120 u64 tx_lpi_entry_count; 126 u64 tx_lpi_entry_count;
121 u64 tx_total_collisions; 127 u64 tx_total_collisions;
122 u64 brb_truncates; 128};
123 u64 brb_discards; 129
124 u64 tx_mac_ctrl_frames; 130struct qede_stats_ah {
131 u64 rx_1519_to_max_byte_packets;
132 u64 tx_1519_to_max_byte_packets;
133};
134
135struct qede_stats {
136 struct qede_stats_common common;
137
138 union {
139 struct qede_stats_bb bb;
140 struct qede_stats_ah ah;
141 };
125}; 142};
126 143
127struct qede_vlan { 144struct qede_vlan {
@@ -158,6 +175,10 @@ struct qede_dev {
158 struct qed_dev_eth_info dev_info; 175 struct qed_dev_eth_info dev_info;
159#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues) 176#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
160#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues) 177#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues)
178#define QEDE_IS_BB(edev) \
179 ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB)
180#define QEDE_IS_AH(edev) \
181 ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)
161 182
162 struct qede_fastpath *fp_array; 183 struct qede_fastpath *fp_array;
163 u8 req_num_tx; 184 u8 req_num_tx;
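The qede_stats split above keeps family-independent counters in a common block and lets the BB- and AH-specific counters share storage through a union. A compact model (not the real structs, field list trimmed) of that layout:

#include <stdio.h>
#include <stdint.h>

struct demo_stats_common { uint64_t rx_ucast_pkts, tx_ucast_pkts; };
struct demo_stats_bb     { uint64_t tx_total_collisions, tx_lpi_entry_count; };
struct demo_stats_ah     { uint64_t rx_1519_to_max_byte_packets; };

struct demo_stats {
	struct demo_stats_common common;
	union {				/* only one family is active per device */
		struct demo_stats_bb bb;
		struct demo_stats_ah ah;
	};
};

int main(void)
{
	struct demo_stats s = { .common = { .rx_ucast_pkts = 5 } };

	s.bb.tx_total_collisions = 2;	/* meaningful only on a BB-type device */
	printf("sizeof(struct demo_stats) = %zu; the union reserves only the larger variant\n",
	       sizeof(s));
	printf("rx_ucast=%llu collisions=%llu\n",
	       (unsigned long long)s.common.rx_ucast_pkts,
	       (unsigned long long)s.bb.tx_total_collisions);
	return 0;
}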
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 897953133245..4dcfe9614731 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -75,16 +75,33 @@ static const struct {
75 QEDE_TQSTAT(stopped_cnt), 75 QEDE_TQSTAT(stopped_cnt),
76}; 76};
77 77
78#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name)) 78#define QEDE_STAT_OFFSET(stat_name, type, base) \
79#define QEDE_STAT_STRING(stat_name) (#stat_name) 79 (offsetof(type, stat_name) + (base))
80#define _QEDE_STAT(stat_name, pf_only) \ 80#define QEDE_STAT_STRING(stat_name) (#stat_name)
81 {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only} 81#define _QEDE_STAT(stat_name, type, base, attr) \
82#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true) 82 {QEDE_STAT_OFFSET(stat_name, type, base), \
83#define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false) 83 QEDE_STAT_STRING(stat_name), \
84 attr}
85#define QEDE_STAT(stat_name) \
86 _QEDE_STAT(stat_name, struct qede_stats_common, 0, 0x0)
87#define QEDE_PF_STAT(stat_name) \
88 _QEDE_STAT(stat_name, struct qede_stats_common, 0, \
89 BIT(QEDE_STAT_PF_ONLY))
90#define QEDE_PF_BB_STAT(stat_name) \
91 _QEDE_STAT(stat_name, struct qede_stats_bb, \
92 offsetof(struct qede_stats, bb), \
93 BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_BB_ONLY))
94#define QEDE_PF_AH_STAT(stat_name) \
95 _QEDE_STAT(stat_name, struct qede_stats_ah, \
96 offsetof(struct qede_stats, ah), \
97 BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_AH_ONLY))
84static const struct { 98static const struct {
85 u64 offset; 99 u64 offset;
86 char string[ETH_GSTRING_LEN]; 100 char string[ETH_GSTRING_LEN];
87 bool pf_only; 101 unsigned long attr;
102#define QEDE_STAT_PF_ONLY 0
103#define QEDE_STAT_BB_ONLY 1
104#define QEDE_STAT_AH_ONLY 2
88} qede_stats_arr[] = { 105} qede_stats_arr[] = {
89 QEDE_STAT(rx_ucast_bytes), 106 QEDE_STAT(rx_ucast_bytes),
90 QEDE_STAT(rx_mcast_bytes), 107 QEDE_STAT(rx_mcast_bytes),
@@ -106,22 +123,23 @@ static const struct {
106 QEDE_PF_STAT(rx_256_to_511_byte_packets), 123 QEDE_PF_STAT(rx_256_to_511_byte_packets),
107 QEDE_PF_STAT(rx_512_to_1023_byte_packets), 124 QEDE_PF_STAT(rx_512_to_1023_byte_packets),
108 QEDE_PF_STAT(rx_1024_to_1518_byte_packets), 125 QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
109 QEDE_PF_STAT(rx_1519_to_1522_byte_packets), 126 QEDE_PF_BB_STAT(rx_1519_to_1522_byte_packets),
110 QEDE_PF_STAT(rx_1519_to_2047_byte_packets), 127 QEDE_PF_BB_STAT(rx_1519_to_2047_byte_packets),
111 QEDE_PF_STAT(rx_2048_to_4095_byte_packets), 128 QEDE_PF_BB_STAT(rx_2048_to_4095_byte_packets),
112 QEDE_PF_STAT(rx_4096_to_9216_byte_packets), 129 QEDE_PF_BB_STAT(rx_4096_to_9216_byte_packets),
113 QEDE_PF_STAT(rx_9217_to_16383_byte_packets), 130 QEDE_PF_BB_STAT(rx_9217_to_16383_byte_packets),
131 QEDE_PF_AH_STAT(rx_1519_to_max_byte_packets),
114 QEDE_PF_STAT(tx_64_byte_packets), 132 QEDE_PF_STAT(tx_64_byte_packets),
115 QEDE_PF_STAT(tx_65_to_127_byte_packets), 133 QEDE_PF_STAT(tx_65_to_127_byte_packets),
116 QEDE_PF_STAT(tx_128_to_255_byte_packets), 134 QEDE_PF_STAT(tx_128_to_255_byte_packets),
117 QEDE_PF_STAT(tx_256_to_511_byte_packets), 135 QEDE_PF_STAT(tx_256_to_511_byte_packets),
118 QEDE_PF_STAT(tx_512_to_1023_byte_packets), 136 QEDE_PF_STAT(tx_512_to_1023_byte_packets),
119 QEDE_PF_STAT(tx_1024_to_1518_byte_packets), 137 QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
120 QEDE_PF_STAT(tx_1519_to_2047_byte_packets), 138 QEDE_PF_BB_STAT(tx_1519_to_2047_byte_packets),
121 QEDE_PF_STAT(tx_2048_to_4095_byte_packets), 139 QEDE_PF_BB_STAT(tx_2048_to_4095_byte_packets),
122 QEDE_PF_STAT(tx_4096_to_9216_byte_packets), 140 QEDE_PF_BB_STAT(tx_4096_to_9216_byte_packets),
123 QEDE_PF_STAT(tx_9217_to_16383_byte_packets), 141 QEDE_PF_BB_STAT(tx_9217_to_16383_byte_packets),
124 142 QEDE_PF_AH_STAT(tx_1519_to_max_byte_packets),
125 QEDE_PF_STAT(rx_mac_crtl_frames), 143 QEDE_PF_STAT(rx_mac_crtl_frames),
126 QEDE_PF_STAT(tx_mac_ctrl_frames), 144 QEDE_PF_STAT(tx_mac_ctrl_frames),
127 QEDE_PF_STAT(rx_pause_frames), 145 QEDE_PF_STAT(rx_pause_frames),
@@ -136,8 +154,8 @@ static const struct {
136 QEDE_PF_STAT(rx_jabbers), 154 QEDE_PF_STAT(rx_jabbers),
137 QEDE_PF_STAT(rx_undersize_packets), 155 QEDE_PF_STAT(rx_undersize_packets),
138 QEDE_PF_STAT(rx_fragments), 156 QEDE_PF_STAT(rx_fragments),
139 QEDE_PF_STAT(tx_lpi_entry_count), 157 QEDE_PF_BB_STAT(tx_lpi_entry_count),
140 QEDE_PF_STAT(tx_total_collisions), 158 QEDE_PF_BB_STAT(tx_total_collisions),
141 QEDE_PF_STAT(brb_truncates), 159 QEDE_PF_STAT(brb_truncates),
142 QEDE_PF_STAT(brb_discards), 160 QEDE_PF_STAT(brb_discards),
143 QEDE_STAT(no_buff_discards), 161 QEDE_STAT(no_buff_discards),
@@ -155,6 +173,12 @@ static const struct {
155}; 173};
156 174
157#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr) 175#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
176#define QEDE_STAT_IS_PF_ONLY(i) \
177 test_bit(QEDE_STAT_PF_ONLY, &qede_stats_arr[i].attr)
178#define QEDE_STAT_IS_BB_ONLY(i) \
179 test_bit(QEDE_STAT_BB_ONLY, &qede_stats_arr[i].attr)
180#define QEDE_STAT_IS_AH_ONLY(i) \
181 test_bit(QEDE_STAT_AH_ONLY, &qede_stats_arr[i].attr)
158 182
159enum { 183enum {
160 QEDE_PRI_FLAG_CMT, 184 QEDE_PRI_FLAG_CMT,
@@ -213,6 +237,13 @@ static void qede_get_strings_stats_rxq(struct qede_dev *edev,
213 } 237 }
214} 238}
215 239
240static bool qede_is_irrelevant_stat(struct qede_dev *edev, int stat_index)
241{
242 return (IS_VF(edev) && QEDE_STAT_IS_PF_ONLY(stat_index)) ||
243 (QEDE_IS_BB(edev) && QEDE_STAT_IS_AH_ONLY(stat_index)) ||
244 (QEDE_IS_AH(edev) && QEDE_STAT_IS_BB_ONLY(stat_index));
245}
246
216static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) 247static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
217{ 248{
218 struct qede_fastpath *fp; 249 struct qede_fastpath *fp;
@@ -234,7 +265,7 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
234 265
235 /* Account for non-queue statistics */ 266 /* Account for non-queue statistics */
236 for (i = 0; i < QEDE_NUM_STATS; i++) { 267 for (i = 0; i < QEDE_NUM_STATS; i++) {
237 if (IS_VF(edev) && qede_stats_arr[i].pf_only) 268 if (qede_is_irrelevant_stat(edev, i))
238 continue; 269 continue;
239 strcpy(buf, qede_stats_arr[i].string); 270 strcpy(buf, qede_stats_arr[i].string);
240 buf += ETH_GSTRING_LEN; 271 buf += ETH_GSTRING_LEN;
@@ -309,7 +340,7 @@ static void qede_get_ethtool_stats(struct net_device *dev,
309 } 340 }
310 341
311 for (i = 0; i < QEDE_NUM_STATS; i++) { 342 for (i = 0; i < QEDE_NUM_STATS; i++) {
312 if (IS_VF(edev) && qede_stats_arr[i].pf_only) 343 if (qede_is_irrelevant_stat(edev, i))
313 continue; 344 continue;
314 *buf = *((u64 *)(((void *)&edev->stats) + 345 *buf = *((u64 *)(((void *)&edev->stats) +
315 qede_stats_arr[i].offset)); 346 qede_stats_arr[i].offset));
@@ -323,17 +354,13 @@ static void qede_get_ethtool_stats(struct net_device *dev,
323static int qede_get_sset_count(struct net_device *dev, int stringset) 354static int qede_get_sset_count(struct net_device *dev, int stringset)
324{ 355{
325 struct qede_dev *edev = netdev_priv(dev); 356 struct qede_dev *edev = netdev_priv(dev);
326 int num_stats = QEDE_NUM_STATS; 357 int num_stats = QEDE_NUM_STATS, i;
327 358
328 switch (stringset) { 359 switch (stringset) {
329 case ETH_SS_STATS: 360 case ETH_SS_STATS:
330 if (IS_VF(edev)) { 361 for (i = 0; i < QEDE_NUM_STATS; i++)
331 int i; 362 if (qede_is_irrelevant_stat(edev, i))
332 363 num_stats--;
333 for (i = 0; i < QEDE_NUM_STATS; i++)
334 if (qede_stats_arr[i].pf_only)
335 num_stats--;
336 }
337 364
338 /* Account for the Regular Tx statistics */ 365 /* Account for the Regular Tx statistics */
339 num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS; 366 num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;
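Sketch of the stat-filtering pattern the ethtool changes above introduce: every table entry carries attribute bits (PF-only, BB-only, AH-only) and is skipped when it does not apply to the current device. The device model and names below are invented; in this two-family toy, "not BB" stands in for "AH".

#include <stdio.h>
#include <stdbool.h>

#define ATTR_PF_ONLY  (1u << 0)
#define ATTR_BB_ONLY  (1u << 1)
#define ATTR_AH_ONLY  (1u << 2)

struct demo_stat { const char *name; unsigned int attr; };

static const struct demo_stat stats[] = {
	{ "rx_ucast_pkts",               0 },
	{ "brb_discards",                ATTR_PF_ONLY },
	{ "tx_total_collisions",         ATTR_PF_ONLY | ATTR_BB_ONLY },
	{ "rx_1519_to_max_byte_packets", ATTR_PF_ONLY | ATTR_AH_ONLY },
};

struct demo_dev { bool is_vf; bool is_bb; };

static bool stat_is_irrelevant(const struct demo_dev *dev, const struct demo_stat *st)
{
	return (dev->is_vf && (st->attr & ATTR_PF_ONLY)) ||
	       (dev->is_bb && (st->attr & ATTR_AH_ONLY)) ||
	       (!dev->is_bb && (st->attr & ATTR_BB_ONLY));
}

int main(void)
{
	struct demo_dev ah_pf = { .is_vf = false, .is_bb = false };
	size_t i;

	for (i = 0; i < sizeof(stats) / sizeof(stats[0]); i++)
		if (!stat_is_irrelevant(&ah_pf, &stats[i]))
			printf("exposing %s\n", stats[i].name);
	return 0;
}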
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 3a78c3f25157..abd99109e532 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -84,6 +84,8 @@ static const struct qed_eth_ops *qed_ops;
84#define CHIP_NUM_57980S_50 0x1654 84#define CHIP_NUM_57980S_50 0x1654
85#define CHIP_NUM_57980S_25 0x1656 85#define CHIP_NUM_57980S_25 0x1656
86#define CHIP_NUM_57980S_IOV 0x1664 86#define CHIP_NUM_57980S_IOV 0x1664
87#define CHIP_NUM_AH 0x8070
88#define CHIP_NUM_AH_IOV 0x8090
87 89
88#ifndef PCI_DEVICE_ID_NX2_57980E 90#ifndef PCI_DEVICE_ID_NX2_57980E
89#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40 91#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
@@ -93,6 +95,9 @@ static const struct qed_eth_ops *qed_ops;
93#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50 95#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
94#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25 96#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
95#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV 97#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
98#define PCI_DEVICE_ID_AH CHIP_NUM_AH
99#define PCI_DEVICE_ID_AH_IOV CHIP_NUM_AH_IOV
100
96#endif 101#endif
97 102
98enum qede_pci_private { 103enum qede_pci_private {
@@ -110,6 +115,10 @@ static const struct pci_device_id qede_pci_tbl[] = {
110#ifdef CONFIG_QED_SRIOV 115#ifdef CONFIG_QED_SRIOV
111 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF}, 116 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
112#endif 117#endif
118 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
119#ifdef CONFIG_QED_SRIOV
120 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
121#endif
113 { 0 } 122 { 0 }
114}; 123};
115 124
@@ -314,122 +323,135 @@ static int qede_close(struct net_device *ndev);
314 323
315void qede_fill_by_demand_stats(struct qede_dev *edev) 324void qede_fill_by_demand_stats(struct qede_dev *edev)
316{ 325{
326 struct qede_stats_common *p_common = &edev->stats.common;
317 struct qed_eth_stats stats; 327 struct qed_eth_stats stats;
318 328
319 edev->ops->get_vport_stats(edev->cdev, &stats); 329 edev->ops->get_vport_stats(edev->cdev, &stats);
320 edev->stats.no_buff_discards = stats.no_buff_discards; 330
321 edev->stats.packet_too_big_discard = stats.packet_too_big_discard; 331 p_common->no_buff_discards = stats.common.no_buff_discards;
322 edev->stats.ttl0_discard = stats.ttl0_discard; 332 p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
323 edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes; 333 p_common->ttl0_discard = stats.common.ttl0_discard;
324 edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes; 334 p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
325 edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes; 335 p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
326 edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts; 336 p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
327 edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts; 337 p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
328 edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts; 338 p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
329 edev->stats.mftag_filter_discards = stats.mftag_filter_discards; 339 p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
330 edev->stats.mac_filter_discards = stats.mac_filter_discards; 340 p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
331 341 p_common->mac_filter_discards = stats.common.mac_filter_discards;
332 edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes; 342
333 edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes; 343 p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
334 edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes; 344 p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
335 edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts; 345 p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
336 edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts; 346 p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
337 edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts; 347 p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
338 edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts; 348 p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
339 edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts; 349 p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
340 edev->stats.coalesced_events = stats.tpa_coalesced_events; 350 p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
341 edev->stats.coalesced_aborts_num = stats.tpa_aborts_num; 351 p_common->coalesced_events = stats.common.tpa_coalesced_events;
342 edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts; 352 p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
343 edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes; 353 p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
344 354 p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;
345 edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets; 355
346 edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets; 356 p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
347 edev->stats.rx_128_to_255_byte_packets = 357 p_common->rx_65_to_127_byte_packets =
348 stats.rx_128_to_255_byte_packets; 358 stats.common.rx_65_to_127_byte_packets;
349 edev->stats.rx_256_to_511_byte_packets = 359 p_common->rx_128_to_255_byte_packets =
350 stats.rx_256_to_511_byte_packets; 360 stats.common.rx_128_to_255_byte_packets;
351 edev->stats.rx_512_to_1023_byte_packets = 361 p_common->rx_256_to_511_byte_packets =
352 stats.rx_512_to_1023_byte_packets; 362 stats.common.rx_256_to_511_byte_packets;
353 edev->stats.rx_1024_to_1518_byte_packets = 363 p_common->rx_512_to_1023_byte_packets =
354 stats.rx_1024_to_1518_byte_packets; 364 stats.common.rx_512_to_1023_byte_packets;
355 edev->stats.rx_1519_to_1522_byte_packets = 365 p_common->rx_1024_to_1518_byte_packets =
356 stats.rx_1519_to_1522_byte_packets; 366 stats.common.rx_1024_to_1518_byte_packets;
357 edev->stats.rx_1519_to_2047_byte_packets = 367 p_common->rx_crc_errors = stats.common.rx_crc_errors;
358 stats.rx_1519_to_2047_byte_packets; 368 p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
359 edev->stats.rx_2048_to_4095_byte_packets = 369 p_common->rx_pause_frames = stats.common.rx_pause_frames;
360 stats.rx_2048_to_4095_byte_packets; 370 p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
361 edev->stats.rx_4096_to_9216_byte_packets = 371 p_common->rx_align_errors = stats.common.rx_align_errors;
362 stats.rx_4096_to_9216_byte_packets; 372 p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
363 edev->stats.rx_9217_to_16383_byte_packets = 373 p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
364 stats.rx_9217_to_16383_byte_packets; 374 p_common->rx_jabbers = stats.common.rx_jabbers;
365 edev->stats.rx_crc_errors = stats.rx_crc_errors; 375 p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
366 edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames; 376 p_common->rx_fragments = stats.common.rx_fragments;
367 edev->stats.rx_pause_frames = stats.rx_pause_frames; 377 p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
368 edev->stats.rx_pfc_frames = stats.rx_pfc_frames; 378 p_common->tx_65_to_127_byte_packets =
369 edev->stats.rx_align_errors = stats.rx_align_errors; 379 stats.common.tx_65_to_127_byte_packets;
370 edev->stats.rx_carrier_errors = stats.rx_carrier_errors; 380 p_common->tx_128_to_255_byte_packets =
371 edev->stats.rx_oversize_packets = stats.rx_oversize_packets; 381 stats.common.tx_128_to_255_byte_packets;
372 edev->stats.rx_jabbers = stats.rx_jabbers; 382 p_common->tx_256_to_511_byte_packets =
373 edev->stats.rx_undersize_packets = stats.rx_undersize_packets; 383 stats.common.tx_256_to_511_byte_packets;
374 edev->stats.rx_fragments = stats.rx_fragments; 384 p_common->tx_512_to_1023_byte_packets =
375 edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets; 385 stats.common.tx_512_to_1023_byte_packets;
376 edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets; 386 p_common->tx_1024_to_1518_byte_packets =
377 edev->stats.tx_128_to_255_byte_packets = 387 stats.common.tx_1024_to_1518_byte_packets;
378 stats.tx_128_to_255_byte_packets; 388 p_common->tx_pause_frames = stats.common.tx_pause_frames;
379 edev->stats.tx_256_to_511_byte_packets = 389 p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
380 stats.tx_256_to_511_byte_packets; 390 p_common->brb_truncates = stats.common.brb_truncates;
381 edev->stats.tx_512_to_1023_byte_packets = 391 p_common->brb_discards = stats.common.brb_discards;
382 stats.tx_512_to_1023_byte_packets; 392 p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
383 edev->stats.tx_1024_to_1518_byte_packets = 393
384 stats.tx_1024_to_1518_byte_packets; 394 if (QEDE_IS_BB(edev)) {
385 edev->stats.tx_1519_to_2047_byte_packets = 395 struct qede_stats_bb *p_bb = &edev->stats.bb;
386 stats.tx_1519_to_2047_byte_packets; 396
387 edev->stats.tx_2048_to_4095_byte_packets = 397 p_bb->rx_1519_to_1522_byte_packets =
388 stats.tx_2048_to_4095_byte_packets; 398 stats.bb.rx_1519_to_1522_byte_packets;
389 edev->stats.tx_4096_to_9216_byte_packets = 399 p_bb->rx_1519_to_2047_byte_packets =
390 stats.tx_4096_to_9216_byte_packets; 400 stats.bb.rx_1519_to_2047_byte_packets;
391 edev->stats.tx_9217_to_16383_byte_packets = 401 p_bb->rx_2048_to_4095_byte_packets =
392 stats.tx_9217_to_16383_byte_packets; 402 stats.bb.rx_2048_to_4095_byte_packets;
393 edev->stats.tx_pause_frames = stats.tx_pause_frames; 403 p_bb->rx_4096_to_9216_byte_packets =
394 edev->stats.tx_pfc_frames = stats.tx_pfc_frames; 404 stats.bb.rx_4096_to_9216_byte_packets;
395 edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count; 405 p_bb->rx_9217_to_16383_byte_packets =
396 edev->stats.tx_total_collisions = stats.tx_total_collisions; 406 stats.bb.rx_9217_to_16383_byte_packets;
397 edev->stats.brb_truncates = stats.brb_truncates; 407 p_bb->tx_1519_to_2047_byte_packets =
398 edev->stats.brb_discards = stats.brb_discards; 408 stats.bb.tx_1519_to_2047_byte_packets;
399 edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames; 409 p_bb->tx_2048_to_4095_byte_packets =
410 stats.bb.tx_2048_to_4095_byte_packets;
411 p_bb->tx_4096_to_9216_byte_packets =
412 stats.bb.tx_4096_to_9216_byte_packets;
413 p_bb->tx_9217_to_16383_byte_packets =
414 stats.bb.tx_9217_to_16383_byte_packets;
415 p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
416 p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
417 } else {
418 struct qede_stats_ah *p_ah = &edev->stats.ah;
419
420 p_ah->rx_1519_to_max_byte_packets =
421 stats.ah.rx_1519_to_max_byte_packets;
422 p_ah->tx_1519_to_max_byte_packets =
423 stats.ah.tx_1519_to_max_byte_packets;
424 }
400} 425}
401 426
402static void qede_get_stats64(struct net_device *dev, 427static void qede_get_stats64(struct net_device *dev,
403 struct rtnl_link_stats64 *stats) 428 struct rtnl_link_stats64 *stats)
404{ 429{
405 struct qede_dev *edev = netdev_priv(dev); 430 struct qede_dev *edev = netdev_priv(dev);
431 struct qede_stats_common *p_common;
406 432
407 qede_fill_by_demand_stats(edev); 433 qede_fill_by_demand_stats(edev);
434 p_common = &edev->stats.common;
408 435
409 stats->rx_packets = edev->stats.rx_ucast_pkts + 436 stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
410 edev->stats.rx_mcast_pkts + 437 p_common->rx_bcast_pkts;
411 edev->stats.rx_bcast_pkts; 438 stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
412 stats->tx_packets = edev->stats.tx_ucast_pkts + 439 p_common->tx_bcast_pkts;
413 edev->stats.tx_mcast_pkts +
414 edev->stats.tx_bcast_pkts;
415
416 stats->rx_bytes = edev->stats.rx_ucast_bytes +
417 edev->stats.rx_mcast_bytes +
418 edev->stats.rx_bcast_bytes;
419 440
420 stats->tx_bytes = edev->stats.tx_ucast_bytes + 441 stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
421 edev->stats.tx_mcast_bytes + 442 p_common->rx_bcast_bytes;
422 edev->stats.tx_bcast_bytes; 443 stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
444 p_common->tx_bcast_bytes;
423 445
424 stats->tx_errors = edev->stats.tx_err_drop_pkts; 446 stats->tx_errors = p_common->tx_err_drop_pkts;
425 stats->multicast = edev->stats.rx_mcast_pkts + 447 stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;
426 edev->stats.rx_bcast_pkts;
427 448
428 stats->rx_fifo_errors = edev->stats.no_buff_discards; 449 stats->rx_fifo_errors = p_common->no_buff_discards;
429 450
430 stats->collisions = edev->stats.tx_total_collisions; 451 if (QEDE_IS_BB(edev))
431 stats->rx_crc_errors = edev->stats.rx_crc_errors; 452 stats->collisions = edev->stats.bb.tx_total_collisions;
432 stats->rx_frame_errors = edev->stats.rx_align_errors; 453 stats->rx_crc_errors = p_common->rx_crc_errors;
454 stats->rx_frame_errors = p_common->rx_align_errors;
433} 455}
434 456
435#ifdef CONFIG_QED_SRIOV 457#ifdef CONFIG_QED_SRIOV
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
index f62c215be779..7116be485e61 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
@@ -26,6 +26,7 @@
26 26
27/* SGMII digital lane registers */ 27/* SGMII digital lane registers */
28#define EMAC_SGMII_LN_DRVR_CTRL0 0x000C 28#define EMAC_SGMII_LN_DRVR_CTRL0 0x000C
29#define EMAC_SGMII_LN_DRVR_CTRL1 0x0010
29#define EMAC_SGMII_LN_DRVR_TAP_EN 0x0018 30#define EMAC_SGMII_LN_DRVR_TAP_EN 0x0018
30#define EMAC_SGMII_LN_TX_MARGINING 0x001C 31#define EMAC_SGMII_LN_TX_MARGINING 0x001C
31#define EMAC_SGMII_LN_TX_PRE 0x0020 32#define EMAC_SGMII_LN_TX_PRE 0x0020
@@ -48,6 +49,7 @@
48#define EMAC_SGMII_LN_RX_EN_SIGNAL 0x02AC 49#define EMAC_SGMII_LN_RX_EN_SIGNAL 0x02AC
49#define EMAC_SGMII_LN_RX_MISC_CNTRL0 0x02B8 50#define EMAC_SGMII_LN_RX_MISC_CNTRL0 0x02B8
50#define EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV 0x02C8 51#define EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV 0x02C8
52#define EMAC_SGMII_LN_RX_RESECODE_OFFSET 0x02CC
51 53
52/* SGMII digital lane register values */ 54/* SGMII digital lane register values */
53#define UCDR_STEP_BY_TWO_MODE0 BIT(7) 55#define UCDR_STEP_BY_TWO_MODE0 BIT(7)
@@ -73,6 +75,8 @@
73#define CML_GEAR_MODE(x) (((x) & 7) << 3) 75#define CML_GEAR_MODE(x) (((x) & 7) << 3)
74#define CML2CMOS_IBOOST_MODE(x) ((x) & 7) 76#define CML2CMOS_IBOOST_MODE(x) ((x) & 7)
75 77
78#define RESCODE_OFFSET(x) ((x) & 0x1f)
79
76#define MIXER_LOADB_MODE(x) (((x) & 0xf) << 2) 80#define MIXER_LOADB_MODE(x) (((x) & 0xf) << 2)
77#define MIXER_DATARATE_MODE(x) ((x) & 3) 81#define MIXER_DATARATE_MODE(x) ((x) & 3)
78 82
@@ -159,6 +163,8 @@ static const struct emac_reg_write sgmii_laned[] = {
159 {EMAC_SGMII_LN_PARALLEL_RATE, PARALLEL_RATE_MODE0(1)}, 163 {EMAC_SGMII_LN_PARALLEL_RATE, PARALLEL_RATE_MODE0(1)},
160 {EMAC_SGMII_LN_TX_BAND_MODE, BAND_MODE0(1)}, 164 {EMAC_SGMII_LN_TX_BAND_MODE, BAND_MODE0(1)},
161 {EMAC_SGMII_LN_RX_BAND, BAND_MODE0(2)}, 165 {EMAC_SGMII_LN_RX_BAND, BAND_MODE0(2)},
166 {EMAC_SGMII_LN_DRVR_CTRL1, RESCODE_OFFSET(7)},
167 {EMAC_SGMII_LN_RX_RESECODE_OFFSET, RESCODE_OFFSET(9)},
162 {EMAC_SGMII_LN_LANE_MODE, LANE_MODE(26)}, 168 {EMAC_SGMII_LN_LANE_MODE, LANE_MODE(26)},
163 {EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0, CDR_PD_SEL_MODE0(2) | 169 {EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0, CDR_PD_SEL_MODE0(2) |
164 EN_DLL_MODE0 | EN_IQ_DCC_MODE0 | EN_IQCAL_MODE0}, 170 EN_DLL_MODE0 | EN_IQ_DCC_MODE0 | EN_IQCAL_MODE0},
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 672f6b696069..72233ab9474b 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1406,27 +1406,29 @@ static int cp_get_sset_count (struct net_device *dev, int sset)
1406 } 1406 }
1407} 1407}
1408 1408
1409static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1409static int cp_get_link_ksettings(struct net_device *dev,
1410 struct ethtool_link_ksettings *cmd)
1410{ 1411{
1411 struct cp_private *cp = netdev_priv(dev); 1412 struct cp_private *cp = netdev_priv(dev);
1412 int rc; 1413 int rc;
1413 unsigned long flags; 1414 unsigned long flags;
1414 1415
1415 spin_lock_irqsave(&cp->lock, flags); 1416 spin_lock_irqsave(&cp->lock, flags);
1416 rc = mii_ethtool_gset(&cp->mii_if, cmd); 1417 rc = mii_ethtool_get_link_ksettings(&cp->mii_if, cmd);
1417 spin_unlock_irqrestore(&cp->lock, flags); 1418 spin_unlock_irqrestore(&cp->lock, flags);
1418 1419
1419 return rc; 1420 return rc;
1420} 1421}
1421 1422
1422static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1423static int cp_set_link_ksettings(struct net_device *dev,
1424 const struct ethtool_link_ksettings *cmd)
1423{ 1425{
1424 struct cp_private *cp = netdev_priv(dev); 1426 struct cp_private *cp = netdev_priv(dev);
1425 int rc; 1427 int rc;
1426 unsigned long flags; 1428 unsigned long flags;
1427 1429
1428 spin_lock_irqsave(&cp->lock, flags); 1430 spin_lock_irqsave(&cp->lock, flags);
1429 rc = mii_ethtool_sset(&cp->mii_if, cmd); 1431 rc = mii_ethtool_set_link_ksettings(&cp->mii_if, cmd);
1430 spin_unlock_irqrestore(&cp->lock, flags); 1432 spin_unlock_irqrestore(&cp->lock, flags);
1431 1433
1432 return rc; 1434 return rc;
@@ -1578,8 +1580,6 @@ static const struct ethtool_ops cp_ethtool_ops = {
1578 .get_drvinfo = cp_get_drvinfo, 1580 .get_drvinfo = cp_get_drvinfo,
1579 .get_regs_len = cp_get_regs_len, 1581 .get_regs_len = cp_get_regs_len,
1580 .get_sset_count = cp_get_sset_count, 1582 .get_sset_count = cp_get_sset_count,
1581 .get_settings = cp_get_settings,
1582 .set_settings = cp_set_settings,
1583 .nway_reset = cp_nway_reset, 1583 .nway_reset = cp_nway_reset,
1584 .get_link = ethtool_op_get_link, 1584 .get_link = ethtool_op_get_link,
1585 .get_msglevel = cp_get_msglevel, 1585 .get_msglevel = cp_get_msglevel,
@@ -1593,6 +1593,8 @@ static const struct ethtool_ops cp_ethtool_ops = {
1593 .get_eeprom = cp_get_eeprom, 1593 .get_eeprom = cp_get_eeprom,
1594 .set_eeprom = cp_set_eeprom, 1594 .set_eeprom = cp_set_eeprom,
1595 .get_ringparam = cp_get_ringparam, 1595 .get_ringparam = cp_get_ringparam,
1596 .get_link_ksettings = cp_get_link_ksettings,
1597 .set_link_ksettings = cp_set_link_ksettings,
1596}; 1598};
1597 1599
1598static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) 1600static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 89631753e799..ca22f2898664 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2384,21 +2384,23 @@ static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
2384 strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); 2384 strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
2385} 2385}
2386 2386
2387static int rtl8139_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2387static int rtl8139_get_link_ksettings(struct net_device *dev,
2388 struct ethtool_link_ksettings *cmd)
2388{ 2389{
2389 struct rtl8139_private *tp = netdev_priv(dev); 2390 struct rtl8139_private *tp = netdev_priv(dev);
2390 spin_lock_irq(&tp->lock); 2391 spin_lock_irq(&tp->lock);
2391 mii_ethtool_gset(&tp->mii, cmd); 2392 mii_ethtool_get_link_ksettings(&tp->mii, cmd);
2392 spin_unlock_irq(&tp->lock); 2393 spin_unlock_irq(&tp->lock);
2393 return 0; 2394 return 0;
2394} 2395}
2395 2396
2396static int rtl8139_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2397static int rtl8139_set_link_ksettings(struct net_device *dev,
2398 const struct ethtool_link_ksettings *cmd)
2397{ 2399{
2398 struct rtl8139_private *tp = netdev_priv(dev); 2400 struct rtl8139_private *tp = netdev_priv(dev);
2399 int rc; 2401 int rc;
2400 spin_lock_irq(&tp->lock); 2402 spin_lock_irq(&tp->lock);
2401 rc = mii_ethtool_sset(&tp->mii, cmd); 2403 rc = mii_ethtool_set_link_ksettings(&tp->mii, cmd);
2402 spin_unlock_irq(&tp->lock); 2404 spin_unlock_irq(&tp->lock);
2403 return rc; 2405 return rc;
2404} 2406}
@@ -2480,8 +2482,6 @@ static void rtl8139_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2480 2482
2481static const struct ethtool_ops rtl8139_ethtool_ops = { 2483static const struct ethtool_ops rtl8139_ethtool_ops = {
2482 .get_drvinfo = rtl8139_get_drvinfo, 2484 .get_drvinfo = rtl8139_get_drvinfo,
2483 .get_settings = rtl8139_get_settings,
2484 .set_settings = rtl8139_set_settings,
2485 .get_regs_len = rtl8139_get_regs_len, 2485 .get_regs_len = rtl8139_get_regs_len,
2486 .get_regs = rtl8139_get_regs, 2486 .get_regs = rtl8139_get_regs,
2487 .nway_reset = rtl8139_nway_reset, 2487 .nway_reset = rtl8139_nway_reset,
@@ -2493,6 +2493,8 @@ static const struct ethtool_ops rtl8139_ethtool_ops = {
2493 .get_strings = rtl8139_get_strings, 2493 .get_strings = rtl8139_get_strings,
2494 .get_sset_count = rtl8139_get_sset_count, 2494 .get_sset_count = rtl8139_get_sset_count,
2495 .get_ethtool_stats = rtl8139_get_ethtool_stats, 2495 .get_ethtool_stats = rtl8139_get_ethtool_stats,
2496 .get_link_ksettings = rtl8139_get_link_ksettings,
2497 .set_link_ksettings = rtl8139_set_link_ksettings,
2496}; 2498};
2497 2499
2498static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2500static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 81f18a833527..0a8f2817ea60 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -817,7 +817,8 @@ struct rtl8169_private {
817 } csi_ops; 817 } csi_ops;
818 818
819 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); 819 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
820 int (*get_settings)(struct net_device *, struct ethtool_cmd *); 820 int (*get_link_ksettings)(struct net_device *,
821 struct ethtool_link_ksettings *);
821 void (*phy_reset_enable)(struct rtl8169_private *tp); 822 void (*phy_reset_enable)(struct rtl8169_private *tp);
822 void (*hw_start)(struct net_device *); 823 void (*hw_start)(struct net_device *);
823 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); 824 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
@@ -2115,41 +2116,49 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
2115 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); 2116 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
2116} 2117}
2117 2118
2118static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) 2119static int rtl8169_get_link_ksettings_tbi(struct net_device *dev,
2120 struct ethtool_link_ksettings *cmd)
2119{ 2121{
2120 struct rtl8169_private *tp = netdev_priv(dev); 2122 struct rtl8169_private *tp = netdev_priv(dev);
2121 void __iomem *ioaddr = tp->mmio_addr; 2123 void __iomem *ioaddr = tp->mmio_addr;
2122 u32 status; 2124 u32 status;
2125 u32 supported, advertising;
2123 2126
2124 cmd->supported = 2127 supported =
2125 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE; 2128 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
2126 cmd->port = PORT_FIBRE; 2129 cmd->base.port = PORT_FIBRE;
2127 cmd->transceiver = XCVR_INTERNAL;
2128 2130
2129 status = RTL_R32(TBICSR); 2131 status = RTL_R32(TBICSR);
2130 cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0; 2132 advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
2131 cmd->autoneg = !!(status & TBINwEnable); 2133 cmd->base.autoneg = !!(status & TBINwEnable);
2132 2134
2133 ethtool_cmd_speed_set(cmd, SPEED_1000); 2135 cmd->base.speed = SPEED_1000;
2134 cmd->duplex = DUPLEX_FULL; /* Always set */ 2136 cmd->base.duplex = DUPLEX_FULL; /* Always set */
2137
2138 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2139 supported);
2140 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
2141 advertising);
2135 2142
2136 return 0; 2143 return 0;
2137} 2144}
2138 2145
2139static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd) 2146static int rtl8169_get_link_ksettings_xmii(struct net_device *dev,
2147 struct ethtool_link_ksettings *cmd)
2140{ 2148{
2141 struct rtl8169_private *tp = netdev_priv(dev); 2149 struct rtl8169_private *tp = netdev_priv(dev);
2142 2150
2143 return mii_ethtool_gset(&tp->mii, cmd); 2151 return mii_ethtool_get_link_ksettings(&tp->mii, cmd);
2144} 2152}
2145 2153
2146static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2154static int rtl8169_get_link_ksettings(struct net_device *dev,
2155 struct ethtool_link_ksettings *cmd)
2147{ 2156{
2148 struct rtl8169_private *tp = netdev_priv(dev); 2157 struct rtl8169_private *tp = netdev_priv(dev);
2149 int rc; 2158 int rc;
2150 2159
2151 rtl_lock_work(tp); 2160 rtl_lock_work(tp);
2152 rc = tp->get_settings(dev, cmd); 2161 rc = tp->get_link_ksettings(dev, cmd);
2153 rtl_unlock_work(tp); 2162 rtl_unlock_work(tp);
2154 2163
2155 return rc; 2164 return rc;
@@ -2356,7 +2365,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
2356 .get_drvinfo = rtl8169_get_drvinfo, 2365 .get_drvinfo = rtl8169_get_drvinfo,
2357 .get_regs_len = rtl8169_get_regs_len, 2366 .get_regs_len = rtl8169_get_regs_len,
2358 .get_link = ethtool_op_get_link, 2367 .get_link = ethtool_op_get_link,
2359 .get_settings = rtl8169_get_settings,
2360 .set_settings = rtl8169_set_settings, 2368 .set_settings = rtl8169_set_settings,
2361 .get_msglevel = rtl8169_get_msglevel, 2369 .get_msglevel = rtl8169_get_msglevel,
2362 .set_msglevel = rtl8169_set_msglevel, 2370 .set_msglevel = rtl8169_set_msglevel,
@@ -2368,6 +2376,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
2368 .get_ethtool_stats = rtl8169_get_ethtool_stats, 2376 .get_ethtool_stats = rtl8169_get_ethtool_stats,
2369 .get_ts_info = ethtool_op_get_ts_info, 2377 .get_ts_info = ethtool_op_get_ts_info,
2370 .nway_reset = rtl8169_nway_reset, 2378 .nway_reset = rtl8169_nway_reset,
2379 .get_link_ksettings = rtl8169_get_link_ksettings,
2371}; 2380};
2372 2381
2373static void rtl8169_get_mac_version(struct rtl8169_private *tp, 2382static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -8351,14 +8360,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8351 8360
8352 if (rtl_tbi_enabled(tp)) { 8361 if (rtl_tbi_enabled(tp)) {
8353 tp->set_speed = rtl8169_set_speed_tbi; 8362 tp->set_speed = rtl8169_set_speed_tbi;
8354 tp->get_settings = rtl8169_gset_tbi; 8363 tp->get_link_ksettings = rtl8169_get_link_ksettings_tbi;
8355 tp->phy_reset_enable = rtl8169_tbi_reset_enable; 8364 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
8356 tp->phy_reset_pending = rtl8169_tbi_reset_pending; 8365 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
8357 tp->link_ok = rtl8169_tbi_link_ok; 8366 tp->link_ok = rtl8169_tbi_link_ok;
8358 tp->do_ioctl = rtl_tbi_ioctl; 8367 tp->do_ioctl = rtl_tbi_ioctl;
8359 } else { 8368 } else {
8360 tp->set_speed = rtl8169_set_speed_xmii; 8369 tp->set_speed = rtl8169_set_speed_xmii;
8361 tp->get_settings = rtl8169_gset_xmii; 8370 tp->get_link_ksettings = rtl8169_get_link_ksettings_xmii;
8362 tp->phy_reset_enable = rtl8169_xmii_reset_enable; 8371 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
8363 tp->phy_reset_pending = rtl8169_xmii_reset_pending; 8372 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
8364 tp->link_ok = rtl8169_xmii_link_ok; 8373 tp->link_ok = rtl8169_xmii_link_ok;
@@ -8444,9 +8453,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8444 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? 8453 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
8445 ~(RxBOVF | RxFOVF) : ~0; 8454 ~(RxBOVF | RxFOVF) : ~0;
8446 8455
8447 init_timer(&tp->timer); 8456 setup_timer(&tp->timer, rtl8169_phy_timer, (unsigned long)dev);
8448 tp->timer.data = (unsigned long) dev;
8449 tp->timer.function = rtl8169_phy_timer;
8450 8457
8451 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; 8458 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
8452 8459
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 0f63a44a955d..bab13613b138 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -33,6 +33,7 @@
33#include <net/rtnetlink.h> 33#include <net/rtnetlink.h>
34#include <net/netevent.h> 34#include <net/netevent.h>
35#include <net/arp.h> 35#include <net/arp.h>
36#include <net/fib_rules.h>
36#include <linux/io-64-nonatomic-lo-hi.h> 37#include <linux/io-64-nonatomic-lo-hi.h>
37#include <generated/utsrelease.h> 38#include <generated/utsrelease.h>
38 39
@@ -1115,7 +1116,7 @@ rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
1115 const struct rocker_desc_info *desc_info, 1116 const struct rocker_desc_info *desc_info,
1116 void *priv) 1117 void *priv)
1117{ 1118{
1118 struct ethtool_cmd *ecmd = priv; 1119 struct ethtool_link_ksettings *ecmd = priv;
1119 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; 1120 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1120 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; 1121 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1121 u32 speed; 1122 u32 speed;
@@ -1137,13 +1138,14 @@ rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
1137 duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]); 1138 duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1138 autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]); 1139 autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1139 1140
1140 ecmd->transceiver = XCVR_INTERNAL; 1141 ethtool_link_ksettings_zero_link_mode(ecmd, supported);
1141 ecmd->supported = SUPPORTED_TP; 1142 ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
1142 ecmd->phy_address = 0xff; 1143
1143 ecmd->port = PORT_TP; 1144 ecmd->base.phy_address = 0xff;
1144 ethtool_cmd_speed_set(ecmd, speed); 1145 ecmd->base.port = PORT_TP;
1145 ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF; 1146 ecmd->base.speed = speed;
1146 ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; 1147 ecmd->base.duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1148 ecmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1147 1149
1148 return 0; 1150 return 0;
1149} 1151}
@@ -1250,7 +1252,7 @@ rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
1250 struct rocker_desc_info *desc_info, 1252 struct rocker_desc_info *desc_info,
1251 void *priv) 1253 void *priv)
1252{ 1254{
1253 struct ethtool_cmd *ecmd = priv; 1255 struct ethtool_link_ksettings *ecmd = priv;
1254 struct rocker_tlv *cmd_info; 1256 struct rocker_tlv *cmd_info;
1255 1257
1256 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, 1258 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
@@ -1263,13 +1265,13 @@ rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
1263 rocker_port->pport)) 1265 rocker_port->pport))
1264 return -EMSGSIZE; 1266 return -EMSGSIZE;
1265 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, 1267 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1266 ethtool_cmd_speed(ecmd))) 1268 ecmd->base.speed))
1267 return -EMSGSIZE; 1269 return -EMSGSIZE;
1268 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, 1270 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1269 ecmd->duplex)) 1271 ecmd->base.duplex))
1270 return -EMSGSIZE; 1272 return -EMSGSIZE;
1271 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, 1273 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1272 ecmd->autoneg)) 1274 ecmd->base.autoneg))
1273 return -EMSGSIZE; 1275 return -EMSGSIZE;
1274 rocker_tlv_nest_end(desc_info, cmd_info); 1276 rocker_tlv_nest_end(desc_info, cmd_info);
1275 return 0; 1277 return 0;
@@ -1347,8 +1349,9 @@ rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
1347 return 0; 1349 return 0;
1348} 1350}
1349 1351
1350static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, 1352static int
1351 struct ethtool_cmd *ecmd) 1353rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1354 struct ethtool_link_ksettings *ecmd)
1352{ 1355{
1353 return rocker_cmd_exec(rocker_port, false, 1356 return rocker_cmd_exec(rocker_port, false,
1354 rocker_cmd_get_port_settings_prep, NULL, 1357 rocker_cmd_get_port_settings_prep, NULL,
@@ -1373,12 +1376,17 @@ static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
1373 rocker_cmd_get_port_settings_mode_proc, p_mode); 1376 rocker_cmd_get_port_settings_mode_proc, p_mode);
1374} 1377}
1375 1378
1376static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, 1379static int
1377 struct ethtool_cmd *ecmd) 1380rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1381 const struct ethtool_link_ksettings *ecmd)
1378{ 1382{
1383 struct ethtool_link_ksettings copy_ecmd;
1384
1385 memcpy(&copy_ecmd, ecmd, sizeof(copy_ecmd));
1386
1379 return rocker_cmd_exec(rocker_port, false, 1387 return rocker_cmd_exec(rocker_port, false,
1380 rocker_cmd_set_port_settings_ethtool_prep, 1388 rocker_cmd_set_port_settings_ethtool_prep,
1381 ecmd, NULL, NULL); 1389 &copy_ecmd, NULL, NULL);
1382} 1390}
1383 1391
1384static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, 1392static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
@@ -2168,7 +2176,10 @@ static const struct switchdev_ops rocker_port_switchdev_ops = {
2168 2176
2169struct rocker_fib_event_work { 2177struct rocker_fib_event_work {
2170 struct work_struct work; 2178 struct work_struct work;
2171 struct fib_entry_notifier_info fen_info; 2179 union {
2180 struct fib_entry_notifier_info fen_info;
2181 struct fib_rule_notifier_info fr_info;
2182 };
2172 struct rocker *rocker; 2183 struct rocker *rocker;
2173 unsigned long event; 2184 unsigned long event;
2174}; 2185};
@@ -2178,6 +2189,7 @@ static void rocker_router_fib_event_work(struct work_struct *work)
2178 struct rocker_fib_event_work *fib_work = 2189 struct rocker_fib_event_work *fib_work =
2179 container_of(work, struct rocker_fib_event_work, work); 2190 container_of(work, struct rocker_fib_event_work, work);
2180 struct rocker *rocker = fib_work->rocker; 2191 struct rocker *rocker = fib_work->rocker;
2192 struct fib_rule *rule;
2181 int err; 2193 int err;
2182 2194
2183 /* Protect internal structures from changes */ 2195 /* Protect internal structures from changes */
@@ -2195,7 +2207,10 @@ static void rocker_router_fib_event_work(struct work_struct *work)
2195 break; 2207 break;
2196 case FIB_EVENT_RULE_ADD: /* fall through */ 2208 case FIB_EVENT_RULE_ADD: /* fall through */
2197 case FIB_EVENT_RULE_DEL: 2209 case FIB_EVENT_RULE_DEL:
2198 rocker_world_fib4_abort(rocker); 2210 rule = fib_work->fr_info.rule;
2211 if (!fib4_rule_default(rule))
2212 rocker_world_fib4_abort(rocker);
2213 fib_rule_put(rule);
2199 break; 2214 break;
2200 } 2215 }
2201 rtnl_unlock(); 2216 rtnl_unlock();
@@ -2226,6 +2241,11 @@ static int rocker_router_fib_event(struct notifier_block *nb,
2226 */ 2241 */
2227 fib_info_hold(fib_work->fen_info.fi); 2242 fib_info_hold(fib_work->fen_info.fi);
2228 break; 2243 break;
2244 case FIB_EVENT_RULE_ADD: /* fall through */
2245 case FIB_EVENT_RULE_DEL:
2246 memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
2247 fib_rule_get(fib_work->fr_info.rule);
2248 break;
2229 } 2249 }
2230 2250
2231 queue_work(rocker->rocker_owq, &fib_work->work); 2251 queue_work(rocker->rocker_owq, &fib_work->work);
@@ -2237,16 +2257,18 @@ static int rocker_router_fib_event(struct notifier_block *nb,
2237 * ethtool interface 2257 * ethtool interface
2238 ********************/ 2258 ********************/
2239 2259
2240static int rocker_port_get_settings(struct net_device *dev, 2260static int
2241 struct ethtool_cmd *ecmd) 2261rocker_port_get_link_ksettings(struct net_device *dev,
2262 struct ethtool_link_ksettings *ecmd)
2242{ 2263{
2243 struct rocker_port *rocker_port = netdev_priv(dev); 2264 struct rocker_port *rocker_port = netdev_priv(dev);
2244 2265
2245 return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd); 2266 return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
2246} 2267}
2247 2268
2248static int rocker_port_set_settings(struct net_device *dev, 2269static int
2249 struct ethtool_cmd *ecmd) 2270rocker_port_set_link_ksettings(struct net_device *dev,
2271 const struct ethtool_link_ksettings *ecmd)
2250{ 2272{
2251 struct rocker_port *rocker_port = netdev_priv(dev); 2273 struct rocker_port *rocker_port = netdev_priv(dev);
2252 2274
@@ -2388,13 +2410,13 @@ static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
2388} 2410}
2389 2411
2390static const struct ethtool_ops rocker_port_ethtool_ops = { 2412static const struct ethtool_ops rocker_port_ethtool_ops = {
2391 .get_settings = rocker_port_get_settings,
2392 .set_settings = rocker_port_set_settings,
2393 .get_drvinfo = rocker_port_get_drvinfo, 2413 .get_drvinfo = rocker_port_get_drvinfo,
2394 .get_link = ethtool_op_get_link, 2414 .get_link = ethtool_op_get_link,
2395 .get_strings = rocker_port_get_strings, 2415 .get_strings = rocker_port_get_strings,
2396 .get_ethtool_stats = rocker_port_get_stats, 2416 .get_ethtool_stats = rocker_port_get_stats,
2397 .get_sset_count = rocker_port_get_sset_count, 2417 .get_sset_count = rocker_port_get_sset_count,
2418 .get_link_ksettings = rocker_port_get_link_ksettings,
2419 .set_link_ksettings = rocker_port_set_link_ksettings,
2398}; 2420};
2399 2421
2400/***************** 2422/*****************
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
index 104fb15a73f2..f6daf09b8627 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -437,11 +437,13 @@ int ef4_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
437 if (ntc->type != TC_SETUP_MQPRIO) 437 if (ntc->type != TC_SETUP_MQPRIO)
438 return -EINVAL; 438 return -EINVAL;
439 439
440 num_tc = ntc->tc; 440 num_tc = ntc->mqprio->num_tc;
441 441
442 if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0 || num_tc > EF4_MAX_TX_TC) 442 if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0 || num_tc > EF4_MAX_TX_TC)
443 return -EINVAL; 443 return -EINVAL;
444 444
445 ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
446
445 if (num_tc == net_dev->num_tc) 447 if (num_tc == net_dev->num_tc)
446 return 0; 448 return 0;
447 449
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index ff88d60aa6d5..3bdf87f31087 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -665,11 +665,13 @@ int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
665 if (ntc->type != TC_SETUP_MQPRIO) 665 if (ntc->type != TC_SETUP_MQPRIO)
666 return -EINVAL; 666 return -EINVAL;
667 667
668 num_tc = ntc->tc; 668 num_tc = ntc->mqprio->num_tc;
669 669
670 if (num_tc > EFX_MAX_TX_TC) 670 if (num_tc > EFX_MAX_TX_TC)
671 return -EINVAL; 671 return -EINVAL;
672 672
673 ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
674
673 if (num_tc == net_dev->num_tc) 675 if (num_tc == net_dev->num_tc)
674 return 0; 676 return 0;
675 677
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 57e6cef81ebe..52ead5524de7 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1558,25 +1558,27 @@ static void ioc3_get_drvinfo (struct net_device *dev,
1558 strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info)); 1558 strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info));
1559} 1559}
1560 1560
1561static int ioc3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1561static int ioc3_get_link_ksettings(struct net_device *dev,
1562 struct ethtool_link_ksettings *cmd)
1562{ 1563{
1563 struct ioc3_private *ip = netdev_priv(dev); 1564 struct ioc3_private *ip = netdev_priv(dev);
1564 int rc; 1565 int rc;
1565 1566
1566 spin_lock_irq(&ip->ioc3_lock); 1567 spin_lock_irq(&ip->ioc3_lock);
1567 rc = mii_ethtool_gset(&ip->mii, cmd); 1568 rc = mii_ethtool_get_link_ksettings(&ip->mii, cmd);
1568 spin_unlock_irq(&ip->ioc3_lock); 1569 spin_unlock_irq(&ip->ioc3_lock);
1569 1570
1570 return rc; 1571 return rc;
1571} 1572}
1572 1573
1573static int ioc3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1574static int ioc3_set_link_ksettings(struct net_device *dev,
1575 const struct ethtool_link_ksettings *cmd)
1574{ 1576{
1575 struct ioc3_private *ip = netdev_priv(dev); 1577 struct ioc3_private *ip = netdev_priv(dev);
1576 int rc; 1578 int rc;
1577 1579
1578 spin_lock_irq(&ip->ioc3_lock); 1580 spin_lock_irq(&ip->ioc3_lock);
1579 rc = mii_ethtool_sset(&ip->mii, cmd); 1581 rc = mii_ethtool_set_link_ksettings(&ip->mii, cmd);
1580 spin_unlock_irq(&ip->ioc3_lock); 1582 spin_unlock_irq(&ip->ioc3_lock);
1581 1583
1582 return rc; 1584 return rc;
@@ -1608,10 +1610,10 @@ static u32 ioc3_get_link(struct net_device *dev)
1608 1610
1609static const struct ethtool_ops ioc3_ethtool_ops = { 1611static const struct ethtool_ops ioc3_ethtool_ops = {
1610 .get_drvinfo = ioc3_get_drvinfo, 1612 .get_drvinfo = ioc3_get_drvinfo,
1611 .get_settings = ioc3_get_settings,
1612 .set_settings = ioc3_set_settings,
1613 .nway_reset = ioc3_nway_reset, 1613 .nway_reset = ioc3_nway_reset,
1614 .get_link = ioc3_get_link, 1614 .get_link = ioc3_get_link,
1615 .get_link_ksettings = ioc3_get_link_ksettings,
1616 .set_link_ksettings = ioc3_set_link_ksettings,
1615}; 1617};
1616 1618
1617static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1619static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index 6c2e2b311c16..751c81848f35 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -1122,14 +1122,16 @@ static void sc92031_poll_controller(struct net_device *dev)
1122} 1122}
1123#endif 1123#endif
1124 1124
1125static int sc92031_ethtool_get_settings(struct net_device *dev, 1125static int
1126 struct ethtool_cmd *cmd) 1126sc92031_ethtool_get_link_ksettings(struct net_device *dev,
1127 struct ethtool_link_ksettings *cmd)
1127{ 1128{
1128 struct sc92031_priv *priv = netdev_priv(dev); 1129 struct sc92031_priv *priv = netdev_priv(dev);
1129 void __iomem *port_base = priv->port_base; 1130 void __iomem *port_base = priv->port_base;
1130 u8 phy_address; 1131 u8 phy_address;
1131 u32 phy_ctrl; 1132 u32 phy_ctrl;
1132 u16 output_status; 1133 u16 output_status;
1134 u32 supported, advertising;
1133 1135
1134 spin_lock_bh(&priv->lock); 1136 spin_lock_bh(&priv->lock);
1135 1137
@@ -1142,68 +1144,77 @@ static int sc92031_ethtool_get_settings(struct net_device *dev,
1142 1144
1143 spin_unlock_bh(&priv->lock); 1145 spin_unlock_bh(&priv->lock);
1144 1146
1145 cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full 1147 supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
1146 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full 1148 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full
1147 | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII; 1149 | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII;
1148 1150
1149 cmd->advertising = ADVERTISED_TP | ADVERTISED_MII; 1151 advertising = ADVERTISED_TP | ADVERTISED_MII;
1150 1152
1151 if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10)) 1153 if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
1152 == (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10)) 1154 == (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
1153 cmd->advertising |= ADVERTISED_Autoneg; 1155 advertising |= ADVERTISED_Autoneg;
1154 1156
1155 if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10) 1157 if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10)
1156 cmd->advertising |= ADVERTISED_10baseT_Half; 1158 advertising |= ADVERTISED_10baseT_Half;
1157 1159
1158 if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux)) 1160 if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux))
1159 == (PhyCtrlSpd10 | PhyCtrlDux)) 1161 == (PhyCtrlSpd10 | PhyCtrlDux))
1160 cmd->advertising |= ADVERTISED_10baseT_Full; 1162 advertising |= ADVERTISED_10baseT_Full;
1161 1163
1162 if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100) 1164 if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100)
1163 cmd->advertising |= ADVERTISED_100baseT_Half; 1165 advertising |= ADVERTISED_100baseT_Half;
1164 1166
1165 if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux)) 1167 if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux))
1166 == (PhyCtrlSpd100 | PhyCtrlDux)) 1168 == (PhyCtrlSpd100 | PhyCtrlDux))
1167 cmd->advertising |= ADVERTISED_100baseT_Full; 1169 advertising |= ADVERTISED_100baseT_Full;
1168 1170
1169 if (phy_ctrl & PhyCtrlAne) 1171 if (phy_ctrl & PhyCtrlAne)
1170 cmd->advertising |= ADVERTISED_Autoneg; 1172 advertising |= ADVERTISED_Autoneg;
1171 1173
1172 ethtool_cmd_speed_set(cmd, 1174 cmd->base.speed = (output_status & 0x2) ? SPEED_100 : SPEED_10;
1173 (output_status & 0x2) ? SPEED_100 : SPEED_10); 1175 cmd->base.duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
1174 cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF; 1176 cmd->base.port = PORT_MII;
1175 cmd->port = PORT_MII; 1177 cmd->base.phy_address = phy_address;
1176 cmd->phy_address = phy_address; 1178 cmd->base.autoneg = (phy_ctrl & PhyCtrlAne) ?
1177 cmd->transceiver = XCVR_INTERNAL; 1179 AUTONEG_ENABLE : AUTONEG_DISABLE;
1178 cmd->autoneg = (phy_ctrl & PhyCtrlAne) ? AUTONEG_ENABLE : AUTONEG_DISABLE; 1180
1181 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1182 supported);
1183 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1184 advertising);
1179 1185
1180 return 0; 1186 return 0;
1181} 1187}
1182 1188
1183static int sc92031_ethtool_set_settings(struct net_device *dev, 1189static int
1184 struct ethtool_cmd *cmd) 1190sc92031_ethtool_set_link_ksettings(struct net_device *dev,
1191 const struct ethtool_link_ksettings *cmd)
1185{ 1192{
1186 struct sc92031_priv *priv = netdev_priv(dev); 1193 struct sc92031_priv *priv = netdev_priv(dev);
1187 void __iomem *port_base = priv->port_base; 1194 void __iomem *port_base = priv->port_base;
1188 u32 speed = ethtool_cmd_speed(cmd); 1195 u32 speed = cmd->base.speed;
1189 u32 phy_ctrl; 1196 u32 phy_ctrl;
1190 u32 old_phy_ctrl; 1197 u32 old_phy_ctrl;
1198 u32 advertising;
1199
1200 ethtool_convert_link_mode_to_legacy_u32(&advertising,
1201 cmd->link_modes.advertising);
1191 1202
1192 if (!(speed == SPEED_10 || speed == SPEED_100)) 1203 if (!(speed == SPEED_10 || speed == SPEED_100))
1193 return -EINVAL; 1204 return -EINVAL;
1194 if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL)) 1205 if (!(cmd->base.duplex == DUPLEX_HALF ||
1195 return -EINVAL; 1206 cmd->base.duplex == DUPLEX_FULL))
1196 if (!(cmd->port == PORT_MII))
1197 return -EINVAL; 1207 return -EINVAL;
1198 if (!(cmd->phy_address == 0x1f)) 1208 if (!(cmd->base.port == PORT_MII))
1199 return -EINVAL; 1209 return -EINVAL;
1200 if (!(cmd->transceiver == XCVR_INTERNAL)) 1210 if (!(cmd->base.phy_address == 0x1f))
1201 return -EINVAL; 1211 return -EINVAL;
1202 if (!(cmd->autoneg == AUTONEG_DISABLE || cmd->autoneg == AUTONEG_ENABLE)) 1212 if (!(cmd->base.autoneg == AUTONEG_DISABLE ||
1213 cmd->base.autoneg == AUTONEG_ENABLE))
1203 return -EINVAL; 1214 return -EINVAL;
1204 1215
1205 if (cmd->autoneg == AUTONEG_ENABLE) { 1216 if (cmd->base.autoneg == AUTONEG_ENABLE) {
1206 if (!(cmd->advertising & (ADVERTISED_Autoneg 1217 if (!(advertising & (ADVERTISED_Autoneg
1207 | ADVERTISED_100baseT_Full 1218 | ADVERTISED_100baseT_Full
1208 | ADVERTISED_100baseT_Half 1219 | ADVERTISED_100baseT_Half
1209 | ADVERTISED_10baseT_Full 1220 | ADVERTISED_10baseT_Full
@@ -1213,15 +1224,15 @@ static int sc92031_ethtool_set_settings(struct net_device *dev,
1213 phy_ctrl = PhyCtrlAne; 1224 phy_ctrl = PhyCtrlAne;
1214 1225
1215 // FIXME: I'm not sure what the original code was trying to do 1226 // FIXME: I'm not sure what the original code was trying to do
1216 if (cmd->advertising & ADVERTISED_Autoneg) 1227 if (advertising & ADVERTISED_Autoneg)
1217 phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10; 1228 phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
1218 if (cmd->advertising & ADVERTISED_100baseT_Full) 1229 if (advertising & ADVERTISED_100baseT_Full)
1219 phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100; 1230 phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
1220 if (cmd->advertising & ADVERTISED_100baseT_Half) 1231 if (advertising & ADVERTISED_100baseT_Half)
1221 phy_ctrl |= PhyCtrlSpd100; 1232 phy_ctrl |= PhyCtrlSpd100;
1222 if (cmd->advertising & ADVERTISED_10baseT_Full) 1233 if (advertising & ADVERTISED_10baseT_Full)
1223 phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux; 1234 phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux;
1224 if (cmd->advertising & ADVERTISED_10baseT_Half) 1235 if (advertising & ADVERTISED_10baseT_Half)
1225 phy_ctrl |= PhyCtrlSpd10; 1236 phy_ctrl |= PhyCtrlSpd10;
1226 } else { 1237 } else {
1227 // FIXME: Whole branch guessed 1238 // FIXME: Whole branch guessed
@@ -1232,7 +1243,7 @@ static int sc92031_ethtool_set_settings(struct net_device *dev,
1232 else /* cmd->speed == SPEED_100 */ 1243 else /* cmd->speed == SPEED_100 */
1233 phy_ctrl |= PhyCtrlSpd100; 1244 phy_ctrl |= PhyCtrlSpd100;
1234 1245
1235 if (cmd->duplex == DUPLEX_FULL) 1246 if (cmd->base.duplex == DUPLEX_FULL)
1236 phy_ctrl |= PhyCtrlDux; 1247 phy_ctrl |= PhyCtrlDux;
1237 } 1248 }
1238 1249
@@ -1368,8 +1379,6 @@ static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
1368} 1379}
1369 1380
1370static const struct ethtool_ops sc92031_ethtool_ops = { 1381static const struct ethtool_ops sc92031_ethtool_ops = {
1371 .get_settings = sc92031_ethtool_get_settings,
1372 .set_settings = sc92031_ethtool_set_settings,
1373 .get_wol = sc92031_ethtool_get_wol, 1382 .get_wol = sc92031_ethtool_get_wol,
1374 .set_wol = sc92031_ethtool_set_wol, 1383 .set_wol = sc92031_ethtool_set_wol,
1375 .nway_reset = sc92031_ethtool_nway_reset, 1384 .nway_reset = sc92031_ethtool_nway_reset,
@@ -1377,6 +1386,8 @@ static const struct ethtool_ops sc92031_ethtool_ops = {
1377 .get_strings = sc92031_ethtool_get_strings, 1386 .get_strings = sc92031_ethtool_get_strings,
1378 .get_sset_count = sc92031_ethtool_get_sset_count, 1387 .get_sset_count = sc92031_ethtool_get_sset_count,
1379 .get_ethtool_stats = sc92031_ethtool_get_ethtool_stats, 1388 .get_ethtool_stats = sc92031_ethtool_get_ethtool_stats,
1389 .get_link_ksettings = sc92031_ethtool_get_link_ksettings,
1390 .set_link_ksettings = sc92031_ethtool_set_link_ksettings,
1380}; 1391};
1381 1392
1382 1393
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 210e35d079dd..02da106c6e04 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1734,18 +1734,20 @@ static void sis190_set_speed_auto(struct net_device *dev)
1734 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET); 1734 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1735} 1735}
1736 1736
1737static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1737static int sis190_get_link_ksettings(struct net_device *dev,
1738 struct ethtool_link_ksettings *cmd)
1738{ 1739{
1739 struct sis190_private *tp = netdev_priv(dev); 1740 struct sis190_private *tp = netdev_priv(dev);
1740 1741
1741 return mii_ethtool_gset(&tp->mii_if, cmd); 1742 return mii_ethtool_get_link_ksettings(&tp->mii_if, cmd);
1742} 1743}
1743 1744
1744static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1745static int sis190_set_link_ksettings(struct net_device *dev,
1746 const struct ethtool_link_ksettings *cmd)
1745{ 1747{
1746 struct sis190_private *tp = netdev_priv(dev); 1748 struct sis190_private *tp = netdev_priv(dev);
1747 1749
1748 return mii_ethtool_sset(&tp->mii_if, cmd); 1750 return mii_ethtool_set_link_ksettings(&tp->mii_if, cmd);
1749} 1751}
1750 1752
1751static void sis190_get_drvinfo(struct net_device *dev, 1753static void sis190_get_drvinfo(struct net_device *dev,
@@ -1797,8 +1799,6 @@ static void sis190_set_msglevel(struct net_device *dev, u32 value)
1797} 1799}
1798 1800
1799static const struct ethtool_ops sis190_ethtool_ops = { 1801static const struct ethtool_ops sis190_ethtool_ops = {
1800 .get_settings = sis190_get_settings,
1801 .set_settings = sis190_set_settings,
1802 .get_drvinfo = sis190_get_drvinfo, 1802 .get_drvinfo = sis190_get_drvinfo,
1803 .get_regs_len = sis190_get_regs_len, 1803 .get_regs_len = sis190_get_regs_len,
1804 .get_regs = sis190_get_regs, 1804 .get_regs = sis190_get_regs,
@@ -1806,6 +1806,8 @@ static const struct ethtool_ops sis190_ethtool_ops = {
1806 .get_msglevel = sis190_get_msglevel, 1806 .get_msglevel = sis190_get_msglevel,
1807 .set_msglevel = sis190_set_msglevel, 1807 .set_msglevel = sis190_set_msglevel,
1808 .nway_reset = sis190_nway_reset, 1808 .nway_reset = sis190_nway_reset,
1809 .get_link_ksettings = sis190_get_link_ksettings,
1810 .set_link_ksettings = sis190_set_link_ksettings,
1809}; 1811};
1810 1812
1811static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1813static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 1b6f6171d078..40bd88362e3d 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -2035,23 +2035,23 @@ static u32 sis900_get_link(struct net_device *net_dev)
2035 return mii_link_ok(&sis_priv->mii_info); 2035 return mii_link_ok(&sis_priv->mii_info);
2036} 2036}
2037 2037
2038static int sis900_get_settings(struct net_device *net_dev, 2038static int sis900_get_link_ksettings(struct net_device *net_dev,
2039 struct ethtool_cmd *cmd) 2039 struct ethtool_link_ksettings *cmd)
2040{ 2040{
2041 struct sis900_private *sis_priv = netdev_priv(net_dev); 2041 struct sis900_private *sis_priv = netdev_priv(net_dev);
2042 spin_lock_irq(&sis_priv->lock); 2042 spin_lock_irq(&sis_priv->lock);
2043 mii_ethtool_gset(&sis_priv->mii_info, cmd); 2043 mii_ethtool_get_link_ksettings(&sis_priv->mii_info, cmd);
2044 spin_unlock_irq(&sis_priv->lock); 2044 spin_unlock_irq(&sis_priv->lock);
2045 return 0; 2045 return 0;
2046} 2046}
2047 2047
2048static int sis900_set_settings(struct net_device *net_dev, 2048static int sis900_set_link_ksettings(struct net_device *net_dev,
2049 struct ethtool_cmd *cmd) 2049 const struct ethtool_link_ksettings *cmd)
2050{ 2050{
2051 struct sis900_private *sis_priv = netdev_priv(net_dev); 2051 struct sis900_private *sis_priv = netdev_priv(net_dev);
2052 int rt; 2052 int rt;
2053 spin_lock_irq(&sis_priv->lock); 2053 spin_lock_irq(&sis_priv->lock);
2054 rt = mii_ethtool_sset(&sis_priv->mii_info, cmd); 2054 rt = mii_ethtool_set_link_ksettings(&sis_priv->mii_info, cmd);
2055 spin_unlock_irq(&sis_priv->lock); 2055 spin_unlock_irq(&sis_priv->lock);
2056 return rt; 2056 return rt;
2057} 2057}
@@ -2129,11 +2129,11 @@ static const struct ethtool_ops sis900_ethtool_ops = {
2129 .get_msglevel = sis900_get_msglevel, 2129 .get_msglevel = sis900_get_msglevel,
2130 .set_msglevel = sis900_set_msglevel, 2130 .set_msglevel = sis900_set_msglevel,
2131 .get_link = sis900_get_link, 2131 .get_link = sis900_get_link,
2132 .get_settings = sis900_get_settings,
2133 .set_settings = sis900_set_settings,
2134 .nway_reset = sis900_nway_reset, 2132 .nway_reset = sis900_nway_reset,
2135 .get_wol = sis900_get_wol, 2133 .get_wol = sis900_get_wol,
2136 .set_wol = sis900_set_wol 2134 .set_wol = sis900_set_wol,
2135 .get_link_ksettings = sis900_get_link_ksettings,
2136 .set_link_ksettings = sis900_set_link_ksettings,
2137}; 2137};
2138 2138
2139/** 2139/**
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 5f2737189c72..db6dcb06193d 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -1387,25 +1387,27 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
1387 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); 1387 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1388} 1388}
1389 1389
1390static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1390static int netdev_get_link_ksettings(struct net_device *dev,
1391 struct ethtool_link_ksettings *cmd)
1391{ 1392{
1392 struct epic_private *np = netdev_priv(dev); 1393 struct epic_private *np = netdev_priv(dev);
1393 int rc; 1394 int rc;
1394 1395
1395 spin_lock_irq(&np->lock); 1396 spin_lock_irq(&np->lock);
1396 rc = mii_ethtool_gset(&np->mii, cmd); 1397 rc = mii_ethtool_get_link_ksettings(&np->mii, cmd);
1397 spin_unlock_irq(&np->lock); 1398 spin_unlock_irq(&np->lock);
1398 1399
1399 return rc; 1400 return rc;
1400} 1401}
1401 1402
1402static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1403static int netdev_set_link_ksettings(struct net_device *dev,
1404 const struct ethtool_link_ksettings *cmd)
1403{ 1405{
1404 struct epic_private *np = netdev_priv(dev); 1406 struct epic_private *np = netdev_priv(dev);
1405 int rc; 1407 int rc;
1406 1408
1407 spin_lock_irq(&np->lock); 1409 spin_lock_irq(&np->lock);
1408 rc = mii_ethtool_sset(&np->mii, cmd); 1410 rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
1409 spin_unlock_irq(&np->lock); 1411 spin_unlock_irq(&np->lock);
1410 1412
1411 return rc; 1413 return rc;
@@ -1460,14 +1462,14 @@ static void ethtool_complete(struct net_device *dev)
1460 1462
1461static const struct ethtool_ops netdev_ethtool_ops = { 1463static const struct ethtool_ops netdev_ethtool_ops = {
1462 .get_drvinfo = netdev_get_drvinfo, 1464 .get_drvinfo = netdev_get_drvinfo,
1463 .get_settings = netdev_get_settings,
1464 .set_settings = netdev_set_settings,
1465 .nway_reset = netdev_nway_reset, 1465 .nway_reset = netdev_nway_reset,
1466 .get_link = netdev_get_link, 1466 .get_link = netdev_get_link,
1467 .get_msglevel = netdev_get_msglevel, 1467 .get_msglevel = netdev_get_msglevel,
1468 .set_msglevel = netdev_set_msglevel, 1468 .set_msglevel = netdev_set_msglevel,
1469 .begin = ethtool_begin, 1469 .begin = ethtool_begin,
1470 .complete = ethtool_complete 1470 .complete = ethtool_complete,
1471 .get_link_ksettings = netdev_get_link_ksettings,
1472 .set_link_ksettings = netdev_set_link_ksettings,
1471}; 1473};
1472 1474
1473static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1475static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 4f19c6166182..36307d34f641 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1446,40 +1446,40 @@ static int smc911x_close(struct net_device *dev)
1446 * Ethtool support 1446 * Ethtool support
1447 */ 1447 */
1448static int 1448static int
1449smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) 1449smc911x_ethtool_get_link_ksettings(struct net_device *dev,
1450 struct ethtool_link_ksettings *cmd)
1450{ 1451{
1451 struct smc911x_local *lp = netdev_priv(dev); 1452 struct smc911x_local *lp = netdev_priv(dev);
1452 int ret, status; 1453 int ret, status;
1453 unsigned long flags; 1454 unsigned long flags;
1455 u32 supported;
1454 1456
1455 DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); 1457 DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
1456 cmd->maxtxpkt = 1;
1457 cmd->maxrxpkt = 1;
1458 1458
1459 if (lp->phy_type != 0) { 1459 if (lp->phy_type != 0) {
1460 spin_lock_irqsave(&lp->lock, flags); 1460 spin_lock_irqsave(&lp->lock, flags);
1461 ret = mii_ethtool_gset(&lp->mii, cmd); 1461 ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd);
1462 spin_unlock_irqrestore(&lp->lock, flags); 1462 spin_unlock_irqrestore(&lp->lock, flags);
1463 } else { 1463 } else {
1464 cmd->supported = SUPPORTED_10baseT_Half | 1464 supported = SUPPORTED_10baseT_Half |
1465 SUPPORTED_10baseT_Full | 1465 SUPPORTED_10baseT_Full |
1466 SUPPORTED_TP | SUPPORTED_AUI; 1466 SUPPORTED_TP | SUPPORTED_AUI;
1467 1467
1468 if (lp->ctl_rspeed == 10) 1468 if (lp->ctl_rspeed == 10)
1469 ethtool_cmd_speed_set(cmd, SPEED_10); 1469 cmd->base.speed = SPEED_10;
1470 else if (lp->ctl_rspeed == 100) 1470 else if (lp->ctl_rspeed == 100)
1471 ethtool_cmd_speed_set(cmd, SPEED_100); 1471 cmd->base.speed = SPEED_100;
1472 1472
1473 cmd->autoneg = AUTONEG_DISABLE; 1473 cmd->base.autoneg = AUTONEG_DISABLE;
1474 if (lp->mii.phy_id==1) 1474 cmd->base.port = 0;
1475 cmd->transceiver = XCVR_INTERNAL;
1476 else
1477 cmd->transceiver = XCVR_EXTERNAL;
1478 cmd->port = 0;
1479 SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status); 1475 SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status);
1480 cmd->duplex = 1476 cmd->base.duplex =
1481 (status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ? 1477 (status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ?
1482 DUPLEX_FULL : DUPLEX_HALF; 1478 DUPLEX_FULL : DUPLEX_HALF;
1479
1480 ethtool_convert_legacy_u32_to_link_mode(
1481 cmd->link_modes.supported, supported);
1482
1483 ret = 0; 1483 ret = 0;
1484 } 1484 }
1485 1485
@@ -1487,7 +1487,8 @@ smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
1487} 1487}
1488 1488
1489static int 1489static int
1490smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) 1490smc911x_ethtool_set_link_ksettings(struct net_device *dev,
1491 const struct ethtool_link_ksettings *cmd)
1491{ 1492{
1492 struct smc911x_local *lp = netdev_priv(dev); 1493 struct smc911x_local *lp = netdev_priv(dev);
1493 int ret; 1494 int ret;
@@ -1495,16 +1496,18 @@ smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
1495 1496
1496 if (lp->phy_type != 0) { 1497 if (lp->phy_type != 0) {
1497 spin_lock_irqsave(&lp->lock, flags); 1498 spin_lock_irqsave(&lp->lock, flags);
1498 ret = mii_ethtool_sset(&lp->mii, cmd); 1499 ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd);
1499 spin_unlock_irqrestore(&lp->lock, flags); 1500 spin_unlock_irqrestore(&lp->lock, flags);
1500 } else { 1501 } else {
1501 if (cmd->autoneg != AUTONEG_DISABLE || 1502 if (cmd->base.autoneg != AUTONEG_DISABLE ||
1502 cmd->speed != SPEED_10 || 1503 cmd->base.speed != SPEED_10 ||
1503 (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) || 1504 (cmd->base.duplex != DUPLEX_HALF &&
1504 (cmd->port != PORT_TP && cmd->port != PORT_AUI)) 1505 cmd->base.duplex != DUPLEX_FULL) ||
1506 (cmd->base.port != PORT_TP &&
1507 cmd->base.port != PORT_AUI))
1505 return -EINVAL; 1508 return -EINVAL;
1506 1509
1507 lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL; 1510 lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;
1508 1511
1509 ret = 0; 1512 ret = 0;
1510 } 1513 }
@@ -1686,8 +1689,6 @@ static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
1686} 1689}
1687 1690
1688static const struct ethtool_ops smc911x_ethtool_ops = { 1691static const struct ethtool_ops smc911x_ethtool_ops = {
1689 .get_settings = smc911x_ethtool_getsettings,
1690 .set_settings = smc911x_ethtool_setsettings,
1691 .get_drvinfo = smc911x_ethtool_getdrvinfo, 1692 .get_drvinfo = smc911x_ethtool_getdrvinfo,
1692 .get_msglevel = smc911x_ethtool_getmsglevel, 1693 .get_msglevel = smc911x_ethtool_getmsglevel,
1693 .set_msglevel = smc911x_ethtool_setmsglevel, 1694 .set_msglevel = smc911x_ethtool_setmsglevel,
@@ -1698,6 +1699,8 @@ static const struct ethtool_ops smc911x_ethtool_ops = {
1698 .get_eeprom_len = smc911x_ethtool_geteeprom_len, 1699 .get_eeprom_len = smc911x_ethtool_geteeprom_len,
1699 .get_eeprom = smc911x_ethtool_geteeprom, 1700 .get_eeprom = smc911x_ethtool_geteeprom,
1700 .set_eeprom = smc911x_ethtool_seteeprom, 1701 .set_eeprom = smc911x_ethtool_seteeprom,
1702 .get_link_ksettings = smc911x_ethtool_get_link_ksettings,
1703 .set_link_ksettings = smc911x_ethtool_set_link_ksettings,
1701}; 1704};
1702 1705
1703/* 1706/*
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 97280daba27f..976aa876789a 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1843,56 +1843,60 @@ static int smc_link_ok(struct net_device *dev)
1843 } 1843 }
1844} 1844}
1845 1845
1846static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) 1846static int smc_netdev_get_ecmd(struct net_device *dev,
1847 struct ethtool_link_ksettings *ecmd)
1847{ 1848{
1848 u16 tmp; 1849 u16 tmp;
1849 unsigned int ioaddr = dev->base_addr; 1850 unsigned int ioaddr = dev->base_addr;
1851 u32 supported;
1850 1852
1851 ecmd->supported = (SUPPORTED_TP | SUPPORTED_AUI | 1853 supported = (SUPPORTED_TP | SUPPORTED_AUI |
1852 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full); 1854 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full);
1853
1854 SMC_SELECT_BANK(1);
1855 tmp = inw(ioaddr + CONFIG);
1856 ecmd->port = (tmp & CFG_AUI_SELECT) ? PORT_AUI : PORT_TP;
1857 ecmd->transceiver = XCVR_INTERNAL;
1858 ethtool_cmd_speed_set(ecmd, SPEED_10);
1859 ecmd->phy_address = ioaddr + MGMT;
1860 1855
1861 SMC_SELECT_BANK(0); 1856 SMC_SELECT_BANK(1);
1862 tmp = inw(ioaddr + TCR); 1857 tmp = inw(ioaddr + CONFIG);
1863 ecmd->duplex = (tmp & TCR_FDUPLX) ? DUPLEX_FULL : DUPLEX_HALF; 1858 ecmd->base.port = (tmp & CFG_AUI_SELECT) ? PORT_AUI : PORT_TP;
1859 ecmd->base.speed = SPEED_10;
1860 ecmd->base.phy_address = ioaddr + MGMT;
1864 1861
1865 return 0; 1862 SMC_SELECT_BANK(0);
1863 tmp = inw(ioaddr + TCR);
1864 ecmd->base.duplex = (tmp & TCR_FDUPLX) ? DUPLEX_FULL : DUPLEX_HALF;
1865
1866 ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
1867 supported);
1868
1869 return 0;
1866} 1870}
1867 1871
1868static int smc_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) 1872static int smc_netdev_set_ecmd(struct net_device *dev,
1873 const struct ethtool_link_ksettings *ecmd)
1869{ 1874{
1870 u16 tmp; 1875 u16 tmp;
1871 unsigned int ioaddr = dev->base_addr; 1876 unsigned int ioaddr = dev->base_addr;
1872 1877
1873 if (ethtool_cmd_speed(ecmd) != SPEED_10) 1878 if (ecmd->base.speed != SPEED_10)
1874 return -EINVAL; 1879 return -EINVAL;
1875 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) 1880 if (ecmd->base.duplex != DUPLEX_HALF &&
1876 return -EINVAL; 1881 ecmd->base.duplex != DUPLEX_FULL)
1877 if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI) 1882 return -EINVAL;
1878 return -EINVAL; 1883 if (ecmd->base.port != PORT_TP && ecmd->base.port != PORT_AUI)
1879 if (ecmd->transceiver != XCVR_INTERNAL) 1884 return -EINVAL;
1880 return -EINVAL;
1881 1885
1882 if (ecmd->port == PORT_AUI) 1886 if (ecmd->base.port == PORT_AUI)
1883 smc_set_xcvr(dev, 1); 1887 smc_set_xcvr(dev, 1);
1884 else 1888 else
1885 smc_set_xcvr(dev, 0); 1889 smc_set_xcvr(dev, 0);
1886 1890
1887 SMC_SELECT_BANK(0); 1891 SMC_SELECT_BANK(0);
1888 tmp = inw(ioaddr + TCR); 1892 tmp = inw(ioaddr + TCR);
1889 if (ecmd->duplex == DUPLEX_FULL) 1893 if (ecmd->base.duplex == DUPLEX_FULL)
1890 tmp |= TCR_FDUPLX; 1894 tmp |= TCR_FDUPLX;
1891 else 1895 else
1892 tmp &= ~TCR_FDUPLX; 1896 tmp &= ~TCR_FDUPLX;
1893 outw(tmp, ioaddr + TCR); 1897 outw(tmp, ioaddr + TCR);
1894 1898
1895 return 0; 1899 return 0;
1896} 1900}
1897 1901
1898static int check_if_running(struct net_device *dev) 1902static int check_if_running(struct net_device *dev)
@@ -1908,7 +1912,8 @@ static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
1908 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 1912 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1909} 1913}
1910 1914
1911static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 1915static int smc_get_link_ksettings(struct net_device *dev,
1916 struct ethtool_link_ksettings *ecmd)
1912{ 1917{
1913 struct smc_private *smc = netdev_priv(dev); 1918 struct smc_private *smc = netdev_priv(dev);
1914 unsigned int ioaddr = dev->base_addr; 1919 unsigned int ioaddr = dev->base_addr;
@@ -1919,7 +1924,7 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1919 spin_lock_irqsave(&smc->lock, flags); 1924 spin_lock_irqsave(&smc->lock, flags);
1920 SMC_SELECT_BANK(3); 1925 SMC_SELECT_BANK(3);
1921 if (smc->cfg & CFG_MII_SELECT) 1926 if (smc->cfg & CFG_MII_SELECT)
1922 ret = mii_ethtool_gset(&smc->mii_if, ecmd); 1927 ret = mii_ethtool_get_link_ksettings(&smc->mii_if, ecmd);
1923 else 1928 else
1924 ret = smc_netdev_get_ecmd(dev, ecmd); 1929 ret = smc_netdev_get_ecmd(dev, ecmd);
1925 SMC_SELECT_BANK(saved_bank); 1930 SMC_SELECT_BANK(saved_bank);
@@ -1927,7 +1932,8 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1927 return ret; 1932 return ret;
1928} 1933}
1929 1934
1930static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 1935static int smc_set_link_ksettings(struct net_device *dev,
1936 const struct ethtool_link_ksettings *ecmd)
1931{ 1937{
1932 struct smc_private *smc = netdev_priv(dev); 1938 struct smc_private *smc = netdev_priv(dev);
1933 unsigned int ioaddr = dev->base_addr; 1939 unsigned int ioaddr = dev->base_addr;
@@ -1938,7 +1944,7 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1938 spin_lock_irqsave(&smc->lock, flags); 1944 spin_lock_irqsave(&smc->lock, flags);
1939 SMC_SELECT_BANK(3); 1945 SMC_SELECT_BANK(3);
1940 if (smc->cfg & CFG_MII_SELECT) 1946 if (smc->cfg & CFG_MII_SELECT)
1941 ret = mii_ethtool_sset(&smc->mii_if, ecmd); 1947 ret = mii_ethtool_set_link_ksettings(&smc->mii_if, ecmd);
1942 else 1948 else
1943 ret = smc_netdev_set_ecmd(dev, ecmd); 1949 ret = smc_netdev_set_ecmd(dev, ecmd);
1944 SMC_SELECT_BANK(saved_bank); 1950 SMC_SELECT_BANK(saved_bank);
@@ -1982,10 +1988,10 @@ static int smc_nway_reset(struct net_device *dev)
1982static const struct ethtool_ops ethtool_ops = { 1988static const struct ethtool_ops ethtool_ops = {
1983 .begin = check_if_running, 1989 .begin = check_if_running,
1984 .get_drvinfo = smc_get_drvinfo, 1990 .get_drvinfo = smc_get_drvinfo,
1985 .get_settings = smc_get_settings,
1986 .set_settings = smc_set_settings,
1987 .get_link = smc_get_link, 1991 .get_link = smc_get_link,
1988 .nway_reset = smc_nway_reset, 1992 .nway_reset = smc_nway_reset,
1993 .get_link_ksettings = smc_get_link_ksettings,
1994 .set_link_ksettings = smc_set_link_ksettings,
1989}; 1995};
1990 1996
1991static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) 1997static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
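The hunk above is part of the ethtool migration from the legacy get_settings/set_settings callbacks (struct ethtool_cmd) to get_link_ksettings/set_link_ksettings (struct ethtool_link_ksettings). As a hedged, minimal sketch — not the smc91c92_cs driver's actual code, with "struct my_priv" and its full_duplex flag invented for illustration — a converted get_link_ksettings callback typically looks like this:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* "struct my_priv" and its full_duplex flag are hypothetical. */
struct my_priv {
	bool full_duplex;
};

static int example_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	struct my_priv *priv = netdev_priv(dev);

	/* Link modes are bitmaps manipulated through helpers instead of
	 * the old SUPPORTED_* masks carried in struct ethtool_cmd.
	 */
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	/* Scalar settings moved under the "base" sub-structure. */
	cmd->base.speed = SPEED_10;
	cmd->base.duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->base.port = PORT_TP;
	cmd->base.autoneg = AUTONEG_DISABLE;

	return 0;
}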
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index 01a8c020d6db..37881f81319e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -26,12 +26,15 @@
26 26
27static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) 27static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
28{ 28{
29 struct stmmac_priv *priv = (struct stmmac_priv *)p; 29 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
30 unsigned int entry = priv->cur_tx;
31 struct dma_desc *desc = priv->dma_tx + entry;
32 unsigned int nopaged_len = skb_headlen(skb); 30 unsigned int nopaged_len = skb_headlen(skb);
31 struct stmmac_priv *priv = tx_q->priv_data;
32 unsigned int entry = tx_q->cur_tx;
33 unsigned int bmax, des2; 33 unsigned int bmax, des2;
34 unsigned int i = 1, len; 34 unsigned int i = 1, len;
35 struct dma_desc *desc;
36
37 desc = tx_q->dma_tx + entry;
35 38
36 if (priv->plat->enh_desc) 39 if (priv->plat->enh_desc)
37 bmax = BUF_SIZE_8KiB; 40 bmax = BUF_SIZE_8KiB;
@@ -45,16 +48,16 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
45 desc->des2 = cpu_to_le32(des2); 48 desc->des2 = cpu_to_le32(des2);
46 if (dma_mapping_error(priv->device, des2)) 49 if (dma_mapping_error(priv->device, des2))
47 return -1; 50 return -1;
48 priv->tx_skbuff_dma[entry].buf = des2; 51 tx_q->tx_skbuff_dma[entry].buf = des2;
49 priv->tx_skbuff_dma[entry].len = bmax; 52 tx_q->tx_skbuff_dma[entry].len = bmax;
50 /* do not close the descriptor and do not set own bit */ 53 /* do not close the descriptor and do not set own bit */
51 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE, 54 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
52 0, false); 55 0, false);
53 56
54 while (len != 0) { 57 while (len != 0) {
55 priv->tx_skbuff[entry] = NULL; 58 tx_q->tx_skbuff[entry] = NULL;
56 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 59 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
57 desc = priv->dma_tx + entry; 60 desc = tx_q->dma_tx + entry;
58 61
59 if (len > bmax) { 62 if (len > bmax) {
60 des2 = dma_map_single(priv->device, 63 des2 = dma_map_single(priv->device,
@@ -63,8 +66,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
63 desc->des2 = cpu_to_le32(des2); 66 desc->des2 = cpu_to_le32(des2);
64 if (dma_mapping_error(priv->device, des2)) 67 if (dma_mapping_error(priv->device, des2))
65 return -1; 68 return -1;
66 priv->tx_skbuff_dma[entry].buf = des2; 69 tx_q->tx_skbuff_dma[entry].buf = des2;
67 priv->tx_skbuff_dma[entry].len = bmax; 70 tx_q->tx_skbuff_dma[entry].len = bmax;
68 priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, 71 priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
69 STMMAC_CHAIN_MODE, 1, 72 STMMAC_CHAIN_MODE, 1,
70 false); 73 false);
@@ -77,8 +80,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
77 desc->des2 = cpu_to_le32(des2); 80 desc->des2 = cpu_to_le32(des2);
78 if (dma_mapping_error(priv->device, des2)) 81 if (dma_mapping_error(priv->device, des2))
79 return -1; 82 return -1;
80 priv->tx_skbuff_dma[entry].buf = des2; 83 tx_q->tx_skbuff_dma[entry].buf = des2;
81 priv->tx_skbuff_dma[entry].len = len; 84 tx_q->tx_skbuff_dma[entry].len = len;
82 /* last descriptor can be set now */ 85 /* last descriptor can be set now */
83 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, 86 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
84 STMMAC_CHAIN_MODE, 1, 87 STMMAC_CHAIN_MODE, 1,
@@ -87,7 +90,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
87 } 90 }
88 } 91 }
89 92
90 priv->cur_tx = entry; 93 tx_q->cur_tx = entry;
91 94
92 return entry; 95 return entry;
93} 96}
@@ -136,32 +139,34 @@ static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr,
136 139
137static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) 140static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
138{ 141{
139 struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; 142 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr;
143 struct stmmac_priv *priv = rx_q->priv_data;
140 144
141 if (priv->hwts_rx_en && !priv->extend_desc) 145 if (priv->hwts_rx_en && !priv->extend_desc)
142 /* NOTE: Device will overwrite des3 with timestamp value if 146 /* NOTE: Device will overwrite des3 with timestamp value if
143 * 1588-2002 time stamping is enabled, hence reinitialize it 147 * 1588-2002 time stamping is enabled, hence reinitialize it
144 * to keep explicit chaining in the descriptor. 148 * to keep explicit chaining in the descriptor.
145 */ 149 */
146 p->des3 = cpu_to_le32((unsigned int)(priv->dma_rx_phy + 150 p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
147 (((priv->dirty_rx) + 1) % 151 (((rx_q->dirty_rx) + 1) %
148 DMA_RX_SIZE) * 152 DMA_RX_SIZE) *
149 sizeof(struct dma_desc))); 153 sizeof(struct dma_desc)));
150} 154}
151 155
152static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) 156static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
153{ 157{
154 struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; 158 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
155 unsigned int entry = priv->dirty_tx; 159 struct stmmac_priv *priv = tx_q->priv_data;
160 unsigned int entry = tx_q->dirty_tx;
156 161
157 if (priv->tx_skbuff_dma[entry].last_segment && !priv->extend_desc && 162 if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
158 priv->hwts_tx_en) 163 priv->hwts_tx_en)
159 /* NOTE: Device will overwrite des3 with timestamp value if 164 /* NOTE: Device will overwrite des3 with timestamp value if
160 * 1588-2002 time stamping is enabled, hence reinitialize it 165 * 1588-2002 time stamping is enabled, hence reinitialize it
161 * to keep explicit chaining in the descriptor. 166 * to keep explicit chaining in the descriptor.
162 */ 167 */
163 p->des3 = cpu_to_le32((unsigned int)((priv->dma_tx_phy + 168 p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
164 ((priv->dirty_tx + 1) % DMA_TX_SIZE)) 169 ((tx_q->dirty_tx + 1) % DMA_TX_SIZE))
165 * sizeof(struct dma_desc))); 170 * sizeof(struct dma_desc)));
166} 171}
167 172
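The chain_mode callbacks above now receive a per-queue object (stmmac_tx_queue or stmmac_rx_queue) through their opaque pointer instead of the whole stmmac_priv, and reach the device-wide state through a priv_data back-pointer. A hedged sketch of that pattern — the field names are simplified, and the real structures in stmmac.h carry more members:

/* Illustrative only; see struct stmmac_tx_queue in stmmac.h for the
 * real layout.
 */
struct example_tx_queue {
	u32 queue_index;
	struct stmmac_priv *priv_data;	/* back-pointer to driver state */
	struct dma_desc *dma_tx;	/* this queue's descriptor ring */
	dma_addr_t dma_tx_phy;
	unsigned int cur_tx;
	unsigned int dirty_tx;
};

static struct dma_desc *example_current_desc(void *p)
{
	struct example_tx_queue *tx_q = p;		/* queue-scoped state */
	struct stmmac_priv *priv = tx_q->priv_data;	/* device-scoped state */

	/* Global knobs (e.g. priv->extend_desc) still come from priv;
	 * ring indices and descriptors now come from the queue object.
	 */
	return priv->extend_desc ? NULL : tx_q->dma_tx + tx_q->cur_tx;
}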
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 04d9245b7149..572cf8b61707 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -246,6 +246,15 @@ struct stmmac_extra_stats {
246#define STMMAC_TX_MAX_FRAMES 256 246#define STMMAC_TX_MAX_FRAMES 256
247#define STMMAC_TX_FRAMES 64 247#define STMMAC_TX_FRAMES 64
248 248
249/* Packets types */
250enum packets_types {
251 PACKET_AVCPQ = 0x1, /* AV Untagged Control packets */
252 PACKET_PTPQ = 0x2, /* PTP Packets */
253 PACKET_DCBCPQ = 0x3, /* DCB Control Packets */
254 PACKET_UPQ = 0x4, /* Untagged Packets */
255 PACKET_MCBCQ = 0x5, /* Multicast & Broadcast Packets */
256};
257
249/* Rx IPC status */ 258/* Rx IPC status */
250enum rx_frame_status { 259enum rx_frame_status {
251 good_frame = 0x0, 260 good_frame = 0x0,
@@ -324,6 +333,9 @@ struct dma_features {
324 unsigned int number_tx_queues; 333 unsigned int number_tx_queues;
325 /* Alternate (enhanced) DESC mode */ 334 /* Alternate (enhanced) DESC mode */
326 unsigned int enh_desc; 335 unsigned int enh_desc;
336 /* TX and RX FIFO sizes */
337 unsigned int tx_fifo_size;
338 unsigned int rx_fifo_size;
327}; 339};
328 340
329/* GMAC TX FIFO is 8K, Rx FIFO is 16K */ 341/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
@@ -413,6 +425,14 @@ struct stmmac_dma_ops {
413 int (*reset)(void __iomem *ioaddr); 425 int (*reset)(void __iomem *ioaddr);
414 void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, 426 void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
415 u32 dma_tx, u32 dma_rx, int atds); 427 u32 dma_tx, u32 dma_rx, int atds);
428 void (*init_chan)(void __iomem *ioaddr,
429 struct stmmac_dma_cfg *dma_cfg, u32 chan);
430 void (*init_rx_chan)(void __iomem *ioaddr,
431 struct stmmac_dma_cfg *dma_cfg,
432 u32 dma_rx_phy, u32 chan);
433 void (*init_tx_chan)(void __iomem *ioaddr,
434 struct stmmac_dma_cfg *dma_cfg,
435 u32 dma_tx_phy, u32 chan);
416 /* Configure the AXI Bus Mode Register */ 436 /* Configure the AXI Bus Mode Register */
417 void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi); 437 void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
418 /* Dump DMA registers */ 438 /* Dump DMA registers */
@@ -421,25 +441,28 @@ struct stmmac_dma_ops {
421 * An invalid value enables the store-and-forward mode */ 441 * An invalid value enables the store-and-forward mode */
422 void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode, 442 void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
423 int rxfifosz); 443 int rxfifosz);
444 void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
445 int fifosz);
446 void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel);
424 /* To track extra statistic (if supported) */ 447 /* To track extra statistic (if supported) */
425 void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, 448 void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
426 void __iomem *ioaddr); 449 void __iomem *ioaddr);
427 void (*enable_dma_transmission) (void __iomem *ioaddr); 450 void (*enable_dma_transmission) (void __iomem *ioaddr);
428 void (*enable_dma_irq) (void __iomem *ioaddr); 451 void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan);
429 void (*disable_dma_irq) (void __iomem *ioaddr); 452 void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan);
430 void (*start_tx) (void __iomem *ioaddr); 453 void (*start_tx)(void __iomem *ioaddr, u32 chan);
431 void (*stop_tx) (void __iomem *ioaddr); 454 void (*stop_tx)(void __iomem *ioaddr, u32 chan);
432 void (*start_rx) (void __iomem *ioaddr); 455 void (*start_rx)(void __iomem *ioaddr, u32 chan);
433 void (*stop_rx) (void __iomem *ioaddr); 456 void (*stop_rx)(void __iomem *ioaddr, u32 chan);
434 int (*dma_interrupt) (void __iomem *ioaddr, 457 int (*dma_interrupt) (void __iomem *ioaddr,
435 struct stmmac_extra_stats *x); 458 struct stmmac_extra_stats *x, u32 chan);
436 /* If supported then get the optional core features */ 459 /* If supported then get the optional core features */
437 void (*get_hw_feature)(void __iomem *ioaddr, 460 void (*get_hw_feature)(void __iomem *ioaddr,
438 struct dma_features *dma_cap); 461 struct dma_features *dma_cap);
439 /* Program the HW RX Watchdog */ 462 /* Program the HW RX Watchdog */
440 void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt); 463 void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan);
441 void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len); 464 void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
442 void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len); 465 void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
443 void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); 466 void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
444 void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); 467 void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
445 void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); 468 void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
@@ -454,17 +477,39 @@ struct stmmac_ops {
454 /* Enable and verify that the IPC module is supported */ 477 /* Enable and verify that the IPC module is supported */
455 int (*rx_ipc)(struct mac_device_info *hw); 478 int (*rx_ipc)(struct mac_device_info *hw);
456 /* Enable RX Queues */ 479 /* Enable RX Queues */
457 void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue); 480 void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue);
481 /* RX Queues Priority */
482 void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
483 /* TX Queues Priority */
484 void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
485 /* RX Queues Routing */
486 void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet,
487 u32 queue);
488 /* Program RX Algorithms */
489 void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg);
490 /* Program TX Algorithms */
491 void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg);
492 /* Set MTL TX queues weight */
493 void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw,
494 u32 weight, u32 queue);
495 /* RX MTL queue to RX dma mapping */
496 void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan);
497 /* Configure AV Algorithm */
498 void (*config_cbs)(struct mac_device_info *hw, u32 send_slope,
499 u32 idle_slope, u32 high_credit, u32 low_credit,
500 u32 queue);
458 /* Dump MAC registers */ 501 /* Dump MAC registers */
459 void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space); 502 void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space);
460 /* Handle extra events on specific interrupts hw dependent */ 503 /* Handle extra events on specific interrupts hw dependent */
461 int (*host_irq_status)(struct mac_device_info *hw, 504 int (*host_irq_status)(struct mac_device_info *hw,
462 struct stmmac_extra_stats *x); 505 struct stmmac_extra_stats *x);
506 /* Handle MTL interrupts */
507 int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan);
463 /* Multicast filter setting */ 508 /* Multicast filter setting */
464 void (*set_filter)(struct mac_device_info *hw, struct net_device *dev); 509 void (*set_filter)(struct mac_device_info *hw, struct net_device *dev);
465 /* Flow control setting */ 510 /* Flow control setting */
466 void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex, 511 void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex,
467 unsigned int fc, unsigned int pause_time); 512 unsigned int fc, unsigned int pause_time, u32 tx_cnt);
468 /* Set power management mode (e.g. magic frame) */ 513 /* Set power management mode (e.g. magic frame) */
469 void (*pmt)(struct mac_device_info *hw, unsigned long mode); 514 void (*pmt)(struct mac_device_info *hw, unsigned long mode);
470 /* Set/Get Unicast MAC addresses */ 515 /* Set/Get Unicast MAC addresses */
@@ -477,7 +522,8 @@ struct stmmac_ops {
477 void (*reset_eee_mode)(struct mac_device_info *hw); 522 void (*reset_eee_mode)(struct mac_device_info *hw);
478 void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); 523 void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
479 void (*set_eee_pls)(struct mac_device_info *hw, int link); 524 void (*set_eee_pls)(struct mac_device_info *hw, int link);
480 void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x); 525 void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x,
526 u32 rx_queues, u32 tx_queues);
481 /* PCS calls */ 527 /* PCS calls */
482 void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral, 528 void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
483 bool loopback); 529 bool loopback);
@@ -547,6 +593,11 @@ struct mac_device_info {
547 unsigned int ps; 593 unsigned int ps;
548}; 594};
549 595
596struct stmmac_rx_routing {
597 u32 reg_mask;
598 u32 reg_shift;
599};
600
550struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, 601struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
551 int perfect_uc_entries, 602 int perfect_uc_entries,
552 int *synopsys_id); 603 int *synopsys_id);
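The new stmmac_ops hooks above are all per-queue. A hedged sketch of how the core side is expected to drive them during setup; the hook names come from the header, while rx_queues_to_use and the per-queue config array are assumptions about the platform data rather than code from this patch:

static void example_configure_rx_queues(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;	/* assumed plat field */
	u32 queue;

	for (queue = 0; queue < rx_count; queue++) {
		/* assumed per-queue config: DCB vs AVB mode */
		u8 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;

		priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
		/* simplest policy: map RX queue N onto DMA channel N */
		priv->hw->mac->map_mtl_to_dma(priv->hw, queue, queue);
	}
}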
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 1a3fa3d9f855..dd6a2f9791cc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -14,16 +14,34 @@
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/clk-provider.h> 15#include <linux/clk-provider.h>
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/gpio/consumer.h>
17#include <linux/ethtool.h> 18#include <linux/ethtool.h>
18#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/iopoll.h>
19#include <linux/ioport.h> 21#include <linux/ioport.h>
20#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/of_device.h>
21#include <linux/of_net.h> 24#include <linux/of_net.h>
22#include <linux/mfd/syscon.h> 25#include <linux/mfd/syscon.h>
23#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/reset.h>
24#include <linux/stmmac.h> 28#include <linux/stmmac.h>
25 29
26#include "stmmac_platform.h" 30#include "stmmac_platform.h"
31#include "dwmac4.h"
32
33struct tegra_eqos {
34 struct device *dev;
35 void __iomem *regs;
36
37 struct reset_control *rst;
38 struct clk *clk_master;
39 struct clk *clk_slave;
40 struct clk *clk_tx;
41 struct clk *clk_rx;
42
43 struct gpio_desc *reset;
44};
27 45
28static int dwc_eth_dwmac_config_dt(struct platform_device *pdev, 46static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
29 struct plat_stmmacenet_data *plat_dat) 47 struct plat_stmmacenet_data *plat_dat)
@@ -106,13 +124,309 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
106 return 0; 124 return 0;
107} 125}
108 126
127static void *dwc_qos_probe(struct platform_device *pdev,
128 struct plat_stmmacenet_data *plat_dat,
129 struct stmmac_resources *stmmac_res)
130{
131 int err;
132
133 plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
134 if (IS_ERR(plat_dat->stmmac_clk)) {
135 dev_err(&pdev->dev, "apb_pclk clock not found.\n");
136 return ERR_CAST(plat_dat->stmmac_clk);
137 }
138
139 err = clk_prepare_enable(plat_dat->stmmac_clk);
140 if (err < 0) {
141 dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n",
142 err);
143 return ERR_PTR(err);
144 }
145
146 plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
147 if (IS_ERR(plat_dat->pclk)) {
148 dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
149 err = PTR_ERR(plat_dat->pclk);
150 goto disable;
151 }
152
153 err = clk_prepare_enable(plat_dat->pclk);
154 if (err < 0) {
155 dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n",
156 err);
157 goto disable;
158 }
159
160 return NULL;
161
162disable:
163 clk_disable_unprepare(plat_dat->stmmac_clk);
164 return ERR_PTR(err);
165}
166
167static int dwc_qos_remove(struct platform_device *pdev)
168{
169 struct net_device *ndev = platform_get_drvdata(pdev);
170 struct stmmac_priv *priv = netdev_priv(ndev);
171
172 clk_disable_unprepare(priv->plat->pclk);
173 clk_disable_unprepare(priv->plat->stmmac_clk);
174
175 return 0;
176}
177
178#define SDMEMCOMPPADCTRL 0x8800
179#define SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31)
180
181#define AUTO_CAL_CONFIG 0x8804
182#define AUTO_CAL_CONFIG_START BIT(31)
183#define AUTO_CAL_CONFIG_ENABLE BIT(29)
184
185#define AUTO_CAL_STATUS 0x880c
186#define AUTO_CAL_STATUS_ACTIVE BIT(31)
187
188static void tegra_eqos_fix_speed(void *priv, unsigned int speed)
189{
190 struct tegra_eqos *eqos = priv;
191 unsigned long rate = 125000000;
192 bool needs_calibration = false;
193 u32 value;
194 int err;
195
196 switch (speed) {
197 case SPEED_1000:
198 needs_calibration = true;
199 rate = 125000000;
200 break;
201
202 case SPEED_100:
203 needs_calibration = true;
204 rate = 25000000;
205 break;
206
207 case SPEED_10:
208 rate = 2500000;
209 break;
210
211 default:
212 dev_err(eqos->dev, "invalid speed %u\n", speed);
213 break;
214 }
215
216 if (needs_calibration) {
217 /* calibrate */
218 value = readl(eqos->regs + SDMEMCOMPPADCTRL);
219 value |= SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
220 writel(value, eqos->regs + SDMEMCOMPPADCTRL);
221
222 udelay(1);
223
224 value = readl(eqos->regs + AUTO_CAL_CONFIG);
225 value |= AUTO_CAL_CONFIG_START | AUTO_CAL_CONFIG_ENABLE;
226 writel(value, eqos->regs + AUTO_CAL_CONFIG);
227
228 err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
229 value,
230 value & AUTO_CAL_STATUS_ACTIVE,
231 1, 10);
232 if (err < 0) {
233 dev_err(eqos->dev, "calibration did not start\n");
234 goto failed;
235 }
236
237 err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
238 value,
239 (value & AUTO_CAL_STATUS_ACTIVE) == 0,
240 20, 200);
241 if (err < 0) {
242 dev_err(eqos->dev, "calibration didn't finish\n");
243 goto failed;
244 }
245
246 failed:
247 value = readl(eqos->regs + SDMEMCOMPPADCTRL);
248 value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
249 writel(value, eqos->regs + SDMEMCOMPPADCTRL);
250 } else {
251 value = readl(eqos->regs + AUTO_CAL_CONFIG);
252 value &= ~AUTO_CAL_CONFIG_ENABLE;
253 writel(value, eqos->regs + AUTO_CAL_CONFIG);
254 }
255
256 err = clk_set_rate(eqos->clk_tx, rate);
257 if (err < 0)
258 dev_err(eqos->dev, "failed to set TX rate: %d\n", err);
259}
260
261static int tegra_eqos_init(struct platform_device *pdev, void *priv)
262{
263 struct tegra_eqos *eqos = priv;
264 unsigned long rate;
265 u32 value;
266
267 rate = clk_get_rate(eqos->clk_slave);
268
269 value = (rate / 1000000) - 1;
270 writel(value, eqos->regs + GMAC_1US_TIC_COUNTER);
271
272 return 0;
273}
274
275static void *tegra_eqos_probe(struct platform_device *pdev,
276 struct plat_stmmacenet_data *data,
277 struct stmmac_resources *res)
278{
279 struct tegra_eqos *eqos;
280 int err;
281
282 eqos = devm_kzalloc(&pdev->dev, sizeof(*eqos), GFP_KERNEL);
283 if (!eqos) {
284 err = -ENOMEM;
285 goto error;
286 }
287
288 eqos->dev = &pdev->dev;
289 eqos->regs = res->addr;
290
291 eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
292 if (IS_ERR(eqos->clk_master)) {
293 err = PTR_ERR(eqos->clk_master);
294 goto error;
295 }
296
297 err = clk_prepare_enable(eqos->clk_master);
298 if (err < 0)
299 goto error;
300
301 eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus");
302 if (IS_ERR(eqos->clk_slave)) {
303 err = PTR_ERR(eqos->clk_slave);
304 goto disable_master;
305 }
306
307 data->stmmac_clk = eqos->clk_slave;
308
309 err = clk_prepare_enable(eqos->clk_slave);
310 if (err < 0)
311 goto disable_master;
312
313 eqos->clk_rx = devm_clk_get(&pdev->dev, "rx");
314 if (IS_ERR(eqos->clk_rx)) {
315 err = PTR_ERR(eqos->clk_rx);
316 goto disable_slave;
317 }
318
319 err = clk_prepare_enable(eqos->clk_rx);
320 if (err < 0)
321 goto disable_slave;
322
323 eqos->clk_tx = devm_clk_get(&pdev->dev, "tx");
324 if (IS_ERR(eqos->clk_tx)) {
325 err = PTR_ERR(eqos->clk_tx);
326 goto disable_rx;
327 }
328
329 err = clk_prepare_enable(eqos->clk_tx);
330 if (err < 0)
331 goto disable_rx;
332
333 eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH);
334 if (IS_ERR(eqos->reset)) {
335 err = PTR_ERR(eqos->reset);
336 goto disable_tx;
337 }
338
339 usleep_range(2000, 4000);
340 gpiod_set_value(eqos->reset, 0);
341
342 eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
343 if (IS_ERR(eqos->rst)) {
344 err = PTR_ERR(eqos->rst);
345 goto reset_phy;
346 }
347
348 err = reset_control_assert(eqos->rst);
349 if (err < 0)
350 goto reset_phy;
351
352 usleep_range(2000, 4000);
353
354 err = reset_control_deassert(eqos->rst);
355 if (err < 0)
356 goto reset_phy;
357
358 usleep_range(2000, 4000);
359
360 data->fix_mac_speed = tegra_eqos_fix_speed;
361 data->init = tegra_eqos_init;
362 data->bsp_priv = eqos;
363
364 err = tegra_eqos_init(pdev, eqos);
365 if (err < 0)
366 goto reset;
367
368out:
369 return eqos;
370
371reset:
372 reset_control_assert(eqos->rst);
373reset_phy:
374 gpiod_set_value(eqos->reset, 1);
375disable_tx:
376 clk_disable_unprepare(eqos->clk_tx);
377disable_rx:
378 clk_disable_unprepare(eqos->clk_rx);
379disable_slave:
380 clk_disable_unprepare(eqos->clk_slave);
381disable_master:
382 clk_disable_unprepare(eqos->clk_master);
383error:
384 eqos = ERR_PTR(err);
385 goto out;
386}
387
388static int tegra_eqos_remove(struct platform_device *pdev)
389{
390 struct tegra_eqos *eqos = get_stmmac_bsp_priv(&pdev->dev);
391
392 reset_control_assert(eqos->rst);
393 gpiod_set_value(eqos->reset, 1);
394 clk_disable_unprepare(eqos->clk_tx);
395 clk_disable_unprepare(eqos->clk_rx);
396 clk_disable_unprepare(eqos->clk_slave);
397 clk_disable_unprepare(eqos->clk_master);
398
399 return 0;
400}
401
402struct dwc_eth_dwmac_data {
403 void *(*probe)(struct platform_device *pdev,
404 struct plat_stmmacenet_data *data,
405 struct stmmac_resources *res);
406 int (*remove)(struct platform_device *pdev);
407};
408
409static const struct dwc_eth_dwmac_data dwc_qos_data = {
410 .probe = dwc_qos_probe,
411 .remove = dwc_qos_remove,
412};
413
414static const struct dwc_eth_dwmac_data tegra_eqos_data = {
415 .probe = tegra_eqos_probe,
416 .remove = tegra_eqos_remove,
417};
418
109static int dwc_eth_dwmac_probe(struct platform_device *pdev) 419static int dwc_eth_dwmac_probe(struct platform_device *pdev)
110{ 420{
421 const struct dwc_eth_dwmac_data *data;
111 struct plat_stmmacenet_data *plat_dat; 422 struct plat_stmmacenet_data *plat_dat;
112 struct stmmac_resources stmmac_res; 423 struct stmmac_resources stmmac_res;
113 struct resource *res; 424 struct resource *res;
425 void *priv;
114 int ret; 426 int ret;
115 427
428 data = of_device_get_match_data(&pdev->dev);
429
116 memset(&stmmac_res, 0, sizeof(struct stmmac_resources)); 430 memset(&stmmac_res, 0, sizeof(struct stmmac_resources));
117 431
118 /** 432 /**
@@ -138,39 +452,26 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
138 if (IS_ERR(plat_dat)) 452 if (IS_ERR(plat_dat))
139 return PTR_ERR(plat_dat); 453 return PTR_ERR(plat_dat);
140 454
141 plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk"); 455 priv = data->probe(pdev, plat_dat, &stmmac_res);
142 if (IS_ERR(plat_dat->stmmac_clk)) { 456 if (IS_ERR(priv)) {
143 dev_err(&pdev->dev, "apb_pclk clock not found.\n"); 457 ret = PTR_ERR(priv);
144 ret = PTR_ERR(plat_dat->stmmac_clk); 458 dev_err(&pdev->dev, "failed to probe subdriver: %d\n", ret);
145 plat_dat->stmmac_clk = NULL; 459 goto remove_config;
146 goto err_remove_config_dt;
147 }
148 clk_prepare_enable(plat_dat->stmmac_clk);
149
150 plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
151 if (IS_ERR(plat_dat->pclk)) {
152 dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
153 ret = PTR_ERR(plat_dat->pclk);
154 plat_dat->pclk = NULL;
155 goto err_out_clk_dis_phy;
156 } 460 }
157 clk_prepare_enable(plat_dat->pclk);
158 461
159 ret = dwc_eth_dwmac_config_dt(pdev, plat_dat); 462 ret = dwc_eth_dwmac_config_dt(pdev, plat_dat);
160 if (ret) 463 if (ret)
161 goto err_out_clk_dis_aper; 464 goto remove;
162 465
163 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 466 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
164 if (ret) 467 if (ret)
165 goto err_out_clk_dis_aper; 468 goto remove;
166 469
167 return 0; 470 return ret;
168 471
169err_out_clk_dis_aper: 472remove:
170 clk_disable_unprepare(plat_dat->pclk); 473 data->remove(pdev);
171err_out_clk_dis_phy: 474remove_config:
172 clk_disable_unprepare(plat_dat->stmmac_clk);
173err_remove_config_dt:
174 stmmac_remove_config_dt(pdev, plat_dat); 475 stmmac_remove_config_dt(pdev, plat_dat);
175 476
176 return ret; 477 return ret;
@@ -178,11 +479,29 @@ err_remove_config_dt:
178 479
179static int dwc_eth_dwmac_remove(struct platform_device *pdev) 480static int dwc_eth_dwmac_remove(struct platform_device *pdev)
180{ 481{
181 return stmmac_pltfr_remove(pdev); 482 struct net_device *ndev = platform_get_drvdata(pdev);
483 struct stmmac_priv *priv = netdev_priv(ndev);
484 const struct dwc_eth_dwmac_data *data;
485 int err;
486
487 data = of_device_get_match_data(&pdev->dev);
488
489 err = stmmac_dvr_remove(&pdev->dev);
490 if (err < 0)
491 dev_err(&pdev->dev, "failed to remove platform: %d\n", err);
492
493 err = data->remove(pdev);
494 if (err < 0)
495 dev_err(&pdev->dev, "failed to remove subdriver: %d\n", err);
496
497 stmmac_remove_config_dt(pdev, priv->plat);
498
499 return err;
182} 500}
183 501
184static const struct of_device_id dwc_eth_dwmac_match[] = { 502static const struct of_device_id dwc_eth_dwmac_match[] = {
185 { .compatible = "snps,dwc-qos-ethernet-4.10", }, 503 { .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data },
504 { .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data },
186 { } 505 { }
187}; 506};
188MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match); 507MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
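dwc_eth_dwmac_probe() now selects the glue-specific probe/remove pair through the .data pointer of the OF match table. A hedged, generic sketch of that match-data dispatch pattern; the names here are invented, only of_device_get_match_data() is the real API:

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_variant_data {
	int (*init)(struct platform_device *pdev);
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_variant_data *data;

	/* Returns the .data pointer of the matching of_device_id entry. */
	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -EINVAL;

	return data->init(pdev);
}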
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 19b9b3087099..7f78f7746a5b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -216,7 +216,8 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
216 216
217 217
218static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, 218static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
219 unsigned int fc, unsigned int pause_time) 219 unsigned int fc, unsigned int pause_time,
220 u32 tx_cnt)
220{ 221{
221 void __iomem *ioaddr = hw->pcsr; 222 void __iomem *ioaddr = hw->pcsr;
222 /* Set flow such that DZPQ in Mac Register 6 is 0, 223 /* Set flow such that DZPQ in Mac Register 6 is 0,
@@ -412,7 +413,8 @@ static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
412 dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv); 413 dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
413} 414}
414 415
415static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) 416static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
417 u32 rx_queues, u32 tx_queues)
416{ 418{
417 u32 value = readl(ioaddr + GMAC_DEBUG); 419 u32 value = readl(ioaddr + GMAC_DEBUG);
418 420
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index d3654a447046..471a9aa6ac94 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -247,7 +247,8 @@ static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
247 dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24; 247 dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
248} 248}
249 249
250static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt) 250static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
251 u32 number_chan)
251{ 252{
252 writel(riwt, ioaddr + DMA_RX_WATCHDOG); 253 writel(riwt, ioaddr + DMA_RX_WATCHDOG);
253} 254}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index e370ccec6176..524135e6dd89 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -131,7 +131,8 @@ static void dwmac100_set_filter(struct mac_device_info *hw,
131} 131}
132 132
133static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, 133static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
134 unsigned int fc, unsigned int pause_time) 134 unsigned int fc, unsigned int pause_time,
135 u32 tx_cnt)
135{ 136{
136 void __iomem *ioaddr = hw->pcsr; 137 void __iomem *ioaddr = hw->pcsr;
137 unsigned int flow = MAC_FLOW_CTRL_ENABLE; 138 unsigned int flow = MAC_FLOW_CTRL_ENABLE;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index db45134fddf0..d74cedf2a397 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -22,9 +22,15 @@
22#define GMAC_HASH_TAB_32_63 0x00000014 22#define GMAC_HASH_TAB_32_63 0x00000014
23#define GMAC_RX_FLOW_CTRL 0x00000090 23#define GMAC_RX_FLOW_CTRL 0x00000090
24#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4) 24#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
25#define GMAC_TXQ_PRTY_MAP0 0x98
26#define GMAC_TXQ_PRTY_MAP1 0x9C
25#define GMAC_RXQ_CTRL0 0x000000a0 27#define GMAC_RXQ_CTRL0 0x000000a0
28#define GMAC_RXQ_CTRL1 0x000000a4
29#define GMAC_RXQ_CTRL2 0x000000a8
30#define GMAC_RXQ_CTRL3 0x000000ac
26#define GMAC_INT_STATUS 0x000000b0 31#define GMAC_INT_STATUS 0x000000b0
27#define GMAC_INT_EN 0x000000b4 32#define GMAC_INT_EN 0x000000b4
33#define GMAC_1US_TIC_COUNTER 0x000000dc
28#define GMAC_PCS_BASE 0x000000e0 34#define GMAC_PCS_BASE 0x000000e0
29#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8 35#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8
30#define GMAC_PMT 0x000000c0 36#define GMAC_PMT 0x000000c0
@@ -38,6 +44,22 @@
38#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8) 44#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8)
39#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8) 45#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8)
40 46
47/* RX Queues Routing */
48#define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0)
49#define GMAC_RXQCTRL_AVCPQ_SHIFT 0
50#define GMAC_RXQCTRL_PTPQ_MASK GENMASK(6, 4)
51#define GMAC_RXQCTRL_PTPQ_SHIFT 4
52#define GMAC_RXQCTRL_DCBCPQ_MASK GENMASK(10, 8)
53#define GMAC_RXQCTRL_DCBCPQ_SHIFT 8
54#define GMAC_RXQCTRL_UPQ_MASK GENMASK(14, 12)
55#define GMAC_RXQCTRL_UPQ_SHIFT 12
56#define GMAC_RXQCTRL_MCBCQ_MASK GENMASK(18, 16)
57#define GMAC_RXQCTRL_MCBCQ_SHIFT 16
58#define GMAC_RXQCTRL_MCBCQEN BIT(20)
59#define GMAC_RXQCTRL_MCBCQEN_SHIFT 20
60#define GMAC_RXQCTRL_TACPQE BIT(21)
61#define GMAC_RXQCTRL_TACPQE_SHIFT 21
62
41/* MAC Packet Filtering */ 63/* MAC Packet Filtering */
42#define GMAC_PACKET_FILTER_PR BIT(0) 64#define GMAC_PACKET_FILTER_PR BIT(0)
43#define GMAC_PACKET_FILTER_HMC BIT(2) 65#define GMAC_PACKET_FILTER_HMC BIT(2)
@@ -53,6 +75,14 @@
53/* MAC Flow Control RX */ 75/* MAC Flow Control RX */
54#define GMAC_RX_FLOW_CTRL_RFE BIT(0) 76#define GMAC_RX_FLOW_CTRL_RFE BIT(0)
55 77
78/* RX Queues Priorities */
79#define GMAC_RXQCTRL_PSRQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8))
80#define GMAC_RXQCTRL_PSRQX_SHIFT(x) ((x) * 8)
81
82/* TX Queues Priorities */
83#define GMAC_TXQCTRL_PSTQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8))
84#define GMAC_TXQCTRL_PSTQX_SHIFT(x) ((x) * 8)
85
56/* MAC Flow Control TX */ 86/* MAC Flow Control TX */
57#define GMAC_TX_FLOW_CTRL_TFE BIT(1) 87#define GMAC_TX_FLOW_CTRL_TFE BIT(1)
58#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16 88#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16
@@ -148,6 +178,8 @@ enum power_event {
148/* MAC HW features1 bitmap */ 178/* MAC HW features1 bitmap */
149#define GMAC_HW_FEAT_AVSEL BIT(20) 179#define GMAC_HW_FEAT_AVSEL BIT(20)
150#define GMAC_HW_TSOEN BIT(18) 180#define GMAC_HW_TSOEN BIT(18)
181#define GMAC_HW_TXFIFOSIZE GENMASK(10, 6)
182#define GMAC_HW_RXFIFOSIZE GENMASK(4, 0)
151 183
152/* MAC HW features2 bitmap */ 184/* MAC HW features2 bitmap */
153#define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18) 185#define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18)
@@ -161,8 +193,25 @@ enum power_event {
161#define GMAC_HI_REG_AE BIT(31) 193#define GMAC_HI_REG_AE BIT(31)
162 194
163/* MTL registers */ 195/* MTL registers */
196#define MTL_OPERATION_MODE 0x00000c00
197#define MTL_OPERATION_SCHALG_MASK GENMASK(6, 5)
198#define MTL_OPERATION_SCHALG_WRR (0x0 << 5)
199#define MTL_OPERATION_SCHALG_WFQ (0x1 << 5)
200#define MTL_OPERATION_SCHALG_DWRR (0x2 << 5)
201#define MTL_OPERATION_SCHALG_SP (0x3 << 5)
202#define MTL_OPERATION_RAA BIT(2)
203#define MTL_OPERATION_RAA_SP (0x0 << 2)
204#define MTL_OPERATION_RAA_WSP (0x1 << 2)
205
164#define MTL_INT_STATUS 0x00000c20 206#define MTL_INT_STATUS 0x00000c20
165#define MTL_INT_Q0 BIT(0) 207#define MTL_INT_QX(x) BIT(x)
208
209#define MTL_RXQ_DMA_MAP0 0x00000c30 /* queue 0 to 3 */
210#define MTL_RXQ_DMA_MAP1 0x00000c34 /* queue 4 to 7 */
211#define MTL_RXQ_DMA_Q04MDMACH_MASK GENMASK(3, 0)
212#define MTL_RXQ_DMA_Q04MDMACH(x) ((x) << 0)
213#define MTL_RXQ_DMA_QXMDMACH_MASK(x) GENMASK(11 + (8 * ((x) - 1)), 8 * (x))
214#define MTL_RXQ_DMA_QXMDMACH(chan, q) ((chan) << (8 * (q)))
166 215
167#define MTL_CHAN_BASE_ADDR 0x00000d00 216#define MTL_CHAN_BASE_ADDR 0x00000d00
168#define MTL_CHAN_BASE_OFFSET 0x40 217#define MTL_CHAN_BASE_OFFSET 0x40
@@ -180,6 +229,7 @@ enum power_event {
180#define MTL_OP_MODE_TSF BIT(1) 229#define MTL_OP_MODE_TSF BIT(1)
181 230
182#define MTL_OP_MODE_TQS_MASK GENMASK(24, 16) 231#define MTL_OP_MODE_TQS_MASK GENMASK(24, 16)
232#define MTL_OP_MODE_TQS_SHIFT 16
183 233
184#define MTL_OP_MODE_TTC_MASK 0x70 234#define MTL_OP_MODE_TTC_MASK 0x70
185#define MTL_OP_MODE_TTC_SHIFT 4 235#define MTL_OP_MODE_TTC_SHIFT 4
@@ -193,6 +243,17 @@ enum power_event {
193#define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT) 243#define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT)
194#define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT) 244#define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT)
195 245
246#define MTL_OP_MODE_RQS_MASK GENMASK(29, 20)
247#define MTL_OP_MODE_RQS_SHIFT 20
248
249#define MTL_OP_MODE_RFD_MASK GENMASK(19, 14)
250#define MTL_OP_MODE_RFD_SHIFT 14
251
252#define MTL_OP_MODE_RFA_MASK GENMASK(13, 8)
253#define MTL_OP_MODE_RFA_SHIFT 8
254
255#define MTL_OP_MODE_EHFC BIT(7)
256
196#define MTL_OP_MODE_RTC_MASK 0x18 257#define MTL_OP_MODE_RTC_MASK 0x18
197#define MTL_OP_MODE_RTC_SHIFT 3 258#define MTL_OP_MODE_RTC_SHIFT 3
198 259
@@ -201,6 +262,46 @@ enum power_event {
201#define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT) 262#define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT)
202#define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT) 263#define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT)
203 264
265/* MTL ETS Control register */
266#define MTL_ETS_CTRL_BASE_ADDR 0x00000d10
267#define MTL_ETS_CTRL_BASE_OFFSET 0x40
268#define MTL_ETSX_CTRL_BASE_ADDR(x) (MTL_ETS_CTRL_BASE_ADDR + \
269 ((x) * MTL_ETS_CTRL_BASE_OFFSET))
270
271#define MTL_ETS_CTRL_CC BIT(3)
272#define MTL_ETS_CTRL_AVALG BIT(2)
273
274/* MTL Queue Quantum Weight */
275#define MTL_TXQ_WEIGHT_BASE_ADDR 0x00000d18
276#define MTL_TXQ_WEIGHT_BASE_OFFSET 0x40
277#define MTL_TXQX_WEIGHT_BASE_ADDR(x) (MTL_TXQ_WEIGHT_BASE_ADDR + \
278 ((x) * MTL_TXQ_WEIGHT_BASE_OFFSET))
279#define MTL_TXQ_WEIGHT_ISCQW_MASK GENMASK(20, 0)
280
281/* MTL sendSlopeCredit register */
282#define MTL_SEND_SLP_CRED_BASE_ADDR 0x00000d1c
283#define MTL_SEND_SLP_CRED_OFFSET 0x40
284#define MTL_SEND_SLP_CREDX_BASE_ADDR(x) (MTL_SEND_SLP_CRED_BASE_ADDR + \
285 ((x) * MTL_SEND_SLP_CRED_OFFSET))
286
287#define MTL_SEND_SLP_CRED_SSC_MASK GENMASK(13, 0)
288
289/* MTL hiCredit register */
290#define MTL_HIGH_CRED_BASE_ADDR 0x00000d20
291#define MTL_HIGH_CRED_OFFSET 0x40
292#define MTL_HIGH_CREDX_BASE_ADDR(x) (MTL_HIGH_CRED_BASE_ADDR + \
293 ((x) * MTL_HIGH_CRED_OFFSET))
294
295#define MTL_HIGH_CRED_HC_MASK GENMASK(28, 0)
296
297/* MTL loCredit register */
298#define MTL_LOW_CRED_BASE_ADDR 0x00000d24
299#define MTL_LOW_CRED_OFFSET 0x40
300#define MTL_LOW_CREDX_BASE_ADDR(x) (MTL_LOW_CRED_BASE_ADDR + \
301 ((x) * MTL_LOW_CRED_OFFSET))
302
303#define MTL_HIGH_CRED_LC_MASK GENMASK(28, 0)
304
204/* MTL debug */ 305/* MTL debug */
205#define MTL_DEBUG_TXSTSFSTS BIT(5) 306#define MTL_DEBUG_TXSTSFSTS BIT(5)
206#define MTL_DEBUG_TXFSTS BIT(4) 307#define MTL_DEBUG_TXFSTS BIT(4)
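The per-queue priority fields added above pack one byte per queue into a 32-bit register: GMAC_RXQCTRL_PSRQX_MASK(q) expands to GENMASK(8*q + 7, 8*q), so queue 0 uses bits 7:0, queue 1 bits 15:8, and so on, with RXQ_CTRL2 covering queues 0-3 and RXQ_CTRL3 queues 4-7. A hedged helper showing the read-modify-write for a queue index within a single register (0-3); the driver's dwmac4_rx_queue_priority() performs the equivalent update on the register value directly:

static u32 example_set_rx_queue_prio(u32 ctrl, u32 prio, u32 queue)
{
	/* e.g. queue == 1: mask GENMASK(15, 8), shift 8 */
	ctrl &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
	ctrl |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
		GMAC_RXQCTRL_PSRQX_MASK(queue);
	return ctrl;
}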
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 1e79e6529c4a..40ce20218402 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -59,17 +59,211 @@ static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
59 writel(value, ioaddr + GMAC_INT_EN); 59 writel(value, ioaddr + GMAC_INT_EN);
60} 60}
61 61
62static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue) 62static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
63 u8 mode, u32 queue)
63{ 64{
64 void __iomem *ioaddr = hw->pcsr; 65 void __iomem *ioaddr = hw->pcsr;
65 u32 value = readl(ioaddr + GMAC_RXQ_CTRL0); 66 u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
66 67
67 value &= GMAC_RX_QUEUE_CLEAR(queue); 68 value &= GMAC_RX_QUEUE_CLEAR(queue);
68 value |= GMAC_RX_AV_QUEUE_ENABLE(queue); 69 if (mode == MTL_QUEUE_AVB)
70 value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
71 else if (mode == MTL_QUEUE_DCB)
72 value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
69 73
70 writel(value, ioaddr + GMAC_RXQ_CTRL0); 74 writel(value, ioaddr + GMAC_RXQ_CTRL0);
71} 75}
72 76
77static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
78 u32 prio, u32 queue)
79{
80 void __iomem *ioaddr = hw->pcsr;
81 u32 base_register;
82 u32 value;
83
84 base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
85
86 value = readl(ioaddr + base_register);
87
88 value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
89 value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
90 GMAC_RXQCTRL_PSRQX_MASK(queue);
91 writel(value, ioaddr + base_register);
92}
93
94static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
95 u32 prio, u32 queue)
96{
97 void __iomem *ioaddr = hw->pcsr;
98 u32 base_register;
99 u32 value;
100
101 base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
102
103 value = readl(ioaddr + base_register);
104
105 value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
106 value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
107 GMAC_TXQCTRL_PSTQX_MASK(queue);
108
109 writel(value, ioaddr + base_register);
110}
111
112static void dwmac4_tx_queue_routing(struct mac_device_info *hw,
113 u8 packet, u32 queue)
114{
115 void __iomem *ioaddr = hw->pcsr;
116 u32 value;
117
118 const struct stmmac_rx_routing route_possibilities[] = {
119 { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
120 { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
121 { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
122 { GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
123 { GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
124 };
125
126 value = readl(ioaddr + GMAC_RXQ_CTRL1);
127
128 /* routing configuration */
129 value &= ~route_possibilities[packet - 1].reg_mask;
130 value |= (queue << route_possibilities[packet-1].reg_shift) &
131 route_possibilities[packet - 1].reg_mask;
132
133 /* some packets require extra ops */
134 if (packet == PACKET_AVCPQ) {
135 value &= ~GMAC_RXQCTRL_TACPQE;
136 value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
137 } else if (packet == PACKET_MCBCQ) {
138 value &= ~GMAC_RXQCTRL_MCBCQEN;
139 value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
140 }
141
142 writel(value, ioaddr + GMAC_RXQ_CTRL1);
143}
144
145static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
146 u32 rx_alg)
147{
148 void __iomem *ioaddr = hw->pcsr;
149 u32 value = readl(ioaddr + MTL_OPERATION_MODE);
150
151 value &= ~MTL_OPERATION_RAA;
152 switch (rx_alg) {
153 case MTL_RX_ALGORITHM_SP:
154 value |= MTL_OPERATION_RAA_SP;
155 break;
156 case MTL_RX_ALGORITHM_WSP:
157 value |= MTL_OPERATION_RAA_WSP;
158 break;
159 default:
160 break;
161 }
162
163 writel(value, ioaddr + MTL_OPERATION_MODE);
164}
165
166static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
167 u32 tx_alg)
168{
169 void __iomem *ioaddr = hw->pcsr;
170 u32 value = readl(ioaddr + MTL_OPERATION_MODE);
171
172 value &= ~MTL_OPERATION_SCHALG_MASK;
173 switch (tx_alg) {
174 case MTL_TX_ALGORITHM_WRR:
175 value |= MTL_OPERATION_SCHALG_WRR;
176 break;
177 case MTL_TX_ALGORITHM_WFQ:
178 value |= MTL_OPERATION_SCHALG_WFQ;
179 break;
180 case MTL_TX_ALGORITHM_DWRR:
181 value |= MTL_OPERATION_SCHALG_DWRR;
182 break;
183 case MTL_TX_ALGORITHM_SP:
184 value |= MTL_OPERATION_SCHALG_SP;
185 break;
186 default:
187 break;
188 }
189}
190
191static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
192 u32 weight, u32 queue)
193{
194 void __iomem *ioaddr = hw->pcsr;
195 u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
196
197 value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
198 value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
199 writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
200}
201
202static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
203{
204 void __iomem *ioaddr = hw->pcsr;
205 u32 value;
206
207 if (queue < 4)
208 value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
209 else
210 value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
211
212 if (queue == 0 || queue == 4) {
213 value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
214 value |= MTL_RXQ_DMA_Q04MDMACH(chan);
215 } else {
216 value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
217 value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
218 }
219
220 if (queue < 4)
221 writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
222 else
223 writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
224}
225
226static void dwmac4_config_cbs(struct mac_device_info *hw,
227 u32 send_slope, u32 idle_slope,
228 u32 high_credit, u32 low_credit, u32 queue)
229{
230 void __iomem *ioaddr = hw->pcsr;
231 u32 value;
232
233 pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
234 pr_debug("\tsend_slope: 0x%08x\n", send_slope);
235 pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
236 pr_debug("\thigh_credit: 0x%08x\n", high_credit);
237 pr_debug("\tlow_credit: 0x%08x\n", low_credit);
238
239 /* enable AV algorithm */
240 value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
241 value |= MTL_ETS_CTRL_AVALG;
242 value |= MTL_ETS_CTRL_CC;
243 writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
244
245 /* configure send slope */
246 value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
247 value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
248 value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
249 writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
250
251 /* configure idle slope (same register as tx weight) */
252 dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);
253
254 /* configure high credit */
255 value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
256 value &= ~MTL_HIGH_CRED_HC_MASK;
257 value |= high_credit & MTL_HIGH_CRED_HC_MASK;
258 writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
259
 260 /* configure low credit */
261 value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
262 value &= ~MTL_HIGH_CRED_LC_MASK;
263 value |= low_credit & MTL_HIGH_CRED_LC_MASK;
264 writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
265}
266
73static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space) 267static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
74{ 268{
75 void __iomem *ioaddr = hw->pcsr; 269 void __iomem *ioaddr = hw->pcsr;
@@ -251,11 +445,12 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
251} 445}
252 446
253static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, 447static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
254 unsigned int fc, unsigned int pause_time) 448 unsigned int fc, unsigned int pause_time,
449 u32 tx_cnt)
255{ 450{
256 void __iomem *ioaddr = hw->pcsr; 451 void __iomem *ioaddr = hw->pcsr;
257 u32 channel = STMMAC_CHAN0; /* FIXME */
258 unsigned int flow = 0; 452 unsigned int flow = 0;
453 u32 queue = 0;
259 454
260 pr_debug("GMAC Flow-Control:\n"); 455 pr_debug("GMAC Flow-Control:\n");
261 if (fc & FLOW_RX) { 456 if (fc & FLOW_RX) {
@@ -265,13 +460,18 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
265 } 460 }
266 if (fc & FLOW_TX) { 461 if (fc & FLOW_TX) {
267 pr_debug("\tTransmit Flow-Control ON\n"); 462 pr_debug("\tTransmit Flow-Control ON\n");
268 flow |= GMAC_TX_FLOW_CTRL_TFE;
269 writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
270 463
271 if (duplex) { 464 if (duplex)
272 pr_debug("\tduplex mode: PAUSE %d\n", pause_time); 465 pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
273 flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT); 466
274 writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel)); 467 for (queue = 0; queue < tx_cnt; queue++) {
468 flow |= GMAC_TX_FLOW_CTRL_TFE;
469
470 if (duplex)
471 flow |=
472 (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
473
474 writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
275 } 475 }
276 } 476 }
277} 477}
@@ -325,11 +525,34 @@ static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
325 } 525 }
326} 526}
327 527
528static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
529{
530 void __iomem *ioaddr = hw->pcsr;
531 u32 mtl_int_qx_status;
532 int ret = 0;
533
534 mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
535
536 /* Check MTL Interrupt */
537 if (mtl_int_qx_status & MTL_INT_QX(chan)) {
538 /* read Queue x Interrupt status */
539 u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));
540
541 if (status & MTL_RX_OVERFLOW_INT) {
542 /* clear Interrupt */
543 writel(status | MTL_RX_OVERFLOW_INT,
544 ioaddr + MTL_CHAN_INT_CTRL(chan));
545 ret = CORE_IRQ_MTL_RX_OVERFLOW;
546 }
547 }
548
549 return ret;
550}
551
328static int dwmac4_irq_status(struct mac_device_info *hw, 552static int dwmac4_irq_status(struct mac_device_info *hw,
329 struct stmmac_extra_stats *x) 553 struct stmmac_extra_stats *x)
330{ 554{
331 void __iomem *ioaddr = hw->pcsr; 555 void __iomem *ioaddr = hw->pcsr;
332 u32 mtl_int_qx_status;
333 u32 intr_status; 556 u32 intr_status;
334 int ret = 0; 557 int ret = 0;
335 558
@@ -348,20 +571,6 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
348 x->irq_receive_pmt_irq_n++; 571 x->irq_receive_pmt_irq_n++;
349 } 572 }
350 573
351 mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
352 /* Check MTL Interrupt: Currently only one queue is used: Q0. */
353 if (mtl_int_qx_status & MTL_INT_Q0) {
354 /* read Queue 0 Interrupt status */
355 u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
356
357 if (status & MTL_RX_OVERFLOW_INT) {
358 /* clear Interrupt */
359 writel(status | MTL_RX_OVERFLOW_INT,
360 ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
361 ret = CORE_IRQ_MTL_RX_OVERFLOW;
362 }
363 }
364
365 dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); 574 dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
366 if (intr_status & PCS_RGSMIIIS_IRQ) 575 if (intr_status & PCS_RGSMIIIS_IRQ)
367 dwmac4_phystatus(ioaddr, x); 576 dwmac4_phystatus(ioaddr, x);
@@ -369,64 +578,69 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
369 return ret; 578 return ret;
370} 579}
371 580
372static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) 581static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
582 u32 rx_queues, u32 tx_queues)
373{ 583{
374 u32 value; 584 u32 value;
375 585 u32 queue;
376 /* Currently only channel 0 is supported */ 586
377 value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0)); 587 for (queue = 0; queue < tx_queues; queue++) {
378 588 value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));
379 if (value & MTL_DEBUG_TXSTSFSTS) 589
380 x->mtl_tx_status_fifo_full++; 590 if (value & MTL_DEBUG_TXSTSFSTS)
381 if (value & MTL_DEBUG_TXFSTS) 591 x->mtl_tx_status_fifo_full++;
382 x->mtl_tx_fifo_not_empty++; 592 if (value & MTL_DEBUG_TXFSTS)
383 if (value & MTL_DEBUG_TWCSTS) 593 x->mtl_tx_fifo_not_empty++;
384 x->mmtl_fifo_ctrl++; 594 if (value & MTL_DEBUG_TWCSTS)
385 if (value & MTL_DEBUG_TRCSTS_MASK) { 595 x->mmtl_fifo_ctrl++;
386 u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK) 596 if (value & MTL_DEBUG_TRCSTS_MASK) {
387 >> MTL_DEBUG_TRCSTS_SHIFT; 597 u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
388 if (trcsts == MTL_DEBUG_TRCSTS_WRITE) 598 >> MTL_DEBUG_TRCSTS_SHIFT;
389 x->mtl_tx_fifo_read_ctrl_write++; 599 if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
390 else if (trcsts == MTL_DEBUG_TRCSTS_TXW) 600 x->mtl_tx_fifo_read_ctrl_write++;
391 x->mtl_tx_fifo_read_ctrl_wait++; 601 else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
392 else if (trcsts == MTL_DEBUG_TRCSTS_READ) 602 x->mtl_tx_fifo_read_ctrl_wait++;
393 x->mtl_tx_fifo_read_ctrl_read++; 603 else if (trcsts == MTL_DEBUG_TRCSTS_READ)
394 else 604 x->mtl_tx_fifo_read_ctrl_read++;
395 x->mtl_tx_fifo_read_ctrl_idle++; 605 else
606 x->mtl_tx_fifo_read_ctrl_idle++;
607 }
608 if (value & MTL_DEBUG_TXPAUSED)
609 x->mac_tx_in_pause++;
396 } 610 }
397 if (value & MTL_DEBUG_TXPAUSED)
398 x->mac_tx_in_pause++;
399 611
400 value = readl(ioaddr + MTL_CHAN_RX_DEBUG(STMMAC_CHAN0)); 612 for (queue = 0; queue < rx_queues; queue++) {
613 value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));
401 614
402 if (value & MTL_DEBUG_RXFSTS_MASK) { 615 if (value & MTL_DEBUG_RXFSTS_MASK) {
403 u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK) 616 u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
404 >> MTL_DEBUG_RRCSTS_SHIFT; 617 >> MTL_DEBUG_RRCSTS_SHIFT;
405 618
406 if (rxfsts == MTL_DEBUG_RXFSTS_FULL) 619 if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
407 x->mtl_rx_fifo_fill_level_full++; 620 x->mtl_rx_fifo_fill_level_full++;
408 else if (rxfsts == MTL_DEBUG_RXFSTS_AT) 621 else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
409 x->mtl_rx_fifo_fill_above_thresh++; 622 x->mtl_rx_fifo_fill_above_thresh++;
410 else if (rxfsts == MTL_DEBUG_RXFSTS_BT) 623 else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
411 x->mtl_rx_fifo_fill_below_thresh++; 624 x->mtl_rx_fifo_fill_below_thresh++;
412 else 625 else
413 x->mtl_rx_fifo_fill_level_empty++; 626 x->mtl_rx_fifo_fill_level_empty++;
414 } 627 }
415 if (value & MTL_DEBUG_RRCSTS_MASK) { 628 if (value & MTL_DEBUG_RRCSTS_MASK) {
416 u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >> 629 u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
417 MTL_DEBUG_RRCSTS_SHIFT; 630 MTL_DEBUG_RRCSTS_SHIFT;
418 631
419 if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH) 632 if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
420 x->mtl_rx_fifo_read_ctrl_flush++; 633 x->mtl_rx_fifo_read_ctrl_flush++;
421 else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT) 634 else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
422 x->mtl_rx_fifo_read_ctrl_read_data++; 635 x->mtl_rx_fifo_read_ctrl_read_data++;
423 else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA) 636 else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
424 x->mtl_rx_fifo_read_ctrl_status++; 637 x->mtl_rx_fifo_read_ctrl_status++;
425 else 638 else
426 x->mtl_rx_fifo_read_ctrl_idle++; 639 x->mtl_rx_fifo_read_ctrl_idle++;
640 }
641 if (value & MTL_DEBUG_RWCSTS)
642 x->mtl_rx_fifo_ctrl_active++;
427 } 643 }
428 if (value & MTL_DEBUG_RWCSTS)
429 x->mtl_rx_fifo_ctrl_active++;
430 644
431 /* GMAC debug */ 645 /* GMAC debug */
432 value = readl(ioaddr + GMAC_DEBUG); 646 value = readl(ioaddr + GMAC_DEBUG);
@@ -457,8 +671,17 @@ static const struct stmmac_ops dwmac4_ops = {
457 .core_init = dwmac4_core_init, 671 .core_init = dwmac4_core_init,
458 .rx_ipc = dwmac4_rx_ipc_enable, 672 .rx_ipc = dwmac4_rx_ipc_enable,
459 .rx_queue_enable = dwmac4_rx_queue_enable, 673 .rx_queue_enable = dwmac4_rx_queue_enable,
674 .rx_queue_prio = dwmac4_rx_queue_priority,
675 .tx_queue_prio = dwmac4_tx_queue_priority,
676 .rx_queue_routing = dwmac4_tx_queue_routing,
677 .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
678 .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
679 .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
680 .map_mtl_to_dma = dwmac4_map_mtl_dma,
681 .config_cbs = dwmac4_config_cbs,
460 .dump_regs = dwmac4_dump_regs, 682 .dump_regs = dwmac4_dump_regs,
461 .host_irq_status = dwmac4_irq_status, 683 .host_irq_status = dwmac4_irq_status,
684 .host_mtl_irq_status = dwmac4_irq_mtl_status,
462 .flow_ctrl = dwmac4_flow_ctrl, 685 .flow_ctrl = dwmac4_flow_ctrl,
463 .pmt = dwmac4_pmt, 686 .pmt = dwmac4_pmt,
464 .set_umac_addr = dwmac4_set_umac_addr, 687 .set_umac_addr = dwmac4_set_umac_addr,
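With the MTL interrupt handling factored out into host_mtl_irq_status() above, the overflow status can be polled channel by channel instead of only on queue 0. A hedged sketch of how an interrupt path might consume the new hook; rx_queues_to_use is an assumed platform-data field and the local counter is purely illustrative:

static unsigned int example_handle_mtl_irqs(struct stmmac_priv *priv)
{
	u32 queues = priv->plat->rx_queues_to_use;	/* assumed field */
	unsigned int overflows = 0;
	u32 chan;

	for (chan = 0; chan < queues; chan++) {
		int status = priv->hw->mac->host_mtl_irq_status(priv->hw, chan);

		if (status & CORE_IRQ_MTL_RX_OVERFLOW)
			overflows++;	/* e.g. kick the RX refill path here */
	}

	return overflows;
}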
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index f97b0d5d9987..eec8463057fd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -71,36 +71,48 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
71 writel(value, ioaddr + DMA_SYS_BUS_MODE); 71 writel(value, ioaddr + DMA_SYS_BUS_MODE);
72} 72}
73 73
74static void dwmac4_dma_init_channel(void __iomem *ioaddr, 74void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
75 struct stmmac_dma_cfg *dma_cfg, 75 struct stmmac_dma_cfg *dma_cfg,
76 u32 dma_tx_phy, u32 dma_rx_phy, 76 u32 dma_rx_phy, u32 chan)
77 u32 channel)
78{ 77{
79 u32 value; 78 u32 value;
80 int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; 79 u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
81 int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
82 80
83 /* set PBL for each channels. Currently we affect same configuration 81 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
84 * on each channel 82 value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
85 */ 83 writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
86 value = readl(ioaddr + DMA_CHAN_CONTROL(channel)); 84
87 if (dma_cfg->pblx8) 85 writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
88 value = value | DMA_BUS_MODE_PBL; 86}
89 writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
90 87
91 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)); 88void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
89 struct stmmac_dma_cfg *dma_cfg,
90 u32 dma_tx_phy, u32 chan)
91{
92 u32 value;
93 u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
94
95 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
92 value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT); 96 value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
93 writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel)); 97 writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
94 98
95 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)); 99 writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
96 value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT); 100}
97 writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
98 101
99 /* Mask interrupts by writing to CSR7 */ 102void dwmac4_dma_init_channel(void __iomem *ioaddr,
100 writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel)); 103 struct stmmac_dma_cfg *dma_cfg, u32 chan)
104{
105 u32 value;
106
107 /* common channel control register config */
108 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
109 if (dma_cfg->pblx8)
110 value = value | DMA_BUS_MODE_PBL;
111 writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
101 112
102 writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)); 113 /* Mask interrupts by writing to CSR7 */
103 writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)); 114 writel(DMA_CHAN_INTR_DEFAULT_MASK,
115 ioaddr + DMA_CHAN_INTR_ENA(chan));
104} 116}
105 117
106static void dwmac4_dma_init(void __iomem *ioaddr, 118static void dwmac4_dma_init(void __iomem *ioaddr,
@@ -108,7 +120,6 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
108 u32 dma_tx, u32 dma_rx, int atds) 120 u32 dma_tx, u32 dma_rx, int atds)
109{ 121{
110 u32 value = readl(ioaddr + DMA_SYS_BUS_MODE); 122 u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
111 int i;
112 123
113 /* Set the Fixed burst mode */ 124 /* Set the Fixed burst mode */
114 if (dma_cfg->fixed_burst) 125 if (dma_cfg->fixed_burst)
@@ -122,9 +133,6 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
122 value |= DMA_SYS_BUS_AAL; 133 value |= DMA_SYS_BUS_AAL;
123 134
124 writel(value, ioaddr + DMA_SYS_BUS_MODE); 135 writel(value, ioaddr + DMA_SYS_BUS_MODE);
125
126 for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
127 dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i);
128} 136}
129 137
130static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel, 138static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
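With the channel loop dropped from dwmac4_dma_init(), per-channel setup is now driven by the caller through the split init_chan/init_rx_chan/init_tx_chan hooks. A rough, hedged sketch of what such a caller loop could look like; the stub functions below are illustrative stand-ins, the real iteration lives in stmmac_main.c with its own names and arguments:

    #include <stdio.h>

    /* Illustrative stand-ins for the split per-channel init hooks. */
    static void init_chan(unsigned int chan)    { printf("common init, chan %u\n", chan); }
    static void init_rx_chan(unsigned int chan) { printf("RX init,     chan %u\n", chan); }
    static void init_tx_chan(unsigned int chan) { printf("TX init,     chan %u\n", chan); }

    int main(void)
    {
            unsigned int rx_channels = 2, tx_channels = 2, chan;
            unsigned int max_channels = rx_channels > tx_channels ? rx_channels : tx_channels;

            /* Common per-channel setup (PBLx8 flag, interrupt mask). */
            for (chan = 0; chan < max_channels; chan++)
                    init_chan(chan);

            /* RX PBL + RX descriptor base address per channel. */
            for (chan = 0; chan < rx_channels; chan++)
                    init_rx_chan(chan);

            /* TX PBL + TX descriptor base address per channel. */
            for (chan = 0; chan < tx_channels; chan++)
                    init_tx_chan(chan);

            return 0;
    }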
@@ -174,46 +182,121 @@ static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
174 _dwmac4_dump_dma_regs(ioaddr, i, reg_space); 182 _dwmac4_dump_dma_regs(ioaddr, i, reg_space);
175} 183}
176 184
177static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt) 185static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan)
178{ 186{
179 int i; 187 u32 chan;
180 188
181 for (i = 0; i < DMA_CHANNEL_NB_MAX; i++) 189 for (chan = 0; chan < number_chan; chan++)
182 writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i)); 190 writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(chan));
183} 191}
184 192
185static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode, 193static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
186 int rxmode, u32 channel) 194 u32 channel, int fifosz)
187{ 195{
188 u32 mtl_tx_op, mtl_rx_op, mtl_rx_int; 196 unsigned int rqs = fifosz / 256 - 1;
197 u32 mtl_rx_op, mtl_rx_int;
189 198
190 /* Following code only done for channel 0, other channels not yet 199 mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
191 * supported. 200
192 */ 201 if (mode == SF_DMA_MODE) {
193 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel)); 202 pr_debug("GMAC: enable RX store and forward mode\n");
203 mtl_rx_op |= MTL_OP_MODE_RSF;
204 } else {
205 pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
206 mtl_rx_op &= ~MTL_OP_MODE_RSF;
207 mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
208 if (mode <= 32)
209 mtl_rx_op |= MTL_OP_MODE_RTC_32;
210 else if (mode <= 64)
211 mtl_rx_op |= MTL_OP_MODE_RTC_64;
212 else if (mode <= 96)
213 mtl_rx_op |= MTL_OP_MODE_RTC_96;
214 else
215 mtl_rx_op |= MTL_OP_MODE_RTC_128;
216 }
217
218 mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
219 mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;
220
221 /* enable flow control only if each channel gets 4 KiB or more FIFO */
222 if (fifosz >= 4096) {
223 unsigned int rfd, rfa;
224
225 mtl_rx_op |= MTL_OP_MODE_EHFC;
226
227 /* Set Threshold for Activating Flow Control to min 2 frames,
228 * i.e. 1500 * 2 = 3000 bytes.
229 *
230 * Set Threshold for Deactivating Flow Control to min 1 frame,
231 * i.e. 1500 bytes.
232 */
233 switch (fifosz) {
234 case 4096:
235 /* This violates the above formula because of FIFO size
236 * limit therefore overflow may occur in spite of this.
237 */
238 rfd = 0x03; /* Full-2.5K */
239 rfa = 0x01; /* Full-1.5K */
240 break;
241
242 case 8192:
243 rfd = 0x06; /* Full-4K */
244 rfa = 0x0a; /* Full-6K */
245 break;
246
247 case 16384:
248 rfd = 0x06; /* Full-4K */
249 rfa = 0x12; /* Full-10K */
250 break;
251
252 default:
253 rfd = 0x06; /* Full-4K */
254 rfa = 0x1e; /* Full-16K */
255 break;
256 }
257
258 mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK;
259 mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT;
194 260
195 if (txmode == SF_DMA_MODE) { 261 mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK;
262 mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
263 }
264
265 writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
266
267 /* Enable MTL RX overflow */
268 mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
269 writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
270 ioaddr + MTL_CHAN_INT_CTRL(channel));
271}
272
273static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
274 u32 channel)
275{
276 u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
277
278 if (mode == SF_DMA_MODE) {
196 pr_debug("GMAC: enable TX store and forward mode\n"); 279 pr_debug("GMAC: enable TX store and forward mode\n");
197 /* Transmit COE type 2 cannot be done in cut-through mode. */ 280 /* Transmit COE type 2 cannot be done in cut-through mode. */
198 mtl_tx_op |= MTL_OP_MODE_TSF; 281 mtl_tx_op |= MTL_OP_MODE_TSF;
199 } else { 282 } else {
200 pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode); 283 pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
201 mtl_tx_op &= ~MTL_OP_MODE_TSF; 284 mtl_tx_op &= ~MTL_OP_MODE_TSF;
202 mtl_tx_op &= MTL_OP_MODE_TTC_MASK; 285 mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
203 /* Set the transmit threshold */ 286 /* Set the transmit threshold */
204 if (txmode <= 32) 287 if (mode <= 32)
205 mtl_tx_op |= MTL_OP_MODE_TTC_32; 288 mtl_tx_op |= MTL_OP_MODE_TTC_32;
206 else if (txmode <= 64) 289 else if (mode <= 64)
207 mtl_tx_op |= MTL_OP_MODE_TTC_64; 290 mtl_tx_op |= MTL_OP_MODE_TTC_64;
208 else if (txmode <= 96) 291 else if (mode <= 96)
209 mtl_tx_op |= MTL_OP_MODE_TTC_96; 292 mtl_tx_op |= MTL_OP_MODE_TTC_96;
210 else if (txmode <= 128) 293 else if (mode <= 128)
211 mtl_tx_op |= MTL_OP_MODE_TTC_128; 294 mtl_tx_op |= MTL_OP_MODE_TTC_128;
212 else if (txmode <= 192) 295 else if (mode <= 192)
213 mtl_tx_op |= MTL_OP_MODE_TTC_192; 296 mtl_tx_op |= MTL_OP_MODE_TTC_192;
214 else if (txmode <= 256) 297 else if (mode <= 256)
215 mtl_tx_op |= MTL_OP_MODE_TTC_256; 298 mtl_tx_op |= MTL_OP_MODE_TTC_256;
216 else if (txmode <= 384) 299 else if (mode <= 384)
217 mtl_tx_op |= MTL_OP_MODE_TTC_384; 300 mtl_tx_op |= MTL_OP_MODE_TTC_384;
218 else 301 else
219 mtl_tx_op |= MTL_OP_MODE_TTC_512; 302 mtl_tx_op |= MTL_OP_MODE_TTC_512;
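The RX op-mode path above sizes the queue with rqs = fifosz / 256 - 1 (the RQS field counts 256-byte units, minus one) and only enables hardware flow control when the queue gets at least 4 KiB of FIFO, picking the rfd/rfa register encodings per FIFO size. A standalone check of that arithmetic, reusing the same encodings as the hunk; this is a sketch of the calculation only, not driver code:

    #include <stdio.h>

    static void rx_fifo_params(unsigned int fifosz)
    {
            unsigned int rqs = fifosz / 256 - 1;    /* RQS field: 256-byte units, minus one */

            printf("fifosz=%5u  rqs=0x%02x", fifosz, rqs);

            if (fifosz >= 4096) {   /* flow control only with >= 4 KiB per queue */
                    unsigned int rfd, rfa;

                    switch (fifosz) {
                    case 4096:  rfd = 0x03; rfa = 0x01; break; /* Full-2.5K / Full-1.5K */
                    case 8192:  rfd = 0x06; rfa = 0x0a; break; /* Full-4K   / Full-6K   */
                    case 16384: rfd = 0x06; rfa = 0x12; break; /* Full-4K   / Full-10K  */
                    default:    rfd = 0x06; rfa = 0x1e; break; /* Full-4K   / Full-16K  */
                    }
                    printf("  rfd=0x%02x  rfa=0x%02x\n", rfd, rfa);
            } else {
                    printf("  (flow control left disabled)\n");
            }
    }

    int main(void)
    {
            rx_fifo_params(2048);
            rx_fifo_params(4096);
            rx_fifo_params(8192);
            rx_fifo_params(16384);
            return 0;
    }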
@@ -230,39 +313,6 @@ static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
230 */ 313 */
231 mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK; 314 mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
232 writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel)); 315 writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
233
234 mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
235
236 if (rxmode == SF_DMA_MODE) {
237 pr_debug("GMAC: enable RX store and forward mode\n");
238 mtl_rx_op |= MTL_OP_MODE_RSF;
239 } else {
240 pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
241 mtl_rx_op &= ~MTL_OP_MODE_RSF;
242 mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
243 if (rxmode <= 32)
244 mtl_rx_op |= MTL_OP_MODE_RTC_32;
245 else if (rxmode <= 64)
246 mtl_rx_op |= MTL_OP_MODE_RTC_64;
247 else if (rxmode <= 96)
248 mtl_rx_op |= MTL_OP_MODE_RTC_96;
249 else
250 mtl_rx_op |= MTL_OP_MODE_RTC_128;
251 }
252
253 writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
254
255 /* Enable MTL RX overflow */
256 mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
257 writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
258 ioaddr + MTL_CHAN_INT_CTRL(channel));
259}
260
261static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode,
262 int rxmode, int rxfifosz)
263{
264 /* Only Channel 0 is actually configured and used */
265 dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0);
266} 316}
267 317
268static void dwmac4_get_hw_feature(void __iomem *ioaddr, 318static void dwmac4_get_hw_feature(void __iomem *ioaddr,
@@ -294,6 +344,11 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
294 hw_cap = readl(ioaddr + GMAC_HW_FEATURE1); 344 hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
295 dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20; 345 dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
296 dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18; 346 dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
347 /* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
348 * shifting and store the sizes in bytes.
349 */
350 dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6);
351 dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0);
297 /* MAC HW feature2 */ 352 /* MAC HW feature2 */
298 hw_cap = readl(ioaddr + GMAC_HW_FEATURE2); 353 hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
299 /* TX and RX number of channels */ 354 /* TX and RX number of channels */
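The HW_FEATURE1 parsing added above stores the FIFO sizes in bytes: hardware encodes each size as log2(size / 128), so masking the field out, shifting it down and computing 128 << field recovers the byte count. A tiny standalone check of that decoding; the field positions below are assumed for the example, the real masks live in the dwmac4 headers:

    #include <stdio.h>

    #define DEMO_TXFIFOSIZE_MASK  0x000007c0u      /* bits 10:6, assumed layout */
    #define DEMO_RXFIFOSIZE_MASK  0x0000001fu      /* bits 4:0,  assumed layout */

    int main(void)
    {
            /* Example HW_FEATURE1 value: TX field = 9, RX field = 9
             * -> 128 << 9 = 65536 bytes = 64 KiB for each FIFO.
             */
            unsigned int hw_cap = (9u << 6) | (9u << 0);

            unsigned int tx_fifo = 128u << ((hw_cap & DEMO_TXFIFOSIZE_MASK) >> 6);
            unsigned int rx_fifo = 128u << ((hw_cap & DEMO_RXFIFOSIZE_MASK) >> 0);

            printf("tx_fifo_size = %u bytes, rx_fifo_size = %u bytes\n",
                   tx_fifo, rx_fifo);
            return 0;
    }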
@@ -332,9 +387,13 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
332const struct stmmac_dma_ops dwmac4_dma_ops = { 387const struct stmmac_dma_ops dwmac4_dma_ops = {
333 .reset = dwmac4_dma_reset, 388 .reset = dwmac4_dma_reset,
334 .init = dwmac4_dma_init, 389 .init = dwmac4_dma_init,
390 .init_chan = dwmac4_dma_init_channel,
391 .init_rx_chan = dwmac4_dma_init_rx_chan,
392 .init_tx_chan = dwmac4_dma_init_tx_chan,
335 .axi = dwmac4_dma_axi, 393 .axi = dwmac4_dma_axi,
336 .dump_regs = dwmac4_dump_dma_regs, 394 .dump_regs = dwmac4_dump_dma_regs,
337 .dma_mode = dwmac4_dma_operation_mode, 395 .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
396 .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
338 .enable_dma_irq = dwmac4_enable_dma_irq, 397 .enable_dma_irq = dwmac4_enable_dma_irq,
339 .disable_dma_irq = dwmac4_disable_dma_irq, 398 .disable_dma_irq = dwmac4_disable_dma_irq,
340 .start_tx = dwmac4_dma_start_tx, 399 .start_tx = dwmac4_dma_start_tx,
@@ -354,9 +413,13 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
354const struct stmmac_dma_ops dwmac410_dma_ops = { 413const struct stmmac_dma_ops dwmac410_dma_ops = {
355 .reset = dwmac4_dma_reset, 414 .reset = dwmac4_dma_reset,
356 .init = dwmac4_dma_init, 415 .init = dwmac4_dma_init,
416 .init_chan = dwmac4_dma_init_channel,
417 .init_rx_chan = dwmac4_dma_init_rx_chan,
418 .init_tx_chan = dwmac4_dma_init_tx_chan,
357 .axi = dwmac4_dma_axi, 419 .axi = dwmac4_dma_axi,
358 .dump_regs = dwmac4_dump_dma_regs, 420 .dump_regs = dwmac4_dump_dma_regs,
359 .dma_mode = dwmac4_dma_operation_mode, 421 .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
422 .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
360 .enable_dma_irq = dwmac410_enable_dma_irq, 423 .enable_dma_irq = dwmac410_enable_dma_irq,
361 .disable_dma_irq = dwmac4_disable_dma_irq, 424 .disable_dma_irq = dwmac4_disable_dma_irq,
362 .start_tx = dwmac4_dma_start_tx, 425 .start_tx = dwmac4_dma_start_tx,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 1b06df749e2b..8474bf961dd0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -185,17 +185,17 @@
185 185
186int dwmac4_dma_reset(void __iomem *ioaddr); 186int dwmac4_dma_reset(void __iomem *ioaddr);
187void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr); 187void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr);
188void dwmac4_enable_dma_irq(void __iomem *ioaddr); 188void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan);
189void dwmac410_enable_dma_irq(void __iomem *ioaddr); 189void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan);
190void dwmac4_disable_dma_irq(void __iomem *ioaddr); 190void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan);
191void dwmac4_dma_start_tx(void __iomem *ioaddr); 191void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan);
192void dwmac4_dma_stop_tx(void __iomem *ioaddr); 192void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
193void dwmac4_dma_start_rx(void __iomem *ioaddr); 193void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
194void dwmac4_dma_stop_rx(void __iomem *ioaddr); 194void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan);
195int dwmac4_dma_interrupt(void __iomem *ioaddr, 195int dwmac4_dma_interrupt(void __iomem *ioaddr,
196 struct stmmac_extra_stats *x); 196 struct stmmac_extra_stats *x, u32 chan);
197void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len); 197void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
198void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len); 198void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
199void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan); 199void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
200void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan); 200void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
201 201
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index c7326d5b2f43..49f5687879df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -37,96 +37,96 @@ int dwmac4_dma_reset(void __iomem *ioaddr)
37 37
38void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan) 38void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
39{ 39{
40 writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0)); 40 writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(chan));
41} 41}
42 42
43void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan) 43void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
44{ 44{
45 writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0)); 45 writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(chan));
46} 46}
47 47
48void dwmac4_dma_start_tx(void __iomem *ioaddr) 48void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan)
49{ 49{
50 u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); 50 u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
51 51
52 value |= DMA_CONTROL_ST; 52 value |= DMA_CONTROL_ST;
53 writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); 53 writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
54 54
55 value = readl(ioaddr + GMAC_CONFIG); 55 value = readl(ioaddr + GMAC_CONFIG);
56 value |= GMAC_CONFIG_TE; 56 value |= GMAC_CONFIG_TE;
57 writel(value, ioaddr + GMAC_CONFIG); 57 writel(value, ioaddr + GMAC_CONFIG);
58} 58}
59 59
60void dwmac4_dma_stop_tx(void __iomem *ioaddr) 60void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan)
61{ 61{
62 u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); 62 u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
63 63
64 value &= ~DMA_CONTROL_ST; 64 value &= ~DMA_CONTROL_ST;
65 writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); 65 writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
66 66
67 value = readl(ioaddr + GMAC_CONFIG); 67 value = readl(ioaddr + GMAC_CONFIG);
68 value &= ~GMAC_CONFIG_TE; 68 value &= ~GMAC_CONFIG_TE;
69 writel(value, ioaddr + GMAC_CONFIG); 69 writel(value, ioaddr + GMAC_CONFIG);
70} 70}
71 71
72void dwmac4_dma_start_rx(void __iomem *ioaddr) 72void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan)
73{ 73{
74 u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); 74 u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
75 75
76 value |= DMA_CONTROL_SR; 76 value |= DMA_CONTROL_SR;
77 77
78 writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); 78 writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
79 79
80 value = readl(ioaddr + GMAC_CONFIG); 80 value = readl(ioaddr + GMAC_CONFIG);
81 value |= GMAC_CONFIG_RE; 81 value |= GMAC_CONFIG_RE;
82 writel(value, ioaddr + GMAC_CONFIG); 82 writel(value, ioaddr + GMAC_CONFIG);
83} 83}
84 84
85void dwmac4_dma_stop_rx(void __iomem *ioaddr) 85void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan)
86{ 86{
87 u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); 87 u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
88 88
89 value &= ~DMA_CONTROL_SR; 89 value &= ~DMA_CONTROL_SR;
90 writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); 90 writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
91 91
92 value = readl(ioaddr + GMAC_CONFIG); 92 value = readl(ioaddr + GMAC_CONFIG);
93 value &= ~GMAC_CONFIG_RE; 93 value &= ~GMAC_CONFIG_RE;
94 writel(value, ioaddr + GMAC_CONFIG); 94 writel(value, ioaddr + GMAC_CONFIG);
95} 95}
96 96
97void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len) 97void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
98{ 98{
99 writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0)); 99 writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(chan));
100} 100}
101 101
102void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len) 102void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
103{ 103{
104 writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0)); 104 writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan));
105} 105}
106 106
107void dwmac4_enable_dma_irq(void __iomem *ioaddr) 107void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan)
108{ 108{
109 writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + 109 writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
110 DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); 110 DMA_CHAN_INTR_ENA(chan));
111} 111}
112 112
113void dwmac410_enable_dma_irq(void __iomem *ioaddr) 113void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan)
114{ 114{
115 writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10, 115 writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
116 ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); 116 ioaddr + DMA_CHAN_INTR_ENA(chan));
117} 117}
118 118
119void dwmac4_disable_dma_irq(void __iomem *ioaddr) 119void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan)
120{ 120{
121 writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); 121 writel(0, ioaddr + DMA_CHAN_INTR_ENA(chan));
122} 122}
123 123
124int dwmac4_dma_interrupt(void __iomem *ioaddr, 124int dwmac4_dma_interrupt(void __iomem *ioaddr,
125 struct stmmac_extra_stats *x) 125 struct stmmac_extra_stats *x, u32 chan)
126{ 126{
127 int ret = 0; 127 int ret = 0;
128 128
129 u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0)); 129 u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
130 130
131 /* ABNORMAL interrupts */ 131 /* ABNORMAL interrupts */
132 if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) { 132 if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
@@ -153,7 +153,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
153 if (likely(intr_status & DMA_CHAN_STATUS_RI)) { 153 if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
154 u32 value; 154 u32 value;
155 155
156 value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); 156 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
157 /* to schedule NAPI on real RIE event. */ 157 /* to schedule NAPI on real RIE event. */
158 if (likely(value & DMA_CHAN_INTR_ENA_RIE)) { 158 if (likely(value & DMA_CHAN_INTR_ENA_RIE)) {
159 x->rx_normal_irq_n++; 159 x->rx_normal_irq_n++;
@@ -172,7 +172,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
172 * status [21-0] expect reserved bits [5-3] 172 * status [21-0] expect reserved bits [5-3]
173 */ 173 */
174 writel((intr_status & 0x3fffc7), 174 writel((intr_status & 0x3fffc7),
175 ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0)); 175 ioaddr + DMA_CHAN_STATUS(chan));
176 176
177 return ret; 177 return ret;
178} 178}
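Every dwmac4_lib.c helper above now takes the channel index and uses it to address the per-channel register block instead of hard-coding STMMAC_CHAN0. On DWMAC4 the per-channel DMA registers form a fixed-size block per channel, so a macro like DMA_CHAN_TX_CONTROL(chan) reduces to base + chan * stride + offset. A standalone sketch of that addressing; the base, stride and offset values here are assumed for illustration, the real ones are defined in dwmac4_dma.h:

    #include <stdio.h>

    /* Assumed layout for illustration only. */
    #define DEMO_DMA_CHAN_BASE    0x1100u
    #define DEMO_DMA_CHAN_STRIDE  0x80u
    #define DEMO_DMA_CHAN(chan)   (DEMO_DMA_CHAN_BASE + (chan) * DEMO_DMA_CHAN_STRIDE)
    #define DEMO_TX_CONTROL(chan) (DEMO_DMA_CHAN(chan) + 0x04u)

    int main(void)
    {
            unsigned int chan;

            for (chan = 0; chan < 4; chan++)
                    printf("chan %u: DMA block 0x%04x, TX control 0x%04x\n",
                           chan, DEMO_DMA_CHAN(chan), DEMO_TX_CONTROL(chan));
            return 0;
    }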
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 56e485f79077..9091df86723a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -137,13 +137,14 @@
137#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ 137#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
138 138
139void dwmac_enable_dma_transmission(void __iomem *ioaddr); 139void dwmac_enable_dma_transmission(void __iomem *ioaddr);
140void dwmac_enable_dma_irq(void __iomem *ioaddr); 140void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
141void dwmac_disable_dma_irq(void __iomem *ioaddr); 141void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
142void dwmac_dma_start_tx(void __iomem *ioaddr); 142void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan);
143void dwmac_dma_stop_tx(void __iomem *ioaddr); 143void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
144void dwmac_dma_start_rx(void __iomem *ioaddr); 144void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
145void dwmac_dma_stop_rx(void __iomem *ioaddr); 145void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan);
146int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x); 146int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x,
147 u32 chan);
147int dwmac_dma_reset(void __iomem *ioaddr); 148int dwmac_dma_reset(void __iomem *ioaddr);
148 149
149#endif /* __DWMAC_DMA_H__ */ 150#endif /* __DWMAC_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index e60bfca2a763..38f94305aab5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -47,38 +47,38 @@ void dwmac_enable_dma_transmission(void __iomem *ioaddr)
47 writel(1, ioaddr + DMA_XMT_POLL_DEMAND); 47 writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
48} 48}
49 49
50void dwmac_enable_dma_irq(void __iomem *ioaddr) 50void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
51{ 51{
52 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); 52 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
53} 53}
54 54
55void dwmac_disable_dma_irq(void __iomem *ioaddr) 55void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
56{ 56{
57 writel(0, ioaddr + DMA_INTR_ENA); 57 writel(0, ioaddr + DMA_INTR_ENA);
58} 58}
59 59
60void dwmac_dma_start_tx(void __iomem *ioaddr) 60void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
61{ 61{
62 u32 value = readl(ioaddr + DMA_CONTROL); 62 u32 value = readl(ioaddr + DMA_CONTROL);
63 value |= DMA_CONTROL_ST; 63 value |= DMA_CONTROL_ST;
64 writel(value, ioaddr + DMA_CONTROL); 64 writel(value, ioaddr + DMA_CONTROL);
65} 65}
66 66
67void dwmac_dma_stop_tx(void __iomem *ioaddr) 67void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan)
68{ 68{
69 u32 value = readl(ioaddr + DMA_CONTROL); 69 u32 value = readl(ioaddr + DMA_CONTROL);
70 value &= ~DMA_CONTROL_ST; 70 value &= ~DMA_CONTROL_ST;
71 writel(value, ioaddr + DMA_CONTROL); 71 writel(value, ioaddr + DMA_CONTROL);
72} 72}
73 73
74void dwmac_dma_start_rx(void __iomem *ioaddr) 74void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan)
75{ 75{
76 u32 value = readl(ioaddr + DMA_CONTROL); 76 u32 value = readl(ioaddr + DMA_CONTROL);
77 value |= DMA_CONTROL_SR; 77 value |= DMA_CONTROL_SR;
78 writel(value, ioaddr + DMA_CONTROL); 78 writel(value, ioaddr + DMA_CONTROL);
79} 79}
80 80
81void dwmac_dma_stop_rx(void __iomem *ioaddr) 81void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
82{ 82{
83 u32 value = readl(ioaddr + DMA_CONTROL); 83 u32 value = readl(ioaddr + DMA_CONTROL);
84 value &= ~DMA_CONTROL_SR; 84 value &= ~DMA_CONTROL_SR;
@@ -156,7 +156,7 @@ static void show_rx_process_state(unsigned int status)
156#endif 156#endif
157 157
158int dwmac_dma_interrupt(void __iomem *ioaddr, 158int dwmac_dma_interrupt(void __iomem *ioaddr,
159 struct stmmac_extra_stats *x) 159 struct stmmac_extra_stats *x, u32 chan)
160{ 160{
161 int ret = 0; 161 int ret = 0;
162 /* read the status register (CSR5) */ 162 /* read the status register (CSR5) */
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 452f256ff03f..31213e64513d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -26,16 +26,17 @@
26 26
27static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) 27static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
28{ 28{
29 struct stmmac_priv *priv = (struct stmmac_priv *)p; 29 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
30 unsigned int entry = priv->cur_tx;
31 struct dma_desc *desc;
32 unsigned int nopaged_len = skb_headlen(skb); 30 unsigned int nopaged_len = skb_headlen(skb);
31 struct stmmac_priv *priv = tx_q->priv_data;
32 unsigned int entry = tx_q->cur_tx;
33 unsigned int bmax, len, des2; 33 unsigned int bmax, len, des2;
34 struct dma_desc *desc;
34 35
35 if (priv->extend_desc) 36 if (priv->extend_desc)
36 desc = (struct dma_desc *)(priv->dma_etx + entry); 37 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
37 else 38 else
38 desc = priv->dma_tx + entry; 39 desc = tx_q->dma_tx + entry;
39 40
40 if (priv->plat->enh_desc) 41 if (priv->plat->enh_desc)
41 bmax = BUF_SIZE_8KiB; 42 bmax = BUF_SIZE_8KiB;
@@ -52,29 +53,29 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
52 if (dma_mapping_error(priv->device, des2)) 53 if (dma_mapping_error(priv->device, des2))
53 return -1; 54 return -1;
54 55
55 priv->tx_skbuff_dma[entry].buf = des2; 56 tx_q->tx_skbuff_dma[entry].buf = des2;
56 priv->tx_skbuff_dma[entry].len = bmax; 57 tx_q->tx_skbuff_dma[entry].len = bmax;
57 priv->tx_skbuff_dma[entry].is_jumbo = true; 58 tx_q->tx_skbuff_dma[entry].is_jumbo = true;
58 59
59 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); 60 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
60 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, 61 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
61 STMMAC_RING_MODE, 0, false); 62 STMMAC_RING_MODE, 0, false);
62 priv->tx_skbuff[entry] = NULL; 63 tx_q->tx_skbuff[entry] = NULL;
63 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 64 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
64 65
65 if (priv->extend_desc) 66 if (priv->extend_desc)
66 desc = (struct dma_desc *)(priv->dma_etx + entry); 67 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
67 else 68 else
68 desc = priv->dma_tx + entry; 69 desc = tx_q->dma_tx + entry;
69 70
70 des2 = dma_map_single(priv->device, skb->data + bmax, len, 71 des2 = dma_map_single(priv->device, skb->data + bmax, len,
71 DMA_TO_DEVICE); 72 DMA_TO_DEVICE);
72 desc->des2 = cpu_to_le32(des2); 73 desc->des2 = cpu_to_le32(des2);
73 if (dma_mapping_error(priv->device, des2)) 74 if (dma_mapping_error(priv->device, des2))
74 return -1; 75 return -1;
75 priv->tx_skbuff_dma[entry].buf = des2; 76 tx_q->tx_skbuff_dma[entry].buf = des2;
76 priv->tx_skbuff_dma[entry].len = len; 77 tx_q->tx_skbuff_dma[entry].len = len;
77 priv->tx_skbuff_dma[entry].is_jumbo = true; 78 tx_q->tx_skbuff_dma[entry].is_jumbo = true;
78 79
79 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); 80 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
80 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, 81 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
@@ -85,15 +86,15 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
85 desc->des2 = cpu_to_le32(des2); 86 desc->des2 = cpu_to_le32(des2);
86 if (dma_mapping_error(priv->device, des2)) 87 if (dma_mapping_error(priv->device, des2))
87 return -1; 88 return -1;
88 priv->tx_skbuff_dma[entry].buf = des2; 89 tx_q->tx_skbuff_dma[entry].buf = des2;
89 priv->tx_skbuff_dma[entry].len = nopaged_len; 90 tx_q->tx_skbuff_dma[entry].len = nopaged_len;
90 priv->tx_skbuff_dma[entry].is_jumbo = true; 91 tx_q->tx_skbuff_dma[entry].is_jumbo = true;
91 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); 92 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
92 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, 93 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
93 STMMAC_RING_MODE, 0, true); 94 STMMAC_RING_MODE, 0, true);
94 } 95 }
95 96
96 priv->cur_tx = entry; 97 tx_q->cur_tx = entry;
97 98
98 return entry; 99 return entry;
99} 100}
@@ -125,12 +126,13 @@ static void stmmac_init_desc3(struct dma_desc *p)
125 126
126static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) 127static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
127{ 128{
128 struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; 129 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
129 unsigned int entry = priv->dirty_tx; 130 struct stmmac_priv *priv = tx_q->priv_data;
131 unsigned int entry = tx_q->dirty_tx;
130 132
131 /* des3 is only used for jumbo frames tx or time stamping */ 133 /* des3 is only used for jumbo frames tx or time stamping */
132 if (unlikely(priv->tx_skbuff_dma[entry].is_jumbo || 134 if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo ||
133 (priv->tx_skbuff_dma[entry].last_segment && 135 (tx_q->tx_skbuff_dma[entry].last_segment &&
134 !priv->extend_desc && priv->hwts_tx_en))) 136 !priv->extend_desc && priv->hwts_tx_en)))
135 p->des3 = 0; 137 p->des3 = 0;
136} 138}
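The ring-mode jumbo path above splits an skb head that exceeds bmax (8 KiB with enhanced descriptors, otherwise roughly 4 KiB) across two descriptors, with each descriptor's des3 pointing BUF_SIZE_4KiB past des2 so the second buffer of the descriptor is reachable. A standalone check of the split arithmetic; the buffer-size constants and skb length below are assumed example values:

    #include <stdio.h>

    #define DEMO_BUF_SIZE_8KiB 8192u       /* assumed, mirroring the driver constants */
    #define DEMO_BUF_SIZE_4KiB 4096u

    int main(void)
    {
            unsigned int nopaged_len = 9000;          /* hypothetical jumbo head length */
            unsigned int bmax = DEMO_BUF_SIZE_8KiB;   /* enhanced descriptors */

            if (nopaged_len > bmax) {
                    unsigned int first  = bmax;
                    unsigned int second = nopaged_len - bmax;

                    printf("split: desc0 carries %u bytes, desc1 carries %u bytes\n",
                           first, second);
                    printf("des3 of each descriptor points %u bytes past des2\n",
                           DEMO_BUF_SIZE_4KiB);
            } else {
                    printf("fits in one descriptor (%u <= %u)\n", nopaged_len, bmax);
            }
            return 0;
    }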
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index cd8fb619b1e9..6ec671c9be84 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -46,6 +46,35 @@ struct stmmac_tx_info {
46 bool is_jumbo; 46 bool is_jumbo;
47}; 47};
48 48
49/* Frequently used values are kept adjacent for cache effect */
50struct stmmac_tx_queue {
51 u32 queue_index;
52 struct stmmac_priv *priv_data;
53 struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
54 struct dma_desc *dma_tx;
55 struct sk_buff **tx_skbuff;
56 struct stmmac_tx_info *tx_skbuff_dma;
57 unsigned int cur_tx;
58 unsigned int dirty_tx;
59 dma_addr_t dma_tx_phy;
60 u32 tx_tail_addr;
61};
62
63struct stmmac_rx_queue {
64 u32 queue_index;
65 struct stmmac_priv *priv_data;
66 struct dma_extended_desc *dma_erx;
67 struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
68 struct sk_buff **rx_skbuff;
69 dma_addr_t *rx_skbuff_dma;
70 struct napi_struct napi ____cacheline_aligned_in_smp;
71 unsigned int cur_rx;
72 unsigned int dirty_rx;
73 u32 rx_zeroc_thresh;
74 dma_addr_t dma_rx_phy;
75 u32 rx_tail_addr;
76};
77
49struct stmmac_priv { 78struct stmmac_priv {
50 /* Frequently used values are kept adjacent for cache effect */ 79 /* Frequently used values are kept adjacent for cache effect */
51 struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; 80 struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
@@ -56,28 +85,22 @@ struct stmmac_priv {
56 u32 tx_count_frames; 85 u32 tx_count_frames;
57 u32 tx_coal_frames; 86 u32 tx_coal_frames;
58 u32 tx_coal_timer; 87 u32 tx_coal_timer;
59 struct stmmac_tx_info *tx_skbuff_dma;
60 dma_addr_t dma_tx_phy;
61 int tx_coalesce; 88 int tx_coalesce;
62 int hwts_tx_en; 89 int hwts_tx_en;
63 bool tx_path_in_lpi_mode; 90 bool tx_path_in_lpi_mode;
64 struct timer_list txtimer; 91 struct timer_list txtimer;
65 bool tso; 92 bool tso;
66 93
67 struct dma_desc *dma_rx ____cacheline_aligned_in_smp; 94 /* TX Queue */
68 struct dma_extended_desc *dma_erx; 95 struct stmmac_tx_queue *tx_queue;
69 struct sk_buff **rx_skbuff; 96
70 unsigned int cur_rx; 97 /* RX Queue */
71 unsigned int dirty_rx; 98 struct stmmac_rx_queue *rx_queue;
99
72 unsigned int dma_buf_sz; 100 unsigned int dma_buf_sz;
73 unsigned int rx_copybreak; 101 unsigned int rx_copybreak;
74 unsigned int rx_zeroc_thresh;
75 u32 rx_riwt; 102 u32 rx_riwt;
76 int hwts_rx_en; 103 int hwts_rx_en;
77 dma_addr_t *rx_skbuff_dma;
78 dma_addr_t dma_rx_phy;
79
80 struct napi_struct napi ____cacheline_aligned_in_smp;
81 104
82 void __iomem *ioaddr; 105 void __iomem *ioaddr;
83 struct net_device *dev; 106 struct net_device *dev;
@@ -119,8 +142,6 @@ struct stmmac_priv {
119 spinlock_t ptp_lock; 142 spinlock_t ptp_lock;
120 void __iomem *mmcaddr; 143 void __iomem *mmcaddr;
121 void __iomem *ptpaddr; 144 void __iomem *ptpaddr;
122 u32 rx_tail_addr;
123 u32 tx_tail_addr;
124 u32 mss; 145 u32 mss;
125 146
126#ifdef CONFIG_DEBUG_FS 147#ifdef CONFIG_DEBUG_FS
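With the new stmmac_rx_queue/stmmac_tx_queue structures, all ring state moves out of stmmac_priv into arrays indexed by queue, so code now reaches it as priv->tx_queue[queue].cur_tx and so on. A minimal, hedged sketch of allocating and indexing such per-queue arrays in plain C; the struct below is a cut-down stand-in, and the real driver sizes the arrays from plat->rx_queues_to_use / tx_queues_to_use:

    #include <stdio.h>
    #include <stdlib.h>

    /* Cut-down stand-in for the per-queue TX state added above. */
    struct demo_tx_queue {
            unsigned int queue_index;
            unsigned int cur_tx;
            unsigned int dirty_tx;
    };

    int main(void)
    {
            unsigned int tx_queues_to_use = 4, q;
            struct demo_tx_queue *tx_queue;

            tx_queue = calloc(tx_queues_to_use, sizeof(*tx_queue));
            if (!tx_queue)
                    return 1;

            for (q = 0; q < tx_queues_to_use; q++)
                    tx_queue[q].queue_index = q;    /* per-queue state, indexed by queue */

            printf("queue 2: index=%u cur_tx=%u dirty_tx=%u\n",
                   tx_queue[2].queue_index, tx_queue[2].cur_tx, tx_queue[2].dirty_tx);

            free(tx_queue);
            return 0;
    }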
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 85d64114e159..16808e48ca1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -481,6 +481,7 @@ stmmac_set_pauseparam(struct net_device *netdev,
481 struct ethtool_pauseparam *pause) 481 struct ethtool_pauseparam *pause)
482{ 482{
483 struct stmmac_priv *priv = netdev_priv(netdev); 483 struct stmmac_priv *priv = netdev_priv(netdev);
484 u32 tx_cnt = priv->plat->tx_queues_to_use;
484 struct phy_device *phy = netdev->phydev; 485 struct phy_device *phy = netdev->phydev;
485 int new_pause = FLOW_OFF; 486 int new_pause = FLOW_OFF;
486 487
@@ -511,7 +512,7 @@ stmmac_set_pauseparam(struct net_device *netdev,
511 } 512 }
512 513
513 priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl, 514 priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl,
514 priv->pause); 515 priv->pause, tx_cnt);
515 return 0; 516 return 0;
516} 517}
517 518
@@ -519,6 +520,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
519 struct ethtool_stats *dummy, u64 *data) 520 struct ethtool_stats *dummy, u64 *data)
520{ 521{
521 struct stmmac_priv *priv = netdev_priv(dev); 522 struct stmmac_priv *priv = netdev_priv(dev);
523 u32 rx_queues_count = priv->plat->rx_queues_to_use;
524 u32 tx_queues_count = priv->plat->tx_queues_to_use;
522 int i, j = 0; 525 int i, j = 0;
523 526
524 /* Update the DMA HW counters for dwmac10/100 */ 527 /* Update the DMA HW counters for dwmac10/100 */
@@ -549,7 +552,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
549 if ((priv->hw->mac->debug) && 552 if ((priv->hw->mac->debug) &&
550 (priv->synopsys_id >= DWMAC_CORE_3_50)) 553 (priv->synopsys_id >= DWMAC_CORE_3_50))
551 priv->hw->mac->debug(priv->ioaddr, 554 priv->hw->mac->debug(priv->ioaddr,
552 (void *)&priv->xstats); 555 (void *)&priv->xstats,
556 rx_queues_count, tx_queues_count);
553 } 557 }
554 for (i = 0; i < STMMAC_STATS_LEN; i++) { 558 for (i = 0; i < STMMAC_STATS_LEN; i++) {
555 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; 559 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
@@ -726,6 +730,7 @@ static int stmmac_set_coalesce(struct net_device *dev,
726 struct ethtool_coalesce *ec) 730 struct ethtool_coalesce *ec)
727{ 731{
728 struct stmmac_priv *priv = netdev_priv(dev); 732 struct stmmac_priv *priv = netdev_priv(dev);
733 u32 rx_cnt = priv->plat->rx_queues_to_use;
729 unsigned int rx_riwt; 734 unsigned int rx_riwt;
730 735
731 /* Check not supported parameters */ 736 /* Check not supported parameters */
@@ -764,7 +769,7 @@ static int stmmac_set_coalesce(struct net_device *dev,
764 priv->tx_coal_frames = ec->tx_max_coalesced_frames; 769 priv->tx_coal_frames = ec->tx_max_coalesced_frames;
765 priv->tx_coal_timer = ec->tx_coalesce_usecs; 770 priv->tx_coal_timer = ec->tx_coalesce_usecs;
766 priv->rx_riwt = rx_riwt; 771 priv->rx_riwt = rx_riwt;
767 priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt); 772 priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt, rx_cnt);
768 773
769 return 0; 774 return 0;
770} 775}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4498a3861aa3..531bf1dc35cd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -185,26 +185,38 @@ static void print_pkt(unsigned char *buf, int len)
185 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); 185 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
186} 186}
187 187
188static inline u32 stmmac_tx_avail(struct stmmac_priv *priv) 188/**
189 * stmmac_tx_avail - Get tx queue availability
190 * @priv: driver private structure
191 * @queue: TX queue index
192 */
193static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
189{ 194{
195 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
190 u32 avail; 196 u32 avail;
191 197
192 if (priv->dirty_tx > priv->cur_tx) 198 if (tx_q->dirty_tx > tx_q->cur_tx)
193 avail = priv->dirty_tx - priv->cur_tx - 1; 199 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
194 else 200 else
195 avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1; 201 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
196 202
197 return avail; 203 return avail;
198} 204}
199 205
200static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv) 206/**
207 * stmmac_rx_dirty - Get RX queue dirty
208 * @priv: driver private structure
209 * @queue: RX queue index
210 */
211static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
201{ 212{
213 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
202 u32 dirty; 214 u32 dirty;
203 215
204 if (priv->dirty_rx <= priv->cur_rx) 216 if (rx_q->dirty_rx <= rx_q->cur_rx)
205 dirty = priv->cur_rx - priv->dirty_rx; 217 dirty = rx_q->cur_rx - rx_q->dirty_rx;
206 else 218 else
207 dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx; 219 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
208 220
209 return dirty; 221 return dirty;
210} 222}
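stmmac_tx_avail() and stmmac_rx_dirty() keep the same circular-ring arithmetic as before, now applied to the selected queue's cur/dirty indices; one slot is always held back so a full ring and an empty ring remain distinguishable. A standalone check of that arithmetic with sample indices; the ring size of 256 below is assumed for the example:

    #include <stdio.h>

    #define DEMO_DMA_TX_SIZE 256u  /* assumed ring size for the example */

    static unsigned int tx_avail(unsigned int dirty_tx, unsigned int cur_tx)
    {
            if (dirty_tx > cur_tx)
                    return dirty_tx - cur_tx - 1;
            return DEMO_DMA_TX_SIZE - cur_tx + dirty_tx - 1;
    }

    int main(void)
    {
            /* Empty ring: both indices level, one slot held back. */
            printf("dirty=0,   cur=0   -> avail=%u\n", tx_avail(0, 0));     /* 255 */
            /* Consumer far behind the producer. */
            printf("dirty=10,  cur=200 -> avail=%u\n", tx_avail(10, 200));  /* 65  */
            /* Producer wrapped, consumer catching up. */
            printf("dirty=200, cur=10  -> avail=%u\n", tx_avail(200, 10));  /* 189 */
            return 0;
    }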
@@ -232,9 +244,19 @@ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
232 */ 244 */
233static void stmmac_enable_eee_mode(struct stmmac_priv *priv) 245static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
234{ 246{
247 u32 tx_cnt = priv->plat->tx_queues_to_use;
248 u32 queue;
249
250 /* check if all TX queues have the work finished */
251 for (queue = 0; queue < tx_cnt; queue++) {
252 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
253
254 if (tx_q->dirty_tx != tx_q->cur_tx)
255 return; /* still unfinished work */
256 }
257
235 /* Check and enter in LPI mode */ 258 /* Check and enter in LPI mode */
236 if ((priv->dirty_tx == priv->cur_tx) && 259 if (!priv->tx_path_in_lpi_mode)
237 (priv->tx_path_in_lpi_mode == false))
238 priv->hw->mac->set_eee_mode(priv->hw, 260 priv->hw->mac->set_eee_mode(priv->hw,
239 priv->plat->en_tx_lpi_clockgating); 261 priv->plat->en_tx_lpi_clockgating);
240} 262}
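The EEE change above makes entering LPI conditional on every TX queue being drained, not just the single ring. A small standalone sketch of that all-queues check; the structure and helper names are illustrative, not the driver's:

    #include <stdio.h>
    #include <stdbool.h>

    struct demo_txq { unsigned int cur_tx, dirty_tx; };

    /* Enter low-power idle only when no TX queue has pending work,
     * mirroring the per-queue loop added above.
     */
    static bool all_tx_queues_empty(const struct demo_txq *q, unsigned int n)
    {
            unsigned int i;

            for (i = 0; i < n; i++)
                    if (q[i].dirty_tx != q[i].cur_tx)
                            return false;   /* still unfinished work on queue i */
            return true;
    }

    int main(void)
    {
            struct demo_txq queues[3] = { {4, 4}, {7, 7}, {9, 6} };

            printf("enter LPI? %s\n", all_tx_queues_empty(queues, 3) ? "yes" : "no");
            queues[2].dirty_tx = 9; /* queue 2 drains */
            printf("enter LPI? %s\n", all_tx_queues_empty(queues, 3) ? "yes" : "no");
            return 0;
    }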
@@ -673,6 +695,19 @@ static void stmmac_release_ptp(struct stmmac_priv *priv)
673} 695}
674 696
675/** 697/**
698 * stmmac_mac_flow_ctrl - Configure flow control in all queues
699 * @priv: driver private structure
700 * Description: It is used for configuring the flow control in all queues
701 */
702static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
703{
704 u32 tx_cnt = priv->plat->tx_queues_to_use;
705
706 priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
707 priv->pause, tx_cnt);
708}
709
710/**
676 * stmmac_adjust_link - adjusts the link parameters 711 * stmmac_adjust_link - adjusts the link parameters
677 * @dev: net device structure 712 * @dev: net device structure
678 * Description: this is the helper called by the physical abstraction layer 713 * Description: this is the helper called by the physical abstraction layer
@@ -687,7 +722,6 @@ static void stmmac_adjust_link(struct net_device *dev)
687 struct phy_device *phydev = dev->phydev; 722 struct phy_device *phydev = dev->phydev;
688 unsigned long flags; 723 unsigned long flags;
689 int new_state = 0; 724 int new_state = 0;
690 unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
691 725
692 if (!phydev) 726 if (!phydev)
693 return; 727 return;
@@ -709,8 +743,7 @@ static void stmmac_adjust_link(struct net_device *dev)
709 } 743 }
710 /* Flow Control operation */ 744 /* Flow Control operation */
711 if (phydev->pause) 745 if (phydev->pause)
712 priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex, 746 stmmac_mac_flow_ctrl(priv, phydev->duplex);
713 fc, pause_time);
714 747
715 if (phydev->speed != priv->speed) { 748 if (phydev->speed != priv->speed) {
716 new_state = 1; 749 new_state = 1;
@@ -880,20 +913,40 @@ static int stmmac_init_phy(struct net_device *dev)
880 913
881static void stmmac_display_rings(struct stmmac_priv *priv) 914static void stmmac_display_rings(struct stmmac_priv *priv)
882{ 915{
916 u32 rx_cnt = priv->plat->rx_queues_to_use;
917 u32 tx_cnt = priv->plat->tx_queues_to_use;
883 void *head_rx, *head_tx; 918 void *head_rx, *head_tx;
919 u32 queue;
884 920
885 if (priv->extend_desc) { 921 /* Display RX rings */
886 head_rx = (void *)priv->dma_erx; 922 for (queue = 0; queue < rx_cnt; queue++) {
887 head_tx = (void *)priv->dma_etx; 923 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
888 } else { 924
889 head_rx = (void *)priv->dma_rx; 925 pr_info("\tRX Queue %d rings\n", queue);
890 head_tx = (void *)priv->dma_tx; 926
927 if (priv->extend_desc)
928 head_rx = (void *)rx_q->dma_erx;
929 else
930 head_rx = (void *)rx_q->dma_rx;
931
932 /* Display Rx ring */
933 priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
891 } 934 }
892 935
893 /* Display Rx ring */ 936 /* Display TX rings */
894 priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true); 937 for (queue = 0; queue < tx_cnt; queue++) {
895 /* Display Tx ring */ 938 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
896 priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false); 939
940 pr_info("\tTX Queue %d rings\n", queue);
941
942 if (priv->extend_desc)
943 head_tx = (void *)tx_q->dma_etx;
944 else
945 head_tx = (void *)tx_q->dma_tx;
946
947 /* Display Tx ring */
948 priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
949 }
897} 950}
898 951
899static int stmmac_set_bfsize(int mtu, int bufsize) 952static int stmmac_set_bfsize(int mtu, int bufsize)
@@ -913,48 +966,86 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
913} 966}
914 967
915/** 968/**
916 * stmmac_clear_descriptors - clear descriptors 969 * stmmac_clear_rx_descriptors - clear the descriptors of a RX queue
917 * @priv: driver private structure 970 * @priv: driver private structure
918 * Description: this function is called to clear the tx and rx descriptors 971 * @queue: RX queue index
972 * Description: this function is called to clear the RX descriptors
919 * in case of both basic and extended descriptors are used. 973 * in case of both basic and extended descriptors are used.
920 */ 974 */
921static void stmmac_clear_descriptors(struct stmmac_priv *priv) 975static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
922{ 976{
923 int i; 977 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
978 u32 i = 0;
924 979
925 /* Clear the Rx/Tx descriptors */ 980 /* Clear the RX descriptors */
926 for (i = 0; i < DMA_RX_SIZE; i++) 981 for (i = 0; i < DMA_RX_SIZE; i++)
927 if (priv->extend_desc) 982 if (priv->extend_desc)
928 priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic, 983 priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
929 priv->use_riwt, priv->mode, 984 priv->use_riwt, priv->mode,
930 (i == DMA_RX_SIZE - 1)); 985 (i == DMA_RX_SIZE - 1));
931 else 986 else
932 priv->hw->desc->init_rx_desc(&priv->dma_rx[i], 987 priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
933 priv->use_riwt, priv->mode, 988 priv->use_riwt, priv->mode,
934 (i == DMA_RX_SIZE - 1)); 989 (i == DMA_RX_SIZE - 1));
990}
991
992/**
993 * stmmac_clear_tx_descriptors - clear the descriptors of a TX queue
994 * @priv: driver private structure
995 * @queue: TX queue index
996 * Description: this function is called to clear the TX descriptors
997 * in case of both basic and extended descriptors are used.
998 */
999static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1000{
1001 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1002 u32 i = 0;
1003
1004 /* Clear the TX descriptors */
935 for (i = 0; i < DMA_TX_SIZE; i++) 1005 for (i = 0; i < DMA_TX_SIZE; i++)
936 if (priv->extend_desc) 1006 if (priv->extend_desc)
937 priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic, 1007 priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
938 priv->mode, 1008 priv->mode,
939 (i == DMA_TX_SIZE - 1)); 1009 (i == DMA_TX_SIZE - 1));
940 else 1010 else
941 priv->hw->desc->init_tx_desc(&priv->dma_tx[i], 1011 priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
942 priv->mode, 1012 priv->mode,
943 (i == DMA_TX_SIZE - 1)); 1013 (i == DMA_TX_SIZE - 1));
944} 1014}
945 1015
946/** 1016/**
1017 * stmmac_clear_descriptors - clear descriptors
1018 * @priv: driver private structure
1019 * Description: this function is called to clear the tx and rx descriptors
1020 * in case of both basic and extended descriptors are used.
1021 */
1022static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1023{
1024 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1025 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1026 u32 queue;
1027
1028 for (queue = 0; queue < rx_queue_cnt; queue++)
1029 stmmac_clear_rx_descriptors(priv, queue);
1030
1031 for (queue = 0; queue < tx_queue_cnt; queue++)
1032 stmmac_clear_tx_descriptors(priv, queue);
1033}
1034
1035/**
947 * stmmac_init_rx_buffers - init the RX descriptor buffer. 1036 * stmmac_init_rx_buffers - init the RX descriptor buffer.
948 * @priv: driver private structure 1037 * @priv: driver private structure
949 * @p: descriptor pointer 1038 * @p: descriptor pointer
950 * @i: descriptor index 1039 * @i: descriptor index
951 * @flags: gfp flag. 1040 * @flags: gfp flag.
1041 * @queue: RX queue index
952 * Description: this function is called to allocate a receive buffer, perform 1042 * Description: this function is called to allocate a receive buffer, perform
953 * the DMA mapping and init the descriptor. 1043 * the DMA mapping and init the descriptor.
954 */ 1044 */
955static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, 1045static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
956 int i, gfp_t flags) 1046 int i, gfp_t flags, u32 queue)
957{ 1047{
1048 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
958 struct sk_buff *skb; 1049 struct sk_buff *skb;
959 1050
960 skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags); 1051 skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
@@ -963,20 +1054,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
963 "%s: Rx init fails; skb is NULL\n", __func__); 1054 "%s: Rx init fails; skb is NULL\n", __func__);
964 return -ENOMEM; 1055 return -ENOMEM;
965 } 1056 }
966 priv->rx_skbuff[i] = skb; 1057 rx_q->rx_skbuff[i] = skb;
967 priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, 1058 rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
968 priv->dma_buf_sz, 1059 priv->dma_buf_sz,
969 DMA_FROM_DEVICE); 1060 DMA_FROM_DEVICE);
970 if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) { 1061 if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
971 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); 1062 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
972 dev_kfree_skb_any(skb); 1063 dev_kfree_skb_any(skb);
973 return -EINVAL; 1064 return -EINVAL;
974 } 1065 }
975 1066
976 if (priv->synopsys_id >= DWMAC_CORE_4_00) 1067 if (priv->synopsys_id >= DWMAC_CORE_4_00)
977 p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]); 1068 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
978 else 1069 else
979 p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]); 1070 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
980 1071
981 if ((priv->hw->mode->init_desc3) && 1072 if ((priv->hw->mode->init_desc3) &&
982 (priv->dma_buf_sz == BUF_SIZE_16KiB)) 1073 (priv->dma_buf_sz == BUF_SIZE_16KiB))
@@ -985,30 +1076,136 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
985 return 0; 1076 return 0;
986} 1077}
987 1078
988static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i) 1079/**
1080 * stmmac_free_rx_buffers - free RX buffers.
1081 * @priv: driver private structure
1082 * @queue: RX queue index
1083 * @i: buffer index
1084 */
1085static void stmmac_free_rx_buffers(struct stmmac_priv *priv, u32 queue, int i)
989{ 1086{
990 if (priv->rx_skbuff[i]) { 1087 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
991 dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], 1088
1089 if (rx_q->rx_skbuff[i]) {
1090 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
992 priv->dma_buf_sz, DMA_FROM_DEVICE); 1091 priv->dma_buf_sz, DMA_FROM_DEVICE);
993 dev_kfree_skb_any(priv->rx_skbuff[i]); 1092 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
994 } 1093 }
995 priv->rx_skbuff[i] = NULL; 1094 rx_q->rx_skbuff[i] = NULL;
996} 1095}
997 1096
998/** 1097/**
999 * init_dma_desc_rings - init the RX/TX descriptor rings 1098 * stmmac_free_tx_buffers - free TX buffers.
1099 * @priv: driver private structure
1100 * @queue: TX queue index
1101 * @i: buffer index
1102 */
1103static void stmmac_free_tx_buffers(struct stmmac_priv *priv, u32 queue, u32 i)
1104{
1105 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1106
1107 if (tx_q->tx_skbuff_dma[i].buf) {
1108 if (tx_q->tx_skbuff_dma[i].map_as_page)
1109 dma_unmap_page(priv->device,
1110 tx_q->tx_skbuff_dma[i].buf,
1111 tx_q->tx_skbuff_dma[i].len,
1112 DMA_TO_DEVICE);
1113 else
1114 dma_unmap_single(priv->device,
1115 tx_q->tx_skbuff_dma[i].buf,
1116 tx_q->tx_skbuff_dma[i].len,
1117 DMA_TO_DEVICE);
1118 }
1119
1120 if (tx_q->tx_skbuff[i]) {
1121 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1122 tx_q->tx_skbuff[i] = NULL;
1123 tx_q->tx_skbuff_dma[i].buf = 0;
1124 tx_q->tx_skbuff_dma[i].map_as_page = false;
1125 }
1126}
1127
1128/**
1129 * init_tx_dma_desc_rings - init the TX descriptor rings
1130 * @dev: net device structure
1131 * Description: this function initializes the DMA TX descriptors
1132 * and allocates the socket buffers. It supports the chained and ring
1133 * modes.
1134 */
1135static int init_tx_dma_desc_rings(struct net_device *dev)
1136{
1137 struct stmmac_priv *priv = netdev_priv(dev);
1138 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1139 u32 queue;
1140 int i = 0;
1141
1142 for (queue = 0; queue < tx_queue_cnt; queue++) {
1143 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1144
1145 netif_dbg(priv, probe, priv->dev,
1146 "(%s) dma_tx_phy=0x%08x\n", __func__,
1147 (u32)tx_q->dma_tx_phy);
1148
1149 /* Setup the chained descriptor addresses */
1150 if (priv->mode == STMMAC_CHAIN_MODE) {
1151 if (priv->extend_desc)
1152 priv->hw->mode->init(tx_q->dma_etx,
1153 tx_q->dma_tx_phy,
1154 DMA_TX_SIZE, 1);
1155 else
1156 priv->hw->mode->init(tx_q->dma_tx,
1157 tx_q->dma_tx_phy,
1158 DMA_TX_SIZE, 0);
1159 }
1160
1161 for (i = 0; i < DMA_TX_SIZE; i++) {
1162 struct dma_desc *p;
1163
1164 if (priv->extend_desc)
1165 p = &((tx_q->dma_etx + i)->basic);
1166 else
1167 p = tx_q->dma_tx + i;
1168
1169 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1170 p->des0 = 0;
1171 p->des1 = 0;
1172 p->des2 = 0;
1173 p->des3 = 0;
1174 } else {
1175 p->des2 = 0;
1176 }
1177
1178 tx_q->tx_skbuff_dma[i].buf = 0;
1179 tx_q->tx_skbuff_dma[i].map_as_page = false;
1180 tx_q->tx_skbuff_dma[i].len = 0;
1181 tx_q->tx_skbuff_dma[i].last_segment = false;
1182 tx_q->tx_skbuff[i] = NULL;
1183 }
1184
1185 tx_q->dirty_tx = 0;
1186 tx_q->cur_tx = 0;
1187 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1188 }
1189
1190 return 0;
1191}
1192
1193/**
1194 * init_rx_dma_desc_rings - init the RX descriptor rings
1000 * @dev: net device structure 1195 * @dev: net device structure
1001 * @flags: gfp flag. 1196 * @flags: gfp flag.
1002 * Description: this function initializes the DMA RX/TX descriptors 1197 * Description: this function initializes the DMA RX descriptors
1003 * and allocates the socket buffers. It supports the chained and ring 1198 * and allocates the socket buffers. It supports the chained and ring
1004 * modes. 1199 * modes.
1005 */ 1200 */
1006static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) 1201static int init_rx_dma_desc_rings(struct net_device *dev, gfp_t flags)
1007{ 1202{
1008 int i;
1009 struct stmmac_priv *priv = netdev_priv(dev); 1203 struct stmmac_priv *priv = netdev_priv(dev);
1204 u32 rx_count = priv->plat->rx_queues_to_use;
1010 unsigned int bfsize = 0; 1205 unsigned int bfsize = 0;
1011 int ret = -ENOMEM; 1206 int ret = -ENOMEM;
1207 u32 queue;
1208 int i;
1012 1209
1013 if (priv->hw->mode->set_16kib_bfsize) 1210 if (priv->hw->mode->set_16kib_bfsize)
1014 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu); 1211 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
@@ -1018,235 +1215,350 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1018 1215
1019 priv->dma_buf_sz = bfsize; 1216 priv->dma_buf_sz = bfsize;
1020 1217
1021 netif_dbg(priv, probe, priv->dev,
1022 "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
1023 __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);
1024
1025 /* RX INITIALIZATION */ 1218 /* RX INITIALIZATION */
1026 netif_dbg(priv, probe, priv->dev, 1219 netif_dbg(priv, probe, priv->dev,
1027 "SKB addresses:\nskb\t\tskb data\tdma data\n"); 1220 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1028 1221
1029 for (i = 0; i < DMA_RX_SIZE; i++) { 1222 for (queue = 0; queue < rx_count; queue++) {
1030 struct dma_desc *p; 1223 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1031 if (priv->extend_desc)
1032 p = &((priv->dma_erx + i)->basic);
1033 else
1034 p = priv->dma_rx + i;
1035 1224
1036 ret = stmmac_init_rx_buffers(priv, p, i, flags); 1225 netif_dbg(priv, probe, priv->dev,
1037 if (ret) 1226 "(%s) dma_rx_phy=0x%08x\n", __func__,
1038 goto err_init_rx_buffers; 1227 (u32)rx_q->dma_rx_phy);
1039 1228
1040 netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n", 1229 for (i = 0; i < DMA_RX_SIZE; i++) {
1041 priv->rx_skbuff[i], priv->rx_skbuff[i]->data, 1230 struct dma_desc *p;
1042 (unsigned int)priv->rx_skbuff_dma[i]);
1043 }
1044 priv->cur_rx = 0;
1045 priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1046 buf_sz = bfsize;
1047 1231
1048 /* Setup the chained descriptor addresses */ 1232 if (priv->extend_desc)
1049 if (priv->mode == STMMAC_CHAIN_MODE) { 1233 p = &((rx_q->dma_erx + i)->basic);
1050 if (priv->extend_desc) { 1234 else
1051 priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy, 1235 p = rx_q->dma_rx + i;
1052 DMA_RX_SIZE, 1); 1236
1053 priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy, 1237 ret = stmmac_init_rx_buffers(priv, p, i, flags, queue);
1054 DMA_TX_SIZE, 1); 1238 if (ret)
1055 } else { 1239 goto err_init_rx_buffers;
1056 priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy, 1240
1057 DMA_RX_SIZE, 0); 1241 netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1058 priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy, 1242 rx_q->rx_skbuff[i],
1059 DMA_TX_SIZE, 0); 1243 rx_q->rx_skbuff[i]->data,
1244 (unsigned int)rx_q->rx_skbuff_dma[i]);
1060 } 1245 }
1061 }
1062 1246
1063 /* TX INITIALIZATION */ 1247 rx_q->cur_rx = 0;
1064 for (i = 0; i < DMA_TX_SIZE; i++) { 1248 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1065 struct dma_desc *p;
1066 if (priv->extend_desc)
1067 p = &((priv->dma_etx + i)->basic);
1068 else
1069 p = priv->dma_tx + i;
1070 1249
1071 if (priv->synopsys_id >= DWMAC_CORE_4_00) { 1250 stmmac_clear_rx_descriptors(priv, queue);
1072 p->des0 = 0; 1251
1073 p->des1 = 0; 1252 if (priv->mode == STMMAC_CHAIN_MODE) {
1074 p->des2 = 0; 1253 if (priv->extend_desc)
1075 p->des3 = 0; 1254 priv->hw->mode->init(rx_q->dma_erx,
1076 } else { 1255 rx_q->dma_rx_phy,
1077 p->des2 = 0; 1256 DMA_RX_SIZE, 1);
1257 else
1258 priv->hw->mode->init(rx_q->dma_rx,
1259 rx_q->dma_rx_phy,
1260 DMA_RX_SIZE, 0);
1078 } 1261 }
1262 }
1263
1264 buf_sz = bfsize;
1265
1266 return 0;
1079 1267
1080 priv->tx_skbuff_dma[i].buf = 0; 1268err_init_rx_buffers:
1081 priv->tx_skbuff_dma[i].map_as_page = false; 1269 while (queue-- >= 0) {
1082 priv->tx_skbuff_dma[i].len = 0; 1270 while (--i >= 0)
1083 priv->tx_skbuff_dma[i].last_segment = false; 1271 stmmac_free_rx_buffers(priv, queue, i);
1084 priv->tx_skbuff[i] = NULL; 1272
1273 i = DMA_RX_SIZE;
1085 } 1274 }
1086 1275
1087 priv->dirty_tx = 0; 1276 return ret;
1088 priv->cur_tx = 0; 1277}
1089 netdev_reset_queue(priv->dev);
1090 1278
1091 stmmac_clear_descriptors(priv); 1279/**
1280 * init_dma_desc_rings - init the RX/TX descriptor rings
1281 * @dev: net device structure
1282 * @flags: gfp flag.
1283 * Description: this function initializes the DMA RX/TX descriptors
 1284 * and allocates the socket buffers. It supports the chained and ring
1285 * modes.
1286 */
1287static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1288{
1289 struct stmmac_priv *priv = netdev_priv(dev);
1290 int ret = init_rx_dma_desc_rings(dev, flags);
1291
1292 if (ret)
1293 return ret;
1294
1295 ret = init_tx_dma_desc_rings(dev);
1092 1296
1093 if (netif_msg_hw(priv)) 1297 if (netif_msg_hw(priv))
1094 stmmac_display_rings(priv); 1298 stmmac_display_rings(priv);
1095 1299
1096 return 0;
1097err_init_rx_buffers:
1098 while (--i >= 0)
1099 stmmac_free_rx_buffers(priv, i);
1100 return ret; 1300 return ret;
1101} 1301}
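A minimal sketch of the per-queue RX state this patch relies on may help when reading the hunks above and below. The authoritative definition lives in stmmac.h; only the fields actually touched in this file are listed, and the comments are the editor's summary, not the original ones.

/* Sketch only -- see stmmac.h for the real struct stmmac_rx_queue */
struct stmmac_rx_queue {
	u32 queue_index;			/* RX queue number */
	struct stmmac_priv *priv_data;		/* back-pointer to driver state */
	struct dma_extended_desc *dma_erx;	/* extended descriptor ring */
	struct dma_desc *dma_rx;		/* basic descriptor ring */
	struct sk_buff **rx_skbuff;		/* skb per descriptor */
	dma_addr_t *rx_skbuff_dma;		/* DMA handle per descriptor */
	dma_addr_t dma_rx_phy;			/* ring base address for the DMA */
	u32 rx_tail_addr;			/* tail pointer written to HW */
	unsigned int cur_rx;			/* next descriptor to process */
	unsigned int dirty_rx;			/* next descriptor to refill */
	struct napi_struct napi;		/* per-queue NAPI context */
};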
1102 1302
1103static void dma_free_rx_skbufs(struct stmmac_priv *priv) 1303static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1104{ 1304{
1105 int i; 1305 int i;
1106 1306
1107 for (i = 0; i < DMA_RX_SIZE; i++) 1307 for (i = 0; i < DMA_RX_SIZE; i++)
1108 stmmac_free_rx_buffers(priv, i); 1308 stmmac_free_rx_buffers(priv, queue, i);
1109} 1309}
1110 1310
1111static void dma_free_tx_skbufs(struct stmmac_priv *priv) 1311static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1112{ 1312{
1113 int i; 1313 int i;
1114 1314
1115 for (i = 0; i < DMA_TX_SIZE; i++) { 1315 for (i = 0; i < DMA_TX_SIZE; i++)
1116 if (priv->tx_skbuff_dma[i].buf) { 1316 stmmac_free_tx_buffers(priv, queue, i);
1117 if (priv->tx_skbuff_dma[i].map_as_page) 1317}
1118 dma_unmap_page(priv->device, 1318
1119 priv->tx_skbuff_dma[i].buf, 1319/**
1120 priv->tx_skbuff_dma[i].len, 1320 * free_rx_dma_desc_resources - free RX DMA resources
1121 DMA_TO_DEVICE); 1321 * @priv: driver private structure
1122 else 1322 */
1123 dma_unmap_single(priv->device, 1323static void free_rx_dma_desc_resources(struct stmmac_priv *priv)
1124 priv->tx_skbuff_dma[i].buf, 1324{
1125 priv->tx_skbuff_dma[i].len, 1325 u32 rx_count = priv->plat->rx_queues_to_use;
1126 DMA_TO_DEVICE); 1326 u32 queue = 0;
1127 } 1327
1328 if (!priv->rx_queue)
1329 return;
1330
1331 /* Free RX queue resources */
1332 for (queue = 0; queue < rx_count; queue++) {
1333 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1334
1335 if (!rx_q)
1336 break;
1337
1338 /* Release the DMA RX socket buffers */
1339 dma_free_rx_skbufs(priv, queue);
1340
1341 kfree(rx_q->rx_skbuff);
1342
1343 kfree(rx_q->rx_skbuff_dma);
1344
1345 if (!priv->extend_desc)
1346 dma_free_coherent(priv->device,
1347 DMA_RX_SIZE * sizeof(struct dma_desc),
1348 rx_q->dma_rx,
1349 rx_q->dma_rx_phy);
1350 else
1351 dma_free_coherent(priv->device, DMA_RX_SIZE *
1352 sizeof(struct dma_extended_desc),
1353 rx_q->dma_erx,
1354 rx_q->dma_rx_phy);
1355 }
1356
1357 kfree(priv->rx_queue);
1358}
1359
1360/**
1361 * free_tx_dma_desc_resources - free TX DMA resources
1362 * @priv: driver private structure
1363 */
1364static void free_tx_dma_desc_resources(struct stmmac_priv *priv)
1365{
1366 u32 tx_count = priv->plat->tx_queues_to_use;
1367 u32 queue = 0;
1368
1369 if (!priv->tx_queue)
1370 return;
1371
1372 /* Free TX queue resources */
1373 for (queue = 0; queue < tx_count; queue++) {
1374 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1375
1376 if (!tx_q)
1377 break;
1378
1379 /* Release the DMA TX socket buffers */
1380 dma_free_tx_skbufs(priv, queue);
1381
1382 kfree(tx_q->tx_skbuff);
1383
1384 kfree(tx_q->tx_skbuff_dma);
1385
1386 if (!priv->extend_desc)
1387 dma_free_coherent(priv->device,
1388 DMA_TX_SIZE * sizeof(struct dma_desc),
1389 tx_q->dma_tx,
1390 tx_q->dma_tx_phy);
1391 else
1392 dma_free_coherent(priv->device, DMA_TX_SIZE *
1393 sizeof(struct dma_extended_desc),
1394 tx_q->dma_etx,
1395 tx_q->dma_tx_phy);
1396 }
1397
1398 kfree(priv->tx_queue);
1399}
1400
1401/**
1402 * free_dma_desc_resources - free All DMA resources
1403 * @priv: driver private structure
1404 */
1405static void free_dma_desc_resources(struct stmmac_priv *priv)
1406{
1407 free_rx_dma_desc_resources(priv);
1408 free_tx_dma_desc_resources(priv);
1409}
1410
1411/**
1412 * alloc_rx_dma_desc_resources - alloc RX resources.
1413 * @priv: private structure
1414 * Description: according to which descriptor can be used (extend or basic)
1415 * this function allocates the resources for RX paths. It pre-allocates the
1416 * RX socket buffer in order to allow zero-copy mechanism.
1417 */
1418static int alloc_rx_dma_desc_resources(struct stmmac_priv *priv)
1419{
1420 u32 rx_count = priv->plat->rx_queues_to_use;
1421 int ret = -ENOMEM;
1422 u32 queue = 0;
1423
1424 /* Allocate RX queues array */
1425 priv->rx_queue = kmalloc_array(rx_count,
1426 sizeof(struct stmmac_rx_queue),
1427 GFP_KERNEL);
1428 if (!priv->rx_queue) {
1429 kfree(priv->rx_queue);
1430 return -ENOMEM;
1431 }
1432
1433 /* RX queues buffers and DMA */
1434 for (queue = 0; queue < rx_count; queue++) {
1435 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1436
1437 rx_q->queue_index = queue;
1438 rx_q->priv_data = priv;
1128 1439
1129 if (priv->tx_skbuff[i]) { 1440 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1130 dev_kfree_skb_any(priv->tx_skbuff[i]); 1441 sizeof(dma_addr_t),
1131 priv->tx_skbuff[i] = NULL; 1442 GFP_KERNEL);
1132 priv->tx_skbuff_dma[i].buf = 0; 1443 if (!rx_q->rx_skbuff_dma)
1133 priv->tx_skbuff_dma[i].map_as_page = false; 1444 goto err_dma_buffers;
1445
1446 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1447 sizeof(struct sk_buff *),
1448 GFP_KERNEL);
1449 if (!rx_q->rx_skbuff)
1450 goto err_dma_buffers;
1451
1452 if (priv->extend_desc) {
1453 rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1454 (DMA_RX_SIZE * sizeof(struct dma_extended_desc)),
1455 &rx_q->dma_rx_phy, GFP_KERNEL);
1456
1457 if (!rx_q->dma_erx)
1458 goto err_dma_buffers;
1459 } else {
1460 rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1461 (DMA_RX_SIZE * sizeof(struct dma_desc)),
1462 &rx_q->dma_rx_phy, GFP_KERNEL);
1463
1464 if (!rx_q->dma_rx)
1465 goto err_dma_buffers;
1134 } 1466 }
1135 } 1467 }
1468
1469 return 0;
1470
1471err_dma_buffers:
1472 free_rx_dma_desc_resources(priv);
1473
1474 return ret;
1136} 1475}
1137 1476
1138/** 1477/**
1139 * alloc_dma_desc_resources - alloc TX/RX resources. 1478 * alloc_tx_dma_desc_resources - alloc TX resources.
1140 * @priv: private structure 1479 * @priv: private structure
1141 * Description: according to which descriptor can be used (extend or basic) 1480 * Description: according to which descriptor can be used (extend or basic)
1142 * this function allocates the resources for TX and RX paths. In case of 1481 * this function allocates the resources for TX paths.
1143 * reception, for example, it pre-allocated the RX socket buffer in order to
1144 * allow zero-copy mechanism.
1145 */ 1482 */
1146static int alloc_dma_desc_resources(struct stmmac_priv *priv) 1483static int alloc_tx_dma_desc_resources(struct stmmac_priv *priv)
1147{ 1484{
1485 u32 tx_count = priv->plat->tx_queues_to_use;
1148 int ret = -ENOMEM; 1486 int ret = -ENOMEM;
1487 u32 queue = 0;
1149 1488
1150 priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t), 1489 /* Allocate TX queues array */
1151 GFP_KERNEL); 1490 priv->tx_queue = kmalloc_array(tx_count,
1152 if (!priv->rx_skbuff_dma) 1491 sizeof(struct stmmac_tx_queue),
1492 GFP_KERNEL);
1493 if (!priv->tx_queue)
1153 return -ENOMEM; 1494 return -ENOMEM;
1154 1495
1155 priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *), 1496 /* TX queues buffers and DMA */
1156 GFP_KERNEL); 1497 for (queue = 0; queue < tx_count; queue++) {
1157 if (!priv->rx_skbuff) 1498 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1158 goto err_rx_skbuff;
1159
1160 priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1161 sizeof(*priv->tx_skbuff_dma),
1162 GFP_KERNEL);
1163 if (!priv->tx_skbuff_dma)
1164 goto err_tx_skbuff_dma;
1165
1166 priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
1167 GFP_KERNEL);
1168 if (!priv->tx_skbuff)
1169 goto err_tx_skbuff;
1170
1171 if (priv->extend_desc) {
1172 priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
1173 sizeof(struct
1174 dma_extended_desc),
1175 &priv->dma_rx_phy,
1176 GFP_KERNEL);
1177 if (!priv->dma_erx)
1178 goto err_dma;
1179 1499
1180 priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * 1500 tx_q->queue_index = queue;
1181 sizeof(struct 1501 tx_q->priv_data = priv;
1182 dma_extended_desc), 1502
1183 &priv->dma_tx_phy, 1503 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1504 sizeof(struct stmmac_tx_info),
1505 GFP_KERNEL);
1506
1507 if (!tx_q->tx_skbuff_dma)
1508 goto err_dma_buffers;
1509
1510 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1511 sizeof(struct sk_buff *),
1184 GFP_KERNEL); 1512 GFP_KERNEL);
1185 if (!priv->dma_etx) { 1513 if (!tx_q->tx_skbuff)
1186 dma_free_coherent(priv->device, DMA_RX_SIZE * 1514 goto err_dma_buffers;
1187 sizeof(struct dma_extended_desc), 1515
1188 priv->dma_erx, priv->dma_rx_phy); 1516 if (priv->extend_desc) {
1189 goto err_dma; 1517 tx_q->dma_etx =
1190 } 1518 dma_zalloc_coherent(priv->device,
1191 } else { 1519 (DMA_TX_SIZE * sizeof(struct dma_extended_desc)),
1192 priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * 1520 &tx_q->dma_tx_phy, GFP_KERNEL);
1193 sizeof(struct dma_desc), 1521
1194 &priv->dma_rx_phy, 1522 if (!tx_q->dma_etx)
1195 GFP_KERNEL); 1523 goto err_dma_buffers;
1196 if (!priv->dma_rx) 1524 } else {
1197 goto err_dma; 1525 tx_q->dma_tx =
1198 1526 dma_zalloc_coherent(priv->device,
1199 priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * 1527 (DMA_TX_SIZE * sizeof(struct dma_desc)),
1200 sizeof(struct dma_desc), 1528 &tx_q->dma_tx_phy, GFP_KERNEL);
1201 &priv->dma_tx_phy, 1529
1202 GFP_KERNEL); 1530 if (!tx_q->dma_tx)
1203 if (!priv->dma_tx) { 1531 goto err_dma_buffers;
1204 dma_free_coherent(priv->device, DMA_RX_SIZE *
1205 sizeof(struct dma_desc),
1206 priv->dma_rx, priv->dma_rx_phy);
1207 goto err_dma;
1208 } 1532 }
1209 } 1533 }
1210 1534
1211 return 0; 1535 return 0;
1212 1536
1213err_dma: 1537err_dma_buffers:
1214 kfree(priv->tx_skbuff); 1538 free_tx_dma_desc_resources(priv);
1215err_tx_skbuff: 1539
1216 kfree(priv->tx_skbuff_dma);
1217err_tx_skbuff_dma:
1218 kfree(priv->rx_skbuff);
1219err_rx_skbuff:
1220 kfree(priv->rx_skbuff_dma);
1221 return ret; 1540 return ret;
1222} 1541}
1223 1542
1224static void free_dma_desc_resources(struct stmmac_priv *priv) 1543/**
1544 * alloc_dma_desc_resources - alloc TX/RX resources.
1545 * @priv: private structure
1546 * Description: according to which descriptor can be used (extend or basic)
1547 * this function allocates the resources for TX and RX paths. In case of
 1548 * reception, for example, it pre-allocates the RX socket buffer in order to
1549 * allow zero-copy mechanism.
1550 */
1551static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1225{ 1552{
1226 /* Release the DMA TX/RX socket buffers */ 1553 int ret = 0;
1227 dma_free_rx_skbufs(priv); 1554
1228 dma_free_tx_skbufs(priv); 1555 ret = alloc_tx_dma_desc_resources(priv);
1229 1556 if (ret)
1230 /* Free DMA regions of consistent memory previously allocated */ 1557 return ret;
1231 if (!priv->extend_desc) { 1558
1232 dma_free_coherent(priv->device, 1559 ret = alloc_rx_dma_desc_resources(priv);
1233 DMA_TX_SIZE * sizeof(struct dma_desc), 1560
1234 priv->dma_tx, priv->dma_tx_phy); 1561 return ret;
1235 dma_free_coherent(priv->device,
1236 DMA_RX_SIZE * sizeof(struct dma_desc),
1237 priv->dma_rx, priv->dma_rx_phy);
1238 } else {
1239 dma_free_coherent(priv->device, DMA_TX_SIZE *
1240 sizeof(struct dma_extended_desc),
1241 priv->dma_etx, priv->dma_tx_phy);
1242 dma_free_coherent(priv->device, DMA_RX_SIZE *
1243 sizeof(struct dma_extended_desc),
1244 priv->dma_erx, priv->dma_rx_phy);
1245 }
1246 kfree(priv->rx_skbuff_dma);
1247 kfree(priv->rx_skbuff);
1248 kfree(priv->tx_skbuff_dma);
1249 kfree(priv->tx_skbuff);
1250} 1562}
1251 1563
1252/** 1564/**
@@ -1256,19 +1568,104 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
1256 */ 1568 */
1257static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) 1569static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1258{ 1570{
1259 int rx_count = priv->dma_cap.number_rx_queues; 1571 u32 rx_queues_count = priv->plat->rx_queues_to_use;
1260 int queue = 0; 1572 int queue;
1573 u8 mode;
1261 1574
1262 /* If GMAC does not have multiple queues, then this is not necessary*/ 1575 for (queue = 0; queue < rx_queues_count; queue++) {
1263 if (rx_count == 1) 1576 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1264 return; 1577 priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1578 }
1579}
1265 1580
1266 /** 1581/**
1267 * If the core is synthesized with multiple rx queues / multiple 1582 * stmmac_start_rx_dma - start RX DMA channel
1268 * dma channels, then rx queues will be disabled by default. 1583 * @priv: driver private structure
1269 * For now only rx queue 0 is enabled. 1584 * @chan: RX channel index
1270 */ 1585 * Description:
1271 priv->hw->mac->rx_queue_enable(priv->hw, queue); 1586 * This starts a RX DMA channel
1587 */
1588static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1589{
1590 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1591 priv->hw->dma->start_rx(priv->ioaddr, chan);
1592}
1593
1594/**
1595 * stmmac_start_tx_dma - start TX DMA channel
1596 * @priv: driver private structure
1597 * @chan: TX channel index
1598 * Description:
1599 * This starts a TX DMA channel
1600 */
1601static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1602{
1603 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1604 priv->hw->dma->start_tx(priv->ioaddr, chan);
1605}
1606
1607/**
1608 * stmmac_stop_rx_dma - stop RX DMA channel
1609 * @priv: driver private structure
1610 * @chan: RX channel index
1611 * Description:
1612 * This stops a RX DMA channel
1613 */
1614static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1615{
1616 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1617 priv->hw->dma->stop_rx(priv->ioaddr, chan);
1618}
1619
1620/**
1621 * stmmac_stop_tx_dma - stop TX DMA channel
1622 * @priv: driver private structure
1623 * @chan: TX channel index
1624 * Description:
1625 * This stops a TX DMA channel
1626 */
1627static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1628{
1629 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1630 priv->hw->dma->stop_tx(priv->ioaddr, chan);
1631}
1632
1633/**
1634 * stmmac_start_all_dma - start all RX and TX DMA channels
1635 * @priv: driver private structure
1636 * Description:
1637 * This starts all the RX and TX DMA channels
1638 */
1639static void stmmac_start_all_dma(struct stmmac_priv *priv)
1640{
1641 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1642 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1643 u32 chan = 0;
1644
1645 for (chan = 0; chan < rx_channels_count; chan++)
1646 stmmac_start_rx_dma(priv, chan);
1647
1648 for (chan = 0; chan < tx_channels_count; chan++)
1649 stmmac_start_tx_dma(priv, chan);
1650}
1651
1652/**
1653 * stmmac_stop_all_dma - stop all RX and TX DMA channels
1654 * @priv: driver private structure
1655 * Description:
1656 * This stops the RX and TX DMA channels
1657 */
1658static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1659{
1660 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1661 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1662 u32 chan = 0;
1663
1664 for (chan = 0; chan < rx_channels_count; chan++)
1665 stmmac_stop_rx_dma(priv, chan);
1666
1667 for (chan = 0; chan < tx_channels_count; chan++)
1668 stmmac_stop_tx_dma(priv, chan);
1272} 1669}
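The start/stop helpers above assume DMA callbacks that take a channel index instead of implicitly acting on channel 0. A hedged sketch of the affected ops follows; the authoritative prototypes are in the driver's common.h, and only the callbacks used in this file are shown.

/* Assumed shape of the per-channel DMA callbacks used above */
struct stmmac_dma_ops {
	/* ... */
	void (*start_tx)(void __iomem *ioaddr, u32 chan);
	void (*stop_tx)(void __iomem *ioaddr, u32 chan);
	void (*start_rx)(void __iomem *ioaddr, u32 chan);
	void (*stop_rx)(void __iomem *ioaddr, u32 chan);
	void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan);
	void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan);
	/* ... */
};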
1273 1670
1274/** 1671/**
@@ -1279,11 +1676,20 @@ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1279 */ 1676 */
1280static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 1677static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1281{ 1678{
1679 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1680 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1282 int rxfifosz = priv->plat->rx_fifo_size; 1681 int rxfifosz = priv->plat->rx_fifo_size;
1682 u32 txmode = 0;
1683 u32 rxmode = 0;
1684 u32 chan = 0;
1685
1686 if (rxfifosz == 0)
1687 rxfifosz = priv->dma_cap.rx_fifo_size;
1283 1688
1284 if (priv->plat->force_thresh_dma_mode) 1689 if (priv->plat->force_thresh_dma_mode) {
1285 priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz); 1690 txmode = tc;
1286 else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { 1691 rxmode = tc;
1692 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1287 /* 1693 /*
1288 * In case of GMAC, SF mode can be enabled 1694 * In case of GMAC, SF mode can be enabled
1289 * to perform the TX COE in HW. This depends on: 1695 * to perform the TX COE in HW. This depends on:
@@ -1291,37 +1697,53 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1291 * 2) There is no bugged Jumbo frame support 1697 * 2) There is no bugged Jumbo frame support
1292 * that needs to not insert csum in the TDES. 1698 * that needs to not insert csum in the TDES.
1293 */ 1699 */
1294 priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE, 1700 txmode = SF_DMA_MODE;
1295 rxfifosz); 1701 rxmode = SF_DMA_MODE;
1296 priv->xstats.threshold = SF_DMA_MODE; 1702 priv->xstats.threshold = SF_DMA_MODE;
1297 } else 1703 } else {
1298 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE, 1704 txmode = tc;
1705 rxmode = SF_DMA_MODE;
1706 }
1707
1708 /* configure all channels */
1709 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1710 for (chan = 0; chan < rx_channels_count; chan++)
1711 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1712 rxfifosz);
1713
1714 for (chan = 0; chan < tx_channels_count; chan++)
1715 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1716 } else {
1717 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1299 rxfifosz); 1718 rxfifosz);
1719 }
1300} 1720}
1301 1721
1302/** 1722/**
1303 * stmmac_tx_clean - to manage the transmission completion 1723 * stmmac_tx_clean - to manage the transmission completion
1304 * @priv: driver private structure 1724 * @priv: driver private structure
1725 * @queue: TX queue index
1305 * Description: it reclaims the transmit resources after transmission completes. 1726 * Description: it reclaims the transmit resources after transmission completes.
1306 */ 1727 */
1307static void stmmac_tx_clean(struct stmmac_priv *priv) 1728static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1308{ 1729{
1730 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1309 unsigned int bytes_compl = 0, pkts_compl = 0; 1731 unsigned int bytes_compl = 0, pkts_compl = 0;
1310 unsigned int entry = priv->dirty_tx; 1732 unsigned int entry = tx_q->dirty_tx;
1311 1733
1312 netif_tx_lock(priv->dev); 1734 netif_tx_lock(priv->dev);
1313 1735
1314 priv->xstats.tx_clean++; 1736 priv->xstats.tx_clean++;
1315 1737
1316 while (entry != priv->cur_tx) { 1738 while (entry != tx_q->cur_tx) {
1317 struct sk_buff *skb = priv->tx_skbuff[entry]; 1739 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1318 struct dma_desc *p; 1740 struct dma_desc *p;
1319 int status; 1741 int status;
1320 1742
1321 if (priv->extend_desc) 1743 if (priv->extend_desc)
1322 p = (struct dma_desc *)(priv->dma_etx + entry); 1744 p = (struct dma_desc *)(tx_q->dma_etx + entry);
1323 else 1745 else
1324 p = priv->dma_tx + entry; 1746 p = tx_q->dma_tx + entry;
1325 1747
1326 status = priv->hw->desc->tx_status(&priv->dev->stats, 1748 status = priv->hw->desc->tx_status(&priv->dev->stats,
1327 &priv->xstats, p, 1749 &priv->xstats, p,
@@ -1342,48 +1764,50 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
1342 stmmac_get_tx_hwtstamp(priv, p, skb); 1764 stmmac_get_tx_hwtstamp(priv, p, skb);
1343 } 1765 }
1344 1766
1345 if (likely(priv->tx_skbuff_dma[entry].buf)) { 1767 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1346 if (priv->tx_skbuff_dma[entry].map_as_page) 1768 if (tx_q->tx_skbuff_dma[entry].map_as_page)
1347 dma_unmap_page(priv->device, 1769 dma_unmap_page(priv->device,
1348 priv->tx_skbuff_dma[entry].buf, 1770 tx_q->tx_skbuff_dma[entry].buf,
1349 priv->tx_skbuff_dma[entry].len, 1771 tx_q->tx_skbuff_dma[entry].len,
1350 DMA_TO_DEVICE); 1772 DMA_TO_DEVICE);
1351 else 1773 else
1352 dma_unmap_single(priv->device, 1774 dma_unmap_single(priv->device,
1353 priv->tx_skbuff_dma[entry].buf, 1775 tx_q->tx_skbuff_dma[entry].buf,
1354 priv->tx_skbuff_dma[entry].len, 1776 tx_q->tx_skbuff_dma[entry].len,
1355 DMA_TO_DEVICE); 1777 DMA_TO_DEVICE);
1356 priv->tx_skbuff_dma[entry].buf = 0; 1778 tx_q->tx_skbuff_dma[entry].buf = 0;
1357 priv->tx_skbuff_dma[entry].len = 0; 1779 tx_q->tx_skbuff_dma[entry].len = 0;
1358 priv->tx_skbuff_dma[entry].map_as_page = false; 1780 tx_q->tx_skbuff_dma[entry].map_as_page = false;
1359 } 1781 }
1360 1782
1361 if (priv->hw->mode->clean_desc3) 1783 if (priv->hw->mode->clean_desc3)
1362 priv->hw->mode->clean_desc3(priv, p); 1784 priv->hw->mode->clean_desc3(tx_q, p);
1363 1785
1364 priv->tx_skbuff_dma[entry].last_segment = false; 1786 tx_q->tx_skbuff_dma[entry].last_segment = false;
1365 priv->tx_skbuff_dma[entry].is_jumbo = false; 1787 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1366 1788
1367 if (likely(skb != NULL)) { 1789 if (likely(skb != NULL)) {
1368 pkts_compl++; 1790 pkts_compl++;
1369 bytes_compl += skb->len; 1791 bytes_compl += skb->len;
1370 dev_consume_skb_any(skb); 1792 dev_consume_skb_any(skb);
1371 priv->tx_skbuff[entry] = NULL; 1793 tx_q->tx_skbuff[entry] = NULL;
1372 } 1794 }
1373 1795
1374 priv->hw->desc->release_tx_desc(p, priv->mode); 1796 priv->hw->desc->release_tx_desc(p, priv->mode);
1375 1797
1376 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 1798 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1377 } 1799 }
1378 priv->dirty_tx = entry; 1800 tx_q->dirty_tx = entry;
1379 1801
1380 netdev_completed_queue(priv->dev, pkts_compl, bytes_compl); 1802 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1803 pkts_compl, bytes_compl);
1381 1804
1382 if (unlikely(netif_queue_stopped(priv->dev) && 1805 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1383 stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) { 1806 queue))) &&
1807 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1384 netif_dbg(priv, tx_done, priv->dev, 1808 netif_dbg(priv, tx_done, priv->dev,
1385 "%s: restart transmit\n", __func__); 1809 "%s: restart transmit\n", __func__);
1386 netif_wake_queue(priv->dev); 1810 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1387 } 1811 }
1388 1812
1389 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { 1813 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
@@ -1393,45 +1817,77 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
1393 netif_tx_unlock(priv->dev); 1817 netif_tx_unlock(priv->dev);
1394} 1818}
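Note the completion accounting switch from the single-queue netdev_completed_queue() to the per-queue byte-queue-limits helpers. For illustration, the three calls pair up as sketched below (core netdev API, not code from this patch):

/* Illustration only: per-queue BQL accounting for one TX queue */
struct netdev_queue *txq = netdev_get_tx_queue(priv->dev, queue);

netdev_tx_sent_queue(txq, skb->len);			/* xmit path (stmmac_xmit/stmmac_tso_xmit) */
netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);	/* clean path (stmmac_tx_clean) */
netdev_tx_reset_queue(txq);				/* ring reset (stmmac_tx_err) */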
1395 1819
1396static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv) 1820static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1397{ 1821{
1398 priv->hw->dma->enable_dma_irq(priv->ioaddr); 1822 priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1399} 1823}
1400 1824
1401static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv) 1825static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1402{ 1826{
1403 priv->hw->dma->disable_dma_irq(priv->ioaddr); 1827 priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1404} 1828}
1405 1829
1406/** 1830/**
1407 * stmmac_tx_err - to manage the tx error 1831 * stmmac_tx_err - to manage the tx error
1408 * @priv: driver private structure 1832 * @priv: driver private structure
1833 * @queue: queue index
1409 * Description: it cleans the descriptors and restarts the transmission 1834 * Description: it cleans the descriptors and restarts the transmission
1410 * in case of transmission errors. 1835 * in case of transmission errors.
1411 */ 1836 */
1412static void stmmac_tx_err(struct stmmac_priv *priv) 1837static void stmmac_tx_err(struct stmmac_priv *priv, u32 queue)
1413{ 1838{
1839 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1840 u32 chan = queue;
1414 int i; 1841 int i;
1415 netif_stop_queue(priv->dev);
1416 1842
1417 priv->hw->dma->stop_tx(priv->ioaddr); 1843 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
1418 dma_free_tx_skbufs(priv); 1844
1845 stmmac_stop_tx_dma(priv, chan);
1846 dma_free_tx_skbufs(priv, queue);
1419 for (i = 0; i < DMA_TX_SIZE; i++) 1847 for (i = 0; i < DMA_TX_SIZE; i++)
1420 if (priv->extend_desc) 1848 if (priv->extend_desc)
1421 priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic, 1849 priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1422 priv->mode, 1850 priv->mode,
1423 (i == DMA_TX_SIZE - 1)); 1851 (i == DMA_TX_SIZE - 1));
1424 else 1852 else
1425 priv->hw->desc->init_tx_desc(&priv->dma_tx[i], 1853 priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1426 priv->mode, 1854 priv->mode,
1427 (i == DMA_TX_SIZE - 1)); 1855 (i == DMA_TX_SIZE - 1));
1428 priv->dirty_tx = 0; 1856 tx_q->dirty_tx = 0;
1429 priv->cur_tx = 0; 1857 tx_q->cur_tx = 0;
1430 netdev_reset_queue(priv->dev); 1858 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1431 priv->hw->dma->start_tx(priv->ioaddr); 1859 stmmac_start_tx_dma(priv, chan);
1432 1860
1433 priv->dev->stats.tx_errors++; 1861 priv->dev->stats.tx_errors++;
1434 netif_wake_queue(priv->dev); 1862 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1863}
1864
1865/**
1866 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1867 * @priv: driver private structure
1868 * @txmode: TX operating mode
1869 * @rxmode: RX operating mode
1870 * @chan: channel index
 1871 * Description: it is used to configure the DMA operation mode at runtime
 1872 * in order to program the tx/rx DMA thresholds or Store-And-Forward
 1873 * mode.
1874 */
1875static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1876 u32 rxmode, u32 chan)
1877{
1878 int rxfifosz = priv->plat->rx_fifo_size;
1879
1880 if (rxfifosz == 0)
1881 rxfifosz = priv->dma_cap.rx_fifo_size;
1882
1883 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1884 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1885 rxfifosz);
1886 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1887 } else {
1888 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1889 rxfifosz);
1890 }
1435} 1891}
1436 1892
1437/** 1893/**
@@ -1443,31 +1899,43 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
1443 */ 1899 */
1444static void stmmac_dma_interrupt(struct stmmac_priv *priv) 1900static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1445{ 1901{
1902 u32 tx_channel_count = priv->plat->tx_queues_to_use;
1446 int status; 1903 int status;
1447 int rxfifosz = priv->plat->rx_fifo_size; 1904 u32 chan;
1448 1905
1449 status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats); 1906 for (chan = 0; chan < tx_channel_count; chan++) {
1450 if (likely((status & handle_rx)) || (status & handle_tx)) { 1907 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
1451 if (likely(napi_schedule_prep(&priv->napi))) { 1908
1452 stmmac_disable_dma_irq(priv); 1909 status = priv->hw->dma->dma_interrupt(priv->ioaddr,
1453 __napi_schedule(&priv->napi); 1910 &priv->xstats, chan);
1911 if (likely((status & handle_rx)) || (status & handle_tx)) {
1912 if (likely(napi_schedule_prep(&rx_q->napi))) {
1913 stmmac_disable_dma_irq(priv, chan);
1914 __napi_schedule(&rx_q->napi);
1915 }
1454 } 1916 }
1455 } 1917
1456 if (unlikely(status & tx_hard_error_bump_tc)) { 1918 if (unlikely(status & tx_hard_error_bump_tc)) {
1457 /* Try to bump up the dma threshold on this failure */ 1919 /* Try to bump up the dma threshold on this failure */
1458 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && 1920 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1459 (tc <= 256)) { 1921 (tc <= 256)) {
1460 tc += 64; 1922 tc += 64;
1461 if (priv->plat->force_thresh_dma_mode) 1923 if (priv->plat->force_thresh_dma_mode)
1462 priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, 1924 stmmac_set_dma_operation_mode(priv,
1463 rxfifosz); 1925 tc,
1464 else 1926 tc,
1465 priv->hw->dma->dma_mode(priv->ioaddr, tc, 1927 chan);
1466 SF_DMA_MODE, rxfifosz); 1928 else
1467 priv->xstats.threshold = tc; 1929 stmmac_set_dma_operation_mode(priv,
1930 tc,
1931 SF_DMA_MODE,
1932 chan);
1933 priv->xstats.threshold = tc;
1934 }
1935 } else if (unlikely(status == tx_hard_error)) {
1936 stmmac_tx_err(priv, chan);
1468 } 1937 }
1469 } else if (unlikely(status == tx_hard_error)) 1938 }
1470 stmmac_tx_err(priv);
1471} 1939}
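The interrupt path now schedules the NAPI instance embedded in each RX queue (rx_q->napi). The matching registration happens at probe time, outside this hunk; a hedged sketch of what it could look like, reusing stmmac_poll and the existing NAPI weight of 64 as assumptions:

/* Hypothetical per-queue NAPI registration matching rx_q->napi above */
for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

	netif_napi_add(dev, &rx_q->napi, stmmac_poll, 64);
}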
1472 1940
1473/** 1941/**
@@ -1574,6 +2042,13 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
1574 */ 2042 */
1575static int stmmac_init_dma_engine(struct stmmac_priv *priv) 2043static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1576{ 2044{
2045 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2046 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2047 struct stmmac_rx_queue *rx_q;
2048 struct stmmac_tx_queue *tx_q;
2049 u32 dummy_dma_rx_phy = 0;
2050 u32 dummy_dma_tx_phy = 0;
2051 u32 chan = 0;
1577 int atds = 0; 2052 int atds = 0;
1578 int ret = 0; 2053 int ret = 0;
1579 2054
@@ -1591,19 +2066,50 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1591 return ret; 2066 return ret;
1592 } 2067 }
1593 2068
1594 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
1595 priv->dma_tx_phy, priv->dma_rx_phy, atds);
1596
1597 if (priv->synopsys_id >= DWMAC_CORE_4_00) { 2069 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1598 priv->rx_tail_addr = priv->dma_rx_phy + 2070 /* DMA Configuration */
1599 (DMA_RX_SIZE * sizeof(struct dma_desc)); 2071 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
1600 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr, 2072 dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
1601 STMMAC_CHAN0); 2073
2074 /* DMA RX Channel Configuration */
2075 for (chan = 0; chan < rx_channels_count; chan++) {
2076 rx_q = &priv->rx_queue[chan];
2077
2078 priv->hw->dma->init_rx_chan(priv->ioaddr,
2079 priv->plat->dma_cfg,
2080 rx_q->dma_rx_phy, chan);
2081
2082 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2083 (DMA_RX_SIZE * sizeof(struct dma_desc));
2084 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2085 rx_q->rx_tail_addr,
2086 chan);
2087 }
2088
2089 /* DMA TX Channel Configuration */
2090 for (chan = 0; chan < tx_channels_count; chan++) {
2091 tx_q = &priv->tx_queue[chan];
2092
2093 priv->hw->dma->init_chan(priv->ioaddr,
2094 priv->plat->dma_cfg,
2095 chan);
2096
2097 priv->hw->dma->init_tx_chan(priv->ioaddr,
2098 priv->plat->dma_cfg,
2099 tx_q->dma_tx_phy, chan);
2100
2101 tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2102 (DMA_TX_SIZE * sizeof(struct dma_desc));
2103 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2104 tx_q->tx_tail_addr,
2105 chan);
2106 }
2107 } else {
2108 rx_q = &priv->rx_queue[chan];
2109 tx_q = &priv->tx_queue[chan];
1602 2110
1603 priv->tx_tail_addr = priv->dma_tx_phy + 2111 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
1604 (DMA_TX_SIZE * sizeof(struct dma_desc)); 2112 tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
1605 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
1606 STMMAC_CHAN0);
1607 } 2113 }
1608 2114
1609 if (priv->plat->axi && priv->hw->dma->axi) 2115 if (priv->plat->axi && priv->hw->dma->axi)
@@ -1621,8 +2127,70 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1621static void stmmac_tx_timer(unsigned long data) 2127static void stmmac_tx_timer(unsigned long data)
1622{ 2128{
1623 struct stmmac_priv *priv = (struct stmmac_priv *)data; 2129 struct stmmac_priv *priv = (struct stmmac_priv *)data;
2130 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2131 u32 queue;
2132
2133 /* let's scan all the tx queues */
2134 for (queue = 0; queue < tx_queues_count; queue++)
2135 stmmac_tx_clean(priv, queue);
2136}
2137
2138/**
2139 * stmmac_stop_all_queues - Stop all queues
2140 * @priv: driver private structure
2141 */
2142static void stmmac_stop_all_queues(struct stmmac_priv *priv)
2143{
2144 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
2145 u32 queue;
2146
2147 for (queue = 0; queue < tx_queues_cnt; queue++)
2148 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2149}
1624 2150
1625 stmmac_tx_clean(priv); 2151/**
2152 * stmmac_start_all_queues - Start all queues
2153 * @priv: driver private structure
2154 */
2155static void stmmac_start_all_queues(struct stmmac_priv *priv)
2156{
2157 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
2158 u32 queue;
2159
2160 for (queue = 0; queue < tx_queues_cnt; queue++)
2161 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
2162}
2163
2164/**
2165 * stmmac_disable_all_queues - Disable all queues
2166 * @priv: driver private structure
2167 */
2168static void stmmac_disable_all_queues(struct stmmac_priv *priv)
2169{
2170 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
2171 u32 queue;
2172
2173 for (queue = 0; queue < rx_queues_cnt; queue++) {
2174 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2175
2176 napi_disable(&rx_q->napi);
2177 }
2178}
2179
2180/**
2181 * stmmac_enable_all_queues - Enable all queues
2182 * @priv: driver private structure
2183 */
2184static void stmmac_enable_all_queues(struct stmmac_priv *priv)
2185{
2186 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
2187 u32 queue;
2188
2189 for (queue = 0; queue < rx_queues_cnt; queue++) {
2190 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2191
2192 napi_enable(&rx_q->napi);
2193 }
1626} 2194}
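These four helpers are consumed by stmmac_open()/stmmac_release() later in the patch; the intended ordering, shown here only as a usage sketch, is:

/* ndo_open, after stmmac_hw_setup() succeeds */
stmmac_enable_all_queues(priv);		/* napi_enable() per RX queue */
stmmac_start_all_queues(priv);		/* wake every TX netdev queue */

/* ndo_stop, before the DMA and rings are torn down */
stmmac_stop_all_queues(priv);		/* stop every TX netdev queue */
stmmac_disable_all_queues(priv);	/* napi_disable() per RX queue */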
1627 2195
1628/** 2196/**
@@ -1644,6 +2212,198 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
1644 add_timer(&priv->txtimer); 2212 add_timer(&priv->txtimer);
1645} 2213}
1646 2214
2215static void stmmac_set_rings_length(struct stmmac_priv *priv)
2216{
2217 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2218 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2219 u32 chan;
2220
2221 /* set TX ring length */
2222 if (priv->hw->dma->set_tx_ring_len) {
2223 for (chan = 0; chan < tx_channels_count; chan++)
2224 priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2225 (DMA_TX_SIZE - 1), chan);
2226 }
2227
2228 /* set RX ring length */
2229 if (priv->hw->dma->set_rx_ring_len) {
2230 for (chan = 0; chan < rx_channels_count; chan++)
2231 priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2232 (DMA_RX_SIZE - 1), chan);
2233 }
2234}
2235
2236/**
2237 * stmmac_set_tx_queue_weight - Set TX queue weight
2238 * @priv: driver private structure
2239 * Description: It is used for setting TX queues weight
2240 */
2241static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2242{
2243 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2244 u32 weight;
2245 u32 queue;
2246
2247 for (queue = 0; queue < tx_queues_count; queue++) {
2248 weight = priv->plat->tx_queues_cfg[queue].weight;
2249 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2250 }
2251}
2252
2253/**
2254 * stmmac_configure_cbs - Configure CBS in TX queue
2255 * @priv: driver private structure
2256 * Description: It is used for configuring CBS in AVB TX queues
2257 */
2258static void stmmac_configure_cbs(struct stmmac_priv *priv)
2259{
2260 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2261 u32 mode_to_use;
2262 u32 queue;
2263
2264 for (queue = 0; queue < tx_queues_count; queue++) {
2265 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2266 if (mode_to_use == MTL_QUEUE_DCB)
2267 continue;
2268
2269 priv->hw->mac->config_cbs(priv->hw,
2270 priv->plat->tx_queues_cfg[queue].send_slope,
2271 priv->plat->tx_queues_cfg[queue].idle_slope,
2272 priv->plat->tx_queues_cfg[queue].high_credit,
2273 priv->plat->tx_queues_cfg[queue].low_credit,
2274 queue);
2275 }
2276}
2277
2278/**
2279 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2280 * @priv: driver private structure
2281 * Description: It is used for mapping RX queues to RX dma channels
2282 */
2283static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2284{
2285 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2286 u32 queue;
2287 u32 chan;
2288
2289 for (queue = 0; queue < rx_queues_count; queue++) {
2290 chan = priv->plat->rx_queues_cfg[queue].chan;
2291 priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2292 }
2293}
2294
2295/**
2296 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2297 * @priv: driver private structure
2298 * Description: It is used for configuring the RX Queue Priority
2299 */
2300static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2301{
2302 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2303 u32 queue;
2304 u32 prio;
2305
2306 for (queue = 0; queue < rx_queues_count; queue++) {
2307 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2308 continue;
2309
2310 prio = priv->plat->rx_queues_cfg[queue].prio;
2311 priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2312 }
2313}
2314
2315/**
2316 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2317 * @priv: driver private structure
2318 * Description: It is used for configuring the TX Queue Priority
2319 */
2320static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2321{
2322 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2323 u32 queue;
2324 u32 prio;
2325
2326 for (queue = 0; queue < tx_queues_count; queue++) {
2327 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2328 continue;
2329
2330 prio = priv->plat->tx_queues_cfg[queue].prio;
2331 priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2332 }
2333}
2334
2335/**
2336 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2337 * @priv: driver private structure
2338 * Description: It is used for configuring the RX queue routing
2339 */
2340static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2341{
2342 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2343 u32 queue;
2344 u8 packet;
2345
2346 for (queue = 0; queue < rx_queues_count; queue++) {
2347 /* no specific packet type routing specified for the queue */
2348 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2349 continue;
2350
2351 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
 2352 priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2353 }
2354}
2355
2356/**
2357 * stmmac_mtl_configuration - Configure MTL
2358 * @priv: driver private structure
 2359 * Description: It is used for configuring MTL
2360 */
2361static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2362{
2363 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2364 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2365
2366 if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2367 stmmac_set_tx_queue_weight(priv);
2368
2369 /* Configure MTL RX algorithms */
2370 if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2371 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2372 priv->plat->rx_sched_algorithm);
2373
2374 /* Configure MTL TX algorithms */
2375 if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2376 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2377 priv->plat->tx_sched_algorithm);
2378
2379 /* Configure CBS in AVB TX queues */
2380 if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2381 stmmac_configure_cbs(priv);
2382
2383 /* Map RX MTL to DMA channels */
2384 if (rx_queues_count > 1 && priv->hw->mac->map_mtl_to_dma)
2385 stmmac_rx_queue_dma_chan_map(priv);
2386
2387 /* Enable MAC RX Queues */
2388 if (rx_queues_count > 1 && priv->hw->mac->rx_queue_enable)
2389 stmmac_mac_enable_rx_queues(priv);
2390
2391 /* Set the HW DMA mode and the COE */
2392 stmmac_dma_operation_mode(priv);
2393
2394 /* Set RX priorities */
2395 if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2396 stmmac_mac_config_rx_queues_prio(priv);
2397
2398 /* Set TX priorities */
2399 if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2400 stmmac_mac_config_tx_queues_prio(priv);
2401
2402 /* Set RX routing */
2403 if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2404 stmmac_mac_config_rx_queues_routing(priv);
2405}
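Every knob consulted by stmmac_mtl_configuration() comes from platform data. Purely as an illustration (the values and the MTL_QUEUE_AVB/MTL_TX_ALGORITHM_WRR constants are assumptions, not taken from any in-tree platform), a board with one DCB and one AVB TX queue could be described like this:

/* Hypothetical platform data; field names follow the usage above */
plat->tx_queues_to_use = 2;
plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;

plat->tx_queues_cfg[0].weight = 0x10;
plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;

plat->tx_queues_cfg[1].weight = 0x11;
plat->tx_queues_cfg[1].mode_to_use = MTL_QUEUE_AVB;
plat->tx_queues_cfg[1].send_slope = 0x0;
plat->tx_queues_cfg[1].idle_slope = 0x1000;
plat->tx_queues_cfg[1].high_credit = 0x10000;
plat->tx_queues_cfg[1].low_credit = 0x20000;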
2406
1647/** 2407/**
1648 * stmmac_hw_setup - setup mac in a usable state. 2408 * stmmac_hw_setup - setup mac in a usable state.
1649 * @dev : pointer to the device structure. 2409 * @dev : pointer to the device structure.
@@ -1659,6 +2419,9 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
1659static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) 2419static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1660{ 2420{
1661 struct stmmac_priv *priv = netdev_priv(dev); 2421 struct stmmac_priv *priv = netdev_priv(dev);
2422 u32 rx_cnt = priv->plat->rx_queues_to_use;
2423 u32 tx_cnt = priv->plat->tx_queues_to_use;
2424 u32 chan;
1662 int ret; 2425 int ret;
1663 2426
1664 /* DMA initialization and SW reset */ 2427 /* DMA initialization and SW reset */
@@ -1688,9 +2451,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1688 /* Initialize the MAC Core */ 2451 /* Initialize the MAC Core */
1689 priv->hw->mac->core_init(priv->hw, dev->mtu); 2452 priv->hw->mac->core_init(priv->hw, dev->mtu);
1690 2453
1691 /* Initialize MAC RX Queues */ 2454 /* Initialize MTL*/
1692 if (priv->hw->mac->rx_queue_enable) 2455 if (priv->synopsys_id >= DWMAC_CORE_4_00)
1693 stmmac_mac_enable_rx_queues(priv); 2456 stmmac_mtl_configuration(priv);
1694 2457
1695 ret = priv->hw->mac->rx_ipc(priv->hw); 2458 ret = priv->hw->mac->rx_ipc(priv->hw);
1696 if (!ret) { 2459 if (!ret) {
@@ -1705,12 +2468,13 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1705 else 2468 else
1706 stmmac_set_mac(priv->ioaddr, true); 2469 stmmac_set_mac(priv->ioaddr, true);
1707 2470
1708 /* Set the HW DMA mode and the COE */
1709 stmmac_dma_operation_mode(priv);
1710
1711 stmmac_mmc_setup(priv); 2471 stmmac_mmc_setup(priv);
1712 2472
1713 if (init_ptp) { 2473 if (init_ptp) {
2474 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2475 if (ret < 0)
2476 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2477
1714 ret = stmmac_init_ptp(priv); 2478 ret = stmmac_init_ptp(priv);
1715 if (ret == -EOPNOTSUPP) 2479 if (ret == -EOPNOTSUPP)
1716 netdev_warn(priv->dev, "PTP not supported by HW\n"); 2480 netdev_warn(priv->dev, "PTP not supported by HW\n");
@@ -1725,35 +2489,37 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1725 __func__); 2489 __func__);
1726#endif 2490#endif
1727 /* Start the ball rolling... */ 2491 /* Start the ball rolling... */
1728 netdev_dbg(priv->dev, "DMA RX/TX processes started...\n"); 2492 stmmac_start_all_dma(priv);
1729 priv->hw->dma->start_tx(priv->ioaddr);
1730 priv->hw->dma->start_rx(priv->ioaddr);
1731 2493
1732 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; 2494 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1733 2495
1734 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { 2496 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1735 priv->rx_riwt = MAX_DMA_RIWT; 2497 priv->rx_riwt = MAX_DMA_RIWT;
1736 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT); 2498 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
1737 } 2499 }
1738 2500
1739 if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane) 2501 if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
1740 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0); 2502 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
1741 2503
1742 /* set TX ring length */ 2504 /* set TX and RX rings length */
1743 if (priv->hw->dma->set_tx_ring_len) 2505 stmmac_set_rings_length(priv);
1744 priv->hw->dma->set_tx_ring_len(priv->ioaddr, 2506
1745 (DMA_TX_SIZE - 1));
1746 /* set RX ring length */
1747 if (priv->hw->dma->set_rx_ring_len)
1748 priv->hw->dma->set_rx_ring_len(priv->ioaddr,
1749 (DMA_RX_SIZE - 1));
1750 /* Enable TSO */ 2507 /* Enable TSO */
1751 if (priv->tso) 2508 if (priv->tso) {
1752 priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0); 2509 for (chan = 0; chan < tx_cnt; chan++)
2510 priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2511 }
1753 2512
1754 return 0; 2513 return 0;
1755} 2514}
1756 2515
2516static void stmmac_hw_teardown(struct net_device *dev)
2517{
2518 struct stmmac_priv *priv = netdev_priv(dev);
2519
2520 clk_disable_unprepare(priv->plat->clk_ptp_ref);
2521}
2522
1757/** 2523/**
1758 * stmmac_open - open entry point of the driver 2524 * stmmac_open - open entry point of the driver
1759 * @dev : pointer to the device structure. 2525 * @dev : pointer to the device structure.
@@ -1786,23 +2552,8 @@ static int stmmac_open(struct net_device *dev)
1786 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); 2552 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
1787 priv->xstats.threshold = tc; 2553 priv->xstats.threshold = tc;
1788 2554
1789 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
1790 priv->rx_copybreak = STMMAC_RX_COPYBREAK; 2555 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
1791 2556
1792 ret = alloc_dma_desc_resources(priv);
1793 if (ret < 0) {
1794 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
1795 __func__);
1796 goto dma_desc_error;
1797 }
1798
1799 ret = init_dma_desc_rings(dev, GFP_KERNEL);
1800 if (ret < 0) {
1801 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
1802 __func__);
1803 goto init_error;
1804 }
1805
1806 ret = stmmac_hw_setup(dev, true); 2557 ret = stmmac_hw_setup(dev, true);
1807 if (ret < 0) { 2558 if (ret < 0) {
1808 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); 2559 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
@@ -1821,7 +2572,7 @@ static int stmmac_open(struct net_device *dev)
1821 netdev_err(priv->dev, 2572 netdev_err(priv->dev,
1822 "%s: ERROR: allocating the IRQ %d (error: %d)\n", 2573 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
1823 __func__, dev->irq, ret); 2574 __func__, dev->irq, ret);
1824 goto init_error; 2575 goto irq_error;
1825 } 2576 }
1826 2577
1827 /* Request the Wake IRQ in case of another line is used for WoL */ 2578 /* Request the Wake IRQ in case of another line is used for WoL */
@@ -1848,8 +2599,8 @@ static int stmmac_open(struct net_device *dev)
1848 } 2599 }
1849 } 2600 }
1850 2601
1851 napi_enable(&priv->napi); 2602 stmmac_enable_all_queues(priv);
1852 netif_start_queue(dev); 2603 stmmac_start_all_queues(priv);
1853 2604
1854 return 0; 2605 return 0;
1855 2606
@@ -1858,10 +2609,15 @@ lpiirq_error:
1858 free_irq(priv->wol_irq, dev); 2609 free_irq(priv->wol_irq, dev);
1859wolirq_error: 2610wolirq_error:
1860 free_irq(dev->irq, dev); 2611 free_irq(dev->irq, dev);
2612irq_error:
2613 if (dev->phydev)
2614 phy_stop(dev->phydev);
1861 2615
2616 del_timer_sync(&priv->txtimer);
2617 stmmac_hw_teardown(dev);
1862init_error: 2618init_error:
1863 free_dma_desc_resources(priv); 2619 free_dma_desc_resources(priv);
1864dma_desc_error: 2620
1865 if (dev->phydev) 2621 if (dev->phydev)
1866 phy_disconnect(dev->phydev); 2622 phy_disconnect(dev->phydev);
1867 2623
@@ -1887,9 +2643,9 @@ static int stmmac_release(struct net_device *dev)
1887 phy_disconnect(dev->phydev); 2643 phy_disconnect(dev->phydev);
1888 } 2644 }
1889 2645
1890 netif_stop_queue(dev); 2646 stmmac_stop_all_queues(priv);
1891 2647
1892 napi_disable(&priv->napi); 2648 stmmac_disable_all_queues(priv);
1893 2649
1894 del_timer_sync(&priv->txtimer); 2650 del_timer_sync(&priv->txtimer);
1895 2651
@@ -1901,8 +2657,7 @@ static int stmmac_release(struct net_device *dev)
1901 free_irq(priv->lpi_irq, dev); 2657 free_irq(priv->lpi_irq, dev);
1902 2658
1903 /* Stop TX/RX DMA and clear the descriptors */ 2659 /* Stop TX/RX DMA and clear the descriptors */
1904 priv->hw->dma->stop_tx(priv->ioaddr); 2660 stmmac_stop_all_dma(priv);
1905 priv->hw->dma->stop_rx(priv->ioaddr);
1906 2661
1907 /* Release and free the Rx/Tx resources */ 2662 /* Release and free the Rx/Tx resources */
1908 free_dma_desc_resources(priv); 2663 free_dma_desc_resources(priv);
@@ -1927,22 +2682,24 @@ static int stmmac_release(struct net_device *dev)
1927 * @des: buffer start address 2682 * @des: buffer start address
1928 * @total_len: total length to fill in descriptors 2683 * @total_len: total length to fill in descriptors
 1929 * @last_segment: condition for the last descriptor 2684
2685 * @queue: TX queue index
1930 * Description: 2686 * Description:
1931 * This function fills descriptor and request new descriptors according to 2687 * This function fills descriptor and request new descriptors according to
1932 * buffer length to fill 2688 * buffer length to fill
1933 */ 2689 */
1934static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des, 2690static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
1935 int total_len, bool last_segment) 2691 int total_len, bool last_segment, u32 queue)
1936{ 2692{
2693 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1937 struct dma_desc *desc; 2694 struct dma_desc *desc;
1938 int tmp_len;
1939 u32 buff_size; 2695 u32 buff_size;
2696 int tmp_len;
1940 2697
1941 tmp_len = total_len; 2698 tmp_len = total_len;
1942 2699
1943 while (tmp_len > 0) { 2700 while (tmp_len > 0) {
1944 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); 2701 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
1945 desc = priv->dma_tx + priv->cur_tx; 2702 desc = tx_q->dma_tx + tx_q->cur_tx;
1946 2703
1947 desc->des0 = cpu_to_le32(des + (total_len - tmp_len)); 2704 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
1948 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? 2705 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
@@ -1986,23 +2743,27 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
1986 */ 2743 */
1987static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) 2744static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
1988{ 2745{
1989 u32 pay_len, mss; 2746 struct dma_desc *desc, *first, *mss_desc = NULL;
1990 int tmp_pay_len = 0;
1991 struct stmmac_priv *priv = netdev_priv(dev); 2747 struct stmmac_priv *priv = netdev_priv(dev);
2748 u32 queue = skb_get_queue_mapping(skb);
1992 int nfrags = skb_shinfo(skb)->nr_frags; 2749 int nfrags = skb_shinfo(skb)->nr_frags;
1993 unsigned int first_entry, des; 2750 unsigned int first_entry, des;
1994 struct dma_desc *desc, *first, *mss_desc = NULL; 2751 struct stmmac_tx_queue *tx_q;
2752 int tmp_pay_len = 0;
2753 u32 pay_len, mss;
1995 u8 proto_hdr_len; 2754 u8 proto_hdr_len;
1996 int i; 2755 int i;
1997 2756
2757 tx_q = &priv->tx_queue[queue];
2758
1998 /* Compute header lengths */ 2759 /* Compute header lengths */
1999 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2760 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2000 2761
2001 /* Desc availability based on threshold should be enough safe */ 2762 /* Desc availability based on threshold should be enough safe */
2002 if (unlikely(stmmac_tx_avail(priv) < 2763 if (unlikely(stmmac_tx_avail(priv, queue) <
2003 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { 2764 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2004 if (!netif_queue_stopped(dev)) { 2765 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2005 netif_stop_queue(dev); 2766 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue));
2006 /* This is a hard error, log it. */ 2767 /* This is a hard error, log it. */
2007 netdev_err(priv->dev, 2768 netdev_err(priv->dev,
2008 "%s: Tx Ring full when queue awake\n", 2769 "%s: Tx Ring full when queue awake\n",
@@ -2017,10 +2778,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2017 2778
2018 /* set new MSS value if needed */ 2779 /* set new MSS value if needed */
2019 if (mss != priv->mss) { 2780 if (mss != priv->mss) {
2020 mss_desc = priv->dma_tx + priv->cur_tx; 2781 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2021 priv->hw->desc->set_mss(mss_desc, mss); 2782 priv->hw->desc->set_mss(mss_desc, mss);
2022 priv->mss = mss; 2783 priv->mss = mss;
2023 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); 2784 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2024 } 2785 }
2025 2786
2026 if (netif_msg_tx_queued(priv)) { 2787 if (netif_msg_tx_queued(priv)) {
@@ -2030,9 +2791,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2030 skb->data_len); 2791 skb->data_len);
2031 } 2792 }
2032 2793
2033 first_entry = priv->cur_tx; 2794 first_entry = tx_q->cur_tx;
2034 2795
2035 desc = priv->dma_tx + first_entry; 2796 desc = tx_q->dma_tx + first_entry;
2036 first = desc; 2797 first = desc;
2037 2798
2038 /* first descriptor: fill Headers on Buf1 */ 2799 /* first descriptor: fill Headers on Buf1 */
@@ -2041,9 +2802,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2041 if (dma_mapping_error(priv->device, des)) 2802 if (dma_mapping_error(priv->device, des))
2042 goto dma_map_err; 2803 goto dma_map_err;
2043 2804
2044 priv->tx_skbuff_dma[first_entry].buf = des; 2805 tx_q->tx_skbuff_dma[first_entry].buf = des;
2045 priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb); 2806 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2046 priv->tx_skbuff[first_entry] = skb; 2807 tx_q->tx_skbuff[first_entry] = skb;
2047 2808
2048 first->des0 = cpu_to_le32(des); 2809 first->des0 = cpu_to_le32(des);
2049 2810
@@ -2054,7 +2815,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2054 /* If needed take extra descriptors to fill the remaining payload */ 2815 /* If needed take extra descriptors to fill the remaining payload */
2055 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; 2816 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2056 2817
2057 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0)); 2818 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2058 2819
2059 /* Prepare fragments */ 2820 /* Prepare fragments */
2060 for (i = 0; i < nfrags; i++) { 2821 for (i = 0; i < nfrags; i++) {
@@ -2063,24 +2824,26 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2063 des = skb_frag_dma_map(priv->device, frag, 0, 2824 des = skb_frag_dma_map(priv->device, frag, 0,
2064 skb_frag_size(frag), 2825 skb_frag_size(frag),
2065 DMA_TO_DEVICE); 2826 DMA_TO_DEVICE);
2827 if (dma_mapping_error(priv->device, des))
2828 goto dma_map_err;
2066 2829
2067 stmmac_tso_allocator(priv, des, skb_frag_size(frag), 2830 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2068 (i == nfrags - 1)); 2831 (i == nfrags - 1), queue);
2069 2832
2070 priv->tx_skbuff_dma[priv->cur_tx].buf = des; 2833 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2071 priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag); 2834 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2072 priv->tx_skbuff[priv->cur_tx] = NULL; 2835 tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2073 priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true; 2836 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2074 } 2837 }
2075 2838
2076 priv->tx_skbuff_dma[priv->cur_tx].last_segment = true; 2839 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2077 2840
2078 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); 2841 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2079 2842
2080 if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { 2843 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2081 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 2844 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2082 __func__); 2845 __func__);
2083 netif_stop_queue(dev); 2846 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue));
2084 } 2847 }
2085 2848
2086 dev->stats.tx_bytes += skb->len; 2849 dev->stats.tx_bytes += skb->len;
@@ -2112,7 +2875,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2112 priv->hw->desc->prepare_tso_tx_desc(first, 1, 2875 priv->hw->desc->prepare_tso_tx_desc(first, 1,
2113 proto_hdr_len, 2876 proto_hdr_len,
2114 pay_len, 2877 pay_len,
2115 1, priv->tx_skbuff_dma[first_entry].last_segment, 2878 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2116 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len)); 2879 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2117 2880
2118 /* If context desc is used to change MSS */ 2881 /* If context desc is used to change MSS */
@@ -2127,20 +2890,20 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2127 2890
2128 if (netif_msg_pktdata(priv)) { 2891 if (netif_msg_pktdata(priv)) {
2129 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", 2892 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2130 __func__, priv->cur_tx, priv->dirty_tx, first_entry, 2893 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2131 priv->cur_tx, first, nfrags); 2894 tx_q->cur_tx, first, nfrags);
2132 2895
2133 priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 2896 priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2134 0); 2897 0);
2135 2898
2136 pr_info(">>> frame to be transmitted: "); 2899 pr_info(">>> frame to be transmitted: ");
2137 print_pkt(skb->data, skb_headlen(skb)); 2900 print_pkt(skb->data, skb_headlen(skb));
2138 } 2901 }
2139 2902
2140 netdev_sent_queue(dev, skb->len); 2903 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2141 2904
2142 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, 2905 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2143 STMMAC_CHAN0); 2906 queue);
2144 2907
2145 return NETDEV_TX_OK; 2908 return NETDEV_TX_OK;
2146 2909
@@ -2164,21 +2927,25 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2164 struct stmmac_priv *priv = netdev_priv(dev); 2927 struct stmmac_priv *priv = netdev_priv(dev);
2165 unsigned int nopaged_len = skb_headlen(skb); 2928 unsigned int nopaged_len = skb_headlen(skb);
2166 int i, csum_insertion = 0, is_jumbo = 0; 2929 int i, csum_insertion = 0, is_jumbo = 0;
2930 u32 queue = skb_get_queue_mapping(skb);
2167 int nfrags = skb_shinfo(skb)->nr_frags; 2931 int nfrags = skb_shinfo(skb)->nr_frags;
2168 unsigned int entry, first_entry; 2932 unsigned int entry, first_entry;
2169 struct dma_desc *desc, *first; 2933 struct dma_desc *desc, *first;
2934 struct stmmac_tx_queue *tx_q;
2170 unsigned int enh_desc; 2935 unsigned int enh_desc;
2171 unsigned int des; 2936 unsigned int des;
2172 2937
2938 tx_q = &priv->tx_queue[queue];
2939
2173 /* Manage oversized TCP frames for GMAC4 device */ 2940 /* Manage oversized TCP frames for GMAC4 device */
2174 if (skb_is_gso(skb) && priv->tso) { 2941 if (skb_is_gso(skb) && priv->tso) {
2175 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2942 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2176 return stmmac_tso_xmit(skb, dev); 2943 return stmmac_tso_xmit(skb, dev);
2177 } 2944 }
2178 2945
2179 if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { 2946 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
2180 if (!netif_queue_stopped(dev)) { 2947 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2181 netif_stop_queue(dev); 2948 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue));
2182 /* This is a hard error, log it. */ 2949 /* This is a hard error, log it. */
2183 netdev_err(priv->dev, 2950 netdev_err(priv->dev,
2184 "%s: Tx Ring full when queue awake\n", 2951 "%s: Tx Ring full when queue awake\n",
@@ -2190,19 +2957,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2190 if (priv->tx_path_in_lpi_mode) 2957 if (priv->tx_path_in_lpi_mode)
2191 stmmac_disable_eee_mode(priv); 2958 stmmac_disable_eee_mode(priv);
2192 2959
2193 entry = priv->cur_tx; 2960 entry = tx_q->cur_tx;
2194 first_entry = entry; 2961 first_entry = entry;
2195 2962
2196 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); 2963 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
2197 2964
2198 if (likely(priv->extend_desc)) 2965 if (likely(priv->extend_desc))
2199 desc = (struct dma_desc *)(priv->dma_etx + entry); 2966 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2200 else 2967 else
2201 desc = priv->dma_tx + entry; 2968 desc = tx_q->dma_tx + entry;
2202 2969
2203 first = desc; 2970 first = desc;
2204 2971
2205 priv->tx_skbuff[first_entry] = skb; 2972 tx_q->tx_skbuff[first_entry] = skb;
2206 2973
2207 enh_desc = priv->plat->enh_desc; 2974 enh_desc = priv->plat->enh_desc;
2208 /* To program the descriptors according to the size of the frame */ 2975 /* To program the descriptors according to the size of the frame */
@@ -2211,7 +2978,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2211 2978
2212 if (unlikely(is_jumbo) && likely(priv->synopsys_id < 2979 if (unlikely(is_jumbo) && likely(priv->synopsys_id <
2213 DWMAC_CORE_4_00)) { 2980 DWMAC_CORE_4_00)) {
2214 entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); 2981 entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
2215 if (unlikely(entry < 0)) 2982 if (unlikely(entry < 0))
2216 goto dma_map_err; 2983 goto dma_map_err;
2217 } 2984 }
@@ -2224,26 +2991,26 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2224 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 2991 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2225 2992
2226 if (likely(priv->extend_desc)) 2993 if (likely(priv->extend_desc))
2227 desc = (struct dma_desc *)(priv->dma_etx + entry); 2994 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2228 else 2995 else
2229 desc = priv->dma_tx + entry; 2996 desc = tx_q->dma_tx + entry;
2230 2997
2231 des = skb_frag_dma_map(priv->device, frag, 0, len, 2998 des = skb_frag_dma_map(priv->device, frag, 0, len,
2232 DMA_TO_DEVICE); 2999 DMA_TO_DEVICE);
2233 if (dma_mapping_error(priv->device, des)) 3000 if (dma_mapping_error(priv->device, des))
2234 goto dma_map_err; /* should reuse desc w/o issues */ 3001 goto dma_map_err; /* should reuse desc w/o issues */
2235 3002
2236 priv->tx_skbuff[entry] = NULL; 3003 tx_q->tx_skbuff[entry] = NULL;
2237 3004
2238 priv->tx_skbuff_dma[entry].buf = des; 3005 tx_q->tx_skbuff_dma[entry].buf = des;
2239 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) 3006 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2240 desc->des0 = cpu_to_le32(des); 3007 desc->des0 = cpu_to_le32(des);
2241 else 3008 else
2242 desc->des2 = cpu_to_le32(des); 3009 desc->des2 = cpu_to_le32(des);
2243 3010
2244 priv->tx_skbuff_dma[entry].map_as_page = true; 3011 tx_q->tx_skbuff_dma[entry].map_as_page = true;
2245 priv->tx_skbuff_dma[entry].len = len; 3012 tx_q->tx_skbuff_dma[entry].len = len;
2246 priv->tx_skbuff_dma[entry].last_segment = last_segment; 3013 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
2247 3014
2248 /* Prepare the descriptor and set the own bit too */ 3015 /* Prepare the descriptor and set the own bit too */
2249 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, 3016 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
@@ -2252,20 +3019,20 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2252 3019
2253 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 3020 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2254 3021
2255 priv->cur_tx = entry; 3022 tx_q->cur_tx = entry;
2256 3023
2257 if (netif_msg_pktdata(priv)) { 3024 if (netif_msg_pktdata(priv)) {
2258 void *tx_head; 3025 void *tx_head;
2259 3026
2260 netdev_dbg(priv->dev, 3027 netdev_dbg(priv->dev,
2261 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", 3028 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
2262 __func__, priv->cur_tx, priv->dirty_tx, first_entry, 3029 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2263 entry, first, nfrags); 3030 entry, first, nfrags);
2264 3031
2265 if (priv->extend_desc) 3032 if (priv->extend_desc)
2266 tx_head = (void *)priv->dma_etx; 3033 tx_head = (void *)tx_q->dma_etx;
2267 else 3034 else
2268 tx_head = (void *)priv->dma_tx; 3035 tx_head = (void *)tx_q->dma_tx;
2269 3036
2270 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false); 3037 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
2271 3038
@@ -2273,10 +3040,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2273 print_pkt(skb->data, skb->len); 3040 print_pkt(skb->data, skb->len);
2274 } 3041 }
2275 3042
2276 if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { 3043 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2277 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 3044 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2278 __func__); 3045 __func__);
2279 netif_stop_queue(dev); 3046 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue));
2280 } 3047 }
2281 3048
2282 dev->stats.tx_bytes += skb->len; 3049 dev->stats.tx_bytes += skb->len;
@@ -2311,14 +3078,14 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2311 if (dma_mapping_error(priv->device, des)) 3078 if (dma_mapping_error(priv->device, des))
2312 goto dma_map_err; 3079 goto dma_map_err;
2313 3080
2314 priv->tx_skbuff_dma[first_entry].buf = des; 3081 tx_q->tx_skbuff_dma[first_entry].buf = des;
2315 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) 3082 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2316 first->des0 = cpu_to_le32(des); 3083 first->des0 = cpu_to_le32(des);
2317 else 3084 else
2318 first->des2 = cpu_to_le32(des); 3085 first->des2 = cpu_to_le32(des);
2319 3086
2320 priv->tx_skbuff_dma[first_entry].len = nopaged_len; 3087 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
2321 priv->tx_skbuff_dma[first_entry].last_segment = last_segment; 3088 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
2322 3089
2323 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 3090 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2324 priv->hwts_tx_en)) { 3091 priv->hwts_tx_en)) {
@@ -2339,13 +3106,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2339 dma_wmb(); 3106 dma_wmb();
2340 } 3107 }
2341 3108
2342 netdev_sent_queue(dev, skb->len); 3109 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2343 3110
2344 if (priv->synopsys_id < DWMAC_CORE_4_00) 3111 if (priv->synopsys_id < DWMAC_CORE_4_00)
2345 priv->hw->dma->enable_dma_transmission(priv->ioaddr); 3112 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
2346 else 3113 else
2347 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, 3114 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2348 STMMAC_CHAN0); 3115 queue);
2349 3116
2350 return NETDEV_TX_OK; 3117 return NETDEV_TX_OK;
2351 3118
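
The stmmac_xmit()/stmmac_tso_xmit() hunks above replace the device-wide netif_stop_queue() and netdev_sent_queue() calls with their per-queue counterparts, keyed off skb_get_queue_mapping(). A minimal sketch of that idiom, assuming made-up foo_* names and a stubbed ring-space check (this is not stmmac code):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Stub standing in for a per-ring "free descriptors left" check. */
static int foo_tx_avail(struct net_device *dev, u32 queue)
{
	return 256;
}

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Which TX queue the core picked for this skb */
	u32 queue = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);

	/* ... map buffers and fill this queue's descriptor ring ... */

	/* Byte-queue-limit accounting is now per queue, not per device */
	netdev_tx_sent_queue(txq, skb->len);

	/* Stop only this subqueue when its ring is nearly full */
	if (foo_tx_avail(dev, queue) <= MAX_SKB_FRAGS + 1)
		netif_tx_stop_queue(txq);

	return NETDEV_TX_OK;
}

Stopping only the affected subqueue keeps the other rings transmitting while one ring waits for completions.
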
@@ -2373,9 +3140,9 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
2373} 3140}
2374 3141
2375 3142
2376static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv) 3143static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
2377{ 3144{
2378 if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH) 3145 if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
2379 return 0; 3146 return 0;
2380 3147
2381 return 1; 3148 return 1;
@@ -2384,30 +3151,32 @@ static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
2384/** 3151/**
2385 * stmmac_rx_refill - refill used skb preallocated buffers 3152 * stmmac_rx_refill - refill used skb preallocated buffers
2386 * @priv: driver private structure 3153 * @priv: driver private structure
3154 * @queue: RX queue index
2387 * Description : this is to reallocate the skb for the reception process 3155 * Description : this is to reallocate the skb for the reception process
2388 * that is based on zero-copy. 3156 * that is based on zero-copy.
2389 */ 3157 */
2390static inline void stmmac_rx_refill(struct stmmac_priv *priv) 3158static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
2391{ 3159{
3160 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3161 int dirty = stmmac_rx_dirty(priv, queue);
3162 unsigned int entry = rx_q->dirty_rx;
2392 int bfsize = priv->dma_buf_sz; 3163 int bfsize = priv->dma_buf_sz;
2393 unsigned int entry = priv->dirty_rx;
2394 int dirty = stmmac_rx_dirty(priv);
2395 3164
2396 while (dirty-- > 0) { 3165 while (dirty-- > 0) {
2397 struct dma_desc *p; 3166 struct dma_desc *p;
2398 3167
2399 if (priv->extend_desc) 3168 if (priv->extend_desc)
2400 p = (struct dma_desc *)(priv->dma_erx + entry); 3169 p = (struct dma_desc *)(rx_q->dma_erx + entry);
2401 else 3170 else
2402 p = priv->dma_rx + entry; 3171 p = rx_q->dma_rx + entry;
2403 3172
2404 if (likely(priv->rx_skbuff[entry] == NULL)) { 3173 if (!rx_q->rx_skbuff[entry]) {
2405 struct sk_buff *skb; 3174 struct sk_buff *skb;
2406 3175
2407 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); 3176 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
2408 if (unlikely(!skb)) { 3177 if (unlikely(!skb)) {
2409 /* so for a while no zero-copy! */ 3178 /* so for a while no zero-copy! */
2410 priv->rx_zeroc_thresh = STMMAC_RX_THRESH; 3179 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
2411 if (unlikely(net_ratelimit())) 3180 if (unlikely(net_ratelimit()))
2412 dev_err(priv->device, 3181 dev_err(priv->device,
2413 "fail to alloc skb entry %d\n", 3182 "fail to alloc skb entry %d\n",
@@ -2415,28 +3184,28 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2415 break; 3184 break;
2416 } 3185 }
2417 3186
2418 priv->rx_skbuff[entry] = skb; 3187 rx_q->rx_skbuff[entry] = skb;
2419 priv->rx_skbuff_dma[entry] = 3188 rx_q->rx_skbuff_dma[entry] =
2420 dma_map_single(priv->device, skb->data, bfsize, 3189 dma_map_single(priv->device, skb->data, bfsize,
2421 DMA_FROM_DEVICE); 3190 DMA_FROM_DEVICE);
2422 if (dma_mapping_error(priv->device, 3191 if (dma_mapping_error(priv->device,
2423 priv->rx_skbuff_dma[entry])) { 3192 rx_q->rx_skbuff_dma[entry])) {
2424 netdev_err(priv->dev, "Rx DMA map failed\n"); 3193 netdev_err(priv->dev, "Rx DMA map failed\n");
2425 dev_kfree_skb(skb); 3194 dev_kfree_skb(skb);
2426 break; 3195 break;
2427 } 3196 }
2428 3197
2429 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) { 3198 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2430 p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]); 3199 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
2431 p->des1 = 0; 3200 p->des1 = 0;
2432 } else { 3201 } else {
2433 p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]); 3202 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
2434 } 3203 }
2435 if (priv->hw->mode->refill_desc3) 3204 if (priv->hw->mode->refill_desc3)
2436 priv->hw->mode->refill_desc3(priv, p); 3205 priv->hw->mode->refill_desc3(rx_q, p);
2437 3206
2438 if (priv->rx_zeroc_thresh > 0) 3207 if (rx_q->rx_zeroc_thresh > 0)
2439 priv->rx_zeroc_thresh--; 3208 rx_q->rx_zeroc_thresh--;
2440 3209
2441 netif_dbg(priv, rx_status, priv->dev, 3210 netif_dbg(priv, rx_status, priv->dev,
2442 "refill entry #%d\n", entry); 3211 "refill entry #%d\n", entry);
@@ -2452,7 +3221,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2452 3221
2453 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); 3222 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
2454 } 3223 }
2455 priv->dirty_rx = entry; 3224 rx_q->dirty_rx = entry;
2456} 3225}
2457 3226
2458/** 3227/**
@@ -2462,21 +3231,22 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2462 * Description : this the function called by the napi poll method. 3231 * Description : this the function called by the napi poll method.
2463 * It gets all the frames inside the ring. 3232 * It gets all the frames inside the ring.
2464 */ 3233 */
2465static int stmmac_rx(struct stmmac_priv *priv, int limit) 3234static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
2466{ 3235{
2467 unsigned int entry = priv->cur_rx; 3236 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3237 unsigned int entry = rx_q->cur_rx;
3238 int coe = priv->hw->rx_csum;
2468 unsigned int next_entry; 3239 unsigned int next_entry;
2469 unsigned int count = 0; 3240 unsigned int count = 0;
2470 int coe = priv->hw->rx_csum;
2471 3241
2472 if (netif_msg_rx_status(priv)) { 3242 if (netif_msg_rx_status(priv)) {
2473 void *rx_head; 3243 void *rx_head;
2474 3244
2475 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); 3245 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
2476 if (priv->extend_desc) 3246 if (priv->extend_desc)
2477 rx_head = (void *)priv->dma_erx; 3247 rx_head = (void *)rx_q->dma_erx;
2478 else 3248 else
2479 rx_head = (void *)priv->dma_rx; 3249 rx_head = (void *)rx_q->dma_rx;
2480 3250
2481 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true); 3251 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
2482 } 3252 }
@@ -2486,9 +3256,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2486 struct dma_desc *np; 3256 struct dma_desc *np;
2487 3257
2488 if (priv->extend_desc) 3258 if (priv->extend_desc)
2489 p = (struct dma_desc *)(priv->dma_erx + entry); 3259 p = (struct dma_desc *)(rx_q->dma_erx + entry);
2490 else 3260 else
2491 p = priv->dma_rx + entry; 3261 p = rx_q->dma_rx + entry;
2492 3262
2493 /* read the status of the incoming frame */ 3263 /* read the status of the incoming frame */
2494 status = priv->hw->desc->rx_status(&priv->dev->stats, 3264 status = priv->hw->desc->rx_status(&priv->dev->stats,
@@ -2499,20 +3269,20 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2499 3269
2500 count++; 3270 count++;
2501 3271
2502 priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE); 3272 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
2503 next_entry = priv->cur_rx; 3273 next_entry = rx_q->cur_rx;
2504 3274
2505 if (priv->extend_desc) 3275 if (priv->extend_desc)
2506 np = (struct dma_desc *)(priv->dma_erx + next_entry); 3276 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
2507 else 3277 else
2508 np = priv->dma_rx + next_entry; 3278 np = rx_q->dma_rx + next_entry;
2509 3279
2510 prefetch(np); 3280 prefetch(np);
2511 3281
2512 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) 3282 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
2513 priv->hw->desc->rx_extended_status(&priv->dev->stats, 3283 priv->hw->desc->rx_extended_status(&priv->dev->stats,
2514 &priv->xstats, 3284 &priv->xstats,
2515 priv->dma_erx + 3285 rx_q->dma_erx +
2516 entry); 3286 entry);
2517 if (unlikely(status == discard_frame)) { 3287 if (unlikely(status == discard_frame)) {
2518 priv->dev->stats.rx_errors++; 3288 priv->dev->stats.rx_errors++;
@@ -2522,9 +3292,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2522 * them in stmmac_rx_refill() function so that 3292 * them in stmmac_rx_refill() function so that
2523 * device can reuse it. 3293 * device can reuse it.
2524 */ 3294 */
2525 priv->rx_skbuff[entry] = NULL; 3295 rx_q->rx_skbuff[entry] = NULL;
2526 dma_unmap_single(priv->device, 3296 dma_unmap_single(priv->device,
2527 priv->rx_skbuff_dma[entry], 3297 rx_q->rx_skbuff_dma[entry],
2528 priv->dma_buf_sz, 3298 priv->dma_buf_sz,
2529 DMA_FROM_DEVICE); 3299 DMA_FROM_DEVICE);
2530 } 3300 }
@@ -2572,7 +3342,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2572 */ 3342 */
2573 if (unlikely(!priv->plat->has_gmac4 && 3343 if (unlikely(!priv->plat->has_gmac4 &&
2574 ((frame_len < priv->rx_copybreak) || 3344 ((frame_len < priv->rx_copybreak) ||
2575 stmmac_rx_threshold_count(priv)))) { 3345 stmmac_rx_threshold_count(rx_q)))) {
2576 skb = netdev_alloc_skb_ip_align(priv->dev, 3346 skb = netdev_alloc_skb_ip_align(priv->dev,
2577 frame_len); 3347 frame_len);
2578 if (unlikely(!skb)) { 3348 if (unlikely(!skb)) {
@@ -2584,21 +3354,21 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2584 } 3354 }
2585 3355
2586 dma_sync_single_for_cpu(priv->device, 3356 dma_sync_single_for_cpu(priv->device,
2587 priv->rx_skbuff_dma 3357 rx_q->rx_skbuff_dma
2588 [entry], frame_len, 3358 [entry], frame_len,
2589 DMA_FROM_DEVICE); 3359 DMA_FROM_DEVICE);
2590 skb_copy_to_linear_data(skb, 3360 skb_copy_to_linear_data(skb,
2591 priv-> 3361 rx_q->
2592 rx_skbuff[entry]->data, 3362 rx_skbuff[entry]->data,
2593 frame_len); 3363 frame_len);
2594 3364
2595 skb_put(skb, frame_len); 3365 skb_put(skb, frame_len);
2596 dma_sync_single_for_device(priv->device, 3366 dma_sync_single_for_device(priv->device,
2597 priv->rx_skbuff_dma 3367 rx_q->rx_skbuff_dma
2598 [entry], frame_len, 3368 [entry], frame_len,
2599 DMA_FROM_DEVICE); 3369 DMA_FROM_DEVICE);
2600 } else { 3370 } else {
2601 skb = priv->rx_skbuff[entry]; 3371 skb = rx_q->rx_skbuff[entry];
2602 if (unlikely(!skb)) { 3372 if (unlikely(!skb)) {
2603 netdev_err(priv->dev, 3373 netdev_err(priv->dev,
2604 "%s: Inconsistent Rx chain\n", 3374 "%s: Inconsistent Rx chain\n",
@@ -2607,12 +3377,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2607 break; 3377 break;
2608 } 3378 }
2609 prefetch(skb->data - NET_IP_ALIGN); 3379 prefetch(skb->data - NET_IP_ALIGN);
2610 priv->rx_skbuff[entry] = NULL; 3380 rx_q->rx_skbuff[entry] = NULL;
2611 priv->rx_zeroc_thresh++; 3381 rx_q->rx_zeroc_thresh++;
2612 3382
2613 skb_put(skb, frame_len); 3383 skb_put(skb, frame_len);
2614 dma_unmap_single(priv->device, 3384 dma_unmap_single(priv->device,
2615 priv->rx_skbuff_dma[entry], 3385 rx_q->rx_skbuff_dma[entry],
2616 priv->dma_buf_sz, 3386 priv->dma_buf_sz,
2617 DMA_FROM_DEVICE); 3387 DMA_FROM_DEVICE);
2618 } 3388 }
@@ -2634,7 +3404,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2634 else 3404 else
2635 skb->ip_summed = CHECKSUM_UNNECESSARY; 3405 skb->ip_summed = CHECKSUM_UNNECESSARY;
2636 3406
2637 napi_gro_receive(&priv->napi, skb); 3407 napi_gro_receive(&rx_q->napi, skb);
2638 3408
2639 priv->dev->stats.rx_packets++; 3409 priv->dev->stats.rx_packets++;
2640 priv->dev->stats.rx_bytes += frame_len; 3410 priv->dev->stats.rx_bytes += frame_len;
@@ -2642,7 +3412,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2642 entry = next_entry; 3412 entry = next_entry;
2643 } 3413 }
2644 3414
2645 stmmac_rx_refill(priv); 3415 stmmac_rx_refill(priv, queue);
2646 3416
2647 priv->xstats.rx_pkt_n += count; 3417 priv->xstats.rx_pkt_n += count;
2648 3418
@@ -2659,16 +3429,25 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2659 */ 3429 */
2660static int stmmac_poll(struct napi_struct *napi, int budget) 3430static int stmmac_poll(struct napi_struct *napi, int budget)
2661{ 3431{
2662 struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi); 3432 struct stmmac_rx_queue *rx_q =
2663 int work_done = 0; 3433 container_of(napi, struct stmmac_rx_queue, napi);
3434 struct stmmac_priv *priv = rx_q->priv_data;
3435 u32 tx_count = priv->dma_cap.number_tx_queues;
3436 u32 chan = rx_q->queue_index;
3437 u32 work_done = 0;
3438 u32 queue = 0;
2664 3439
2665 priv->xstats.napi_poll++; 3440 priv->xstats.napi_poll++;
2666 stmmac_tx_clean(priv); 3441 /* check all the queues */
3442 for (queue = 0; queue < tx_count; queue++)
3443 stmmac_tx_clean(priv, queue);
3444
3445 /* Process RX packets from this queue */
3446 work_done = stmmac_rx(priv, budget, rx_q->queue_index);
2667 3447
2668 work_done = stmmac_rx(priv, budget);
2669 if (work_done < budget) { 3448 if (work_done < budget) {
2670 napi_complete_done(napi, work_done); 3449 napi_complete_done(napi, work_done);
2671 stmmac_enable_dma_irq(priv); 3450 stmmac_enable_dma_irq(priv, chan);
2672 } 3451 }
2673 return work_done; 3452 return work_done;
2674} 3453}
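
stmmac_poll() is reworked above so that each RX queue owns a napi_struct and the poll callback recovers its queue with container_of(). A generic sketch of that pattern, with invented foo_* names:

#include <linux/netdevice.h>

struct foo_rx_queue {
	u32 queue_index;		/* hardware queue this context serves */
	struct napi_struct napi;	/* one NAPI instance per RX queue */
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	/* The napi_struct is embedded in the queue, so container_of()
	 * recovers the per-queue state without any global lookup.
	 */
	struct foo_rx_queue *rx_q =
		container_of(napi, struct foo_rx_queue, napi);
	int work_done = 0;

	pr_debug("polling RX queue %u\n", rx_q->queue_index);

	/* ... receive up to 'budget' frames from this queue's ring ... */

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}

Embedding the NAPI context in the queue structure is what allows several queues to be polled independently, potentially on different CPUs.
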
@@ -2684,9 +3463,12 @@ static int stmmac_poll(struct napi_struct *napi, int budget)
2684static void stmmac_tx_timeout(struct net_device *dev) 3463static void stmmac_tx_timeout(struct net_device *dev)
2685{ 3464{
2686 struct stmmac_priv *priv = netdev_priv(dev); 3465 struct stmmac_priv *priv = netdev_priv(dev);
3466 u32 tx_count = priv->plat->tx_queues_to_use;
3467 u32 chan;
2687 3468
2688 /* Clear Tx resources and restart transmitting again */ 3469 /* Clear Tx resources and restart transmitting again */
2689 stmmac_tx_err(priv); 3470 for (chan = 0; chan < tx_count; chan++)
3471 stmmac_tx_err(priv, chan);
2690} 3472}
2691 3473
2692/** 3474/**
@@ -2795,6 +3577,12 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2795{ 3577{
2796 struct net_device *dev = (struct net_device *)dev_id; 3578 struct net_device *dev = (struct net_device *)dev_id;
2797 struct stmmac_priv *priv = netdev_priv(dev); 3579 struct stmmac_priv *priv = netdev_priv(dev);
3580 u32 rx_cnt = priv->plat->rx_queues_to_use;
3581 u32 tx_cnt = priv->plat->tx_queues_to_use;
3582 u32 queues_count;
3583 u32 queue;
3584
3585 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
2798 3586
2799 if (priv->irq_wake) 3587 if (priv->irq_wake)
2800 pm_wakeup_event(priv->device, 0); 3588 pm_wakeup_event(priv->device, 0);
@@ -2808,16 +3596,30 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2808 if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) { 3596 if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
2809 int status = priv->hw->mac->host_irq_status(priv->hw, 3597 int status = priv->hw->mac->host_irq_status(priv->hw,
2810 &priv->xstats); 3598 &priv->xstats);
3599
2811 if (unlikely(status)) { 3600 if (unlikely(status)) {
2812 /* For LPI we need to save the tx status */ 3601 /* For LPI we need to save the tx status */
2813 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) 3602 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
2814 priv->tx_path_in_lpi_mode = true; 3603 priv->tx_path_in_lpi_mode = true;
2815 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) 3604 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
2816 priv->tx_path_in_lpi_mode = false; 3605 priv->tx_path_in_lpi_mode = false;
2817 if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr) 3606 }
2818 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, 3607
2819 priv->rx_tail_addr, 3608 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2820 STMMAC_CHAN0); 3609 for (queue = 0; queue < queues_count; queue++) {
3610 struct stmmac_rx_queue *rx_q =
3611 &priv->rx_queue[queue];
3612
3613 status |=
3614 priv->hw->mac->host_mtl_irq_status(priv->hw,
3615 queue);
3616
3617 if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3618 priv->hw->dma->set_rx_tail_ptr)
3619 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3620 rx_q->rx_tail_addr,
3621 queue);
3622 }
2821 } 3623 }
2822 3624
2823 /* PCS link status */ 3625 /* PCS link status */
@@ -2915,17 +3717,40 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
2915{ 3717{
2916 struct net_device *dev = seq->private; 3718 struct net_device *dev = seq->private;
2917 struct stmmac_priv *priv = netdev_priv(dev); 3719 struct stmmac_priv *priv = netdev_priv(dev);
3720 u32 rx_count = priv->plat->rx_queues_to_use;
3721 u32 tx_count = priv->plat->tx_queues_to_use;
3722 u32 queue;
2918 3723
2919 if (priv->extend_desc) { 3724 for (queue = 0; queue < rx_count; queue++) {
2920 seq_printf(seq, "Extended RX descriptor ring:\n"); 3725 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2921 sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq); 3726
2922 seq_printf(seq, "Extended TX descriptor ring:\n"); 3727 seq_printf(seq, "RX Queue %d:\n", queue);
2923 sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq); 3728
2924 } else { 3729 if (priv->extend_desc) {
2925 seq_printf(seq, "RX descriptor ring:\n"); 3730 seq_printf(seq, "Extended descriptor ring:\n");
2926 sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq); 3731 sysfs_display_ring((void *)rx_q->dma_erx,
2927 seq_printf(seq, "TX descriptor ring:\n"); 3732 DMA_RX_SIZE, 1, seq);
2928 sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq); 3733 } else {
3734 seq_printf(seq, "Descriptor ring:\n");
3735 sysfs_display_ring((void *)rx_q->dma_rx,
3736 DMA_RX_SIZE, 0, seq);
3737 }
3738 }
3739
3740 for (queue = 0; queue < tx_count; queue++) {
3741 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3742
3743 seq_printf(seq, "TX Queue %d:\n", queue);
3744
3745 if (priv->extend_desc) {
3746 seq_printf(seq, "Extended descriptor ring:\n");
3747 sysfs_display_ring((void *)tx_q->dma_etx,
3748 DMA_TX_SIZE, 1, seq);
3749 } else {
3750 seq_printf(seq, "Descriptor ring:\n");
3751 sysfs_display_ring((void *)tx_q->dma_tx,
3752 DMA_TX_SIZE, 0, seq);
3753 }
2929 } 3754 }
2930 3755
2931 return 0; 3756 return 0;
@@ -3208,11 +4033,14 @@ int stmmac_dvr_probe(struct device *device,
3208 struct plat_stmmacenet_data *plat_dat, 4033 struct plat_stmmacenet_data *plat_dat,
3209 struct stmmac_resources *res) 4034 struct stmmac_resources *res)
3210{ 4035{
3211 int ret = 0;
3212 struct net_device *ndev = NULL; 4036 struct net_device *ndev = NULL;
3213 struct stmmac_priv *priv; 4037 struct stmmac_priv *priv;
4038 int ret = 0;
4039 u32 queue;
3214 4040
3215 ndev = alloc_etherdev(sizeof(struct stmmac_priv)); 4041 ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4042 MTL_MAX_TX_QUEUES,
4043 MTL_MAX_RX_QUEUES);
3216 if (!ndev) 4044 if (!ndev)
3217 return -ENOMEM; 4045 return -ENOMEM;
3218 4046
@@ -3254,6 +4082,12 @@ int stmmac_dvr_probe(struct device *device,
3254 if (ret) 4082 if (ret)
3255 goto error_hw_init; 4083 goto error_hw_init;
3256 4084
4085 /* Configure real RX and TX queues */
4086 ndev->real_num_rx_queues = priv->plat->rx_queues_to_use;
4087 ndev->real_num_tx_queues = priv->plat->tx_queues_to_use;
4088
4089 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
4090
3257 ndev->netdev_ops = &stmmac_netdev_ops; 4091 ndev->netdev_ops = &stmmac_netdev_ops;
3258 4092
3259 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 4093 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
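
The probe hunks above switch to alloc_etherdev_mqs() so the net_device is created with room for multiple TX/RX queues, and the number actually in use is declared afterwards. A hedged sketch of that allocation pattern follows; MAX_TX_QUEUES/MAX_RX_QUEUES and foo_priv are illustrative, and the netif_set_real_num_*_queues() helpers are shown as the usual interface rather than what this driver does verbatim:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

#define MAX_TX_QUEUES	8	/* illustrative upper bounds */
#define MAX_RX_QUEUES	8

struct foo_priv {
	int dummy;
};

static struct net_device *foo_alloc(unsigned int tx_in_use,
				    unsigned int rx_in_use)
{
	struct net_device *ndev;

	/* Reserve room for the maximum number of queues up front... */
	ndev = alloc_etherdev_mqs(sizeof(struct foo_priv),
				  MAX_TX_QUEUES, MAX_RX_QUEUES);
	if (!ndev)
		return NULL;

	/* ...then tell the stack how many are actually active. */
	netif_set_real_num_tx_queues(ndev, tx_in_use);
	netif_set_real_num_rx_queues(ndev, rx_in_use);

	return ndev;
}
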
@@ -3303,7 +4137,26 @@ int stmmac_dvr_probe(struct device *device,
3303 "Enable RX Mitigation via HW Watchdog Timer\n"); 4137 "Enable RX Mitigation via HW Watchdog Timer\n");
3304 } 4138 }
3305 4139
3306 netif_napi_add(ndev, &priv->napi, stmmac_poll, 64); 4140 ret = alloc_dma_desc_resources(priv);
4141 if (ret < 0) {
4142 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
4143 __func__);
4144 goto init_dma_error;
4145 }
4146
4147 ret = init_dma_desc_rings(priv->dev, GFP_KERNEL);
4148 if (ret < 0) {
4149 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
4150 __func__);
4151 goto init_dma_error;
4152 }
4153
4154 for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4155 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4156
4157 netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4158 (64 * priv->plat->rx_queues_to_use));
4159 }
3307 4160
3308 spin_lock_init(&priv->lock); 4161 spin_lock_init(&priv->lock);
3309 4162
@@ -3348,7 +4201,13 @@ error_netdev_register:
3348 priv->hw->pcs != STMMAC_PCS_RTBI) 4201 priv->hw->pcs != STMMAC_PCS_RTBI)
3349 stmmac_mdio_unregister(ndev); 4202 stmmac_mdio_unregister(ndev);
3350error_mdio_register: 4203error_mdio_register:
3351 netif_napi_del(&priv->napi); 4204 for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4205 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4206
4207 netif_napi_del(&rx_q->napi);
4208 }
4209init_dma_error:
4210 free_dma_desc_resources(priv);
3352error_hw_init: 4211error_hw_init:
3353 free_netdev(ndev); 4212 free_netdev(ndev);
3354 4213
@@ -3369,8 +4228,7 @@ int stmmac_dvr_remove(struct device *dev)
3369 4228
3370 netdev_info(priv->dev, "%s: removing driver", __func__); 4229 netdev_info(priv->dev, "%s: removing driver", __func__);
3371 4230
3372 priv->hw->dma->stop_rx(priv->ioaddr); 4231 stmmac_stop_all_dma(priv);
3373 priv->hw->dma->stop_tx(priv->ioaddr);
3374 4232
3375 stmmac_set_mac(priv->ioaddr, false); 4233 stmmac_set_mac(priv->ioaddr, false);
3376 netif_carrier_off(ndev); 4234 netif_carrier_off(ndev);
@@ -3411,13 +4269,12 @@ int stmmac_suspend(struct device *dev)
3411 spin_lock_irqsave(&priv->lock, flags); 4269 spin_lock_irqsave(&priv->lock, flags);
3412 4270
3413 netif_device_detach(ndev); 4271 netif_device_detach(ndev);
3414 netif_stop_queue(ndev); 4272 stmmac_stop_all_queues(priv);
3415 4273
3416 napi_disable(&priv->napi); 4274 stmmac_disable_all_queues(priv);
3417 4275
3418 /* Stop TX/RX DMA */ 4276 /* Stop TX/RX DMA */
3419 priv->hw->dma->stop_tx(priv->ioaddr); 4277 stmmac_stop_all_dma(priv);
3420 priv->hw->dma->stop_rx(priv->ioaddr);
3421 4278
3422 /* Enable Power down mode by programming the PMT regs */ 4279 /* Enable Power down mode by programming the PMT regs */
3423 if (device_may_wakeup(priv->device)) { 4280 if (device_may_wakeup(priv->device)) {
@@ -3440,6 +4297,31 @@ int stmmac_suspend(struct device *dev)
3440EXPORT_SYMBOL_GPL(stmmac_suspend); 4297EXPORT_SYMBOL_GPL(stmmac_suspend);
3441 4298
3442/** 4299/**
4300 * stmmac_reset_queues_param - reset queue parameters
4301 * @dev: device pointer
4302 */
4303static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4304{
4305 u32 rx_cnt = priv->plat->rx_queues_to_use;
4306 u32 tx_cnt = priv->plat->tx_queues_to_use;
4307 u32 queue;
4308
4309 for (queue = 0; queue < rx_cnt; queue++) {
4310 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4311
4312 rx_q->cur_rx = 0;
4313 rx_q->dirty_rx = 0;
4314 }
4315
4316 for (queue = 0; queue < tx_cnt; queue++) {
4317 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4318
4319 tx_q->cur_tx = 0;
4320 tx_q->dirty_tx = 0;
4321 }
4322}
4323
4324/**
3443 * stmmac_resume - resume callback 4325 * stmmac_resume - resume callback
3444 * @dev: device pointer 4326 * @dev: device pointer
3445 * Description: when resume this function is invoked to setup the DMA and CORE 4327 * Description: when resume this function is invoked to setup the DMA and CORE
@@ -3479,10 +4361,8 @@ int stmmac_resume(struct device *dev)
3479 4361
3480 spin_lock_irqsave(&priv->lock, flags); 4362 spin_lock_irqsave(&priv->lock, flags);
3481 4363
3482 priv->cur_rx = 0; 4364 stmmac_reset_queues_param(priv);
3483 priv->dirty_rx = 0; 4365
3484 priv->dirty_tx = 0;
3485 priv->cur_tx = 0;
3486 /* reset private mss value to force mss context settings at 4366 /* reset private mss value to force mss context settings at
3487 * next tso xmit (only used for gmac4). 4367 * next tso xmit (only used for gmac4).
3488 */ 4368 */
@@ -3494,9 +4374,9 @@ int stmmac_resume(struct device *dev)
3494 stmmac_init_tx_coalesce(priv); 4374 stmmac_init_tx_coalesce(priv);
3495 stmmac_set_rx_mode(ndev); 4375 stmmac_set_rx_mode(ndev);
3496 4376
3497 napi_enable(&priv->napi); 4377 stmmac_enable_all_queues(priv);
3498 4378
3499 netif_start_queue(ndev); 4379 stmmac_start_all_queues(priv);
3500 4380
3501 spin_unlock_irqrestore(&priv->lock, flags); 4381 spin_unlock_irqrestore(&priv->lock, flags);
3502 4382
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 5c9e462276b9..a224d7bf1c1b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -88,6 +88,17 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
88 88
89 /* Set the maxmtu to a default of JUMBO_LEN */ 89 /* Set the maxmtu to a default of JUMBO_LEN */
90 plat->maxmtu = JUMBO_LEN; 90 plat->maxmtu = JUMBO_LEN;
91
92 /* Set default number of RX and TX queues to use */
93 plat->tx_queues_to_use = 1;
94 plat->rx_queues_to_use = 1;
95
96 /* Disable Priority config by default */
97 plat->tx_queues_cfg[0].use_prio = false;
98 plat->rx_queues_cfg[0].use_prio = false;
99
100 /* Disable RX queues routing by default */
101 plat->rx_queues_cfg[0].pkt_route = 0x0;
91} 102}
92 103
93static int quark_default_data(struct plat_stmmacenet_data *plat, 104static int quark_default_data(struct plat_stmmacenet_data *plat,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 433a84239a68..f5c8b1bca002 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -108,7 +108,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
108 if (!np) 108 if (!np)
109 return NULL; 109 return NULL;
110 110
111 axi = kzalloc(sizeof(*axi), GFP_KERNEL); 111 axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
112 if (!axi) { 112 if (!axi) {
113 of_node_put(np); 113 of_node_put(np);
114 return ERR_PTR(-ENOMEM); 114 return ERR_PTR(-ENOMEM);
@@ -132,6 +132,148 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
132} 132}
133 133
134/** 134/**
135 * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
136 * @pdev: platform device
137 */
138static void stmmac_mtl_setup(struct platform_device *pdev,
139 struct plat_stmmacenet_data *plat)
140{
141 struct device_node *q_node;
142 struct device_node *rx_node;
143 struct device_node *tx_node;
144 u8 queue = 0;
145
146 rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
147 if (!rx_node)
148 return;
149
150 tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
151 if (!tx_node) {
152 of_node_put(rx_node);
153 return;
154 }
155
156 /* Processing RX queues common config */
157 if (of_property_read_u8(rx_node, "snps,rx-queues-to-use",
158 &plat->rx_queues_to_use))
159 plat->rx_queues_to_use = 1;
160
161 if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
162 plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
163 else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
164 plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
165 else
166 plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
167
168 /* Processing individual RX queue config */
169 for_each_child_of_node(rx_node, q_node) {
170 if (queue >= plat->rx_queues_to_use)
171 break;
172
173 if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
174 plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
175 else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
176 plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
177 else
178 plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
179
180 if (of_property_read_u8(q_node, "snps,map-to-dma-channel",
181 &plat->rx_queues_cfg[queue].chan))
182 plat->rx_queues_cfg[queue].chan = queue;
183 /* TODO: Dynamic mapping to be included in the future */
184
185 if (of_property_read_u32(q_node, "snps,priority",
186 &plat->rx_queues_cfg[queue].prio)) {
187 plat->rx_queues_cfg[queue].prio = 0;
188 plat->rx_queues_cfg[queue].use_prio = false;
189 } else {
190 plat->rx_queues_cfg[queue].use_prio = true;
191 }
192
193 /* RX queue specific packet type routing */
194 if (of_property_read_bool(q_node, "snps,route-avcp"))
195 plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
196 else if (of_property_read_bool(q_node, "snps,route-ptp"))
197 plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
198 else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
199 plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
200 else if (of_property_read_bool(q_node, "snps,route-up"))
201 plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
202 else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
203 plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
204 else
205 plat->rx_queues_cfg[queue].pkt_route = 0x0;
206
207 queue++;
208 }
209
210 /* Processing TX queues common config */
211 if (of_property_read_u8(tx_node, "snps,tx-queues-to-use",
212 &plat->tx_queues_to_use))
213 plat->tx_queues_to_use = 1;
214
215 if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
216 plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
217 else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
218 plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
219 else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
220 plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
221 else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
222 plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
223 else
224 plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
225
226 queue = 0;
227
228 /* Processing individual TX queue config */
229 for_each_child_of_node(tx_node, q_node) {
230 if (queue >= plat->tx_queues_to_use)
231 break;
232
233 if (of_property_read_u8(q_node, "snps,weight",
234 &plat->tx_queues_cfg[queue].weight))
235 plat->tx_queues_cfg[queue].weight = 0x10 + queue;
236
237 if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
238 plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
239 } else if (of_property_read_bool(q_node,
240 "snps,avb-algorithm")) {
241 plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
242
243 /* Credit Base Shaper parameters used by AVB */
244 if (of_property_read_u32(q_node, "snps,send_slope",
245 &plat->tx_queues_cfg[queue].send_slope))
246 plat->tx_queues_cfg[queue].send_slope = 0x0;
247 if (of_property_read_u32(q_node, "snps,idle_slope",
248 &plat->tx_queues_cfg[queue].idle_slope))
249 plat->tx_queues_cfg[queue].idle_slope = 0x0;
250 if (of_property_read_u32(q_node, "snps,high_credit",
251 &plat->tx_queues_cfg[queue].high_credit))
252 plat->tx_queues_cfg[queue].high_credit = 0x0;
253 if (of_property_read_u32(q_node, "snps,low_credit",
254 &plat->tx_queues_cfg[queue].low_credit))
255 plat->tx_queues_cfg[queue].low_credit = 0x0;
256 } else {
257 plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
258 }
259
260 if (of_property_read_u32(q_node, "snps,priority",
261 &plat->tx_queues_cfg[queue].prio)) {
262 plat->tx_queues_cfg[queue].prio = 0;
263 plat->tx_queues_cfg[queue].use_prio = false;
264 } else {
265 plat->tx_queues_cfg[queue].use_prio = true;
266 }
267
268 queue++;
269 }
270
271 of_node_put(rx_node);
272 of_node_put(tx_node);
273 of_node_put(q_node);
274}
275
276/**
135 * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources 277 * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
136 * @plat: driver data platform structure 278 * @plat: driver data platform structure
137 * @np: device tree node 279 * @np: device tree node
@@ -340,6 +482,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
340 482
341 plat->axi = stmmac_axi_setup(pdev); 483 plat->axi = stmmac_axi_setup(pdev);
342 484
485 stmmac_mtl_setup(pdev, plat);
486
343 /* clock setup */ 487 /* clock setup */
344 plat->stmmac_clk = devm_clk_get(&pdev->dev, 488 plat->stmmac_clk = devm_clk_get(&pdev->dev,
345 STMMAC_RESOURCE_NAME); 489 STMMAC_RESOURCE_NAME);
@@ -359,13 +503,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
359 clk_prepare_enable(plat->pclk); 503 clk_prepare_enable(plat->pclk);
360 504
361 /* Fall-back to main clock in case of no PTP ref is passed */ 505 /* Fall-back to main clock in case of no PTP ref is passed */
362 plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "clk_ptp_ref"); 506 plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
363 if (IS_ERR(plat->clk_ptp_ref)) { 507 if (IS_ERR(plat->clk_ptp_ref)) {
364 plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk); 508 plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
365 plat->clk_ptp_ref = NULL; 509 plat->clk_ptp_ref = NULL;
366 dev_warn(&pdev->dev, "PTP uses main clock\n"); 510 dev_warn(&pdev->dev, "PTP uses main clock\n");
367 } else { 511 } else {
368 clk_prepare_enable(plat->clk_ptp_ref);
369 plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref); 512 plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
370 dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate); 513 dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
371 } 514 }
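
stmmac_mtl_setup() above follows the usual device-tree parsing shape: resolve a phandle to a configuration node, read the shared properties with fall-back defaults, walk the child nodes for per-queue settings, and drop the node reference at the end. A stripped-down sketch of that shape, using invented example,* property names rather than the snps,* binding:

#include <linux/of.h>
#include <linux/platform_device.h>

static void example_parse_queues(struct platform_device *pdev)
{
	struct device_node *cfg, *child;
	u8 count;

	/* Resolve the phandle; keep single-queue defaults if it is absent */
	cfg = of_parse_phandle(pdev->dev.of_node, "example,queue-config", 0);
	if (!cfg)
		return;

	/* Optional scalar property with a fall-back default */
	if (of_property_read_u8(cfg, "example,queues-to-use", &count))
		count = 1;
	dev_dbg(&pdev->dev, "%u queues described\n", count);

	/* Each child node carries the per-queue settings */
	for_each_child_of_node(cfg, child) {
		u32 weight;

		if (of_property_read_u32(child, "example,weight", &weight))
			weight = 0x10;

		dev_dbg(&pdev->dev, "queue node %s, weight %u\n",
			child->name, weight);
	}

	of_node_put(cfg);	/* balance the of_parse_phandle() reference */
}
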
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 0e8e89f17dbb..382993c1561c 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -691,7 +691,8 @@ static void cas_mif_poll(struct cas *cp, const int enable)
691} 691}
692 692
693/* Must be invoked under cp->lock */ 693/* Must be invoked under cp->lock */
694static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep) 694static void cas_begin_auto_negotiation(struct cas *cp,
695 const struct ethtool_link_ksettings *ep)
695{ 696{
696 u16 ctl; 697 u16 ctl;
697#if 1 698#if 1
@@ -704,16 +705,16 @@ static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
704 if (!ep) 705 if (!ep)
705 goto start_aneg; 706 goto start_aneg;
706 lcntl = cp->link_cntl; 707 lcntl = cp->link_cntl;
707 if (ep->autoneg == AUTONEG_ENABLE) 708 if (ep->base.autoneg == AUTONEG_ENABLE) {
708 cp->link_cntl = BMCR_ANENABLE; 709 cp->link_cntl = BMCR_ANENABLE;
709 else { 710 } else {
710 u32 speed = ethtool_cmd_speed(ep); 711 u32 speed = ep->base.speed;
711 cp->link_cntl = 0; 712 cp->link_cntl = 0;
712 if (speed == SPEED_100) 713 if (speed == SPEED_100)
713 cp->link_cntl |= BMCR_SPEED100; 714 cp->link_cntl |= BMCR_SPEED100;
714 else if (speed == SPEED_1000) 715 else if (speed == SPEED_1000)
715 cp->link_cntl |= CAS_BMCR_SPEED1000; 716 cp->link_cntl |= CAS_BMCR_SPEED1000;
716 if (ep->duplex == DUPLEX_FULL) 717 if (ep->base.duplex == DUPLEX_FULL)
717 cp->link_cntl |= BMCR_FULLDPLX; 718 cp->link_cntl |= BMCR_FULLDPLX;
718 } 719 }
719#if 1 720#if 1
@@ -4528,19 +4529,21 @@ static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
4528 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); 4529 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
4529} 4530}
4530 4531
4531static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 4532static int cas_get_link_ksettings(struct net_device *dev,
4533 struct ethtool_link_ksettings *cmd)
4532{ 4534{
4533 struct cas *cp = netdev_priv(dev); 4535 struct cas *cp = netdev_priv(dev);
4534 u16 bmcr; 4536 u16 bmcr;
4535 int full_duplex, speed, pause; 4537 int full_duplex, speed, pause;
4536 unsigned long flags; 4538 unsigned long flags;
4537 enum link_state linkstate = link_up; 4539 enum link_state linkstate = link_up;
4540 u32 supported, advertising;
4538 4541
4539 cmd->advertising = 0; 4542 advertising = 0;
4540 cmd->supported = SUPPORTED_Autoneg; 4543 supported = SUPPORTED_Autoneg;
4541 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { 4544 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4542 cmd->supported |= SUPPORTED_1000baseT_Full; 4545 supported |= SUPPORTED_1000baseT_Full;
4543 cmd->advertising |= ADVERTISED_1000baseT_Full; 4546 advertising |= ADVERTISED_1000baseT_Full;
4544 } 4547 }
4545 4548
4546 /* Record PHY settings if HW is on. */ 4549 /* Record PHY settings if HW is on. */
@@ -4548,17 +4551,15 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4548 bmcr = 0; 4551 bmcr = 0;
4549 linkstate = cp->lstate; 4552 linkstate = cp->lstate;
4550 if (CAS_PHY_MII(cp->phy_type)) { 4553 if (CAS_PHY_MII(cp->phy_type)) {
4551 cmd->port = PORT_MII; 4554 cmd->base.port = PORT_MII;
4552 cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ? 4555 cmd->base.phy_address = cp->phy_addr;
4553 XCVR_INTERNAL : XCVR_EXTERNAL; 4556 advertising |= ADVERTISED_TP | ADVERTISED_MII |
4554 cmd->phy_address = cp->phy_addr;
4555 cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
4556 ADVERTISED_10baseT_Half | 4557 ADVERTISED_10baseT_Half |
4557 ADVERTISED_10baseT_Full | 4558 ADVERTISED_10baseT_Full |
4558 ADVERTISED_100baseT_Half | 4559 ADVERTISED_100baseT_Half |
4559 ADVERTISED_100baseT_Full; 4560 ADVERTISED_100baseT_Full;
4560 4561
4561 cmd->supported |= 4562 supported |=
4562 (SUPPORTED_10baseT_Half | 4563 (SUPPORTED_10baseT_Half |
4563 SUPPORTED_10baseT_Full | 4564 SUPPORTED_10baseT_Full |
4564 SUPPORTED_100baseT_Half | 4565 SUPPORTED_100baseT_Half |
@@ -4574,11 +4575,10 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4574 } 4575 }
4575 4576
4576 } else { 4577 } else {
4577 cmd->port = PORT_FIBRE; 4578 cmd->base.port = PORT_FIBRE;
4578 cmd->transceiver = XCVR_INTERNAL; 4579 cmd->base.phy_address = 0;
4579 cmd->phy_address = 0; 4580 supported |= SUPPORTED_FIBRE;
4580 cmd->supported |= SUPPORTED_FIBRE; 4581 advertising |= ADVERTISED_FIBRE;
4581 cmd->advertising |= ADVERTISED_FIBRE;
4582 4582
4583 if (cp->hw_running) { 4583 if (cp->hw_running) {
4584 /* pcs uses the same bits as mii */ 4584 /* pcs uses the same bits as mii */
@@ -4590,21 +4590,20 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4590 spin_unlock_irqrestore(&cp->lock, flags); 4590 spin_unlock_irqrestore(&cp->lock, flags);
4591 4591
4592 if (bmcr & BMCR_ANENABLE) { 4592 if (bmcr & BMCR_ANENABLE) {
4593 cmd->advertising |= ADVERTISED_Autoneg; 4593 advertising |= ADVERTISED_Autoneg;
4594 cmd->autoneg = AUTONEG_ENABLE; 4594 cmd->base.autoneg = AUTONEG_ENABLE;
4595 ethtool_cmd_speed_set(cmd, ((speed == 10) ? 4595 cmd->base.speed = ((speed == 10) ?
4596 SPEED_10 : 4596 SPEED_10 :
4597 ((speed == 1000) ? 4597 ((speed == 1000) ?
4598 SPEED_1000 : SPEED_100))); 4598 SPEED_1000 : SPEED_100));
4599 cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF; 4599 cmd->base.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4600 } else { 4600 } else {
4601 cmd->autoneg = AUTONEG_DISABLE; 4601 cmd->base.autoneg = AUTONEG_DISABLE;
4602 ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ? 4602 cmd->base.speed = ((bmcr & CAS_BMCR_SPEED1000) ?
4603 SPEED_1000 : 4603 SPEED_1000 :
4604 ((bmcr & BMCR_SPEED100) ? 4604 ((bmcr & BMCR_SPEED100) ?
4605 SPEED_100 : SPEED_10))); 4605 SPEED_100 : SPEED_10));
4606 cmd->duplex = 4606 cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
4607 (bmcr & BMCR_FULLDPLX) ?
4608 DUPLEX_FULL : DUPLEX_HALF; 4607 DUPLEX_FULL : DUPLEX_HALF;
4609 } 4608 }
4610 if (linkstate != link_up) { 4609 if (linkstate != link_up) {
@@ -4619,39 +4618,46 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4619 * settings that we configured. 4618 * settings that we configured.
4620 */ 4619 */
4621 if (cp->link_cntl & BMCR_ANENABLE) { 4620 if (cp->link_cntl & BMCR_ANENABLE) {
4622 ethtool_cmd_speed_set(cmd, 0); 4621 cmd->base.speed = 0;
4623 cmd->duplex = 0xff; 4622 cmd->base.duplex = 0xff;
4624 } else { 4623 } else {
4625 ethtool_cmd_speed_set(cmd, SPEED_10); 4624 cmd->base.speed = SPEED_10;
4626 if (cp->link_cntl & BMCR_SPEED100) { 4625 if (cp->link_cntl & BMCR_SPEED100) {
4627 ethtool_cmd_speed_set(cmd, SPEED_100); 4626 cmd->base.speed = SPEED_100;
4628 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) { 4627 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4629 ethtool_cmd_speed_set(cmd, SPEED_1000); 4628 cmd->base.speed = SPEED_1000;
4630 } 4629 }
4631 cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)? 4630 cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
4632 DUPLEX_FULL : DUPLEX_HALF; 4631 DUPLEX_FULL : DUPLEX_HALF;
4633 } 4632 }
4634 } 4633 }
4634
4635 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
4636 supported);
4637 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
4638 advertising);
4639
4635 return 0; 4640 return 0;
4636} 4641}
4637 4642
4638static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 4643static int cas_set_link_ksettings(struct net_device *dev,
4644 const struct ethtool_link_ksettings *cmd)
4639{ 4645{
4640 struct cas *cp = netdev_priv(dev); 4646 struct cas *cp = netdev_priv(dev);
4641 unsigned long flags; 4647 unsigned long flags;
4642 u32 speed = ethtool_cmd_speed(cmd); 4648 u32 speed = cmd->base.speed;
4643 4649
4644 /* Verify the settings we care about. */ 4650 /* Verify the settings we care about. */
4645 if (cmd->autoneg != AUTONEG_ENABLE && 4651 if (cmd->base.autoneg != AUTONEG_ENABLE &&
4646 cmd->autoneg != AUTONEG_DISABLE) 4652 cmd->base.autoneg != AUTONEG_DISABLE)
4647 return -EINVAL; 4653 return -EINVAL;
4648 4654
4649 if (cmd->autoneg == AUTONEG_DISABLE && 4655 if (cmd->base.autoneg == AUTONEG_DISABLE &&
4650 ((speed != SPEED_1000 && 4656 ((speed != SPEED_1000 &&
4651 speed != SPEED_100 && 4657 speed != SPEED_100 &&
4652 speed != SPEED_10) || 4658 speed != SPEED_10) ||
4653 (cmd->duplex != DUPLEX_HALF && 4659 (cmd->base.duplex != DUPLEX_HALF &&
4654 cmd->duplex != DUPLEX_FULL))) 4660 cmd->base.duplex != DUPLEX_FULL)))
4655 return -EINVAL; 4661 return -EINVAL;
4656 4662
4657 /* Apply settings and restart link process. */ 4663 /* Apply settings and restart link process. */
@@ -4753,8 +4759,6 @@ static void cas_get_ethtool_stats(struct net_device *dev,
4753 4759
4754static const struct ethtool_ops cas_ethtool_ops = { 4760static const struct ethtool_ops cas_ethtool_ops = {
4755 .get_drvinfo = cas_get_drvinfo, 4761 .get_drvinfo = cas_get_drvinfo,
4756 .get_settings = cas_get_settings,
4757 .set_settings = cas_set_settings,
4758 .nway_reset = cas_nway_reset, 4762 .nway_reset = cas_nway_reset,
4759 .get_link = cas_get_link, 4763 .get_link = cas_get_link,
4760 .get_msglevel = cas_get_msglevel, 4764 .get_msglevel = cas_get_msglevel,
@@ -4764,6 +4768,8 @@ static const struct ethtool_ops cas_ethtool_ops = {
4764 .get_sset_count = cas_get_sset_count, 4768 .get_sset_count = cas_get_sset_count,
4765 .get_strings = cas_get_strings, 4769 .get_strings = cas_get_strings,
4766 .get_ethtool_stats = cas_get_ethtool_stats, 4770 .get_ethtool_stats = cas_get_ethtool_stats,
4771 .get_link_ksettings = cas_get_link_ksettings,
4772 .set_link_ksettings = cas_set_link_ksettings,
4767}; 4773};
4768 4774
4769static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 4775static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
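
The cassini conversion above is the standard migration from get_settings/set_settings to get_link_ksettings/set_link_ksettings: speed, duplex and autoneg move into cmd->base, and any legacy u32 supported/advertising masks go through the ethtool_convert_*() helpers. A self-contained sketch of that shape, with a made-up foo_priv in place of real driver state:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct foo_priv {
	u32 supported;		/* legacy SUPPORTED_* bitmask kept by the driver */
	u32 advertising;	/* legacy ADVERTISED_* bitmask */
	u32 speed;
	u8 duplex;
	u8 autoneg;
};

static int foo_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct foo_priv *fp = netdev_priv(dev);

	/* Legacy u32 masks are widened into the new link-mode bitmaps */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						fp->supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						fp->advertising);
	cmd->base.speed = fp->speed;
	cmd->base.duplex = fp->duplex;
	cmd->base.autoneg = fp->autoneg;
	return 0;
}

static int foo_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct foo_priv *fp = netdev_priv(dev);

	/* ...and narrowed back where the driver still tracks u32 masks */
	ethtool_convert_link_mode_to_legacy_u32(&fp->advertising,
						cmd->link_modes.advertising);
	fp->speed = cmd->base.speed;
	fp->duplex = cmd->base.duplex;
	fp->autoneg = cmd->base.autoneg;
	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_link_ksettings = foo_get_link_ksettings,
	.set_link_ksettings = foo_set_link_ksettings,
};

The niu and sungem hunks further down apply the same pattern to their ethtool_ops.
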
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 89952deae47f..5a90fed06260 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -1,6 +1,6 @@
1/* ldmvsw.c: Sun4v LDOM Virtual Switch Driver. 1/* ldmvsw.c: Sun4v LDOM Virtual Switch Driver.
2 * 2 *
3 * Copyright (C) 2016 Oracle. All rights reserved. 3 * Copyright (C) 2016-2017 Oracle. All rights reserved.
4 */ 4 */
5 5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -41,8 +41,8 @@
41static u8 vsw_port_hwaddr[ETH_ALEN] = {0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; 41static u8 vsw_port_hwaddr[ETH_ALEN] = {0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
42 42
43#define DRV_MODULE_NAME "ldmvsw" 43#define DRV_MODULE_NAME "ldmvsw"
44#define DRV_MODULE_VERSION "1.1" 44#define DRV_MODULE_VERSION "1.2"
45#define DRV_MODULE_RELDATE "February 3, 2017" 45#define DRV_MODULE_RELDATE "March 4, 2017"
46 46
47static char version[] = 47static char version[] =
48 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")"; 48 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
@@ -123,6 +123,20 @@ static void vsw_set_rx_mode(struct net_device *dev)
123 return sunvnet_set_rx_mode_common(dev, port->vp); 123 return sunvnet_set_rx_mode_common(dev, port->vp);
124} 124}
125 125
126int ldmvsw_open(struct net_device *dev)
127{
128 struct vnet_port *port = netdev_priv(dev);
129 struct vio_driver_state *vio = &port->vio;
130
131 /* reset the channel */
132 vio_link_state_change(vio, LDC_EVENT_RESET);
133 vnet_port_reset(port);
134 vio_port_up(vio);
135
136 return 0;
137}
138EXPORT_SYMBOL_GPL(ldmvsw_open);
139
126#ifdef CONFIG_NET_POLL_CONTROLLER 140#ifdef CONFIG_NET_POLL_CONTROLLER
127static void vsw_poll_controller(struct net_device *dev) 141static void vsw_poll_controller(struct net_device *dev)
128{ 142{
@@ -133,7 +147,7 @@ static void vsw_poll_controller(struct net_device *dev)
133#endif 147#endif
134 148
135static const struct net_device_ops vsw_ops = { 149static const struct net_device_ops vsw_ops = {
136 .ndo_open = sunvnet_open_common, 150 .ndo_open = ldmvsw_open,
137 .ndo_stop = sunvnet_close_common, 151 .ndo_stop = sunvnet_close_common,
138 .ndo_set_rx_mode = vsw_set_rx_mode, 152 .ndo_set_rx_mode = vsw_set_rx_mode,
139 .ndo_set_mac_address = sunvnet_set_mac_addr_common, 153 .ndo_set_mac_address = sunvnet_set_mac_addr_common,
@@ -365,6 +379,11 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
365 napi_enable(&port->napi); 379 napi_enable(&port->napi);
366 vio_port_up(&port->vio); 380 vio_port_up(&port->vio);
367 381
382 /* assure no carrier until we receive an LDC_EVENT_UP,
383 * even if the vsw config script tries to force us up
384 */
385 netif_carrier_off(dev);
386
368 netdev_info(dev, "LDOM vsw-port %pM\n", dev->dev_addr); 387 netdev_info(dev, "LDOM vsw-port %pM\n", dev->dev_addr);
369 388
370 pr_info("%s: PORT ( remote-mac %pM%s )\n", dev->name, 389 pr_info("%s: PORT ( remote-mac %pM%s )\n", dev->name,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 57978056b336..2dcca249eb9c 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6813,7 +6813,8 @@ static void niu_get_drvinfo(struct net_device *dev,
6813 sizeof(info->bus_info)); 6813 sizeof(info->bus_info));
6814} 6814}
6815 6815
6816static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 6816static int niu_get_link_ksettings(struct net_device *dev,
6817 struct ethtool_link_ksettings *cmd)
6817{ 6818{
6818 struct niu *np = netdev_priv(dev); 6819 struct niu *np = netdev_priv(dev);
6819 struct niu_link_config *lp; 6820 struct niu_link_config *lp;
@@ -6821,28 +6822,30 @@ static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6821 lp = &np->link_config; 6822 lp = &np->link_config;
6822 6823
6823 memset(cmd, 0, sizeof(*cmd)); 6824 memset(cmd, 0, sizeof(*cmd));
6824 cmd->phy_address = np->phy_addr; 6825 cmd->base.phy_address = np->phy_addr;
6825 cmd->supported = lp->supported; 6826 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
6826 cmd->advertising = lp->active_advertising; 6827 lp->supported);
6827 cmd->autoneg = lp->active_autoneg; 6828 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
6828 ethtool_cmd_speed_set(cmd, lp->active_speed); 6829 lp->active_advertising);
6829 cmd->duplex = lp->active_duplex; 6830 cmd->base.autoneg = lp->active_autoneg;
6830 cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP; 6831 cmd->base.speed = lp->active_speed;
6831 cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ? 6832 cmd->base.duplex = lp->active_duplex;
6832 XCVR_EXTERNAL : XCVR_INTERNAL; 6833 cmd->base.port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
6833 6834
6834 return 0; 6835 return 0;
6835} 6836}
6836 6837
6837static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 6838static int niu_set_link_ksettings(struct net_device *dev,
6839 const struct ethtool_link_ksettings *cmd)
6838{ 6840{
6839 struct niu *np = netdev_priv(dev); 6841 struct niu *np = netdev_priv(dev);
6840 struct niu_link_config *lp = &np->link_config; 6842 struct niu_link_config *lp = &np->link_config;
6841 6843
6842 lp->advertising = cmd->advertising; 6844 ethtool_convert_link_mode_to_legacy_u32(&lp->advertising,
6843 lp->speed = ethtool_cmd_speed(cmd); 6845 cmd->link_modes.advertising);
6844 lp->duplex = cmd->duplex; 6846 lp->speed = cmd->base.speed;
6845 lp->autoneg = cmd->autoneg; 6847 lp->duplex = cmd->base.duplex;
6848 lp->autoneg = cmd->base.autoneg;
6846 return niu_init_link(np); 6849 return niu_init_link(np);
6847} 6850}
6848 6851
@@ -7902,14 +7905,14 @@ static const struct ethtool_ops niu_ethtool_ops = {
7902 .nway_reset = niu_nway_reset, 7905 .nway_reset = niu_nway_reset,
7903 .get_eeprom_len = niu_get_eeprom_len, 7906 .get_eeprom_len = niu_get_eeprom_len,
7904 .get_eeprom = niu_get_eeprom, 7907 .get_eeprom = niu_get_eeprom,
7905 .get_settings = niu_get_settings,
7906 .set_settings = niu_set_settings,
7907 .get_strings = niu_get_strings, 7908 .get_strings = niu_get_strings,
7908 .get_sset_count = niu_get_sset_count, 7909 .get_sset_count = niu_get_sset_count,
7909 .get_ethtool_stats = niu_get_ethtool_stats, 7910 .get_ethtool_stats = niu_get_ethtool_stats,
7910 .set_phys_id = niu_set_phys_id, 7911 .set_phys_id = niu_set_phys_id,
7911 .get_rxnfc = niu_get_nfc, 7912 .get_rxnfc = niu_get_nfc,
7912 .set_rxnfc = niu_set_nfc, 7913 .set_rxnfc = niu_set_nfc,
7914 .get_link_ksettings = niu_get_link_ksettings,
7915 .set_link_ksettings = niu_set_link_ksettings,
7913}; 7916};
7914 7917
7915static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, 7918static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
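The niu hunk above, and the sungem and sunhme hunks that follow, all perform the same ethtool migration: the legacy get_settings/set_settings callbacks, which describe link modes as u32 SUPPORTED_*/ADVERTISED_* masks in struct ethtool_cmd, are replaced by get_link_ksettings/set_link_ksettings operating on struct ethtool_link_ksettings. Drivers that still track capabilities as u32 masks bridge the two representations with ethtool_convert_legacy_u32_to_link_mode() and ethtool_convert_link_mode_to_legacy_u32(). A minimal sketch of the "get" side of the pattern follows; struct xyz_priv and its fields are illustrative and not taken from any of these drivers, while the ethtool helpers are the in-kernel APIs used in the hunks themselves.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical driver state, for illustration only. */
struct xyz_priv {
	u32 supported;		/* legacy SUPPORTED_* mask */
	u32 advertising;	/* legacy ADVERTISED_* mask */
	u32 speed;
	u8 duplex;
	u8 autoneg;
};

static int xyz_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct xyz_priv *p = netdev_priv(dev);

	cmd->base.speed = p->speed;
	cmd->base.duplex = p->duplex;
	cmd->base.autoneg = p->autoneg;
	cmd->base.port = PORT_TP;

	/* translate the legacy u32 masks into the new link-mode bitmaps */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						p->supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						p->advertising);
	return 0;
}

The set path is the mirror image: ethtool_convert_link_mode_to_legacy_u32() collapses cmd->link_modes.advertising back into a u32, which is exactly what niu_set_link_ksettings() and gem_set_link_ksettings() do before handing the values to the drivers' existing u32-based link configuration code.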
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 5c5952e782cd..fa607d062cb3 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -1250,12 +1250,18 @@ static void gem_stop_dma(struct gem *gp)
1250 1250
1251 1251
1252// XXX dbl check what that function should do when called on PCS PHY 1252// XXX dbl check what that function should do when called on PCS PHY
1253static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep) 1253static void gem_begin_auto_negotiation(struct gem *gp,
1254 const struct ethtool_link_ksettings *ep)
1254{ 1255{
1255 u32 advertise, features; 1256 u32 advertise, features;
1256 int autoneg; 1257 int autoneg;
1257 int speed; 1258 int speed;
1258 int duplex; 1259 int duplex;
1260 u32 advertising;
1261
1262 if (ep)
1263 ethtool_convert_link_mode_to_legacy_u32(
1264 &advertising, ep->link_modes.advertising);
1259 1265
1260 if (gp->phy_type != phy_mii_mdio0 && 1266 if (gp->phy_type != phy_mii_mdio0 &&
1261 gp->phy_type != phy_mii_mdio1) 1267 gp->phy_type != phy_mii_mdio1)
@@ -1278,13 +1284,13 @@ static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
1278 /* Setup link parameters */ 1284 /* Setup link parameters */
1279 if (!ep) 1285 if (!ep)
1280 goto start_aneg; 1286 goto start_aneg;
1281 if (ep->autoneg == AUTONEG_ENABLE) { 1287 if (ep->base.autoneg == AUTONEG_ENABLE) {
1282 advertise = ep->advertising; 1288 advertise = advertising;
1283 autoneg = 1; 1289 autoneg = 1;
1284 } else { 1290 } else {
1285 autoneg = 0; 1291 autoneg = 0;
1286 speed = ethtool_cmd_speed(ep); 1292 speed = ep->base.speed;
1287 duplex = ep->duplex; 1293 duplex = ep->base.duplex;
1288 } 1294 }
1289 1295
1290start_aneg: 1296start_aneg:
@@ -2515,85 +2521,96 @@ static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
2515 strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info)); 2521 strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
2516} 2522}
2517 2523
2518static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2524static int gem_get_link_ksettings(struct net_device *dev,
2525 struct ethtool_link_ksettings *cmd)
2519{ 2526{
2520 struct gem *gp = netdev_priv(dev); 2527 struct gem *gp = netdev_priv(dev);
2528 u32 supported, advertising;
2521 2529
2522 if (gp->phy_type == phy_mii_mdio0 || 2530 if (gp->phy_type == phy_mii_mdio0 ||
2523 gp->phy_type == phy_mii_mdio1) { 2531 gp->phy_type == phy_mii_mdio1) {
2524 if (gp->phy_mii.def) 2532 if (gp->phy_mii.def)
2525 cmd->supported = gp->phy_mii.def->features; 2533 supported = gp->phy_mii.def->features;
2526 else 2534 else
2527 cmd->supported = (SUPPORTED_10baseT_Half | 2535 supported = (SUPPORTED_10baseT_Half |
2528 SUPPORTED_10baseT_Full); 2536 SUPPORTED_10baseT_Full);
2529 2537
2530 /* XXX hardcoded stuff for now */ 2538 /* XXX hardcoded stuff for now */
2531 cmd->port = PORT_MII; 2539 cmd->base.port = PORT_MII;
2532 cmd->transceiver = XCVR_EXTERNAL; 2540 cmd->base.phy_address = 0; /* XXX fixed PHYAD */
2533 cmd->phy_address = 0; /* XXX fixed PHYAD */
2534 2541
2535 /* Return current PHY settings */ 2542 /* Return current PHY settings */
2536 cmd->autoneg = gp->want_autoneg; 2543 cmd->base.autoneg = gp->want_autoneg;
2537 ethtool_cmd_speed_set(cmd, gp->phy_mii.speed); 2544 cmd->base.speed = gp->phy_mii.speed;
2538 cmd->duplex = gp->phy_mii.duplex; 2545 cmd->base.duplex = gp->phy_mii.duplex;
2539 cmd->advertising = gp->phy_mii.advertising; 2546 advertising = gp->phy_mii.advertising;
2540 2547
2541 /* If we started with a forced mode, we don't have a default 2548 /* If we started with a forced mode, we don't have a default
2542 * advertise set, we need to return something sensible so 2549 * advertise set, we need to return something sensible so
2543 * userland can re-enable autoneg properly. 2550 * userland can re-enable autoneg properly.
2544 */ 2551 */
2545 if (cmd->advertising == 0) 2552 if (advertising == 0)
2546 cmd->advertising = cmd->supported; 2553 advertising = supported;
2547 } else { // XXX PCS ? 2554 } else { // XXX PCS ?
2548 cmd->supported = 2555 supported =
2549 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 2556 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2550 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 2557 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2551 SUPPORTED_Autoneg); 2558 SUPPORTED_Autoneg);
2552 cmd->advertising = cmd->supported; 2559 advertising = supported;
2553 ethtool_cmd_speed_set(cmd, 0); 2560 cmd->base.speed = 0;
2554 cmd->duplex = cmd->port = cmd->phy_address = 2561 cmd->base.duplex = 0;
2555 cmd->transceiver = cmd->autoneg = 0; 2562 cmd->base.port = 0;
2563 cmd->base.phy_address = 0;
2564 cmd->base.autoneg = 0;
2556 2565
2557 /* serdes means usually a Fibre connector, with most fixed */ 2566 /* serdes means usually a Fibre connector, with most fixed */
2558 if (gp->phy_type == phy_serdes) { 2567 if (gp->phy_type == phy_serdes) {
2559 cmd->port = PORT_FIBRE; 2568 cmd->base.port = PORT_FIBRE;
2560 cmd->supported = (SUPPORTED_1000baseT_Half | 2569 supported = (SUPPORTED_1000baseT_Half |
2561 SUPPORTED_1000baseT_Full | 2570 SUPPORTED_1000baseT_Full |
2562 SUPPORTED_FIBRE | SUPPORTED_Autoneg | 2571 SUPPORTED_FIBRE | SUPPORTED_Autoneg |
2563 SUPPORTED_Pause | SUPPORTED_Asym_Pause); 2572 SUPPORTED_Pause | SUPPORTED_Asym_Pause);
2564 cmd->advertising = cmd->supported; 2573 advertising = supported;
2565 cmd->transceiver = XCVR_INTERNAL;
2566 if (gp->lstate == link_up) 2574 if (gp->lstate == link_up)
2567 ethtool_cmd_speed_set(cmd, SPEED_1000); 2575 cmd->base.speed = SPEED_1000;
2568 cmd->duplex = DUPLEX_FULL; 2576 cmd->base.duplex = DUPLEX_FULL;
2569 cmd->autoneg = 1; 2577 cmd->base.autoneg = 1;
2570 } 2578 }
2571 } 2579 }
2572 cmd->maxtxpkt = cmd->maxrxpkt = 0; 2580
2581 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2582 supported);
2583 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
2584 advertising);
2573 2585
2574 return 0; 2586 return 0;
2575} 2587}
2576 2588
2577static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2589static int gem_set_link_ksettings(struct net_device *dev,
2590 const struct ethtool_link_ksettings *cmd)
2578{ 2591{
2579 struct gem *gp = netdev_priv(dev); 2592 struct gem *gp = netdev_priv(dev);
2580 u32 speed = ethtool_cmd_speed(cmd); 2593 u32 speed = cmd->base.speed;
2594 u32 advertising;
2595
2596 ethtool_convert_link_mode_to_legacy_u32(&advertising,
2597 cmd->link_modes.advertising);
2581 2598
2582 /* Verify the settings we care about. */ 2599 /* Verify the settings we care about. */
2583 if (cmd->autoneg != AUTONEG_ENABLE && 2600 if (cmd->base.autoneg != AUTONEG_ENABLE &&
2584 cmd->autoneg != AUTONEG_DISABLE) 2601 cmd->base.autoneg != AUTONEG_DISABLE)
2585 return -EINVAL; 2602 return -EINVAL;
2586 2603
2587 if (cmd->autoneg == AUTONEG_ENABLE && 2604 if (cmd->base.autoneg == AUTONEG_ENABLE &&
2588 cmd->advertising == 0) 2605 advertising == 0)
2589 return -EINVAL; 2606 return -EINVAL;
2590 2607
2591 if (cmd->autoneg == AUTONEG_DISABLE && 2608 if (cmd->base.autoneg == AUTONEG_DISABLE &&
2592 ((speed != SPEED_1000 && 2609 ((speed != SPEED_1000 &&
2593 speed != SPEED_100 && 2610 speed != SPEED_100 &&
2594 speed != SPEED_10) || 2611 speed != SPEED_10) ||
2595 (cmd->duplex != DUPLEX_HALF && 2612 (cmd->base.duplex != DUPLEX_HALF &&
2596 cmd->duplex != DUPLEX_FULL))) 2613 cmd->base.duplex != DUPLEX_FULL)))
2597 return -EINVAL; 2614 return -EINVAL;
2598 2615
2599 /* Apply settings and restart link process. */ 2616 /* Apply settings and restart link process. */
@@ -2666,13 +2683,13 @@ static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2666static const struct ethtool_ops gem_ethtool_ops = { 2683static const struct ethtool_ops gem_ethtool_ops = {
2667 .get_drvinfo = gem_get_drvinfo, 2684 .get_drvinfo = gem_get_drvinfo,
2668 .get_link = ethtool_op_get_link, 2685 .get_link = ethtool_op_get_link,
2669 .get_settings = gem_get_settings,
2670 .set_settings = gem_set_settings,
2671 .nway_reset = gem_nway_reset, 2686 .nway_reset = gem_nway_reset,
2672 .get_msglevel = gem_get_msglevel, 2687 .get_msglevel = gem_get_msglevel,
2673 .set_msglevel = gem_set_msglevel, 2688 .set_msglevel = gem_set_msglevel,
2674 .get_wol = gem_get_wol, 2689 .get_wol = gem_get_wol,
2675 .set_wol = gem_set_wol, 2690 .set_wol = gem_set_wol,
2691 .get_link_ksettings = gem_get_link_ksettings,
2692 .set_link_ksettings = gem_set_link_ksettings,
2676}; 2693};
2677 2694
2678static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2695static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 72ff05cd3ed8..53ff66ef53ac 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1294,9 +1294,10 @@ static void happy_meal_init_rings(struct happy_meal *hp)
1294} 1294}
1295 1295
1296/* hp->happy_lock must be held */ 1296/* hp->happy_lock must be held */
1297static void happy_meal_begin_auto_negotiation(struct happy_meal *hp, 1297static void
1298 void __iomem *tregs, 1298happy_meal_begin_auto_negotiation(struct happy_meal *hp,
1299 struct ethtool_cmd *ep) 1299 void __iomem *tregs,
1300 const struct ethtool_link_ksettings *ep)
1300{ 1301{
1301 int timeout; 1302 int timeout;
1302 1303
@@ -1309,7 +1310,7 @@ static void happy_meal_begin_auto_negotiation(struct happy_meal *hp,
1309 /* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */ 1310 /* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */
1310 1311
1311 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE); 1312 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1312 if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) { 1313 if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
1313 /* Advertise everything we can support. */ 1314 /* Advertise everything we can support. */
1314 if (hp->sw_bmsr & BMSR_10HALF) 1315 if (hp->sw_bmsr & BMSR_10HALF)
1315 hp->sw_advertise |= (ADVERTISE_10HALF); 1316 hp->sw_advertise |= (ADVERTISE_10HALF);
@@ -1384,14 +1385,14 @@ force_link:
1384 /* Disable auto-negotiation in BMCR, enable the duplex and 1385 /* Disable auto-negotiation in BMCR, enable the duplex and
1385 * speed setting, init the timer state machine, and fire it off. 1386 * speed setting, init the timer state machine, and fire it off.
1386 */ 1387 */
1387 if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) { 1388 if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
1388 hp->sw_bmcr = BMCR_SPEED100; 1389 hp->sw_bmcr = BMCR_SPEED100;
1389 } else { 1390 } else {
1390 if (ethtool_cmd_speed(ep) == SPEED_100) 1391 if (ep->base.speed == SPEED_100)
1391 hp->sw_bmcr = BMCR_SPEED100; 1392 hp->sw_bmcr = BMCR_SPEED100;
1392 else 1393 else
1393 hp->sw_bmcr = 0; 1394 hp->sw_bmcr = 0;
1394 if (ep->duplex == DUPLEX_FULL) 1395 if (ep->base.duplex == DUPLEX_FULL)
1395 hp->sw_bmcr |= BMCR_FULLDPLX; 1396 hp->sw_bmcr |= BMCR_FULLDPLX;
1396 } 1397 }
1397 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); 1398 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
@@ -2434,20 +2435,21 @@ static void happy_meal_set_multicast(struct net_device *dev)
2434} 2435}
2435 2436
2436/* Ethtool support... */ 2437/* Ethtool support... */
2437static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2438static int hme_get_link_ksettings(struct net_device *dev,
2439 struct ethtool_link_ksettings *cmd)
2438{ 2440{
2439 struct happy_meal *hp = netdev_priv(dev); 2441 struct happy_meal *hp = netdev_priv(dev);
2440 u32 speed; 2442 u32 speed;
2443 u32 supported;
2441 2444
2442 cmd->supported = 2445 supported =
2443 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 2446 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2444 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 2447 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2445 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); 2448 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
2446 2449
2447 /* XXX hardcoded stuff for now */ 2450 /* XXX hardcoded stuff for now */
2448 cmd->port = PORT_TP; /* XXX no MII support */ 2451 cmd->base.port = PORT_TP; /* XXX no MII support */
2449 cmd->transceiver = XCVR_INTERNAL; /* XXX no external xcvr support */ 2452 cmd->base.phy_address = 0; /* XXX fixed PHYAD */
2450 cmd->phy_address = 0; /* XXX fixed PHYAD */
2451 2453
2452 /* Record PHY settings. */ 2454 /* Record PHY settings. */
2453 spin_lock_irq(&hp->happy_lock); 2455 spin_lock_irq(&hp->happy_lock);
@@ -2456,41 +2458,45 @@ static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2456 spin_unlock_irq(&hp->happy_lock); 2458 spin_unlock_irq(&hp->happy_lock);
2457 2459
2458 if (hp->sw_bmcr & BMCR_ANENABLE) { 2460 if (hp->sw_bmcr & BMCR_ANENABLE) {
2459 cmd->autoneg = AUTONEG_ENABLE; 2461 cmd->base.autoneg = AUTONEG_ENABLE;
2460 speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ? 2462 speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
2461 SPEED_100 : SPEED_10); 2463 SPEED_100 : SPEED_10);
2462 if (speed == SPEED_100) 2464 if (speed == SPEED_100)
2463 cmd->duplex = 2465 cmd->base.duplex =
2464 (hp->sw_lpa & (LPA_100FULL)) ? 2466 (hp->sw_lpa & (LPA_100FULL)) ?
2465 DUPLEX_FULL : DUPLEX_HALF; 2467 DUPLEX_FULL : DUPLEX_HALF;
2466 else 2468 else
2467 cmd->duplex = 2469 cmd->base.duplex =
2468 (hp->sw_lpa & (LPA_10FULL)) ? 2470 (hp->sw_lpa & (LPA_10FULL)) ?
2469 DUPLEX_FULL : DUPLEX_HALF; 2471 DUPLEX_FULL : DUPLEX_HALF;
2470 } else { 2472 } else {
2471 cmd->autoneg = AUTONEG_DISABLE; 2473 cmd->base.autoneg = AUTONEG_DISABLE;
2472 speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; 2474 speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
2473 cmd->duplex = 2475 cmd->base.duplex =
2474 (hp->sw_bmcr & BMCR_FULLDPLX) ? 2476 (hp->sw_bmcr & BMCR_FULLDPLX) ?
2475 DUPLEX_FULL : DUPLEX_HALF; 2477 DUPLEX_FULL : DUPLEX_HALF;
2476 } 2478 }
2477 ethtool_cmd_speed_set(cmd, speed); 2479 cmd->base.speed = speed;
2480 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2481 supported);
2482
2478 return 0; 2483 return 0;
2479} 2484}
2480 2485
2481static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2486static int hme_set_link_ksettings(struct net_device *dev,
2487 const struct ethtool_link_ksettings *cmd)
2482{ 2488{
2483 struct happy_meal *hp = netdev_priv(dev); 2489 struct happy_meal *hp = netdev_priv(dev);
2484 2490
2485 /* Verify the settings we care about. */ 2491 /* Verify the settings we care about. */
2486 if (cmd->autoneg != AUTONEG_ENABLE && 2492 if (cmd->base.autoneg != AUTONEG_ENABLE &&
2487 cmd->autoneg != AUTONEG_DISABLE) 2493 cmd->base.autoneg != AUTONEG_DISABLE)
2488 return -EINVAL; 2494 return -EINVAL;
2489 if (cmd->autoneg == AUTONEG_DISABLE && 2495 if (cmd->base.autoneg == AUTONEG_DISABLE &&
2490 ((ethtool_cmd_speed(cmd) != SPEED_100 && 2496 ((cmd->base.speed != SPEED_100 &&
2491 ethtool_cmd_speed(cmd) != SPEED_10) || 2497 cmd->base.speed != SPEED_10) ||
2492 (cmd->duplex != DUPLEX_HALF && 2498 (cmd->base.duplex != DUPLEX_HALF &&
2493 cmd->duplex != DUPLEX_FULL))) 2499 cmd->base.duplex != DUPLEX_FULL)))
2494 return -EINVAL; 2500 return -EINVAL;
2495 2501
2496 /* Ok, do it to it. */ 2502 /* Ok, do it to it. */
@@ -2537,10 +2543,10 @@ static u32 hme_get_link(struct net_device *dev)
2537} 2543}
2538 2544
2539static const struct ethtool_ops hme_ethtool_ops = { 2545static const struct ethtool_ops hme_ethtool_ops = {
2540 .get_settings = hme_get_settings,
2541 .set_settings = hme_set_settings,
2542 .get_drvinfo = hme_get_drvinfo, 2546 .get_drvinfo = hme_get_drvinfo,
2543 .get_link = hme_get_link, 2547 .get_link = hme_get_link,
2548 .get_link_ksettings = hme_get_link_ksettings,
2549 .set_link_ksettings = hme_set_link_ksettings,
2544}; 2550};
2545 2551
2546static int hme_version_printed; 2552static int hme_version_printed;
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 4cc2571f71c6..0b95105f7060 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1,7 +1,7 @@
1/* sunvnet.c: Sun LDOM Virtual Network Driver. 1/* sunvnet.c: Sun LDOM Virtual Network Driver.
2 * 2 *
3 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> 3 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
4 * Copyright (C) 2016 Oracle. All rights reserved. 4 * Copyright (C) 2016-2017 Oracle. All rights reserved.
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -77,11 +77,125 @@ static void vnet_set_msglevel(struct net_device *dev, u32 value)
77 vp->msg_enable = value; 77 vp->msg_enable = value;
78} 78}
79 79
80static const struct {
81 const char string[ETH_GSTRING_LEN];
82} ethtool_stats_keys[] = {
83 { "rx_packets" },
84 { "tx_packets" },
85 { "rx_bytes" },
86 { "tx_bytes" },
87 { "rx_errors" },
88 { "tx_errors" },
89 { "rx_dropped" },
90 { "tx_dropped" },
91 { "multicast" },
92 { "rx_length_errors" },
93 { "rx_frame_errors" },
94 { "rx_missed_errors" },
95 { "tx_carrier_errors" },
96 { "nports" },
97};
98
99static int vnet_get_sset_count(struct net_device *dev, int sset)
100{
101 struct vnet *vp = (struct vnet *)netdev_priv(dev);
102
103 switch (sset) {
104 case ETH_SS_STATS:
105 return ARRAY_SIZE(ethtool_stats_keys)
106 + (NUM_VNET_PORT_STATS * vp->nports);
107 default:
108 return -EOPNOTSUPP;
109 }
110}
111
112static void vnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
113{
114 struct vnet *vp = (struct vnet *)netdev_priv(dev);
115 struct vnet_port *port;
116 char *p = (char *)buf;
117
118 switch (stringset) {
119 case ETH_SS_STATS:
120 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
121 p += sizeof(ethtool_stats_keys);
122
123 rcu_read_lock();
124 list_for_each_entry_rcu(port, &vp->port_list, list) {
125 snprintf(p, ETH_GSTRING_LEN, "p%u.%s-%pM",
126 port->q_index, port->switch_port ? "s" : "q",
127 port->raddr);
128 p += ETH_GSTRING_LEN;
129 snprintf(p, ETH_GSTRING_LEN, "p%u.rx_packets",
130 port->q_index);
131 p += ETH_GSTRING_LEN;
132 snprintf(p, ETH_GSTRING_LEN, "p%u.tx_packets",
133 port->q_index);
134 p += ETH_GSTRING_LEN;
135 snprintf(p, ETH_GSTRING_LEN, "p%u.rx_bytes",
136 port->q_index);
137 p += ETH_GSTRING_LEN;
138 snprintf(p, ETH_GSTRING_LEN, "p%u.tx_bytes",
139 port->q_index);
140 p += ETH_GSTRING_LEN;
141 snprintf(p, ETH_GSTRING_LEN, "p%u.event_up",
142 port->q_index);
143 p += ETH_GSTRING_LEN;
144 snprintf(p, ETH_GSTRING_LEN, "p%u.event_reset",
145 port->q_index);
146 p += ETH_GSTRING_LEN;
147 }
148 rcu_read_unlock();
149 break;
150 default:
151 WARN_ON(1);
152 break;
153 }
154}
155
156static void vnet_get_ethtool_stats(struct net_device *dev,
157 struct ethtool_stats *estats, u64 *data)
158{
159 struct vnet *vp = (struct vnet *)netdev_priv(dev);
160 struct vnet_port *port;
161 int i = 0;
162
163 data[i++] = dev->stats.rx_packets;
164 data[i++] = dev->stats.tx_packets;
165 data[i++] = dev->stats.rx_bytes;
166 data[i++] = dev->stats.tx_bytes;
167 data[i++] = dev->stats.rx_errors;
168 data[i++] = dev->stats.tx_errors;
169 data[i++] = dev->stats.rx_dropped;
170 data[i++] = dev->stats.tx_dropped;
171 data[i++] = dev->stats.multicast;
172 data[i++] = dev->stats.rx_length_errors;
173 data[i++] = dev->stats.rx_frame_errors;
174 data[i++] = dev->stats.rx_missed_errors;
175 data[i++] = dev->stats.tx_carrier_errors;
176 data[i++] = vp->nports;
177
178 rcu_read_lock();
179 list_for_each_entry_rcu(port, &vp->port_list, list) {
180 data[i++] = port->q_index;
181 data[i++] = port->stats.rx_packets;
182 data[i++] = port->stats.tx_packets;
183 data[i++] = port->stats.rx_bytes;
184 data[i++] = port->stats.tx_bytes;
185 data[i++] = port->stats.event_up;
186 data[i++] = port->stats.event_reset;
187 }
188 rcu_read_unlock();
189}
190
80static const struct ethtool_ops vnet_ethtool_ops = { 191static const struct ethtool_ops vnet_ethtool_ops = {
81 .get_drvinfo = vnet_get_drvinfo, 192 .get_drvinfo = vnet_get_drvinfo,
82 .get_msglevel = vnet_get_msglevel, 193 .get_msglevel = vnet_get_msglevel,
83 .set_msglevel = vnet_set_msglevel, 194 .set_msglevel = vnet_set_msglevel,
84 .get_link = ethtool_op_get_link, 195 .get_link = ethtool_op_get_link,
196 .get_sset_count = vnet_get_sset_count,
197 .get_strings = vnet_get_strings,
198 .get_ethtool_stats = vnet_get_ethtool_stats,
85}; 199};
86 200
87static LIST_HEAD(vnet_list); 201static LIST_HEAD(vnet_list);
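The new vnet ethtool statistics follow the usual three-way contract: get_sset_count() must return exactly the number of ETH_GSTRING_LEN labels that get_strings() writes, and get_ethtool_stats() must fill one u64 per label, in the same order. Here the total is ARRAY_SIZE(ethtool_stats_keys) device-level counters plus NUM_VNET_PORT_STATS entries for every port on port_list, which get_strings() and get_ethtool_stats() walk under rcu_read_lock(). A stripped-down sketch of that contract is below; everything prefixed demo_ is hypothetical and only the device-level half is shown.

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static const char demo_keys[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
};

static int demo_get_sset_count(struct net_device *dev, int sset)
{
	/* must equal the number of labels demo_get_strings() writes */
	return (sset == ETH_SS_STATS) ? ARRAY_SIZE(demo_keys) : -EOPNOTSUPP;
}

static void demo_get_strings(struct net_device *dev, u32 sset, u8 *buf)
{
	if (sset == ETH_SS_STATS)
		memcpy(buf, demo_keys, sizeof(demo_keys));
}

static void demo_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	/* one u64 per label, in the same order as demo_keys[] */
	data[0] = dev->stats.rx_packets;
	data[1] = dev->stats.tx_packets;
}

In sunvnet the per-port block is sized by NUM_VNET_PORT_STATS (defined in the sunvnet_common.h hunk further down), so the seven labels emitted per port in vnet_get_strings() have to stay in lock-step with the seven values written per port in vnet_get_ethtool_stats().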
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index fa2d11ca9b81..9e86833249d4 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1,7 +1,7 @@
1/* sunvnet.c: Sun LDOM Virtual Network Driver. 1/* sunvnet.c: Sun LDOM Virtual Network Driver.
2 * 2 *
3 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> 3 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
4 * Copyright (C) 2016 Oracle. All rights reserved. 4 * Copyright (C) 2016-2017 Oracle. All rights reserved.
5 */ 5 */
6 6
7#include <linux/module.h> 7#include <linux/module.h>
@@ -43,7 +43,6 @@ MODULE_LICENSE("GPL");
43MODULE_VERSION("1.1"); 43MODULE_VERSION("1.1");
44 44
45static int __vnet_tx_trigger(struct vnet_port *port, u32 start); 45static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
46static void vnet_port_reset(struct vnet_port *port);
47 46
48static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr) 47static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
49{ 48{
@@ -410,8 +409,12 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
410 409
411 skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL; 410 skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;
412 411
412 if (unlikely(is_multicast_ether_addr(eth_hdr(skb)->h_dest)))
413 dev->stats.multicast++;
413 dev->stats.rx_packets++; 414 dev->stats.rx_packets++;
414 dev->stats.rx_bytes += len; 415 dev->stats.rx_bytes += len;
416 port->stats.rx_packets++;
417 port->stats.rx_bytes += len;
415 napi_gro_receive(&port->napi, skb); 418 napi_gro_receive(&port->napi, skb);
416 return 0; 419 return 0;
417 420
@@ -747,6 +750,13 @@ static int vnet_event_napi(struct vnet_port *port, int budget)
747 750
748 /* RESET takes precedent over any other event */ 751 /* RESET takes precedent over any other event */
749 if (port->rx_event & LDC_EVENT_RESET) { 752 if (port->rx_event & LDC_EVENT_RESET) {
753 /* a link went down */
754
755 if (port->vsw == 1) {
756 netif_tx_stop_all_queues(dev);
757 netif_carrier_off(dev);
758 }
759
750 vio_link_state_change(vio, LDC_EVENT_RESET); 760 vio_link_state_change(vio, LDC_EVENT_RESET);
751 vnet_port_reset(port); 761 vnet_port_reset(port);
752 vio_port_up(vio); 762 vio_port_up(vio);
@@ -762,12 +772,21 @@ static int vnet_event_napi(struct vnet_port *port, int budget)
762 maybe_tx_wakeup(port); 772 maybe_tx_wakeup(port);
763 773
764 port->rx_event = 0; 774 port->rx_event = 0;
775 port->stats.event_reset++;
765 return 0; 776 return 0;
766 } 777 }
767 778
768 if (port->rx_event & LDC_EVENT_UP) { 779 if (port->rx_event & LDC_EVENT_UP) {
780 /* a link came up */
781
782 if (port->vsw == 1) {
783 netif_carrier_on(port->dev);
784 netif_tx_start_all_queues(port->dev);
785 }
786
769 vio_link_state_change(vio, LDC_EVENT_UP); 787 vio_link_state_change(vio, LDC_EVENT_UP);
770 port->rx_event = 0; 788 port->rx_event = 0;
789 port->stats.event_up++;
771 return 0; 790 return 0;
772 } 791 }
773 792
@@ -1417,6 +1436,8 @@ ldc_start_done:
1417 1436
1418 dev->stats.tx_packets++; 1437 dev->stats.tx_packets++;
1419 dev->stats.tx_bytes += port->tx_bufs[txi].skb->len; 1438 dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;
1439 port->stats.tx_packets++;
1440 port->stats.tx_bytes += port->tx_bufs[txi].skb->len;
1420 1441
1421 dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); 1442 dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
1422 if (unlikely(vnet_tx_dring_avail(dr) < 1)) { 1443 if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
@@ -1631,7 +1652,7 @@ void sunvnet_port_free_tx_bufs_common(struct vnet_port *port)
1631} 1652}
1632EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common); 1653EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common);
1633 1654
1634static void vnet_port_reset(struct vnet_port *port) 1655void vnet_port_reset(struct vnet_port *port)
1635{ 1656{
1636 del_timer(&port->clean_timer); 1657 del_timer(&port->clean_timer);
1637 sunvnet_port_free_tx_bufs_common(port); 1658 sunvnet_port_free_tx_bufs_common(port);
@@ -1639,6 +1660,7 @@ static void vnet_port_reset(struct vnet_port *port)
1639 port->tso = (port->vsw == 0); /* no tso in vsw, misbehaves in bridge */ 1660 port->tso = (port->vsw == 0); /* no tso in vsw, misbehaves in bridge */
1640 port->tsolen = 0; 1661 port->tsolen = 0;
1641} 1662}
1663EXPORT_SYMBOL_GPL(vnet_port_reset);
1642 1664
1643static int vnet_port_alloc_tx_ring(struct vnet_port *port) 1665static int vnet_port_alloc_tx_ring(struct vnet_port *port)
1644{ 1666{
@@ -1708,20 +1730,32 @@ EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common);
1708void sunvnet_port_add_txq_common(struct vnet_port *port) 1730void sunvnet_port_add_txq_common(struct vnet_port *port)
1709{ 1731{
1710 struct vnet *vp = port->vp; 1732 struct vnet *vp = port->vp;
1711 int n; 1733 int smallest = 0;
1734 int i;
1735
1736 /* find the first least-used q
1737 * When there are more ldoms than q's, we start to
1738 * double up on ports per queue.
1739 */
1740 for (i = 0; i < VNET_MAX_TXQS; i++) {
1741 if (vp->q_used[i] == 0) {
1742 smallest = i;
1743 break;
1744 }
1745 if (vp->q_used[i] < vp->q_used[smallest])
1746 smallest = i;
1747 }
1712 1748
1713 n = vp->nports++; 1749 vp->nports++;
1714 n = n & (VNET_MAX_TXQS - 1); 1750 vp->q_used[smallest]++;
1715 port->q_index = n; 1751 port->q_index = smallest;
1716 netif_tx_wake_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
1717 port->q_index));
1718} 1752}
1719EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common); 1753EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common);
1720 1754
1721void sunvnet_port_rm_txq_common(struct vnet_port *port) 1755void sunvnet_port_rm_txq_common(struct vnet_port *port)
1722{ 1756{
1723 port->vp->nports--; 1757 port->vp->nports--;
1724 netif_tx_stop_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), 1758 port->vp->q_used[port->q_index]--;
1725 port->q_index)); 1759 port->q_index = 0;
1726} 1760}
1727EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common); 1761EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common);
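sunvnet_port_add_txq_common() no longer hands out TX queues round-robin (the old q_index = nports & (VNET_MAX_TXQS - 1)); it now searches the new vp->q_used[] array for the least-used queue, and sunvnet_port_rm_txq_common() gives the slot back, so ports added after others have been removed land on the emptiest queue instead of colliding with an already-used slot. The selection loop in isolation looks like the sketch below; the helper name is mine and not part of the patch.

#include <linux/types.h>

/* q_used[i] counts the ports currently mapped to TX queue i. */
static int pick_least_used_txq(const u8 *q_used, int nqueues)
{
	int smallest = 0;
	int i;

	for (i = 0; i < nqueues; i++) {
		if (q_used[i] == 0)
			return i;	/* first completely unused queue wins */
		if (q_used[i] < q_used[smallest])
			smallest = i;	/* otherwise remember the emptiest */
	}
	return smallest;
}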
diff --git a/drivers/net/ethernet/sun/sunvnet_common.h b/drivers/net/ethernet/sun/sunvnet_common.h
index ce5c824128a3..b20d6fa7ef25 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.h
+++ b/drivers/net/ethernet/sun/sunvnet_common.h
@@ -35,6 +35,19 @@ struct vnet_tx_entry {
35 35
36struct vnet; 36struct vnet;
37 37
38struct vnet_port_stats {
39 /* keep them all the same size */
40 u32 rx_bytes;
41 u32 tx_bytes;
42 u32 rx_packets;
43 u32 tx_packets;
44 u32 event_up;
45 u32 event_reset;
46 u32 q_placeholder;
47};
48
49#define NUM_VNET_PORT_STATS (sizeof(struct vnet_port_stats) / sizeof(u32))
50
38/* Structure to describe a vnet-port or vsw-port in the MD. 51/* Structure to describe a vnet-port or vsw-port in the MD.
39 * If the vsw bit is set, this structure represents a vswitch 52 * If the vsw bit is set, this structure represents a vswitch
40 * port, and the net_device can be found from ->dev. If the 53 * port, and the net_device can be found from ->dev. If the
@@ -44,6 +57,8 @@ struct vnet;
44struct vnet_port { 57struct vnet_port {
45 struct vio_driver_state vio; 58 struct vio_driver_state vio;
46 59
60 struct vnet_port_stats stats;
61
47 struct hlist_node hash; 62 struct hlist_node hash;
48 u8 raddr[ETH_ALEN]; 63 u8 raddr[ETH_ALEN];
49 unsigned switch_port:1; 64 unsigned switch_port:1;
@@ -97,22 +112,15 @@ struct vnet_mcast_entry {
97}; 112};
98 113
99struct vnet { 114struct vnet {
100 /* Protects port_list and port_hash. */ 115 spinlock_t lock; /* Protects port_list and port_hash. */
101 spinlock_t lock;
102
103 struct net_device *dev; 116 struct net_device *dev;
104
105 u32 msg_enable; 117 u32 msg_enable;
106 118 u8 q_used[VNET_MAX_TXQS];
107 struct list_head port_list; 119 struct list_head port_list;
108
109 struct hlist_head port_hash[VNET_PORT_HASH_SIZE]; 120 struct hlist_head port_hash[VNET_PORT_HASH_SIZE];
110
111 struct vnet_mcast_entry *mcast_list; 121 struct vnet_mcast_entry *mcast_list;
112
113 struct list_head list; 122 struct list_head list;
114 u64 local_mac; 123 u64 local_mac;
115
116 int nports; 124 int nports;
117}; 125};
118 126
@@ -139,6 +147,7 @@ int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg);
139void sunvnet_handshake_complete_common(struct vio_driver_state *vio); 147void sunvnet_handshake_complete_common(struct vio_driver_state *vio);
140int sunvnet_poll_common(struct napi_struct *napi, int budget); 148int sunvnet_poll_common(struct napi_struct *napi, int budget);
141void sunvnet_port_free_tx_bufs_common(struct vnet_port *port); 149void sunvnet_port_free_tx_bufs_common(struct vnet_port *port);
150void vnet_port_reset(struct vnet_port *port);
142bool sunvnet_port_is_up_common(struct vnet_port *vnet); 151bool sunvnet_port_is_up_common(struct vnet_port *vnet);
143void sunvnet_port_add_txq_common(struct vnet_port *port); 152void sunvnet_port_add_txq_common(struct vnet_port *port);
144void sunvnet_port_rm_txq_common(struct vnet_port *port); 153void sunvnet_port_rm_txq_common(struct vnet_port *port);
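NUM_VNET_PORT_STATS is simply sizeof(struct vnet_port_stats) / sizeof(u32), which is why the comment above insists that all members stay the same size: with the seven u32 fields it evaluates to 7, matching the seven per-port entries (the q_index slot plus six counters) produced by vnet_get_strings() and vnet_get_ethtool_stats(); q_placeholder exists only to reserve the slot that q_index occupies in the data array. A hypothetical way to make that coupling visible (not part of the patch) is to list the per-port labels in a table sized by NUM_VNET_PORT_STATS, so a mismatch between the label count and the structure is obvious at a glance and excess entries are diagnosed at compile time:

/* Hypothetical, not in the patch.  The first label is a format string
 * that the real code fills with extra snprintf() arguments (queue
 * index, port type, remote MAC); the rest take only the queue index.
 */
static const char vnet_port_stat_fmt[NUM_VNET_PORT_STATS][ETH_GSTRING_LEN] = {
	"p%u.%s-%pM",
	"p%u.rx_packets",
	"p%u.tx_packets",
	"p%u.rx_bytes",
	"p%u.tx_bytes",
	"p%u.event_up",
	"p%u.event_reset",
};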
diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig
new file mode 100644
index 000000000000..a9503884e1c2
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Kconfig
@@ -0,0 +1,41 @@
1#
2# Synopsys network device configuration
3#
4
5config NET_VENDOR_SYNOPSYS
6 bool "Synopsys devices"
7 default y
8 ---help---
9 If you have a network (Ethernet) device belonging to this class, say Y.
10
11 Note that the answer to this question doesn't directly affect the
12 kernel: saying N will just cause the configurator to skip all
13 the questions about Synopsys devices. If you say Y, you will be asked
14 for your specific device in the following questions.
15
16if NET_VENDOR_SYNOPSYS
17
18config DWC_XLGMAC
19 tristate "Synopsys DWC Enterprise Ethernet (XLGMAC) driver support"
20 depends on HAS_IOMEM && HAS_DMA
21 select BITREVERSE
22 select CRC32
23 ---help---
24 This driver supports the Synopsys DesignWare Cores Enterprise
25 Ethernet (dwc-xlgmac).
26
27if DWC_XLGMAC
28
29config DWC_XLGMAC_PCI
30 tristate "XLGMAC PCI bus support"
31 depends on DWC_XLGMAC && PCI
32 ---help---
 33 This selects the PCI bus support for the dwc-xlgmac driver.
 34 This driver was tested on the Synopsys XLGMAC IP Prototyping Kit.
35
36 If you have a controller with this interface, say Y or M here.
37 If unsure, say N.
38
39endif # DWC_XLGMAC
40
41endif # NET_VENDOR_SYNOPSYS
diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile
new file mode 100644
index 000000000000..c06e2eb3be90
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Makefile
@@ -0,0 +1,9 @@
1#
2# Makefile for the Synopsys network device drivers.
3#
4
5obj-$(CONFIG_DWC_XLGMAC) += dwc-xlgmac.o
6dwc-xlgmac-objs := dwc-xlgmac-net.o dwc-xlgmac-desc.o \
7 dwc-xlgmac-hw.o dwc-xlgmac-common.o
8
9dwc-xlgmac-$(CONFIG_DWC_XLGMAC_PCI) += dwc-xlgmac-pci.o
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
new file mode 100644
index 000000000000..726d78ac4907
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -0,0 +1,736 @@
1/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
2 *
3 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * This Synopsys DWC XLGMAC software driver and associated documentation
11 * (hereinafter the "Software") is an unsupported proprietary work of
12 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
13 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
14 * Licensed Product under any End User Software License Agreement or
15 * Agreement for Licensed Products with Synopsys or any supplement thereto.
16 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
17 * in the SOFTWARE may be the trademarks of their respective owners.
18 */
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22
23#include "dwc-xlgmac.h"
24#include "dwc-xlgmac-reg.h"
25
26static int debug = -1;
27module_param(debug, int, 0644);
28MODULE_PARM_DESC(debug, "DWC ethernet debug level (0=none,...,16=all)");
29static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
30 NETIF_MSG_IFUP);
31
32static unsigned char dev_addr[6] = {0, 0x55, 0x7b, 0xb5, 0x7d, 0xf7};
33
34static void xlgmac_read_mac_addr(struct xlgmac_pdata *pdata)
35{
36 struct net_device *netdev = pdata->netdev;
37
 38 /* Currently it uses a static MAC address for testing */
39 memcpy(pdata->mac_addr, dev_addr, netdev->addr_len);
40}
41
42static void xlgmac_default_config(struct xlgmac_pdata *pdata)
43{
44 pdata->tx_osp_mode = DMA_OSP_ENABLE;
45 pdata->tx_sf_mode = MTL_TSF_ENABLE;
46 pdata->rx_sf_mode = MTL_RSF_DISABLE;
47 pdata->pblx8 = DMA_PBL_X8_ENABLE;
48 pdata->tx_pbl = DMA_PBL_32;
49 pdata->rx_pbl = DMA_PBL_32;
50 pdata->tx_threshold = MTL_TX_THRESHOLD_128;
51 pdata->rx_threshold = MTL_RX_THRESHOLD_128;
52 pdata->tx_pause = 1;
53 pdata->rx_pause = 1;
54 pdata->phy_speed = SPEED_25000;
55 pdata->sysclk_rate = XLGMAC_SYSCLOCK;
56
57 strlcpy(pdata->drv_name, XLGMAC_DRV_NAME, sizeof(pdata->drv_name));
58 strlcpy(pdata->drv_ver, XLGMAC_DRV_VERSION, sizeof(pdata->drv_ver));
59}
60
61static void xlgmac_init_all_ops(struct xlgmac_pdata *pdata)
62{
63 xlgmac_init_desc_ops(&pdata->desc_ops);
64 xlgmac_init_hw_ops(&pdata->hw_ops);
65}
66
67static int xlgmac_init(struct xlgmac_pdata *pdata)
68{
69 struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
70 struct net_device *netdev = pdata->netdev;
71 unsigned int i;
72 int ret;
73
74 /* Set default configuration data */
75 xlgmac_default_config(pdata);
76
77 /* Set irq, base_addr, MAC address, */
78 netdev->irq = pdata->dev_irq;
79 netdev->base_addr = (unsigned long)pdata->mac_regs;
80 xlgmac_read_mac_addr(pdata);
81 memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
82
83 /* Set all the function pointers */
84 xlgmac_init_all_ops(pdata);
85
86 /* Issue software reset to device */
87 hw_ops->exit(pdata);
88
89 /* Populate the hardware features */
90 xlgmac_get_all_hw_features(pdata);
91 xlgmac_print_all_hw_features(pdata);
92
93 /* TODO: Set the PHY mode to XLGMII */
94
95 /* Set the DMA mask */
96 ret = dma_set_mask_and_coherent(pdata->dev,
97 DMA_BIT_MASK(pdata->hw_feat.dma_width));
98 if (ret) {
99 dev_err(pdata->dev, "dma_set_mask_and_coherent failed\n");
100 return ret;
101 }
102
 103 /* Channel and ring params initialization
104 * pdata->channel_count;
105 * pdata->tx_ring_count;
106 * pdata->rx_ring_count;
107 * pdata->tx_desc_count;
108 * pdata->rx_desc_count;
109 */
110 BUILD_BUG_ON_NOT_POWER_OF_2(XLGMAC_TX_DESC_CNT);
111 pdata->tx_desc_count = XLGMAC_TX_DESC_CNT;
112 if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
113 dev_err(pdata->dev, "tx descriptor count (%d) is not valid\n",
114 pdata->tx_desc_count);
115 ret = -EINVAL;
116 return ret;
117 }
118 BUILD_BUG_ON_NOT_POWER_OF_2(XLGMAC_RX_DESC_CNT);
119 pdata->rx_desc_count = XLGMAC_RX_DESC_CNT;
120 if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
121 dev_err(pdata->dev, "rx descriptor count (%d) is not valid\n",
122 pdata->rx_desc_count);
123 ret = -EINVAL;
124 return ret;
125 }
126
127 pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
128 pdata->hw_feat.tx_ch_cnt);
129 pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
130 pdata->hw_feat.tx_q_cnt);
131 pdata->tx_q_count = pdata->tx_ring_count;
132 ret = netif_set_real_num_tx_queues(netdev, pdata->tx_q_count);
133 if (ret) {
134 dev_err(pdata->dev, "error setting real tx queue count\n");
135 return ret;
136 }
137
138 pdata->rx_ring_count = min_t(unsigned int,
139 netif_get_num_default_rss_queues(),
140 pdata->hw_feat.rx_ch_cnt);
141 pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
142 pdata->hw_feat.rx_q_cnt);
143 pdata->rx_q_count = pdata->rx_ring_count;
144 ret = netif_set_real_num_rx_queues(netdev, pdata->rx_q_count);
145 if (ret) {
146 dev_err(pdata->dev, "error setting real rx queue count\n");
147 return ret;
148 }
149
150 pdata->channel_count =
151 max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
152
153 /* Initialize RSS hash key and lookup table */
154 netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));
155
156 for (i = 0; i < XLGMAC_RSS_MAX_TABLE_SIZE; i++)
157 pdata->rss_table[i] = XLGMAC_SET_REG_BITS(
158 pdata->rss_table[i],
159 MAC_RSSDR_DMCH_POS,
160 MAC_RSSDR_DMCH_LEN,
161 i % pdata->rx_ring_count);
162
163 pdata->rss_options = XLGMAC_SET_REG_BITS(
164 pdata->rss_options,
165 MAC_RSSCR_IP2TE_POS,
166 MAC_RSSCR_IP2TE_LEN, 1);
167 pdata->rss_options = XLGMAC_SET_REG_BITS(
168 pdata->rss_options,
169 MAC_RSSCR_TCP4TE_POS,
170 MAC_RSSCR_TCP4TE_LEN, 1);
171 pdata->rss_options = XLGMAC_SET_REG_BITS(
172 pdata->rss_options,
173 MAC_RSSCR_UDP4TE_POS,
174 MAC_RSSCR_UDP4TE_LEN, 1);
175
176 /* Set device operations */
177 netdev->netdev_ops = xlgmac_get_netdev_ops();
178
179 /* Set device features */
180 if (pdata->hw_feat.tso) {
181 netdev->hw_features = NETIF_F_TSO;
182 netdev->hw_features |= NETIF_F_TSO6;
183 netdev->hw_features |= NETIF_F_SG;
184 netdev->hw_features |= NETIF_F_IP_CSUM;
185 netdev->hw_features |= NETIF_F_IPV6_CSUM;
186 } else if (pdata->hw_feat.tx_coe) {
187 netdev->hw_features = NETIF_F_IP_CSUM;
188 netdev->hw_features |= NETIF_F_IPV6_CSUM;
189 }
190
191 if (pdata->hw_feat.rx_coe) {
192 netdev->hw_features |= NETIF_F_RXCSUM;
193 netdev->hw_features |= NETIF_F_GRO;
194 }
195
196 if (pdata->hw_feat.rss)
197 netdev->hw_features |= NETIF_F_RXHASH;
198
199 netdev->vlan_features |= netdev->hw_features;
200
201 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
202 if (pdata->hw_feat.sa_vlan_ins)
203 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
204 if (pdata->hw_feat.vlhash)
205 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
206
207 netdev->features |= netdev->hw_features;
208 pdata->netdev_features = netdev->features;
209
210 netdev->priv_flags |= IFF_UNICAST_FLT;
211
212 /* Use default watchdog timeout */
213 netdev->watchdog_timeo = 0;
214
215 /* Tx coalesce parameters initialization */
216 pdata->tx_usecs = XLGMAC_INIT_DMA_TX_USECS;
217 pdata->tx_frames = XLGMAC_INIT_DMA_TX_FRAMES;
218
219 /* Rx coalesce parameters initialization */
220 pdata->rx_riwt = hw_ops->usec_to_riwt(pdata, XLGMAC_INIT_DMA_RX_USECS);
221 pdata->rx_usecs = XLGMAC_INIT_DMA_RX_USECS;
222 pdata->rx_frames = XLGMAC_INIT_DMA_RX_FRAMES;
223
224 return 0;
225}
226
227int xlgmac_drv_probe(struct device *dev, struct xlgmac_resources *res)
228{
229 struct xlgmac_pdata *pdata;
230 struct net_device *netdev;
231 int ret;
232
233 netdev = alloc_etherdev_mq(sizeof(struct xlgmac_pdata),
234 XLGMAC_MAX_DMA_CHANNELS);
235
236 if (!netdev) {
237 dev_err(dev, "alloc_etherdev failed\n");
238 return -ENOMEM;
239 }
240
241 SET_NETDEV_DEV(netdev, dev);
242 dev_set_drvdata(dev, netdev);
243 pdata = netdev_priv(netdev);
244 pdata->dev = dev;
245 pdata->netdev = netdev;
246
247 pdata->dev_irq = res->irq;
248 pdata->mac_regs = res->addr;
249
250 mutex_init(&pdata->rss_mutex);
251 pdata->msg_enable = netif_msg_init(debug, default_msg_level);
252
253 ret = xlgmac_init(pdata);
254 if (ret) {
255 dev_err(dev, "xlgmac init failed\n");
256 goto err_free_netdev;
257 }
258
259 ret = register_netdev(netdev);
260 if (ret) {
261 dev_err(dev, "net device registration failed\n");
262 goto err_free_netdev;
263 }
264
265 return 0;
266
267err_free_netdev:
268 free_netdev(netdev);
269
270 return ret;
271}
272
273int xlgmac_drv_remove(struct device *dev)
274{
275 struct net_device *netdev = dev_get_drvdata(dev);
276
277 unregister_netdev(netdev);
278 free_netdev(netdev);
279
280 return 0;
281}
282
283void xlgmac_dump_tx_desc(struct xlgmac_pdata *pdata,
284 struct xlgmac_ring *ring,
285 unsigned int idx,
286 unsigned int count,
287 unsigned int flag)
288{
289 struct xlgmac_desc_data *desc_data;
290 struct xlgmac_dma_desc *dma_desc;
291
292 while (count--) {
293 desc_data = XLGMAC_GET_DESC_DATA(ring, idx);
294 dma_desc = desc_data->dma_desc;
295
296 netdev_dbg(pdata->netdev, "TX: dma_desc=%p, dma_desc_addr=%pad\n",
297 desc_data->dma_desc, &desc_data->dma_desc_addr);
298 netdev_dbg(pdata->netdev,
299 "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
300 (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
301 le32_to_cpu(dma_desc->desc0),
302 le32_to_cpu(dma_desc->desc1),
303 le32_to_cpu(dma_desc->desc2),
304 le32_to_cpu(dma_desc->desc3));
305
306 idx++;
307 }
308}
309
310void xlgmac_dump_rx_desc(struct xlgmac_pdata *pdata,
311 struct xlgmac_ring *ring,
312 unsigned int idx)
313{
314 struct xlgmac_desc_data *desc_data;
315 struct xlgmac_dma_desc *dma_desc;
316
317 desc_data = XLGMAC_GET_DESC_DATA(ring, idx);
318 dma_desc = desc_data->dma_desc;
319
320 netdev_dbg(pdata->netdev, "RX: dma_desc=%p, dma_desc_addr=%pad\n",
321 desc_data->dma_desc, &desc_data->dma_desc_addr);
322 netdev_dbg(pdata->netdev,
323 "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
324 idx,
325 le32_to_cpu(dma_desc->desc0),
326 le32_to_cpu(dma_desc->desc1),
327 le32_to_cpu(dma_desc->desc2),
328 le32_to_cpu(dma_desc->desc3));
329}
330
331void xlgmac_print_pkt(struct net_device *netdev,
332 struct sk_buff *skb, bool tx_rx)
333{
334 struct ethhdr *eth = (struct ethhdr *)skb->data;
335 unsigned char *buf = skb->data;
336 unsigned char buffer[128];
337 unsigned int i, j;
338
339 netdev_dbg(netdev, "\n************** SKB dump ****************\n");
340
341 netdev_dbg(netdev, "%s packet of %d bytes\n",
342 (tx_rx ? "TX" : "RX"), skb->len);
343
344 netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
345 netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
346 netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
347
348 for (i = 0, j = 0; i < skb->len;) {
349 j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
350 buf[i++]);
351
352 if ((i % 32) == 0) {
353 netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer);
354 j = 0;
355 } else if ((i % 16) == 0) {
356 buffer[j++] = ' ';
357 buffer[j++] = ' ';
358 } else if ((i % 4) == 0) {
359 buffer[j++] = ' ';
360 }
361 }
362 if (i % 32)
363 netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer);
364
365 netdev_dbg(netdev, "\n************** SKB dump ****************\n");
366}
367
368void xlgmac_get_all_hw_features(struct xlgmac_pdata *pdata)
369{
370 struct xlgmac_hw_features *hw_feat = &pdata->hw_feat;
371 unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
372
373 mac_hfr0 = readl(pdata->mac_regs + MAC_HWF0R);
374 mac_hfr1 = readl(pdata->mac_regs + MAC_HWF1R);
375 mac_hfr2 = readl(pdata->mac_regs + MAC_HWF2R);
376
377 memset(hw_feat, 0, sizeof(*hw_feat));
378
379 hw_feat->version = readl(pdata->mac_regs + MAC_VR);
380
381 /* Hardware feature register 0 */
382 hw_feat->phyifsel = XLGMAC_GET_REG_BITS(mac_hfr0,
383 MAC_HWF0R_PHYIFSEL_POS,
384 MAC_HWF0R_PHYIFSEL_LEN);
385 hw_feat->vlhash = XLGMAC_GET_REG_BITS(mac_hfr0,
386 MAC_HWF0R_VLHASH_POS,
387 MAC_HWF0R_VLHASH_LEN);
388 hw_feat->sma = XLGMAC_GET_REG_BITS(mac_hfr0,
389 MAC_HWF0R_SMASEL_POS,
390 MAC_HWF0R_SMASEL_LEN);
391 hw_feat->rwk = XLGMAC_GET_REG_BITS(mac_hfr0,
392 MAC_HWF0R_RWKSEL_POS,
393 MAC_HWF0R_RWKSEL_LEN);
394 hw_feat->mgk = XLGMAC_GET_REG_BITS(mac_hfr0,
395 MAC_HWF0R_MGKSEL_POS,
396 MAC_HWF0R_MGKSEL_LEN);
397 hw_feat->mmc = XLGMAC_GET_REG_BITS(mac_hfr0,
398 MAC_HWF0R_MMCSEL_POS,
399 MAC_HWF0R_MMCSEL_LEN);
400 hw_feat->aoe = XLGMAC_GET_REG_BITS(mac_hfr0,
401 MAC_HWF0R_ARPOFFSEL_POS,
402 MAC_HWF0R_ARPOFFSEL_LEN);
403 hw_feat->ts = XLGMAC_GET_REG_BITS(mac_hfr0,
404 MAC_HWF0R_TSSEL_POS,
405 MAC_HWF0R_TSSEL_LEN);
406 hw_feat->eee = XLGMAC_GET_REG_BITS(mac_hfr0,
407 MAC_HWF0R_EEESEL_POS,
408 MAC_HWF0R_EEESEL_LEN);
409 hw_feat->tx_coe = XLGMAC_GET_REG_BITS(mac_hfr0,
410 MAC_HWF0R_TXCOESEL_POS,
411 MAC_HWF0R_TXCOESEL_LEN);
412 hw_feat->rx_coe = XLGMAC_GET_REG_BITS(mac_hfr0,
413 MAC_HWF0R_RXCOESEL_POS,
414 MAC_HWF0R_RXCOESEL_LEN);
415 hw_feat->addn_mac = XLGMAC_GET_REG_BITS(mac_hfr0,
416 MAC_HWF0R_ADDMACADRSEL_POS,
417 MAC_HWF0R_ADDMACADRSEL_LEN);
418 hw_feat->ts_src = XLGMAC_GET_REG_BITS(mac_hfr0,
419 MAC_HWF0R_TSSTSSEL_POS,
420 MAC_HWF0R_TSSTSSEL_LEN);
421 hw_feat->sa_vlan_ins = XLGMAC_GET_REG_BITS(mac_hfr0,
422 MAC_HWF0R_SAVLANINS_POS,
423 MAC_HWF0R_SAVLANINS_LEN);
424
425 /* Hardware feature register 1 */
426 hw_feat->rx_fifo_size = XLGMAC_GET_REG_BITS(mac_hfr1,
427 MAC_HWF1R_RXFIFOSIZE_POS,
428 MAC_HWF1R_RXFIFOSIZE_LEN);
429 hw_feat->tx_fifo_size = XLGMAC_GET_REG_BITS(mac_hfr1,
430 MAC_HWF1R_TXFIFOSIZE_POS,
431 MAC_HWF1R_TXFIFOSIZE_LEN);
432 hw_feat->adv_ts_hi = XLGMAC_GET_REG_BITS(mac_hfr1,
433 MAC_HWF1R_ADVTHWORD_POS,
434 MAC_HWF1R_ADVTHWORD_LEN);
435 hw_feat->dma_width = XLGMAC_GET_REG_BITS(mac_hfr1,
436 MAC_HWF1R_ADDR64_POS,
437 MAC_HWF1R_ADDR64_LEN);
438 hw_feat->dcb = XLGMAC_GET_REG_BITS(mac_hfr1,
439 MAC_HWF1R_DCBEN_POS,
440 MAC_HWF1R_DCBEN_LEN);
441 hw_feat->sph = XLGMAC_GET_REG_BITS(mac_hfr1,
442 MAC_HWF1R_SPHEN_POS,
443 MAC_HWF1R_SPHEN_LEN);
444 hw_feat->tso = XLGMAC_GET_REG_BITS(mac_hfr1,
445 MAC_HWF1R_TSOEN_POS,
446 MAC_HWF1R_TSOEN_LEN);
447 hw_feat->dma_debug = XLGMAC_GET_REG_BITS(mac_hfr1,
448 MAC_HWF1R_DBGMEMA_POS,
449 MAC_HWF1R_DBGMEMA_LEN);
450 hw_feat->rss = XLGMAC_GET_REG_BITS(mac_hfr1,
451 MAC_HWF1R_RSSEN_POS,
452 MAC_HWF1R_RSSEN_LEN);
453 hw_feat->tc_cnt = XLGMAC_GET_REG_BITS(mac_hfr1,
454 MAC_HWF1R_NUMTC_POS,
455 MAC_HWF1R_NUMTC_LEN);
456 hw_feat->hash_table_size = XLGMAC_GET_REG_BITS(mac_hfr1,
457 MAC_HWF1R_HASHTBLSZ_POS,
458 MAC_HWF1R_HASHTBLSZ_LEN);
459 hw_feat->l3l4_filter_num = XLGMAC_GET_REG_BITS(mac_hfr1,
460 MAC_HWF1R_L3L4FNUM_POS,
461 MAC_HWF1R_L3L4FNUM_LEN);
462
463 /* Hardware feature register 2 */
464 hw_feat->rx_q_cnt = XLGMAC_GET_REG_BITS(mac_hfr2,
465 MAC_HWF2R_RXQCNT_POS,
466 MAC_HWF2R_RXQCNT_LEN);
467 hw_feat->tx_q_cnt = XLGMAC_GET_REG_BITS(mac_hfr2,
468 MAC_HWF2R_TXQCNT_POS,
469 MAC_HWF2R_TXQCNT_LEN);
470 hw_feat->rx_ch_cnt = XLGMAC_GET_REG_BITS(mac_hfr2,
471 MAC_HWF2R_RXCHCNT_POS,
472 MAC_HWF2R_RXCHCNT_LEN);
473 hw_feat->tx_ch_cnt = XLGMAC_GET_REG_BITS(mac_hfr2,
474 MAC_HWF2R_TXCHCNT_POS,
475 MAC_HWF2R_TXCHCNT_LEN);
476 hw_feat->pps_out_num = XLGMAC_GET_REG_BITS(mac_hfr2,
477 MAC_HWF2R_PPSOUTNUM_POS,
478 MAC_HWF2R_PPSOUTNUM_LEN);
479 hw_feat->aux_snap_num = XLGMAC_GET_REG_BITS(mac_hfr2,
480 MAC_HWF2R_AUXSNAPNUM_POS,
481 MAC_HWF2R_AUXSNAPNUM_LEN);
482
483 /* Translate the Hash Table size into actual number */
484 switch (hw_feat->hash_table_size) {
485 case 0:
486 break;
487 case 1:
488 hw_feat->hash_table_size = 64;
489 break;
490 case 2:
491 hw_feat->hash_table_size = 128;
492 break;
493 case 3:
494 hw_feat->hash_table_size = 256;
495 break;
496 }
497
498 /* Translate the address width setting into actual number */
499 switch (hw_feat->dma_width) {
500 case 0:
501 hw_feat->dma_width = 32;
502 break;
503 case 1:
504 hw_feat->dma_width = 40;
505 break;
506 case 2:
507 hw_feat->dma_width = 48;
508 break;
509 default:
510 hw_feat->dma_width = 32;
511 }
512
513 /* The Queue, Channel and TC counts are zero based so increment them
514 * to get the actual number
515 */
516 hw_feat->rx_q_cnt++;
517 hw_feat->tx_q_cnt++;
518 hw_feat->rx_ch_cnt++;
519 hw_feat->tx_ch_cnt++;
520 hw_feat->tc_cnt++;
521}
522
523void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata)
524{
525 char *str = NULL;
526
527 XLGMAC_PR("\n");
528 XLGMAC_PR("=====================================================\n");
529 XLGMAC_PR("\n");
530 XLGMAC_PR("HW support following features\n");
531 XLGMAC_PR("\n");
532 /* HW Feature Register0 */
533 XLGMAC_PR("VLAN Hash Filter Selected : %s\n",
534 pdata->hw_feat.vlhash ? "YES" : "NO");
535 XLGMAC_PR("SMA (MDIO) Interface : %s\n",
536 pdata->hw_feat.sma ? "YES" : "NO");
537 XLGMAC_PR("PMT Remote Wake-up Packet Enable : %s\n",
538 pdata->hw_feat.rwk ? "YES" : "NO");
539 XLGMAC_PR("PMT Magic Packet Enable : %s\n",
540 pdata->hw_feat.mgk ? "YES" : "NO");
541 XLGMAC_PR("RMON/MMC Module Enable : %s\n",
542 pdata->hw_feat.mmc ? "YES" : "NO");
543 XLGMAC_PR("ARP Offload Enabled : %s\n",
544 pdata->hw_feat.aoe ? "YES" : "NO");
545 XLGMAC_PR("IEEE 1588-2008 Timestamp Enabled : %s\n",
546 pdata->hw_feat.ts ? "YES" : "NO");
547 XLGMAC_PR("Energy Efficient Ethernet Enabled : %s\n",
548 pdata->hw_feat.eee ? "YES" : "NO");
549 XLGMAC_PR("Transmit Checksum Offload Enabled : %s\n",
550 pdata->hw_feat.tx_coe ? "YES" : "NO");
551 XLGMAC_PR("Receive Checksum Offload Enabled : %s\n",
552 pdata->hw_feat.rx_coe ? "YES" : "NO");
553 XLGMAC_PR("Additional MAC Addresses 1-31 Selected : %s\n",
554 pdata->hw_feat.addn_mac ? "YES" : "NO");
555
556 switch (pdata->hw_feat.ts_src) {
557 case 0:
558 str = "RESERVED";
559 break;
560 case 1:
561 str = "INTERNAL";
562 break;
563 case 2:
564 str = "EXTERNAL";
565 break;
566 case 3:
567 str = "BOTH";
568 break;
569 }
570 XLGMAC_PR("Timestamp System Time Source : %s\n", str);
571
572 XLGMAC_PR("Source Address or VLAN Insertion Enable : %s\n",
573 pdata->hw_feat.sa_vlan_ins ? "YES" : "NO");
574
575 /* HW Feature Register1 */
576 switch (pdata->hw_feat.rx_fifo_size) {
577 case 0:
578 str = "128 bytes";
579 break;
580 case 1:
581 str = "256 bytes";
582 break;
583 case 2:
584 str = "512 bytes";
585 break;
586 case 3:
587 str = "1 KBytes";
588 break;
589 case 4:
590 str = "2 KBytes";
591 break;
592 case 5:
593 str = "4 KBytes";
594 break;
595 case 6:
596 str = "8 KBytes";
597 break;
598 case 7:
599 str = "16 KBytes";
600 break;
601 case 8:
602 str = "32 kBytes";
603 break;
604 case 9:
605 str = "64 KBytes";
606 break;
607 case 10:
608 str = "128 KBytes";
609 break;
610 case 11:
611 str = "256 KBytes";
612 break;
613 default:
614 str = "RESERVED";
615 }
616 XLGMAC_PR("MTL Receive FIFO Size : %s\n", str);
617
618 switch (pdata->hw_feat.tx_fifo_size) {
619 case 0:
620 str = "128 bytes";
621 break;
622 case 1:
623 str = "256 bytes";
624 break;
625 case 2:
626 str = "512 bytes";
627 break;
628 case 3:
629 str = "1 KBytes";
630 break;
631 case 4:
632 str = "2 KBytes";
633 break;
634 case 5:
635 str = "4 KBytes";
636 break;
637 case 6:
638 str = "8 KBytes";
639 break;
640 case 7:
641 str = "16 KBytes";
642 break;
643 case 8:
644 str = "32 kBytes";
645 break;
646 case 9:
647 str = "64 KBytes";
648 break;
649 case 10:
650 str = "128 KBytes";
651 break;
652 case 11:
653 str = "256 KBytes";
654 break;
655 default:
656 str = "RESERVED";
657 }
658 XLGMAC_PR("MTL Transmit FIFO Size : %s\n", str);
659
660 XLGMAC_PR("IEEE 1588 High Word Register Enable : %s\n",
661 pdata->hw_feat.adv_ts_hi ? "YES" : "NO");
662 XLGMAC_PR("Address width : %u\n",
663 pdata->hw_feat.dma_width);
664 XLGMAC_PR("DCB Feature Enable : %s\n",
665 pdata->hw_feat.dcb ? "YES" : "NO");
666 XLGMAC_PR("Split Header Feature Enable : %s\n",
667 pdata->hw_feat.sph ? "YES" : "NO");
668 XLGMAC_PR("TCP Segmentation Offload Enable : %s\n",
669 pdata->hw_feat.tso ? "YES" : "NO");
670 XLGMAC_PR("DMA Debug Registers Enabled : %s\n",
671 pdata->hw_feat.dma_debug ? "YES" : "NO");
672 XLGMAC_PR("RSS Feature Enabled : %s\n",
673 pdata->hw_feat.rss ? "YES" : "NO");
674 XLGMAC_PR("Number of Traffic classes : %u\n",
675 (pdata->hw_feat.tc_cnt));
676 XLGMAC_PR("Hash Table Size : %u\n",
677 pdata->hw_feat.hash_table_size);
678 XLGMAC_PR("Total number of L3 or L4 Filters : %u\n",
679 pdata->hw_feat.l3l4_filter_num);
680
681 /* HW Feature Register2 */
682 XLGMAC_PR("Number of MTL Receive Queues : %u\n",
683 pdata->hw_feat.rx_q_cnt);
684 XLGMAC_PR("Number of MTL Transmit Queues : %u\n",
685 pdata->hw_feat.tx_q_cnt);
686 XLGMAC_PR("Number of DMA Receive Channels : %u\n",
687 pdata->hw_feat.rx_ch_cnt);
688 XLGMAC_PR("Number of DMA Transmit Channels : %u\n",
689 pdata->hw_feat.tx_ch_cnt);
690
691 switch (pdata->hw_feat.pps_out_num) {
692 case 0:
693 str = "No PPS output";
694 break;
695 case 1:
696 str = "1 PPS output";
697 break;
698 case 2:
699 str = "2 PPS output";
700 break;
701 case 3:
702 str = "3 PPS output";
703 break;
704 case 4:
705 str = "4 PPS output";
706 break;
707 default:
708 str = "RESERVED";
709 }
710 XLGMAC_PR("Number of PPS Outputs : %s\n", str);
711
712 switch (pdata->hw_feat.aux_snap_num) {
713 case 0:
714 str = "No auxiliary input";
715 break;
716 case 1:
717 str = "1 auxiliary input";
718 break;
719 case 2:
720 str = "2 auxiliary input";
721 break;
722 case 3:
723 str = "3 auxiliary input";
724 break;
725 case 4:
726 str = "4 auxiliary input";
727 break;
728 default:
729 str = "RESERVED";
730 }
731 XLGMAC_PR("Number of Auxiliary Snapshot Inputs : %s", str);
732
733 XLGMAC_PR("\n");
734 XLGMAC_PR("=====================================================\n");
735 XLGMAC_PR("\n");
736}
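The feature probing in xlgmac_get_all_hw_features() and the configuration code elsewhere in the driver go through XLGMAC_GET_REG_BITS() and XLGMAC_SET_REG_BITS(), which live in dwc-xlgmac.h and are not part of this hunk. Functionally they are plain position/length bit-field accessors, roughly equivalent to the sketch below; the demo_ names are mine and the real macros may differ in detail.

#include <linux/types.h>

/* Illustration only; assumes len < 32. */
static inline u32 demo_get_reg_bits(u32 var, unsigned int pos, unsigned int len)
{
	return (var >> pos) & ((1U << len) - 1);
}

static inline u32 demo_set_reg_bits(u32 var, unsigned int pos, unsigned int len,
				    u32 val)
{
	u32 mask = ((1U << len) - 1) << pos;

	return (var & ~mask) | ((val << pos) & mask);
}

For example, hw_feat->dma_width is extracted this way from MAC_HWF1R and then translated to a 32/40/48-bit address width before being passed to dma_set_mask_and_coherent() in xlgmac_init().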
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
new file mode 100644
index 000000000000..55c796ed7d26
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
@@ -0,0 +1,648 @@
1/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
2 *
3 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * This Synopsys DWC XLGMAC software driver and associated documentation
11 * (hereinafter the "Software") is an unsupported proprietary work of
12 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
13 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
14 * Licensed Product under any End User Software License Agreement or
15 * Agreement for Licensed Products with Synopsys or any supplement thereto.
16 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
17 * in the SOFTWARE may be the trademarks of their respective owners.
18 */
19
20#include "dwc-xlgmac.h"
21#include "dwc-xlgmac-reg.h"
22
23static void xlgmac_unmap_desc_data(struct xlgmac_pdata *pdata,
24 struct xlgmac_desc_data *desc_data)
25{
26 if (desc_data->skb_dma) {
27 if (desc_data->mapped_as_page) {
28 dma_unmap_page(pdata->dev, desc_data->skb_dma,
29 desc_data->skb_dma_len, DMA_TO_DEVICE);
30 } else {
31 dma_unmap_single(pdata->dev, desc_data->skb_dma,
32 desc_data->skb_dma_len, DMA_TO_DEVICE);
33 }
34 desc_data->skb_dma = 0;
35 desc_data->skb_dma_len = 0;
36 }
37
38 if (desc_data->skb) {
39 dev_kfree_skb_any(desc_data->skb);
40 desc_data->skb = NULL;
41 }
42
43 if (desc_data->rx.hdr.pa.pages)
44 put_page(desc_data->rx.hdr.pa.pages);
45
46 if (desc_data->rx.hdr.pa_unmap.pages) {
47 dma_unmap_page(pdata->dev, desc_data->rx.hdr.pa_unmap.pages_dma,
48 desc_data->rx.hdr.pa_unmap.pages_len,
49 DMA_FROM_DEVICE);
50 put_page(desc_data->rx.hdr.pa_unmap.pages);
51 }
52
53 if (desc_data->rx.buf.pa.pages)
54 put_page(desc_data->rx.buf.pa.pages);
55
56 if (desc_data->rx.buf.pa_unmap.pages) {
57 dma_unmap_page(pdata->dev, desc_data->rx.buf.pa_unmap.pages_dma,
58 desc_data->rx.buf.pa_unmap.pages_len,
59 DMA_FROM_DEVICE);
60 put_page(desc_data->rx.buf.pa_unmap.pages);
61 }
62
63 memset(&desc_data->tx, 0, sizeof(desc_data->tx));
64 memset(&desc_data->rx, 0, sizeof(desc_data->rx));
65
66 desc_data->mapped_as_page = 0;
67
68 if (desc_data->state_saved) {
69 desc_data->state_saved = 0;
70 desc_data->state.skb = NULL;
71 desc_data->state.len = 0;
72 desc_data->state.error = 0;
73 }
74}
75
76static void xlgmac_free_ring(struct xlgmac_pdata *pdata,
77 struct xlgmac_ring *ring)
78{
79 struct xlgmac_desc_data *desc_data;
80 unsigned int i;
81
82 if (!ring)
83 return;
84
85 if (ring->desc_data_head) {
86 for (i = 0; i < ring->dma_desc_count; i++) {
87 desc_data = XLGMAC_GET_DESC_DATA(ring, i);
88 xlgmac_unmap_desc_data(pdata, desc_data);
89 }
90
91 kfree(ring->desc_data_head);
92 ring->desc_data_head = NULL;
93 }
94
95 if (ring->rx_hdr_pa.pages) {
96 dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
97 ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
98 put_page(ring->rx_hdr_pa.pages);
99
100 ring->rx_hdr_pa.pages = NULL;
101 ring->rx_hdr_pa.pages_len = 0;
102 ring->rx_hdr_pa.pages_offset = 0;
103 ring->rx_hdr_pa.pages_dma = 0;
104 }
105
106 if (ring->rx_buf_pa.pages) {
107 dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
108 ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
109 put_page(ring->rx_buf_pa.pages);
110
111 ring->rx_buf_pa.pages = NULL;
112 ring->rx_buf_pa.pages_len = 0;
113 ring->rx_buf_pa.pages_offset = 0;
114 ring->rx_buf_pa.pages_dma = 0;
115 }
116
117 if (ring->dma_desc_head) {
118 dma_free_coherent(pdata->dev,
119 (sizeof(struct xlgmac_dma_desc) *
120 ring->dma_desc_count),
121 ring->dma_desc_head,
122 ring->dma_desc_head_addr);
123 ring->dma_desc_head = NULL;
124 }
125}
126
127static int xlgmac_init_ring(struct xlgmac_pdata *pdata,
128 struct xlgmac_ring *ring,
129 unsigned int dma_desc_count)
130{
131 if (!ring)
132 return 0;
133
134 /* Descriptors */
135 ring->dma_desc_count = dma_desc_count;
136 ring->dma_desc_head = dma_alloc_coherent(pdata->dev,
137 (sizeof(struct xlgmac_dma_desc) *
138 dma_desc_count),
139 &ring->dma_desc_head_addr,
140 GFP_KERNEL);
141 if (!ring->dma_desc_head)
142 return -ENOMEM;
143
144 /* Array of descriptor data */
145 ring->desc_data_head = kcalloc(dma_desc_count,
146 sizeof(struct xlgmac_desc_data),
147 GFP_KERNEL);
148 if (!ring->desc_data_head)
149 return -ENOMEM;
150
151 netif_dbg(pdata, drv, pdata->netdev,
152 "dma_desc_head=%p, dma_desc_head_addr=%pad, desc_data_head=%p\n",
153 ring->dma_desc_head,
154 &ring->dma_desc_head_addr,
155 ring->desc_data_head);
156
157 return 0;
158}
159
160static void xlgmac_free_rings(struct xlgmac_pdata *pdata)
161{
162 struct xlgmac_channel *channel;
163 unsigned int i;
164
165 if (!pdata->channel_head)
166 return;
167
168 channel = pdata->channel_head;
169 for (i = 0; i < pdata->channel_count; i++, channel++) {
170 xlgmac_free_ring(pdata, channel->tx_ring);
171 xlgmac_free_ring(pdata, channel->rx_ring);
172 }
173}
174
175static int xlgmac_alloc_rings(struct xlgmac_pdata *pdata)
176{
177 struct xlgmac_channel *channel;
178 unsigned int i;
179 int ret;
180
181 channel = pdata->channel_head;
182 for (i = 0; i < pdata->channel_count; i++, channel++) {
183 netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
184 channel->name);
185
186 ret = xlgmac_init_ring(pdata, channel->tx_ring,
187 pdata->tx_desc_count);
188
189 if (ret) {
190 netdev_alert(pdata->netdev,
191				     "error initializing Tx ring\n");
192 goto err_init_ring;
193 }
194
195 netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
196 channel->name);
197
198 ret = xlgmac_init_ring(pdata, channel->rx_ring,
199 pdata->rx_desc_count);
200 if (ret) {
201 netdev_alert(pdata->netdev,
202 "error initializing Rx ring\n");
203 goto err_init_ring;
204 }
205 }
206
207 return 0;
208
209err_init_ring:
210 xlgmac_free_rings(pdata);
211
212 return ret;
213}
214
215static void xlgmac_free_channels(struct xlgmac_pdata *pdata)
216{
217 if (!pdata->channel_head)
218 return;
219
220 kfree(pdata->channel_head->tx_ring);
221 pdata->channel_head->tx_ring = NULL;
222
223 kfree(pdata->channel_head->rx_ring);
224 pdata->channel_head->rx_ring = NULL;
225
226 kfree(pdata->channel_head);
227
228 pdata->channel_head = NULL;
229 pdata->channel_count = 0;
230}
231
232static int xlgmac_alloc_channels(struct xlgmac_pdata *pdata)
233{
234 struct xlgmac_channel *channel_head, *channel;
235 struct xlgmac_ring *tx_ring, *rx_ring;
236 int ret = -ENOMEM;
237 unsigned int i;
238
239 channel_head = kcalloc(pdata->channel_count,
240 sizeof(struct xlgmac_channel), GFP_KERNEL);
241 if (!channel_head)
242 return ret;
243
244 netif_dbg(pdata, drv, pdata->netdev,
245 "channel_head=%p\n", channel_head);
246
247 tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xlgmac_ring),
248 GFP_KERNEL);
249 if (!tx_ring)
250 goto err_tx_ring;
251
252 rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xlgmac_ring),
253 GFP_KERNEL);
254 if (!rx_ring)
255 goto err_rx_ring;
256
257 for (i = 0, channel = channel_head; i < pdata->channel_count;
258 i++, channel++) {
259 snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
260 channel->pdata = pdata;
261 channel->queue_index = i;
262 channel->dma_regs = pdata->mac_regs + DMA_CH_BASE +
263 (DMA_CH_INC * i);
264
265 if (pdata->per_channel_irq) {
266 /* Get the per DMA interrupt */
267 ret = pdata->channel_irq[i];
268 if (ret < 0) {
269 netdev_err(pdata->netdev,
270 "get_irq %u failed\n",
271 i + 1);
272 goto err_irq;
273 }
274 channel->dma_irq = ret;
275 }
276
277 if (i < pdata->tx_ring_count)
278 channel->tx_ring = tx_ring++;
279
280 if (i < pdata->rx_ring_count)
281 channel->rx_ring = rx_ring++;
282
283 netif_dbg(pdata, drv, pdata->netdev,
284 "%s: dma_regs=%p, tx_ring=%p, rx_ring=%p\n",
285 channel->name, channel->dma_regs,
286 channel->tx_ring, channel->rx_ring);
287 }
288
289 pdata->channel_head = channel_head;
290
291 return 0;
292
293err_irq:
294 kfree(rx_ring);
295
296err_rx_ring:
297 kfree(tx_ring);
298
299err_tx_ring:
300 kfree(channel_head);
301
302 return ret;
303}
304
305static void xlgmac_free_channels_and_rings(struct xlgmac_pdata *pdata)
306{
307 xlgmac_free_rings(pdata);
308
309 xlgmac_free_channels(pdata);
310}
311
312static int xlgmac_alloc_channels_and_rings(struct xlgmac_pdata *pdata)
313{
314 int ret;
315
316 ret = xlgmac_alloc_channels(pdata);
317 if (ret)
318 goto err_alloc;
319
320 ret = xlgmac_alloc_rings(pdata);
321 if (ret)
322 goto err_alloc;
323
324 return 0;
325
326err_alloc:
327 xlgmac_free_channels_and_rings(pdata);
328
329 return ret;
330}
331
332static int xlgmac_alloc_pages(struct xlgmac_pdata *pdata,
333 struct xlgmac_page_alloc *pa,
334 gfp_t gfp, int order)
335{
336 struct page *pages = NULL;
337 dma_addr_t pages_dma;
338 int ret;
339
340 /* Try to obtain pages, decreasing order if necessary */
341 gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
342 while (order >= 0) {
343 pages = alloc_pages(gfp, order);
344 if (pages)
345 break;
346
347 order--;
348 }
349 if (!pages)
350 return -ENOMEM;
351
352 /* Map the pages */
353 pages_dma = dma_map_page(pdata->dev, pages, 0,
354 PAGE_SIZE << order, DMA_FROM_DEVICE);
355 ret = dma_mapping_error(pdata->dev, pages_dma);
356 if (ret) {
357 put_page(pages);
358 return ret;
359 }
360
361 pa->pages = pages;
362 pa->pages_len = PAGE_SIZE << order;
363 pa->pages_offset = 0;
364 pa->pages_dma = pages_dma;
365
366 return 0;
367}
368
369static void xlgmac_set_buffer_data(struct xlgmac_buffer_data *bd,
370 struct xlgmac_page_alloc *pa,
371 unsigned int len)
372{
373 get_page(pa->pages);
374 bd->pa = *pa;
375
376 bd->dma_base = pa->pages_dma;
377 bd->dma_off = pa->pages_offset;
378 bd->dma_len = len;
379
380 pa->pages_offset += len;
381 if ((pa->pages_offset + len) > pa->pages_len) {
382 /* This data descriptor is responsible for unmapping page(s) */
383 bd->pa_unmap = *pa;
384
385 /* Get a new allocation next time */
386 pa->pages = NULL;
387 pa->pages_len = 0;
388 pa->pages_offset = 0;
389 pa->pages_dma = 0;
390 }
391}
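
xlgmac_set_buffer_data() above carves fixed-size slices out of a single large page allocation and hands the unmap responsibility to the descriptor that consumes the final slice. A toy user-space model of the offset bookkeeping, with the 32 KiB allocation and 2 KiB buffer size being assumptions chosen only for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int pages_len = 32768;	/* one large allocation, assumed size */
	unsigned int buf_len = 2048;	/* pdata->rx_buf_size, assumed        */
	unsigned int offset = 0;
	unsigned int buffers = 0;

	for (;;) {
		/* a buffer is carved out at the current offset */
		buffers++;
		offset += buf_len;

		/* once the next slice would not fit, this descriptor takes
		 * over the unmap reference and a fresh allocation is made
		 */
		if (offset + buf_len > pages_len) {
			printf("%u buffers from one allocation, last one owns the unmap\n",
			       buffers);
			break;
		}
	}
	return 0;
}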
392
393static int xlgmac_map_rx_buffer(struct xlgmac_pdata *pdata,
394 struct xlgmac_ring *ring,
395 struct xlgmac_desc_data *desc_data)
396{
397 int order, ret;
398
399 if (!ring->rx_hdr_pa.pages) {
400 ret = xlgmac_alloc_pages(pdata, &ring->rx_hdr_pa,
401 GFP_ATOMIC, 0);
402 if (ret)
403 return ret;
404 }
405
406 if (!ring->rx_buf_pa.pages) {
407 order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
408 ret = xlgmac_alloc_pages(pdata, &ring->rx_buf_pa,
409 GFP_ATOMIC, order);
410 if (ret)
411 return ret;
412 }
413
414 /* Set up the header page info */
415 xlgmac_set_buffer_data(&desc_data->rx.hdr, &ring->rx_hdr_pa,
416 XLGMAC_SKB_ALLOC_SIZE);
417
418 /* Set up the buffer page info */
419 xlgmac_set_buffer_data(&desc_data->rx.buf, &ring->rx_buf_pa,
420 pdata->rx_buf_size);
421
422 return 0;
423}
424
425static void xlgmac_tx_desc_init(struct xlgmac_pdata *pdata)
426{
427 struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
428 struct xlgmac_desc_data *desc_data;
429 struct xlgmac_dma_desc *dma_desc;
430 struct xlgmac_channel *channel;
431 struct xlgmac_ring *ring;
432 dma_addr_t dma_desc_addr;
433 unsigned int i, j;
434
435 channel = pdata->channel_head;
436 for (i = 0; i < pdata->channel_count; i++, channel++) {
437 ring = channel->tx_ring;
438 if (!ring)
439 break;
440
441 dma_desc = ring->dma_desc_head;
442 dma_desc_addr = ring->dma_desc_head_addr;
443
444 for (j = 0; j < ring->dma_desc_count; j++) {
445 desc_data = XLGMAC_GET_DESC_DATA(ring, j);
446
447 desc_data->dma_desc = dma_desc;
448 desc_data->dma_desc_addr = dma_desc_addr;
449
450 dma_desc++;
451 dma_desc_addr += sizeof(struct xlgmac_dma_desc);
452 }
453
454 ring->cur = 0;
455 ring->dirty = 0;
456 memset(&ring->tx, 0, sizeof(ring->tx));
457
458 hw_ops->tx_desc_init(channel);
459 }
460}
461
462static void xlgmac_rx_desc_init(struct xlgmac_pdata *pdata)
463{
464 struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
465 struct xlgmac_desc_data *desc_data;
466 struct xlgmac_dma_desc *dma_desc;
467 struct xlgmac_channel *channel;
468 struct xlgmac_ring *ring;
469 dma_addr_t dma_desc_addr;
470 unsigned int i, j;
471
472 channel = pdata->channel_head;
473 for (i = 0; i < pdata->channel_count; i++, channel++) {
474 ring = channel->rx_ring;
475 if (!ring)
476 break;
477
478 dma_desc = ring->dma_desc_head;
479 dma_desc_addr = ring->dma_desc_head_addr;
480
481 for (j = 0; j < ring->dma_desc_count; j++) {
482 desc_data = XLGMAC_GET_DESC_DATA(ring, j);
483
484 desc_data->dma_desc = dma_desc;
485 desc_data->dma_desc_addr = dma_desc_addr;
486
487 if (xlgmac_map_rx_buffer(pdata, ring, desc_data))
488 break;
489
490 dma_desc++;
491 dma_desc_addr += sizeof(struct xlgmac_dma_desc);
492 }
493
494 ring->cur = 0;
495 ring->dirty = 0;
496
497 hw_ops->rx_desc_init(channel);
498 }
499}
500
501static int xlgmac_map_tx_skb(struct xlgmac_channel *channel,
502 struct sk_buff *skb)
503{
504 struct xlgmac_pdata *pdata = channel->pdata;
505 struct xlgmac_ring *ring = channel->tx_ring;
506 unsigned int start_index, cur_index;
507 struct xlgmac_desc_data *desc_data;
508 unsigned int offset, datalen, len;
509 struct xlgmac_pkt_info *pkt_info;
510 struct skb_frag_struct *frag;
511 unsigned int tso, vlan;
512 dma_addr_t skb_dma;
513 unsigned int i;
514
515 offset = 0;
516 start_index = ring->cur;
517 cur_index = ring->cur;
518
519 pkt_info = &ring->pkt_info;
520 pkt_info->desc_count = 0;
521 pkt_info->length = 0;
522
523 tso = XLGMAC_GET_REG_BITS(pkt_info->attributes,
524 TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
525 TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN);
526 vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes,
527 TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
528 TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN);
529
530 /* Save space for a context descriptor if needed */
531 if ((tso && (pkt_info->mss != ring->tx.cur_mss)) ||
532 (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)))
533 cur_index++;
534 desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
535
536 if (tso) {
537 /* Map the TSO header */
538 skb_dma = dma_map_single(pdata->dev, skb->data,
539 pkt_info->header_len, DMA_TO_DEVICE);
540 if (dma_mapping_error(pdata->dev, skb_dma)) {
541 netdev_alert(pdata->netdev, "dma_map_single failed\n");
542 goto err_out;
543 }
544 desc_data->skb_dma = skb_dma;
545 desc_data->skb_dma_len = pkt_info->header_len;
546 netif_dbg(pdata, tx_queued, pdata->netdev,
547 "skb header: index=%u, dma=%pad, len=%u\n",
548 cur_index, &skb_dma, pkt_info->header_len);
549
550 offset = pkt_info->header_len;
551
552 pkt_info->length += pkt_info->header_len;
553
554 cur_index++;
555 desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
556 }
557
558 /* Map the (remainder of the) packet */
559 for (datalen = skb_headlen(skb) - offset; datalen; ) {
560 len = min_t(unsigned int, datalen, XLGMAC_TX_MAX_BUF_SIZE);
561
562 skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
563 DMA_TO_DEVICE);
564 if (dma_mapping_error(pdata->dev, skb_dma)) {
565 netdev_alert(pdata->netdev, "dma_map_single failed\n");
566 goto err_out;
567 }
568 desc_data->skb_dma = skb_dma;
569 desc_data->skb_dma_len = len;
570 netif_dbg(pdata, tx_queued, pdata->netdev,
571 "skb data: index=%u, dma=%pad, len=%u\n",
572 cur_index, &skb_dma, len);
573
574 datalen -= len;
575 offset += len;
576
577 pkt_info->length += len;
578
579 cur_index++;
580 desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
581 }
582
583 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
584 netif_dbg(pdata, tx_queued, pdata->netdev,
585 "mapping frag %u\n", i);
586
587 frag = &skb_shinfo(skb)->frags[i];
588 offset = 0;
589
590 for (datalen = skb_frag_size(frag); datalen; ) {
591 len = min_t(unsigned int, datalen,
592 XLGMAC_TX_MAX_BUF_SIZE);
593
594 skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
595 len, DMA_TO_DEVICE);
596 if (dma_mapping_error(pdata->dev, skb_dma)) {
597 netdev_alert(pdata->netdev,
598 "skb_frag_dma_map failed\n");
599 goto err_out;
600 }
601 desc_data->skb_dma = skb_dma;
602 desc_data->skb_dma_len = len;
603 desc_data->mapped_as_page = 1;
604 netif_dbg(pdata, tx_queued, pdata->netdev,
605 "skb frag: index=%u, dma=%pad, len=%u\n",
606 cur_index, &skb_dma, len);
607
608 datalen -= len;
609 offset += len;
610
611 pkt_info->length += len;
612
613 cur_index++;
614 desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
615 }
616 }
617
618 /* Save the skb address in the last entry. We always have some data
619 * that has been mapped so desc_data is always advanced past the last
620 * piece of mapped data - use the entry pointed to by cur_index - 1.
621 */
622 desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index - 1);
623 desc_data->skb = skb;
624
625 /* Save the number of descriptor entries used */
626 pkt_info->desc_count = cur_index - start_index;
627
628 return pkt_info->desc_count;
629
630err_out:
631 while (start_index < cur_index) {
632 desc_data = XLGMAC_GET_DESC_DATA(ring, start_index++);
633 xlgmac_unmap_desc_data(pdata, desc_data);
634 }
635
636 return 0;
637}
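
xlgmac_map_tx_skb() splits the linear area and every fragment into chunks of at most XLGMAC_TX_MAX_BUF_SIZE, so the data descriptor count per skb is the sum of the per-piece chunk counts (plus an optional context descriptor). A rough count for a made-up skb layout; the 16368 byte limit is an assumption, not taken from this patch:

#include <stdio.h>

static unsigned int chunks(unsigned int len, unsigned int max)
{
	return (len + max - 1) / max;
}

int main(void)
{
	/* XLGMAC_TX_MAX_BUF_SIZE assumed to be 16368; skb layout invented */
	unsigned int max_buf = 16368;
	unsigned int headlen = 300;
	unsigned int frag_len[2] = { 32768, 32768 };
	unsigned int desc = chunks(headlen, max_buf);
	unsigned int i;

	for (i = 0; i < 2; i++)
		desc += chunks(frag_len[i], max_buf);

	printf("data descriptors needed: %u\n", desc);	/* 7 for this layout */
	return 0;
}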
638
639void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops)
640{
641 desc_ops->alloc_channles_and_rings = xlgmac_alloc_channels_and_rings;
642 desc_ops->free_channels_and_rings = xlgmac_free_channels_and_rings;
643 desc_ops->map_tx_skb = xlgmac_map_tx_skb;
644 desc_ops->map_rx_buffer = xlgmac_map_rx_buffer;
645 desc_ops->unmap_desc_data = xlgmac_unmap_desc_data;
646 desc_ops->tx_desc_init = xlgmac_tx_desc_init;
647 desc_ops->rx_desc_init = xlgmac_rx_desc_init;
648}
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
new file mode 100644
index 000000000000..5cf3e90d4834
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
@@ -0,0 +1,3146 @@
1/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
2 *
3 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * This Synopsys DWC XLGMAC software driver and associated documentation
11 * (hereinafter the "Software") is an unsupported proprietary work of
12 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
13 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
14 * Licensed Product under any End User Software License Agreement or
15 * Agreement for Licensed Products with Synopsys or any supplement thereto.
16 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
17 * in the SOFTWARE may be the trademarks of their respective owners.
18 */
19
20#include <linux/phy.h>
21#include <linux/mdio.h>
22#include <linux/clk.h>
23#include <linux/bitrev.h>
24#include <linux/crc32.h>
25
26#include "dwc-xlgmac.h"
27#include "dwc-xlgmac-reg.h"
28
29static int xlgmac_tx_complete(struct xlgmac_dma_desc *dma_desc)
30{
31 return !XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
32 TX_NORMAL_DESC3_OWN_POS,
33 TX_NORMAL_DESC3_OWN_LEN);
34}
35
36static int xlgmac_disable_rx_csum(struct xlgmac_pdata *pdata)
37{
38 u32 regval;
39
40 regval = readl(pdata->mac_regs + MAC_RCR);
41 regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS,
42 MAC_RCR_IPC_LEN, 0);
43 writel(regval, pdata->mac_regs + MAC_RCR);
44
45 return 0;
46}
47
48static int xlgmac_enable_rx_csum(struct xlgmac_pdata *pdata)
49{
50 u32 regval;
51
52 regval = readl(pdata->mac_regs + MAC_RCR);
53 regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS,
54 MAC_RCR_IPC_LEN, 1);
55 writel(regval, pdata->mac_regs + MAC_RCR);
56
57 return 0;
58}
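
The register accessors in this file lean on the XLGMAC_GET_REG_BITS/XLGMAC_SET_REG_BITS helpers from dwc-xlgmac.h, which this hunk does not show. A minimal user-space sketch of how such position/width field macros typically behave, assuming POS names the least significant bit of the field and LEN its width in bits (the bit position used below is arbitrary):

#include <stdint.h>
#include <stdio.h>

#define FIELD_MASK(pos, len)	(((1U << (len)) - 1U) << (pos))

static inline uint32_t set_reg_bits(uint32_t reg, unsigned int pos,
				    unsigned int len, uint32_t val)
{
	/* clear the field, then merge the new value into it */
	return (reg & ~FIELD_MASK(pos, len)) |
	       ((val << pos) & FIELD_MASK(pos, len));
}

static inline uint32_t get_reg_bits(uint32_t reg, unsigned int pos,
				    unsigned int len)
{
	return (reg & FIELD_MASK(pos, len)) >> pos;
}

int main(void)
{
	uint32_t rcr = 0;

	rcr = set_reg_bits(rcr, 9, 1, 1);	/* e.g. an IPC-style enable bit */
	printf("reg=%#x field=%u\n", rcr, get_reg_bits(rcr, 9, 1));
	return 0;
}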
59
60static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, u8 *addr)
61{
62 unsigned int mac_addr_hi, mac_addr_lo;
63
64 mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
65 mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
66 (addr[1] << 8) | (addr[0] << 0);
67
68 writel(mac_addr_hi, pdata->mac_regs + MAC_MACA0HR);
69 writel(mac_addr_lo, pdata->mac_regs + MAC_MACA0LR);
70
71 return 0;
72}
73
74static void xlgmac_set_mac_reg(struct xlgmac_pdata *pdata,
75 struct netdev_hw_addr *ha,
76 unsigned int *mac_reg)
77{
78 unsigned int mac_addr_hi, mac_addr_lo;
79 u8 *mac_addr;
80
81 mac_addr_lo = 0;
82 mac_addr_hi = 0;
83
84 if (ha) {
85 mac_addr = (u8 *)&mac_addr_lo;
86 mac_addr[0] = ha->addr[0];
87 mac_addr[1] = ha->addr[1];
88 mac_addr[2] = ha->addr[2];
89 mac_addr[3] = ha->addr[3];
90 mac_addr = (u8 *)&mac_addr_hi;
91 mac_addr[0] = ha->addr[4];
92 mac_addr[1] = ha->addr[5];
93
94 netif_dbg(pdata, drv, pdata->netdev,
95 "adding mac address %pM at %#x\n",
96 ha->addr, *mac_reg);
97
98 mac_addr_hi = XLGMAC_SET_REG_BITS(mac_addr_hi,
99 MAC_MACA1HR_AE_POS,
100 MAC_MACA1HR_AE_LEN,
101 1);
102 }
103
104 writel(mac_addr_hi, pdata->mac_regs + *mac_reg);
105 *mac_reg += MAC_MACA_INC;
106 writel(mac_addr_lo, pdata->mac_regs + *mac_reg);
107 *mac_reg += MAC_MACA_INC;
108}
109
110static int xlgmac_enable_rx_vlan_stripping(struct xlgmac_pdata *pdata)
111{
112 u32 regval;
113
114 regval = readl(pdata->mac_regs + MAC_VLANTR);
115 /* Put the VLAN tag in the Rx descriptor */
116 regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLRXS_POS,
117 MAC_VLANTR_EVLRXS_LEN, 1);
118 /* Don't check the VLAN type */
119 regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_DOVLTC_POS,
120 MAC_VLANTR_DOVLTC_LEN, 1);
121 /* Check only C-TAG (0x8100) packets */
122 regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ERSVLM_POS,
123 MAC_VLANTR_ERSVLM_LEN, 0);
124 /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
125 regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ESVL_POS,
126 MAC_VLANTR_ESVL_LEN, 0);
127 /* Enable VLAN tag stripping */
128 regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
129 MAC_VLANTR_EVLS_LEN, 0x3);
130 writel(regval, pdata->mac_regs + MAC_VLANTR);
131
132 return 0;
133}
134
135static int xlgmac_disable_rx_vlan_stripping(struct xlgmac_pdata *pdata)
136{
137 u32 regval;
138
139 regval = readl(pdata->mac_regs + MAC_VLANTR);
140 regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
141 MAC_VLANTR_EVLS_LEN, 0);
142 writel(regval, pdata->mac_regs + MAC_VLANTR);
143
144 return 0;
145}
146
147static int xlgmac_enable_rx_vlan_filtering(struct xlgmac_pdata *pdata)
148{
149 u32 regval;
150
151 regval = readl(pdata->mac_regs + MAC_PFR);
152 /* Enable VLAN filtering */
153 regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS,
154 MAC_PFR_VTFE_LEN, 1);
155 writel(regval, pdata->mac_regs + MAC_PFR);
156
157 regval = readl(pdata->mac_regs + MAC_VLANTR);
158 /* Enable VLAN Hash Table filtering */
159 regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTHM_POS,
160 MAC_VLANTR_VTHM_LEN, 1);
161 /* Disable VLAN tag inverse matching */
162 regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTIM_POS,
163 MAC_VLANTR_VTIM_LEN, 0);
164 /* Only filter on the lower 12-bits of the VLAN tag */
165 regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ETV_POS,
166 MAC_VLANTR_ETV_LEN, 1);
167 /* In order for the VLAN Hash Table filtering to be effective,
168 * the VLAN tag identifier in the VLAN Tag Register must not
169 * be zero. Set the VLAN tag identifier to "1" to enable the
170 * VLAN Hash Table filtering. This implies that a VLAN tag of
171 * 1 will always pass filtering.
172 */
173 regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VL_POS,
174 MAC_VLANTR_VL_LEN, 1);
175 writel(regval, pdata->mac_regs + MAC_VLANTR);
176
177 return 0;
178}
179
180static int xlgmac_disable_rx_vlan_filtering(struct xlgmac_pdata *pdata)
181{
182 u32 regval;
183
184 regval = readl(pdata->mac_regs + MAC_PFR);
185 /* Disable VLAN filtering */
186 regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS,
187 MAC_PFR_VTFE_LEN, 0);
188 writel(regval, pdata->mac_regs + MAC_PFR);
189
190 return 0;
191}
192
193static u32 xlgmac_vid_crc32_le(__le16 vid_le)
194{
195 unsigned char *data = (unsigned char *)&vid_le;
196 unsigned char data_byte = 0;
197 u32 poly = 0xedb88320;
198 u32 crc = ~0;
199 u32 temp = 0;
200 int i, bits;
201
202 bits = get_bitmask_order(VLAN_VID_MASK);
203 for (i = 0; i < bits; i++) {
204 if ((i % 8) == 0)
205 data_byte = data[i / 8];
206
207 temp = ((crc & 1) ^ data_byte) & 1;
208 crc >>= 1;
209 data_byte >>= 1;
210
211 if (temp)
212 crc ^= poly;
213 }
214
215 return crc;
216}
217
218static int xlgmac_update_vlan_hash_table(struct xlgmac_pdata *pdata)
219{
220 u16 vlan_hash_table = 0;
221 __le16 vid_le;
222 u32 regval;
223 u32 crc;
224 u16 vid;
225
226 /* Generate the VLAN Hash Table value */
227 for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
228 /* Get the CRC32 value of the VLAN ID */
229 vid_le = cpu_to_le16(vid);
230 crc = bitrev32(~xlgmac_vid_crc32_le(vid_le)) >> 28;
231
232 vlan_hash_table |= (1 << crc);
233 }
234
235 regval = readl(pdata->mac_regs + MAC_VLANHTR);
236 /* Set the VLAN Hash Table filtering register */
237 regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANHTR_VLHT_POS,
238 MAC_VLANHTR_VLHT_LEN, vlan_hash_table);
239 writel(regval, pdata->mac_regs + MAC_VLANHTR);
240
241 return 0;
242}
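
Together with xlgmac_vid_crc32_le() above, xlgmac_update_vlan_hash_table() reduces each active VID to a 4-bit index: the bit-reversed, inverted CRC-32 of the little-endian VID is shifted right by 28 and the result selects one of the 16 bits in MAC_VLANHTR. A stand-alone sketch of the same computation, with bitrev32 and the bit-serial CRC re-implemented here purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Bit-reverse a 32-bit value (stand-in for the kernel's bitrev32()). */
static uint32_t bitrev32(uint32_t x)
{
	uint32_t r = 0;
	int i;

	for (i = 0; i < 32; i++)
		r |= ((x >> i) & 1U) << (31 - i);
	return r;
}

/* Same bit-serial CRC-32 over the low 12 VID bits as xlgmac_vid_crc32_le(). */
static uint32_t vid_crc32_le(uint16_t vid)
{
	uint32_t poly = 0xedb88320;
	uint32_t crc = ~0U;
	int i;

	for (i = 0; i < 12; i++) {
		uint32_t bit = (vid >> i) & 1U;

		if ((crc ^ bit) & 1U)
			crc = (crc >> 1) ^ poly;
		else
			crc >>= 1;
	}
	return crc;
}

int main(void)
{
	uint16_t vid = 100;	/* example VID */
	uint32_t idx = bitrev32(~vid_crc32_le(vid)) >> 28;

	printf("VID %u -> VLAN hash table bit %u\n", vid, idx);
	return 0;
}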
243
244static int xlgmac_set_promiscuous_mode(struct xlgmac_pdata *pdata,
245 unsigned int enable)
246{
247 unsigned int val = enable ? 1 : 0;
248 u32 regval;
249
250 regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR),
251 MAC_PFR_PR_POS, MAC_PFR_PR_LEN);
252 if (regval == val)
253 return 0;
254
255 netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
256 enable ? "entering" : "leaving");
257
258 regval = readl(pdata->mac_regs + MAC_PFR);
259 regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PR_POS,
260 MAC_PFR_PR_LEN, val);
261 writel(regval, pdata->mac_regs + MAC_PFR);
262
263 /* Hardware will still perform VLAN filtering in promiscuous mode */
264 if (enable) {
265 xlgmac_disable_rx_vlan_filtering(pdata);
266 } else {
267 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
268 xlgmac_enable_rx_vlan_filtering(pdata);
269 }
270
271 return 0;
272}
273
274static int xlgmac_set_all_multicast_mode(struct xlgmac_pdata *pdata,
275 unsigned int enable)
276{
277 unsigned int val = enable ? 1 : 0;
278 u32 regval;
279
280 regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR),
281 MAC_PFR_PM_POS, MAC_PFR_PM_LEN);
282 if (regval == val)
283 return 0;
284
285 netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
286 enable ? "entering" : "leaving");
287
288 regval = readl(pdata->mac_regs + MAC_PFR);
289 regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PM_POS,
290 MAC_PFR_PM_LEN, val);
291 writel(regval, pdata->mac_regs + MAC_PFR);
292
293 return 0;
294}
295
296static void xlgmac_set_mac_addn_addrs(struct xlgmac_pdata *pdata)
297{
298 struct net_device *netdev = pdata->netdev;
299 struct netdev_hw_addr *ha;
300 unsigned int addn_macs;
301 unsigned int mac_reg;
302
303 mac_reg = MAC_MACA1HR;
304 addn_macs = pdata->hw_feat.addn_mac;
305
306 if (netdev_uc_count(netdev) > addn_macs) {
307 xlgmac_set_promiscuous_mode(pdata, 1);
308 } else {
309 netdev_for_each_uc_addr(ha, netdev) {
310 xlgmac_set_mac_reg(pdata, ha, &mac_reg);
311 addn_macs--;
312 }
313
314 if (netdev_mc_count(netdev) > addn_macs) {
315 xlgmac_set_all_multicast_mode(pdata, 1);
316 } else {
317 netdev_for_each_mc_addr(ha, netdev) {
318 xlgmac_set_mac_reg(pdata, ha, &mac_reg);
319 addn_macs--;
320 }
321 }
322 }
323
324 /* Clear remaining additional MAC address entries */
325 while (addn_macs--)
326 xlgmac_set_mac_reg(pdata, NULL, &mac_reg);
327}
328
329static void xlgmac_set_mac_hash_table(struct xlgmac_pdata *pdata)
330{
331 unsigned int hash_table_shift, hash_table_count;
332 u32 hash_table[XLGMAC_MAC_HASH_TABLE_SIZE];
333 struct net_device *netdev = pdata->netdev;
334 struct netdev_hw_addr *ha;
335 unsigned int hash_reg;
336 unsigned int i;
337 u32 crc;
338
339 hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
340 hash_table_count = pdata->hw_feat.hash_table_size / 32;
341 memset(hash_table, 0, sizeof(hash_table));
342
343 /* Build the MAC Hash Table register values */
344 netdev_for_each_uc_addr(ha, netdev) {
345 crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
346 crc >>= hash_table_shift;
347 hash_table[crc >> 5] |= (1 << (crc & 0x1f));
348 }
349
350 netdev_for_each_mc_addr(ha, netdev) {
351 crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
352 crc >>= hash_table_shift;
353 hash_table[crc >> 5] |= (1 << (crc & 0x1f));
354 }
355
356 /* Set the MAC Hash Table registers */
357 hash_reg = MAC_HTR0;
358 for (i = 0; i < hash_table_count; i++) {
359 writel(hash_table[i], pdata->mac_regs + hash_reg);
360 hash_reg += MAC_HTR_INC;
361 }
362}
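
The hash_table_shift arithmetic above reduces the bit-reversed CRC to log2(hash_table_size) bits; for an assumed 256-entry table the shift is 26 - (256 >> 7) = 24, after which the top three bits of the 8-bit result pick one of the eight MAC_HTRx registers and the low five bits pick the bit within it. A quick check of that split, using an arbitrary reduced CRC value:

#include <stdio.h>

int main(void)
{
	unsigned int hash_table_size = 256;	/* assumed hw_feat value */
	unsigned int shift = 26 - (hash_table_size >> 7);	/* = 24 */
	unsigned int crc = 0xb7;	/* arbitrary value after "crc >>= shift" */

	/* top 3 bits select MAC_HTR0..MAC_HTR7, low 5 bits select the bit */
	printf("shift=%u -> MAC_HTR%u, bit %u\n", shift, crc >> 5, crc & 0x1f);
	return 0;
}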
363
364static int xlgmac_add_mac_addresses(struct xlgmac_pdata *pdata)
365{
366 if (pdata->hw_feat.hash_table_size)
367 xlgmac_set_mac_hash_table(pdata);
368 else
369 xlgmac_set_mac_addn_addrs(pdata);
370
371 return 0;
372}
373
374static void xlgmac_config_mac_address(struct xlgmac_pdata *pdata)
375{
376 u32 regval;
377
378 xlgmac_set_mac_address(pdata, pdata->netdev->dev_addr);
379
380 /* Filtering is done using perfect filtering and hash filtering */
381 if (pdata->hw_feat.hash_table_size) {
382 regval = readl(pdata->mac_regs + MAC_PFR);
383 regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HPF_POS,
384 MAC_PFR_HPF_LEN, 1);
385 regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HUC_POS,
386 MAC_PFR_HUC_LEN, 1);
387 regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HMC_POS,
388 MAC_PFR_HMC_LEN, 1);
389 writel(regval, pdata->mac_regs + MAC_PFR);
390 }
391}
392
393static void xlgmac_config_jumbo_enable(struct xlgmac_pdata *pdata)
394{
395 unsigned int val;
396 u32 regval;
397
398 val = (pdata->netdev->mtu > XLGMAC_STD_PACKET_MTU) ? 1 : 0;
399
400 regval = readl(pdata->mac_regs + MAC_RCR);
401 regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_JE_POS,
402 MAC_RCR_JE_LEN, val);
403 writel(regval, pdata->mac_regs + MAC_RCR);
404}
405
406static void xlgmac_config_checksum_offload(struct xlgmac_pdata *pdata)
407{
408 if (pdata->netdev->features & NETIF_F_RXCSUM)
409 xlgmac_enable_rx_csum(pdata);
410 else
411 xlgmac_disable_rx_csum(pdata);
412}
413
414static void xlgmac_config_vlan_support(struct xlgmac_pdata *pdata)
415{
416 u32 regval;
417
418 regval = readl(pdata->mac_regs + MAC_VLANIR);
419 /* Indicate that VLAN Tx CTAGs come from context descriptors */
420 regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_CSVL_POS,
421 MAC_VLANIR_CSVL_LEN, 0);
422 regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLTI_POS,
423 MAC_VLANIR_VLTI_LEN, 1);
424 writel(regval, pdata->mac_regs + MAC_VLANIR);
425
426 /* Set the current VLAN Hash Table register value */
427 xlgmac_update_vlan_hash_table(pdata);
428
429 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
430 xlgmac_enable_rx_vlan_filtering(pdata);
431 else
432 xlgmac_disable_rx_vlan_filtering(pdata);
433
434 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
435 xlgmac_enable_rx_vlan_stripping(pdata);
436 else
437 xlgmac_disable_rx_vlan_stripping(pdata);
438}
439
440static int xlgmac_config_rx_mode(struct xlgmac_pdata *pdata)
441{
442 struct net_device *netdev = pdata->netdev;
443 unsigned int pr_mode, am_mode;
444
445 pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
446 am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
447
448 xlgmac_set_promiscuous_mode(pdata, pr_mode);
449 xlgmac_set_all_multicast_mode(pdata, am_mode);
450
451 xlgmac_add_mac_addresses(pdata);
452
453 return 0;
454}
455
456static void xlgmac_prepare_tx_stop(struct xlgmac_pdata *pdata,
457 struct xlgmac_channel *channel)
458{
459 unsigned int tx_dsr, tx_pos, tx_qidx;
460 unsigned long tx_timeout;
461 unsigned int tx_status;
462
463 /* Calculate the status register to read and the position within */
464 if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
465 tx_dsr = DMA_DSR0;
466 tx_pos = (channel->queue_index * DMA_DSR_Q_LEN) +
467 DMA_DSR0_TPS_START;
468 } else {
469 tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;
470
471 tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
472 tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_LEN) +
473 DMA_DSRX_TPS_START;
474 }
475
476 /* The Tx engine cannot be stopped if it is actively processing
477 * descriptors. Wait for the Tx engine to enter the stopped or
478 * suspended state. Don't wait forever though...
479 */
480 tx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ);
481 while (time_before(jiffies, tx_timeout)) {
482 tx_status = readl(pdata->mac_regs + tx_dsr);
483 tx_status = XLGMAC_GET_REG_BITS(tx_status, tx_pos,
484 DMA_DSR_TPS_LEN);
485 if ((tx_status == DMA_TPS_STOPPED) ||
486 (tx_status == DMA_TPS_SUSPENDED))
487 break;
488
489 usleep_range(500, 1000);
490 }
491
492 if (!time_before(jiffies, tx_timeout))
493 netdev_info(pdata->netdev,
494 "timed out waiting for Tx DMA channel %u to stop\n",
495 channel->queue_index);
496}
497
498static void xlgmac_enable_tx(struct xlgmac_pdata *pdata)
499{
500 struct xlgmac_channel *channel;
501 unsigned int i;
502 u32 regval;
503
504 /* Enable each Tx DMA channel */
505 channel = pdata->channel_head;
506 for (i = 0; i < pdata->channel_count; i++, channel++) {
507 if (!channel->tx_ring)
508 break;
509
510 regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
511 regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
512 DMA_CH_TCR_ST_LEN, 1);
513 writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
514 }
515
516 /* Enable each Tx queue */
517 for (i = 0; i < pdata->tx_q_count; i++) {
518 regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
519 regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS,
520 MTL_Q_TQOMR_TXQEN_LEN,
521 MTL_Q_ENABLED);
522 writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
523 }
524
525 /* Enable MAC Tx */
526 regval = readl(pdata->mac_regs + MAC_TCR);
527 regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS,
528 MAC_TCR_TE_LEN, 1);
529 writel(regval, pdata->mac_regs + MAC_TCR);
530}
531
532static void xlgmac_disable_tx(struct xlgmac_pdata *pdata)
533{
534 struct xlgmac_channel *channel;
535 unsigned int i;
536 u32 regval;
537
538 /* Prepare for Tx DMA channel stop */
539 channel = pdata->channel_head;
540 for (i = 0; i < pdata->channel_count; i++, channel++) {
541 if (!channel->tx_ring)
542 break;
543
544 xlgmac_prepare_tx_stop(pdata, channel);
545 }
546
547 /* Disable MAC Tx */
548 regval = readl(pdata->mac_regs + MAC_TCR);
549 regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS,
550 MAC_TCR_TE_LEN, 0);
551 writel(regval, pdata->mac_regs + MAC_TCR);
552
553 /* Disable each Tx queue */
554 for (i = 0; i < pdata->tx_q_count; i++) {
555 regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
556 regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS,
557 MTL_Q_TQOMR_TXQEN_LEN, 0);
558 writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
559 }
560
561 /* Disable each Tx DMA channel */
562 channel = pdata->channel_head;
563 for (i = 0; i < pdata->channel_count; i++, channel++) {
564 if (!channel->tx_ring)
565 break;
566
567 regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
568 regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
569 DMA_CH_TCR_ST_LEN, 0);
570 writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
571 }
572}
573
574static void xlgmac_prepare_rx_stop(struct xlgmac_pdata *pdata,
575 unsigned int queue)
576{
577 unsigned int rx_status, prxq, rxqsts;
578 unsigned long rx_timeout;
579
580 /* The Rx engine cannot be stopped if it is actively processing
581 * packets. Wait for the Rx queue to empty the Rx fifo. Don't
582 * wait forever though...
583 */
584 rx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ);
585 while (time_before(jiffies, rx_timeout)) {
586 rx_status = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_RQDR));
587 prxq = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_PRXQ_POS,
588 MTL_Q_RQDR_PRXQ_LEN);
589 rxqsts = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_RXQSTS_POS,
590 MTL_Q_RQDR_RXQSTS_LEN);
591 if ((prxq == 0) && (rxqsts == 0))
592 break;
593
594 usleep_range(500, 1000);
595 }
596
597 if (!time_before(jiffies, rx_timeout))
598 netdev_info(pdata->netdev,
599 "timed out waiting for Rx queue %u to empty\n",
600 queue);
601}
602
603static void xlgmac_enable_rx(struct xlgmac_pdata *pdata)
604{
605 struct xlgmac_channel *channel;
606 unsigned int regval, i;
607
608 /* Enable each Rx DMA channel */
609 channel = pdata->channel_head;
610 for (i = 0; i < pdata->channel_count; i++, channel++) {
611 if (!channel->rx_ring)
612 break;
613
614 regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
615 regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS,
616 DMA_CH_RCR_SR_LEN, 1);
617 writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
618 }
619
620 /* Enable each Rx queue */
621 regval = 0;
622 for (i = 0; i < pdata->rx_q_count; i++)
623 regval |= (0x02 << (i << 1));
624 writel(regval, pdata->mac_regs + MAC_RQC0R);
625
626 /* Enable MAC Rx */
627 regval = readl(pdata->mac_regs + MAC_RCR);
628 regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS,
629 MAC_RCR_DCRCC_LEN, 1);
630 regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS,
631 MAC_RCR_CST_LEN, 1);
632 regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS,
633 MAC_RCR_ACS_LEN, 1);
634 regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS,
635 MAC_RCR_RE_LEN, 1);
636 writel(regval, pdata->mac_regs + MAC_RCR);
637}
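
The Rx queue enable loop above gives every queue a 2-bit field in MAC_RQC0R and writes 0x2 into each one (on similar Synopsys MACs that value means "enabled for generic/DCB traffic"; treat that reading as an assumption here). For four queues the register value comes out as 0xaa, which the snippet below reproduces:

#include <stdio.h>

int main(void)
{
	unsigned int regval = 0;
	unsigned int rx_q_count = 4;	/* assumed queue count */
	unsigned int i;

	/* same construction as the loop in xlgmac_enable_rx() */
	for (i = 0; i < rx_q_count; i++)
		regval |= (0x02 << (i << 1));

	printf("MAC_RQC0R = %#x\n", regval);	/* 0xaa for 4 queues */
	return 0;
}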
638
639static void xlgmac_disable_rx(struct xlgmac_pdata *pdata)
640{
641 struct xlgmac_channel *channel;
642 unsigned int i;
643 u32 regval;
644
645 /* Disable MAC Rx */
646 regval = readl(pdata->mac_regs + MAC_RCR);
647 regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS,
648 MAC_RCR_DCRCC_LEN, 0);
649 regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS,
650 MAC_RCR_CST_LEN, 0);
651 regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS,
652 MAC_RCR_ACS_LEN, 0);
653 regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS,
654 MAC_RCR_RE_LEN, 0);
655 writel(regval, pdata->mac_regs + MAC_RCR);
656
657 /* Prepare for Rx DMA channel stop */
658 for (i = 0; i < pdata->rx_q_count; i++)
659 xlgmac_prepare_rx_stop(pdata, i);
660
661 /* Disable each Rx queue */
662 writel(0, pdata->mac_regs + MAC_RQC0R);
663
664 /* Disable each Rx DMA channel */
665 channel = pdata->channel_head;
666 for (i = 0; i < pdata->channel_count; i++, channel++) {
667 if (!channel->rx_ring)
668 break;
669
670 regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
671 regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS,
672 DMA_CH_RCR_SR_LEN, 0);
673 writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
674 }
675}
676
677static void xlgmac_tx_start_xmit(struct xlgmac_channel *channel,
678 struct xlgmac_ring *ring)
679{
680 struct xlgmac_pdata *pdata = channel->pdata;
681 struct xlgmac_desc_data *desc_data;
682
683 /* Make sure everything is written before the register write */
684 wmb();
685
686 /* Issue a poll command to Tx DMA by writing address
687 * of next immediate free descriptor
688 */
689 desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
690 writel(lower_32_bits(desc_data->dma_desc_addr),
691 XLGMAC_DMA_REG(channel, DMA_CH_TDTR_LO));
692
693 /* Start the Tx timer */
694 if (pdata->tx_usecs && !channel->tx_timer_active) {
695 channel->tx_timer_active = 1;
696 mod_timer(&channel->tx_timer,
697 jiffies + usecs_to_jiffies(pdata->tx_usecs));
698 }
699
700 ring->tx.xmit_more = 0;
701}
702
703static void xlgmac_dev_xmit(struct xlgmac_channel *channel)
704{
705 struct xlgmac_pdata *pdata = channel->pdata;
706 struct xlgmac_ring *ring = channel->tx_ring;
707 unsigned int tso_context, vlan_context;
708 struct xlgmac_desc_data *desc_data;
709 struct xlgmac_dma_desc *dma_desc;
710 struct xlgmac_pkt_info *pkt_info;
711 unsigned int csum, tso, vlan;
712 int start_index = ring->cur;
713 int cur_index = ring->cur;
714 unsigned int tx_set_ic;
715 int i;
716
717 pkt_info = &ring->pkt_info;
718 csum = XLGMAC_GET_REG_BITS(pkt_info->attributes,
719 TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
720 TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN);
721 tso = XLGMAC_GET_REG_BITS(pkt_info->attributes,
722 TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
723 TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN);
724 vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes,
725 TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
726 TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN);
727
728 if (tso && (pkt_info->mss != ring->tx.cur_mss))
729 tso_context = 1;
730 else
731 tso_context = 0;
732
733 if (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag))
734 vlan_context = 1;
735 else
736 vlan_context = 0;
737
738 /* Determine if an interrupt should be generated for this Tx:
739 * Interrupt:
740 * - Tx frame count exceeds the frame count setting
741 * - Addition of Tx frame count to the frame count since the
742 * last interrupt was set exceeds the frame count setting
743 * No interrupt:
744 * - No frame count setting specified (ethtool -C ethX tx-frames 0)
745 * - Addition of Tx frame count to the frame count since the
746 * last interrupt was set does not exceed the frame count setting
747 */
748 ring->coalesce_count += pkt_info->tx_packets;
749 if (!pdata->tx_frames)
750 tx_set_ic = 0;
751 else if (pkt_info->tx_packets > pdata->tx_frames)
752 tx_set_ic = 1;
753 else if ((ring->coalesce_count % pdata->tx_frames) <
754 pkt_info->tx_packets)
755 tx_set_ic = 1;
756 else
757 tx_set_ic = 0;
758
759 desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
760 dma_desc = desc_data->dma_desc;
761
762	/* Create a context descriptor if this is a TSO or VLAN packet */
763 if (tso_context || vlan_context) {
764 if (tso_context) {
765 netif_dbg(pdata, tx_queued, pdata->netdev,
766 "TSO context descriptor, mss=%u\n",
767 pkt_info->mss);
768
769 /* Set the MSS size */
770 dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
771 dma_desc->desc2,
772 TX_CONTEXT_DESC2_MSS_POS,
773 TX_CONTEXT_DESC2_MSS_LEN,
774 pkt_info->mss);
775
776 /* Mark it as a CONTEXT descriptor */
777 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
778 dma_desc->desc3,
779 TX_CONTEXT_DESC3_CTXT_POS,
780 TX_CONTEXT_DESC3_CTXT_LEN,
781 1);
782
783 /* Indicate this descriptor contains the MSS */
784 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
785 dma_desc->desc3,
786 TX_CONTEXT_DESC3_TCMSSV_POS,
787 TX_CONTEXT_DESC3_TCMSSV_LEN,
788 1);
789
790 ring->tx.cur_mss = pkt_info->mss;
791 }
792
793 if (vlan_context) {
794 netif_dbg(pdata, tx_queued, pdata->netdev,
795 "VLAN context descriptor, ctag=%u\n",
796 pkt_info->vlan_ctag);
797
798 /* Mark it as a CONTEXT descriptor */
799 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
800 dma_desc->desc3,
801 TX_CONTEXT_DESC3_CTXT_POS,
802 TX_CONTEXT_DESC3_CTXT_LEN,
803 1);
804
805 /* Set the VLAN tag */
806 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
807 dma_desc->desc3,
808 TX_CONTEXT_DESC3_VT_POS,
809 TX_CONTEXT_DESC3_VT_LEN,
810 pkt_info->vlan_ctag);
811
812 /* Indicate this descriptor contains the VLAN tag */
813 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
814 dma_desc->desc3,
815 TX_CONTEXT_DESC3_VLTV_POS,
816 TX_CONTEXT_DESC3_VLTV_LEN,
817 1);
818
819 ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag;
820 }
821
822 cur_index++;
823 desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
824 dma_desc = desc_data->dma_desc;
825 }
826
827 /* Update buffer address (for TSO this is the header) */
828 dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma));
829 dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma));
830
831 /* Update the buffer length */
832 dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
833 dma_desc->desc2,
834 TX_NORMAL_DESC2_HL_B1L_POS,
835 TX_NORMAL_DESC2_HL_B1L_LEN,
836 desc_data->skb_dma_len);
837
838 /* VLAN tag insertion check */
839 if (vlan)
840 dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
841 dma_desc->desc2,
842 TX_NORMAL_DESC2_VTIR_POS,
843 TX_NORMAL_DESC2_VTIR_LEN,
844 TX_NORMAL_DESC2_VLAN_INSERT);
845
846 /* Timestamp enablement check */
847 if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
848 TX_PACKET_ATTRIBUTES_PTP_POS,
849 TX_PACKET_ATTRIBUTES_PTP_LEN))
850 dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
851 dma_desc->desc2,
852 TX_NORMAL_DESC2_TTSE_POS,
853 TX_NORMAL_DESC2_TTSE_LEN,
854 1);
855
856 /* Mark it as First Descriptor */
857 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
858 dma_desc->desc3,
859 TX_NORMAL_DESC3_FD_POS,
860 TX_NORMAL_DESC3_FD_LEN,
861 1);
862
863 /* Mark it as a NORMAL descriptor */
864 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
865 dma_desc->desc3,
866 TX_NORMAL_DESC3_CTXT_POS,
867 TX_NORMAL_DESC3_CTXT_LEN,
868 0);
869
870 /* Set OWN bit if not the first descriptor */
871 if (cur_index != start_index)
872 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
873 dma_desc->desc3,
874 TX_NORMAL_DESC3_OWN_POS,
875 TX_NORMAL_DESC3_OWN_LEN,
876 1);
877
878 if (tso) {
879 /* Enable TSO */
880 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
881 dma_desc->desc3,
882 TX_NORMAL_DESC3_TSE_POS,
883 TX_NORMAL_DESC3_TSE_LEN, 1);
884 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
885 dma_desc->desc3,
886 TX_NORMAL_DESC3_TCPPL_POS,
887 TX_NORMAL_DESC3_TCPPL_LEN,
888 pkt_info->tcp_payload_len);
889 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
890 dma_desc->desc3,
891 TX_NORMAL_DESC3_TCPHDRLEN_POS,
892 TX_NORMAL_DESC3_TCPHDRLEN_LEN,
893 pkt_info->tcp_header_len / 4);
894
895 pdata->stats.tx_tso_packets++;
896 } else {
897 /* Enable CRC and Pad Insertion */
898 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
899 dma_desc->desc3,
900 TX_NORMAL_DESC3_CPC_POS,
901 TX_NORMAL_DESC3_CPC_LEN, 0);
902
903 /* Enable HW CSUM */
904 if (csum)
905 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
906 dma_desc->desc3,
907 TX_NORMAL_DESC3_CIC_POS,
908 TX_NORMAL_DESC3_CIC_LEN,
909 0x3);
910
911 /* Set the total length to be transmitted */
912 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
913 dma_desc->desc3,
914 TX_NORMAL_DESC3_FL_POS,
915 TX_NORMAL_DESC3_FL_LEN,
916 pkt_info->length);
917 }
918
919 for (i = cur_index - start_index + 1; i < pkt_info->desc_count; i++) {
920 cur_index++;
921 desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
922 dma_desc = desc_data->dma_desc;
923
924 /* Update buffer address */
925 dma_desc->desc0 =
926 cpu_to_le32(lower_32_bits(desc_data->skb_dma));
927 dma_desc->desc1 =
928 cpu_to_le32(upper_32_bits(desc_data->skb_dma));
929
930 /* Update the buffer length */
931 dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
932 dma_desc->desc2,
933 TX_NORMAL_DESC2_HL_B1L_POS,
934 TX_NORMAL_DESC2_HL_B1L_LEN,
935 desc_data->skb_dma_len);
936
937 /* Set OWN bit */
938 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
939 dma_desc->desc3,
940 TX_NORMAL_DESC3_OWN_POS,
941 TX_NORMAL_DESC3_OWN_LEN, 1);
942
943 /* Mark it as NORMAL descriptor */
944 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
945 dma_desc->desc3,
946 TX_NORMAL_DESC3_CTXT_POS,
947 TX_NORMAL_DESC3_CTXT_LEN, 0);
948
949 /* Enable HW CSUM */
950 if (csum)
951 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
952 dma_desc->desc3,
953 TX_NORMAL_DESC3_CIC_POS,
954 TX_NORMAL_DESC3_CIC_LEN,
955 0x3);
956 }
957
958 /* Set LAST bit for the last descriptor */
959 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
960 dma_desc->desc3,
961 TX_NORMAL_DESC3_LD_POS,
962 TX_NORMAL_DESC3_LD_LEN, 1);
963
964 /* Set IC bit based on Tx coalescing settings */
965 if (tx_set_ic)
966 dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
967 dma_desc->desc2,
968 TX_NORMAL_DESC2_IC_POS,
969 TX_NORMAL_DESC2_IC_LEN, 1);
970
971 /* Save the Tx info to report back during cleanup */
972 desc_data->tx.packets = pkt_info->tx_packets;
973 desc_data->tx.bytes = pkt_info->tx_bytes;
974
975 /* In case the Tx DMA engine is running, make sure everything
976 * is written to the descriptor(s) before setting the OWN bit
977 * for the first descriptor
978 */
979 dma_wmb();
980
981 /* Set OWN bit for the first descriptor */
982 desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
983 dma_desc = desc_data->dma_desc;
984 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
985 dma_desc->desc3,
986 TX_NORMAL_DESC3_OWN_POS,
987 TX_NORMAL_DESC3_OWN_LEN, 1);
988
989 if (netif_msg_tx_queued(pdata))
990 xlgmac_dump_tx_desc(pdata, ring, start_index,
991 pkt_info->desc_count, 1);
992
993 /* Make sure ownership is written to the descriptor */
994 smp_wmb();
995
996 ring->cur = cur_index + 1;
997 if (!pkt_info->skb->xmit_more ||
998 netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
999 channel->queue_index)))
1000 xlgmac_tx_start_xmit(channel, ring);
1001 else
1002 ring->tx.xmit_more = 1;
1003
1004 XLGMAC_PR("%s: descriptors %u to %u written\n",
1005 channel->name, start_index & (ring->dma_desc_count - 1),
1006 (ring->cur - 1) & (ring->dma_desc_count - 1));
1007}
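
The tx_set_ic logic above only requests a Tx completion interrupt when the running coalesce_count crosses a multiple of the tx-frames setting. A toy run of the same test, with tx_frames = 16 and 5 packets per transmit chosen purely for illustration, shows an interrupt being requested roughly every 16 packets:

#include <stdio.h>

int main(void)
{
	unsigned int tx_frames = 16;	/* ethtool -C tx-frames, assumed */
	unsigned int tx_packets = 5;	/* packets in this skb, assumed   */
	unsigned int coalesce_count = 0;
	unsigned int i;

	for (i = 0; i < 8; i++) {
		unsigned int tx_set_ic;

		/* same decision tree as xlgmac_dev_xmit() */
		coalesce_count += tx_packets;
		if (!tx_frames)
			tx_set_ic = 0;
		else if (tx_packets > tx_frames)
			tx_set_ic = 1;
		else if ((coalesce_count % tx_frames) < tx_packets)
			tx_set_ic = 1;
		else
			tx_set_ic = 0;

		printf("xmit %u: coalesce_count=%u IC=%u\n",
		       i, coalesce_count, tx_set_ic);
	}
	return 0;
}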
1008
1009static void xlgmac_get_rx_tstamp(struct xlgmac_pkt_info *pkt_info,
1010 struct xlgmac_dma_desc *dma_desc)
1011{
1012 u32 tsa, tsd;
1013 u64 nsec;
1014
1015 tsa = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
1016 RX_CONTEXT_DESC3_TSA_POS,
1017 RX_CONTEXT_DESC3_TSA_LEN);
1018 tsd = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
1019 RX_CONTEXT_DESC3_TSD_POS,
1020 RX_CONTEXT_DESC3_TSD_LEN);
1021 if (tsa && !tsd) {
1022 nsec = le32_to_cpu(dma_desc->desc1);
1023 nsec <<= 32;
1024 nsec |= le32_to_cpu(dma_desc->desc0);
1025 if (nsec != 0xffffffffffffffffULL) {
1026 pkt_info->rx_tstamp = nsec;
1027 pkt_info->attributes = XLGMAC_SET_REG_BITS(
1028 pkt_info->attributes,
1029 RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS,
1030 RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN,
1031 1);
1032 }
1033 }
1034}
1035
1036static void xlgmac_tx_desc_reset(struct xlgmac_desc_data *desc_data)
1037{
1038 struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;
1039
1040 /* Reset the Tx descriptor
1041 * Set buffer 1 (lo) address to zero
1042 * Set buffer 1 (hi) address to zero
1043	 *   Reset all desc2 control bits (IC, TTSE, B2L & B1L)
1044	 *   Reset all desc3 control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
1045 */
1046 dma_desc->desc0 = 0;
1047 dma_desc->desc1 = 0;
1048 dma_desc->desc2 = 0;
1049 dma_desc->desc3 = 0;
1050
1051 /* Make sure ownership is written to the descriptor */
1052 dma_wmb();
1053}
1054
1055static void xlgmac_tx_desc_init(struct xlgmac_channel *channel)
1056{
1057 struct xlgmac_ring *ring = channel->tx_ring;
1058 struct xlgmac_desc_data *desc_data;
1059 int start_index = ring->cur;
1060 int i;
1061
1062	/* Initialize all descriptors */
1063 for (i = 0; i < ring->dma_desc_count; i++) {
1064 desc_data = XLGMAC_GET_DESC_DATA(ring, i);
1065
1066 /* Initialize Tx descriptor */
1067 xlgmac_tx_desc_reset(desc_data);
1068 }
1069
1070 /* Update the total number of Tx descriptors */
1071 writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_TDRLR));
1072
1073 /* Update the starting address of descriptor ring */
1074 desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
1075 writel(upper_32_bits(desc_data->dma_desc_addr),
1076 XLGMAC_DMA_REG(channel, DMA_CH_TDLR_HI));
1077 writel(lower_32_bits(desc_data->dma_desc_addr),
1078 XLGMAC_DMA_REG(channel, DMA_CH_TDLR_LO));
1079}
1080
1081static void xlgmac_rx_desc_reset(struct xlgmac_pdata *pdata,
1082 struct xlgmac_desc_data *desc_data,
1083 unsigned int index)
1084{
1085 struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;
1086 unsigned int rx_frames = pdata->rx_frames;
1087 unsigned int rx_usecs = pdata->rx_usecs;
1088 dma_addr_t hdr_dma, buf_dma;
1089 unsigned int inte;
1090
1091 if (!rx_usecs && !rx_frames) {
1092 /* No coalescing, interrupt for every descriptor */
1093 inte = 1;
1094 } else {
1095 /* Set interrupt based on Rx frame coalescing setting */
1096 if (rx_frames && !((index + 1) % rx_frames))
1097 inte = 1;
1098 else
1099 inte = 0;
1100 }
1101
1102 /* Reset the Rx descriptor
1103 * Set buffer 1 (lo) address to header dma address (lo)
1104 * Set buffer 1 (hi) address to header dma address (hi)
1105 * Set buffer 2 (lo) address to buffer dma address (lo)
1106 * Set buffer 2 (hi) address to buffer dma address (hi) and
1107 * set control bits OWN and INTE
1108 */
1109 hdr_dma = desc_data->rx.hdr.dma_base + desc_data->rx.hdr.dma_off;
1110 buf_dma = desc_data->rx.buf.dma_base + desc_data->rx.buf.dma_off;
1111 dma_desc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
1112 dma_desc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
1113 dma_desc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
1114 dma_desc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
1115
1116 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
1117 dma_desc->desc3,
1118 RX_NORMAL_DESC3_INTE_POS,
1119 RX_NORMAL_DESC3_INTE_LEN,
1120 inte);
1121
1122 /* Since the Rx DMA engine is likely running, make sure everything
1123 * is written to the descriptor(s) before setting the OWN bit
1124 * for the descriptor
1125 */
1126 dma_wmb();
1127
1128 dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
1129 dma_desc->desc3,
1130 RX_NORMAL_DESC3_OWN_POS,
1131 RX_NORMAL_DESC3_OWN_LEN,
1132 1);
1133
1134 /* Make sure ownership is written to the descriptor */
1135 dma_wmb();
1136}
1137
1138static void xlgmac_rx_desc_init(struct xlgmac_channel *channel)
1139{
1140 struct xlgmac_pdata *pdata = channel->pdata;
1141 struct xlgmac_ring *ring = channel->rx_ring;
1142 unsigned int start_index = ring->cur;
1143 struct xlgmac_desc_data *desc_data;
1144 unsigned int i;
1145
1146 /* Initialize all descriptors */
1147 for (i = 0; i < ring->dma_desc_count; i++) {
1148 desc_data = XLGMAC_GET_DESC_DATA(ring, i);
1149
1150 /* Initialize Rx descriptor */
1151 xlgmac_rx_desc_reset(pdata, desc_data, i);
1152 }
1153
1154 /* Update the total number of Rx descriptors */
1155 writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_RDRLR));
1156
1157 /* Update the starting address of descriptor ring */
1158 desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
1159 writel(upper_32_bits(desc_data->dma_desc_addr),
1160 XLGMAC_DMA_REG(channel, DMA_CH_RDLR_HI));
1161 writel(lower_32_bits(desc_data->dma_desc_addr),
1162 XLGMAC_DMA_REG(channel, DMA_CH_RDLR_LO));
1163
1164 /* Update the Rx Descriptor Tail Pointer */
1165 desc_data = XLGMAC_GET_DESC_DATA(ring, start_index +
1166 ring->dma_desc_count - 1);
1167 writel(lower_32_bits(desc_data->dma_desc_addr),
1168 XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
1169}
1170
1171static int xlgmac_is_context_desc(struct xlgmac_dma_desc *dma_desc)
1172{
1173 /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
1174 return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
1175 TX_NORMAL_DESC3_CTXT_POS,
1176 TX_NORMAL_DESC3_CTXT_LEN);
1177}
1178
1179static int xlgmac_is_last_desc(struct xlgmac_dma_desc *dma_desc)
1180{
1181 /* Rx and Tx share LD bit, so check TDES3.LD bit */
1182 return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
1183 TX_NORMAL_DESC3_LD_POS,
1184 TX_NORMAL_DESC3_LD_LEN);
1185}
1186
1187static int xlgmac_disable_tx_flow_control(struct xlgmac_pdata *pdata)
1188{
1189 unsigned int max_q_count, q_count;
1190 unsigned int reg, regval;
1191 unsigned int i;
1192
1193 /* Clear MTL flow control */
1194 for (i = 0; i < pdata->rx_q_count; i++) {
1195 regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1196 regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS,
1197 MTL_Q_RQOMR_EHFC_LEN, 0);
1198 writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1199 }
1200
1201 /* Clear MAC flow control */
1202 max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES;
1203 q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
1204 reg = MAC_Q0TFCR;
1205 for (i = 0; i < q_count; i++) {
1206 regval = readl(pdata->mac_regs + reg);
1207 regval = XLGMAC_SET_REG_BITS(regval,
1208 MAC_Q0TFCR_TFE_POS,
1209 MAC_Q0TFCR_TFE_LEN,
1210 0);
1211 writel(regval, pdata->mac_regs + reg);
1212
1213 reg += MAC_QTFCR_INC;
1214 }
1215
1216 return 0;
1217}
1218
1219static int xlgmac_enable_tx_flow_control(struct xlgmac_pdata *pdata)
1220{
1221 unsigned int max_q_count, q_count;
1222 unsigned int reg, regval;
1223 unsigned int i;
1224
1225 /* Set MTL flow control */
1226 for (i = 0; i < pdata->rx_q_count; i++) {
1227 regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1228 regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS,
1229 MTL_Q_RQOMR_EHFC_LEN, 1);
1230 writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1231 }
1232
1233 /* Set MAC flow control */
1234 max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES;
1235 q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
1236 reg = MAC_Q0TFCR;
1237 for (i = 0; i < q_count; i++) {
1238 regval = readl(pdata->mac_regs + reg);
1239
1240 /* Enable transmit flow control */
1241 regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_TFE_POS,
1242 MAC_Q0TFCR_TFE_LEN, 1);
1243 /* Set pause time */
1244 regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_PT_POS,
1245 MAC_Q0TFCR_PT_LEN, 0xffff);
1246
1247 writel(regval, pdata->mac_regs + reg);
1248
1249 reg += MAC_QTFCR_INC;
1250 }
1251
1252 return 0;
1253}
1254
1255static int xlgmac_disable_rx_flow_control(struct xlgmac_pdata *pdata)
1256{
1257 u32 regval;
1258
1259 regval = readl(pdata->mac_regs + MAC_RFCR);
1260 regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS,
1261 MAC_RFCR_RFE_LEN, 0);
1262 writel(regval, pdata->mac_regs + MAC_RFCR);
1263
1264 return 0;
1265}
1266
1267static int xlgmac_enable_rx_flow_control(struct xlgmac_pdata *pdata)
1268{
1269 u32 regval;
1270
1271 regval = readl(pdata->mac_regs + MAC_RFCR);
1272 regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS,
1273 MAC_RFCR_RFE_LEN, 1);
1274 writel(regval, pdata->mac_regs + MAC_RFCR);
1275
1276 return 0;
1277}
1278
1279static int xlgmac_config_tx_flow_control(struct xlgmac_pdata *pdata)
1280{
1281 if (pdata->tx_pause)
1282 xlgmac_enable_tx_flow_control(pdata);
1283 else
1284 xlgmac_disable_tx_flow_control(pdata);
1285
1286 return 0;
1287}
1288
1289static int xlgmac_config_rx_flow_control(struct xlgmac_pdata *pdata)
1290{
1291 if (pdata->rx_pause)
1292 xlgmac_enable_rx_flow_control(pdata);
1293 else
1294 xlgmac_disable_rx_flow_control(pdata);
1295
1296 return 0;
1297}
1298
1299static int xlgmac_config_rx_coalesce(struct xlgmac_pdata *pdata)
1300{
1301 struct xlgmac_channel *channel;
1302 unsigned int i;
1303 u32 regval;
1304
1305 channel = pdata->channel_head;
1306 for (i = 0; i < pdata->channel_count; i++, channel++) {
1307 if (!channel->rx_ring)
1308 break;
1309
1310 regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
1311 regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RIWT_RWT_POS,
1312 DMA_CH_RIWT_RWT_LEN,
1313 pdata->rx_riwt);
1314 writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
1315 }
1316
1317 return 0;
1318}
1319
1320static void xlgmac_config_flow_control(struct xlgmac_pdata *pdata)
1321{
1322 xlgmac_config_tx_flow_control(pdata);
1323 xlgmac_config_rx_flow_control(pdata);
1324}
1325
1326static void xlgmac_config_rx_fep_enable(struct xlgmac_pdata *pdata)
1327{
1328 unsigned int i;
1329 u32 regval;
1330
1331 for (i = 0; i < pdata->rx_q_count; i++) {
1332 regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1333 regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FEP_POS,
1334 MTL_Q_RQOMR_FEP_LEN, 1);
1335 writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1336 }
1337}
1338
1339static void xlgmac_config_rx_fup_enable(struct xlgmac_pdata *pdata)
1340{
1341 unsigned int i;
1342 u32 regval;
1343
1344 for (i = 0; i < pdata->rx_q_count; i++) {
1345 regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1346 regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FUP_POS,
1347 MTL_Q_RQOMR_FUP_LEN, 1);
1348 writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1349 }
1350}
1351
1352static int xlgmac_config_tx_coalesce(struct xlgmac_pdata *pdata)
1353{
1354 return 0;
1355}
1356
1357static void xlgmac_config_rx_buffer_size(struct xlgmac_pdata *pdata)
1358{
1359 struct xlgmac_channel *channel;
1360 unsigned int i;
1361 u32 regval;
1362
1363 channel = pdata->channel_head;
1364 for (i = 0; i < pdata->channel_count; i++, channel++) {
1365 if (!channel->rx_ring)
1366 break;
1367
1368 regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
1369 regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_RBSZ_POS,
1370 DMA_CH_RCR_RBSZ_LEN,
1371 pdata->rx_buf_size);
1372 writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
1373 }
1374}
1375
1376static void xlgmac_config_tso_mode(struct xlgmac_pdata *pdata)
1377{
1378 struct xlgmac_channel *channel;
1379 unsigned int i;
1380 u32 regval;
1381
1382 channel = pdata->channel_head;
1383 for (i = 0; i < pdata->channel_count; i++, channel++) {
1384 if (!channel->tx_ring)
1385 break;
1386
1387 if (pdata->hw_feat.tso) {
1388 regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
1389 regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_TSE_POS,
1390 DMA_CH_TCR_TSE_LEN, 1);
1391 writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
1392 }
1393 }
1394}
1395
1396static void xlgmac_config_sph_mode(struct xlgmac_pdata *pdata)
1397{
1398 struct xlgmac_channel *channel;
1399 unsigned int i;
1400 u32 regval;
1401
1402 channel = pdata->channel_head;
1403 for (i = 0; i < pdata->channel_count; i++, channel++) {
1404 if (!channel->rx_ring)
1405 break;
1406
1407 regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
1408 regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_SPH_POS,
1409 DMA_CH_CR_SPH_LEN, 1);
1410 writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
1411 }
1412
1413 regval = readl(pdata->mac_regs + MAC_RCR);
1414 regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_HDSMS_POS,
1415 MAC_RCR_HDSMS_LEN,
1416 XLGMAC_SPH_HDSMS_SIZE);
1417 writel(regval, pdata->mac_regs + MAC_RCR);
1418}
1419
1420static unsigned int xlgmac_usec_to_riwt(struct xlgmac_pdata *pdata,
1421 unsigned int usec)
1422{
1423 unsigned long rate;
1424 unsigned int ret;
1425
1426 rate = pdata->sysclk_rate;
1427
1428 /* Convert the input usec value to the watchdog timer value. Each
1429 * watchdog timer value is equivalent to 256 clock cycles.
1430 * Calculate the required value as:
1431 * ( usec * ( system_clock_hz / 10^6 ) ) / 256
1432 */
1433 ret = (usec * (rate / 1000000)) / 256;
1434
1435 return ret;
1436}
1437
1438static unsigned int xlgmac_riwt_to_usec(struct xlgmac_pdata *pdata,
1439 unsigned int riwt)
1440{
1441 unsigned long rate;
1442 unsigned int ret;
1443
1444 rate = pdata->sysclk_rate;
1445
1446 /* Convert the input watchdog timer value to the usec value. Each
1447 * watchdog timer value is equivalent to 256 clock cycles.
1448 * Calculate the required value as:
1449 * ( riwt * 256 ) / ( system_clock_hz / 10^6 )
1450 */
1451 ret = (riwt * 256) / (rate / 1000000);
1452
1453 return ret;
1454}
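/* Worked example for the two conversions above, assuming a hypothetical
 * 125 MHz system clock (pdata->sysclk_rate = 125000000). The integer
 * divisions truncate, so the round trip is not exact:
 *
 *   usec_to_riwt(30) = (30 * (125000000 / 1000000)) / 256 = 3750 / 256 = 14
 *   riwt_to_usec(14) = (14 * 256) / (125000000 / 1000000) = 3584 / 125  = 28
 *
 * i.e. a requested 30 usec Rx interrupt watchdog is programmed as 14 and
 * reads back as 28 usec.
 */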
1455
1456static int xlgmac_config_rx_threshold(struct xlgmac_pdata *pdata,
1457 unsigned int val)
1458{
1459 unsigned int i;
1460 u32 regval;
1461
1462 for (i = 0; i < pdata->rx_q_count; i++) {
1463 regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1464 regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RTC_POS,
1465 MTL_Q_RQOMR_RTC_LEN, val);
1466 writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1467 }
1468
1469 return 0;
1470}
1471
1472static void xlgmac_config_mtl_mode(struct xlgmac_pdata *pdata)
1473{
1474 unsigned int i;
1475 u32 regval;
1476
1477 /* Set Tx to weighted round robin scheduling algorithm */
1478 regval = readl(pdata->mac_regs + MTL_OMR);
1479 regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_ETSALG_POS,
1480 MTL_OMR_ETSALG_LEN, MTL_ETSALG_WRR);
1481 writel(regval, pdata->mac_regs + MTL_OMR);
1482
1483 /* Set Tx traffic classes to use WRR algorithm with equal weights */
1484 for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
1485 regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));
1486 regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_ETSCR_TSA_POS,
1487 MTL_TC_ETSCR_TSA_LEN, MTL_TSA_ETS);
1488 writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));
1489
1490 regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
1491 regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_QWR_QW_POS,
1492 MTL_TC_QWR_QW_LEN, 1);
1493 writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
1494 }
1495
1496 /* Set Rx to strict priority algorithm */
1497 regval = readl(pdata->mac_regs + MTL_OMR);
1498 regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_RAA_POS,
1499 MTL_OMR_RAA_LEN, MTL_RAA_SP);
1500 writel(regval, pdata->mac_regs + MTL_OMR);
1501}
1502
1503static void xlgmac_config_queue_mapping(struct xlgmac_pdata *pdata)
1504{
1505 unsigned int ppq, ppq_extra, prio, prio_queues;
1506 unsigned int qptc, qptc_extra, queue;
1507 unsigned int reg, regval;
1508 unsigned int mask;
1509 unsigned int i, j;
1510
1511 /* Map the MTL Tx Queues to Traffic Classes
1512 * Note: Tx Queues >= Traffic Classes
1513 */
1514 qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
1515 qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
1516
1517 for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
1518 for (j = 0; j < qptc; j++) {
1519 netif_dbg(pdata, drv, pdata->netdev,
1520 "TXq%u mapped to TC%u\n", queue, i);
1521 regval = readl(XLGMAC_MTL_REG(pdata, queue,
1522 MTL_Q_TQOMR));
1523 regval = XLGMAC_SET_REG_BITS(regval,
1524 MTL_Q_TQOMR_Q2TCMAP_POS,
1525 MTL_Q_TQOMR_Q2TCMAP_LEN,
1526 i);
1527 writel(regval, XLGMAC_MTL_REG(pdata, queue,
1528 MTL_Q_TQOMR));
1529 queue++;
1530 }
1531
1532 if (i < qptc_extra) {
1533 netif_dbg(pdata, drv, pdata->netdev,
1534 "TXq%u mapped to TC%u\n", queue, i);
1535 regval = readl(XLGMAC_MTL_REG(pdata, queue,
1536 MTL_Q_TQOMR));
1537 regval = XLGMAC_SET_REG_BITS(regval,
1538 MTL_Q_TQOMR_Q2TCMAP_POS,
1539 MTL_Q_TQOMR_Q2TCMAP_LEN,
1540 i);
1541 writel(regval, XLGMAC_MTL_REG(pdata, queue,
1542 MTL_Q_TQOMR));
1543 queue++;
1544 }
1545 }
1546
1547 /* Map the 8 VLAN priority values to available MTL Rx queues */
1548 prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
1549 pdata->rx_q_count);
1550 ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
1551 ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
1552
1553 reg = MAC_RQC2R;
1554 regval = 0;
1555 for (i = 0, prio = 0; i < prio_queues;) {
1556 mask = 0;
1557 for (j = 0; j < ppq; j++) {
1558 netif_dbg(pdata, drv, pdata->netdev,
1559 "PRIO%u mapped to RXq%u\n", prio, i);
1560 mask |= (1 << prio);
1561 prio++;
1562 }
1563
1564 if (i < ppq_extra) {
1565 netif_dbg(pdata, drv, pdata->netdev,
1566 "PRIO%u mapped to RXq%u\n", prio, i);
1567 mask |= (1 << prio);
1568 prio++;
1569 }
1570
1571 regval |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));
1572
1573 if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
1574 continue;
1575
1576 writel(regval, pdata->mac_regs + reg);
1577 reg += MAC_RQC2_INC;
1578 regval = 0;
1579 }
1580
1581 /* Configure one to one, MTL Rx queue to DMA Rx channel mapping
1582 * i.e. Q0 <--> CH0, Q1 <--> CH1 ... Q11 <--> CH11
1583 */
1584 reg = MTL_RQDCM0R;
1585 regval = readl(pdata->mac_regs + reg);
1586 regval |= (MTL_RQDCM0R_Q0MDMACH | MTL_RQDCM0R_Q1MDMACH |
1587 MTL_RQDCM0R_Q2MDMACH | MTL_RQDCM0R_Q3MDMACH);
1588 writel(regval, pdata->mac_regs + reg);
1589
1590 reg += MTL_RQDCM_INC;
1591 regval = readl(pdata->mac_regs + reg);
1592 regval |= (MTL_RQDCM1R_Q4MDMACH | MTL_RQDCM1R_Q5MDMACH |
1593 MTL_RQDCM1R_Q6MDMACH | MTL_RQDCM1R_Q7MDMACH);
1594 writel(regval, pdata->mac_regs + reg);
1595
1596 reg += MTL_RQDCM_INC;
1597 regval = readl(pdata->mac_regs + reg);
1598 regval |= (MTL_RQDCM2R_Q8MDMACH | MTL_RQDCM2R_Q9MDMACH |
1599 MTL_RQDCM2R_Q10MDMACH | MTL_RQDCM2R_Q11MDMACH);
1600 writel(regval, pdata->mac_regs + reg);
1601}
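/* Illustration of the priority spreading above with a hypothetical 3-queue
 * Rx configuration: prio_queues = 3, ppq = 8 / 3 = 2, ppq_extra = 8 % 3 = 2,
 * so RXq0 carries priorities 0-2, RXq1 carries 3-5 and RXq2 carries 6-7.
 * Each queue's priority mask is packed into MAC_RQC2R one byte at a time by
 * the (i % MAC_RQC2_Q_PER_REG) << 3 shift.
 */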
1602
1603static unsigned int xlgmac_calculate_per_queue_fifo(
1604 unsigned int fifo_size,
1605 unsigned int queue_count)
1606{
1607 unsigned int q_fifo_size;
1608 unsigned int p_fifo;
1609
1610 /* Calculate the configured fifo size */
1611 q_fifo_size = 1 << (fifo_size + 7);
1612
1613 /* The configured value may not be the actual amount of fifo RAM */
1614 q_fifo_size = min_t(unsigned int, XLGMAC_MAX_FIFO, q_fifo_size);
1615
1616 q_fifo_size = q_fifo_size / queue_count;
1617
1618 /* Each increment in the queue fifo size represents 256 bytes of
1619 * fifo, with 0 representing 256 bytes. Distribute the fifo equally
1620 * between the queues.
1621 */
1622 p_fifo = q_fifo_size / 256;
1623 if (p_fifo)
1624 p_fifo--;
1625
1626 return p_fifo;
1627}
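/* Example of the fifo carve-up above with illustrative numbers (assuming no
 * capping by XLGMAC_MAX_FIFO): a hw_feat fifo_size field of 9 encodes
 * 1 << (9 + 7) = 65536 bytes; shared by eight queues that is 8192 bytes each,
 * so the returned value is 8192 / 256 - 1 = 31, which the hardware interprets
 * as (31 + 1) * 256 = 8192 bytes per queue - the same formula the
 * netif_info() messages below print.
 */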
1628
1629static void xlgmac_config_tx_fifo_size(struct xlgmac_pdata *pdata)
1630{
1631 unsigned int fifo_size;
1632 unsigned int i;
1633 u32 regval;
1634
1635 fifo_size = xlgmac_calculate_per_queue_fifo(
1636 pdata->hw_feat.tx_fifo_size,
1637 pdata->tx_q_count);
1638
1639 for (i = 0; i < pdata->tx_q_count; i++) {
1640 regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
1641 regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TQS_POS,
1642 MTL_Q_TQOMR_TQS_LEN, fifo_size);
1643 writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
1644 }
1645
1646 netif_info(pdata, drv, pdata->netdev,
1647 "%d Tx hardware queues, %d byte fifo per queue\n",
1648 pdata->tx_q_count, ((fifo_size + 1) * 256));
1649}
1650
1651static void xlgmac_config_rx_fifo_size(struct xlgmac_pdata *pdata)
1652{
1653 unsigned int fifo_size;
1654 unsigned int i;
1655 u32 regval;
1656
1657 fifo_size = xlgmac_calculate_per_queue_fifo(
1658 pdata->hw_feat.rx_fifo_size,
1659 pdata->rx_q_count);
1660
1661 for (i = 0; i < pdata->rx_q_count; i++) {
1662 regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1663 regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RQS_POS,
1664 MTL_Q_RQOMR_RQS_LEN, fifo_size);
1665 writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1666 }
1667
1668 netif_info(pdata, drv, pdata->netdev,
1669 "%d Rx hardware queues, %d byte fifo per queue\n",
1670 pdata->rx_q_count, ((fifo_size + 1) * 256));
1671}
1672
1673static void xlgmac_config_flow_control_threshold(struct xlgmac_pdata *pdata)
1674{
1675 unsigned int i;
1676 u32 regval;
1677
1678 for (i = 0; i < pdata->rx_q_count; i++) {
1679 regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
1680 /* Activate flow control when less than 4k left in fifo */
1681 regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFA_POS,
1682 MTL_Q_RQFCR_RFA_LEN, 2);
1683 /* De-activate flow control when more than 6k left in fifo */
1684 regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFD_POS,
1685 MTL_Q_RQFCR_RFD_LEN, 4);
1686 writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
1687 }
1688}
1689
1690static int xlgmac_config_tx_threshold(struct xlgmac_pdata *pdata,
1691 unsigned int val)
1692{
1693 unsigned int i;
1694 u32 regval;
1695
1696 for (i = 0; i < pdata->tx_q_count; i++) {
1697 regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
1698 regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TTC_POS,
1699 MTL_Q_TQOMR_TTC_LEN, val);
1700 writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
1701 }
1702
1703 return 0;
1704}
1705
1706static int xlgmac_config_rsf_mode(struct xlgmac_pdata *pdata,
1707 unsigned int val)
1708{
1709 unsigned int i;
1710 u32 regval;
1711
1712 for (i = 0; i < pdata->rx_q_count; i++) {
1713 regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1714 regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RSF_POS,
1715 MTL_Q_RQOMR_RSF_LEN, val);
1716 writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1717 }
1718
1719 return 0;
1720}
1721
1722static int xlgmac_config_tsf_mode(struct xlgmac_pdata *pdata,
1723 unsigned int val)
1724{
1725 unsigned int i;
1726 u32 regval;
1727
1728 for (i = 0; i < pdata->tx_q_count; i++) {
1729 regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
1730 regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TSF_POS,
1731 MTL_Q_TQOMR_TSF_LEN, val);
1732 writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
1733 }
1734
1735 return 0;
1736}
1737
1738static int xlgmac_config_osp_mode(struct xlgmac_pdata *pdata)
1739{
1740 struct xlgmac_channel *channel;
1741 unsigned int i;
1742 u32 regval;
1743
1744 channel = pdata->channel_head;
1745 for (i = 0; i < pdata->channel_count; i++, channel++) {
1746 if (!channel->tx_ring)
1747 break;
1748
1749 regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
1750 regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_OSP_POS,
1751 DMA_CH_TCR_OSP_LEN,
1752 pdata->tx_osp_mode);
1753 writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
1754 }
1755
1756 return 0;
1757}
1758
1759static int xlgmac_config_pblx8(struct xlgmac_pdata *pdata)
1760{
1761 struct xlgmac_channel *channel;
1762 unsigned int i;
1763 u32 regval;
1764
1765 channel = pdata->channel_head;
1766 for (i = 0; i < pdata->channel_count; i++, channel++) {
1767 regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
1768 regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_PBLX8_POS,
1769 DMA_CH_CR_PBLX8_LEN,
1770 pdata->pblx8);
1771 writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
1772 }
1773
1774 return 0;
1775}
1776
1777static int xlgmac_get_tx_pbl_val(struct xlgmac_pdata *pdata)
1778{
1779 u32 regval;
1780
1781 regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_TCR));
1782 regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_TCR_PBL_POS,
1783 DMA_CH_TCR_PBL_LEN);
1784 return regval;
1785}
1786
1787static int xlgmac_config_tx_pbl_val(struct xlgmac_pdata *pdata)
1788{
1789 struct xlgmac_channel *channel;
1790 unsigned int i;
1791 u32 regval;
1792
1793 channel = pdata->channel_head;
1794 for (i = 0; i < pdata->channel_count; i++, channel++) {
1795 if (!channel->tx_ring)
1796 break;
1797
1798 regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
1799 regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_PBL_POS,
1800 DMA_CH_TCR_PBL_LEN,
1801 pdata->tx_pbl);
1802 writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
1803 }
1804
1805 return 0;
1806}
1807
1808static int xlgmac_get_rx_pbl_val(struct xlgmac_pdata *pdata)
1809{
1810 u32 regval;
1811
1812 regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_RCR));
1813 regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_RCR_PBL_POS,
1814 DMA_CH_RCR_PBL_LEN);
1815 return regval;
1816}
1817
1818static int xlgmac_config_rx_pbl_val(struct xlgmac_pdata *pdata)
1819{
1820 struct xlgmac_channel *channel;
1821 unsigned int i;
1822 u32 regval;
1823
1824 channel = pdata->channel_head;
1825 for (i = 0; i < pdata->channel_count; i++, channel++) {
1826 if (!channel->rx_ring)
1827 break;
1828
1829 regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
1830 regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_PBL_POS,
1831 DMA_CH_RCR_PBL_LEN,
1832 pdata->rx_pbl);
1833 writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
1834 }
1835
1836 return 0;
1837}
1838
1839static u64 xlgmac_mmc_read(struct xlgmac_pdata *pdata, unsigned int reg_lo)
1840{
1841 bool read_hi;
1842 u64 val;
1843
1844 switch (reg_lo) {
1845 /* These registers are always 64 bit */
1846 case MMC_TXOCTETCOUNT_GB_LO:
1847 case MMC_TXOCTETCOUNT_G_LO:
1848 case MMC_RXOCTETCOUNT_GB_LO:
1849 case MMC_RXOCTETCOUNT_G_LO:
1850 read_hi = true;
1851 break;
1852
1853 default:
1854 read_hi = false;
1855 }
1856
1857 val = (u64)readl(pdata->mac_regs + reg_lo);
1858
1859 if (read_hi)
1860 val |= ((u64)readl(pdata->mac_regs + reg_lo + 4) << 32);
1861
1862 return val;
1863}
1864
1865static void xlgmac_tx_mmc_int(struct xlgmac_pdata *pdata)
1866{
1867 unsigned int mmc_isr = readl(pdata->mac_regs + MMC_TISR);
1868 struct xlgmac_stats *stats = &pdata->stats;
1869
1870 if (XLGMAC_GET_REG_BITS(mmc_isr,
1871 MMC_TISR_TXOCTETCOUNT_GB_POS,
1872 MMC_TISR_TXOCTETCOUNT_GB_LEN))
1873 stats->txoctetcount_gb +=
1874 xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
1875
1876 if (XLGMAC_GET_REG_BITS(mmc_isr,
1877 MMC_TISR_TXFRAMECOUNT_GB_POS,
1878 MMC_TISR_TXFRAMECOUNT_GB_LEN))
1879 stats->txframecount_gb +=
1880 xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
1881
1882 if (XLGMAC_GET_REG_BITS(mmc_isr,
1883 MMC_TISR_TXBROADCASTFRAMES_G_POS,
1884 MMC_TISR_TXBROADCASTFRAMES_G_LEN))
1885 stats->txbroadcastframes_g +=
1886 xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
1887
1888 if (XLGMAC_GET_REG_BITS(mmc_isr,
1889 MMC_TISR_TXMULTICASTFRAMES_G_POS,
1890 MMC_TISR_TXMULTICASTFRAMES_G_LEN))
1891 stats->txmulticastframes_g +=
1892 xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
1893
1894 if (XLGMAC_GET_REG_BITS(mmc_isr,
1895 MMC_TISR_TX64OCTETS_GB_POS,
1896 MMC_TISR_TX64OCTETS_GB_LEN))
1897 stats->tx64octets_gb +=
1898 xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
1899
1900 if (XLGMAC_GET_REG_BITS(mmc_isr,
1901 MMC_TISR_TX65TO127OCTETS_GB_POS,
1902 MMC_TISR_TX65TO127OCTETS_GB_LEN))
1903 stats->tx65to127octets_gb +=
1904 xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
1905
1906 if (XLGMAC_GET_REG_BITS(mmc_isr,
1907 MMC_TISR_TX128TO255OCTETS_GB_POS,
1908 MMC_TISR_TX128TO255OCTETS_GB_LEN))
1909 stats->tx128to255octets_gb +=
1910 xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
1911
1912 if (XLGMAC_GET_REG_BITS(mmc_isr,
1913 MMC_TISR_TX256TO511OCTETS_GB_POS,
1914 MMC_TISR_TX256TO511OCTETS_GB_LEN))
1915 stats->tx256to511octets_gb +=
1916 xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
1917
1918 if (XLGMAC_GET_REG_BITS(mmc_isr,
1919 MMC_TISR_TX512TO1023OCTETS_GB_POS,
1920 MMC_TISR_TX512TO1023OCTETS_GB_LEN))
1921 stats->tx512to1023octets_gb +=
1922 xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
1923
1924 if (XLGMAC_GET_REG_BITS(mmc_isr,
1925 MMC_TISR_TX1024TOMAXOCTETS_GB_POS,
1926 MMC_TISR_TX1024TOMAXOCTETS_GB_LEN))
1927 stats->tx1024tomaxoctets_gb +=
1928 xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
1929
1930 if (XLGMAC_GET_REG_BITS(mmc_isr,
1931 MMC_TISR_TXUNICASTFRAMES_GB_POS,
1932 MMC_TISR_TXUNICASTFRAMES_GB_LEN))
1933 stats->txunicastframes_gb +=
1934 xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
1935
1936 if (XLGMAC_GET_REG_BITS(mmc_isr,
1937 MMC_TISR_TXMULTICASTFRAMES_GB_POS,
1938 MMC_TISR_TXMULTICASTFRAMES_GB_LEN))
1939 stats->txmulticastframes_gb +=
1940 xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
1941
1942 if (XLGMAC_GET_REG_BITS(mmc_isr,
1943 MMC_TISR_TXBROADCASTFRAMES_GB_POS,
1944 MMC_TISR_TXBROADCASTFRAMES_GB_LEN))
1945 stats->txbroadcastframes_g +=
1946 xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
1947
1948 if (XLGMAC_GET_REG_BITS(mmc_isr,
1949 MMC_TISR_TXUNDERFLOWERROR_POS,
1950 MMC_TISR_TXUNDERFLOWERROR_LEN))
1951 stats->txunderflowerror +=
1952 xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
1953
1954 if (XLGMAC_GET_REG_BITS(mmc_isr,
1955 MMC_TISR_TXOCTETCOUNT_G_POS,
1956 MMC_TISR_TXOCTETCOUNT_G_LEN))
1957 stats->txoctetcount_g +=
1958 xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
1959
1960 if (XLGMAC_GET_REG_BITS(mmc_isr,
1961 MMC_TISR_TXFRAMECOUNT_G_POS,
1962 MMC_TISR_TXFRAMECOUNT_G_LEN))
1963 stats->txframecount_g +=
1964 xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
1965
1966 if (XLGMAC_GET_REG_BITS(mmc_isr,
1967 MMC_TISR_TXPAUSEFRAMES_POS,
1968 MMC_TISR_TXPAUSEFRAMES_LEN))
1969 stats->txpauseframes +=
1970 xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
1971
1972 if (XLGMAC_GET_REG_BITS(mmc_isr,
1973 MMC_TISR_TXVLANFRAMES_G_POS,
1974 MMC_TISR_TXVLANFRAMES_G_LEN))
1975 stats->txvlanframes_g +=
1976 xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
1977}
1978
1979static void xlgmac_rx_mmc_int(struct xlgmac_pdata *pdata)
1980{
1981 unsigned int mmc_isr = readl(pdata->mac_regs + MMC_RISR);
1982 struct xlgmac_stats *stats = &pdata->stats;
1983
1984 if (XLGMAC_GET_REG_BITS(mmc_isr,
1985 MMC_RISR_RXFRAMECOUNT_GB_POS,
1986 MMC_RISR_RXFRAMECOUNT_GB_LEN))
1987 stats->rxframecount_gb +=
1988 xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
1989
1990 if (XLGMAC_GET_REG_BITS(mmc_isr,
1991 MMC_RISR_RXOCTETCOUNT_GB_POS,
1992 MMC_RISR_RXOCTETCOUNT_GB_LEN))
1993 stats->rxoctetcount_gb +=
1994 xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
1995
1996 if (XLGMAC_GET_REG_BITS(mmc_isr,
1997 MMC_RISR_RXOCTETCOUNT_G_POS,
1998 MMC_RISR_RXOCTETCOUNT_G_LEN))
1999 stats->rxoctetcount_g +=
2000 xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
2001
2002 if (XLGMAC_GET_REG_BITS(mmc_isr,
2003 MMC_RISR_RXBROADCASTFRAMES_G_POS,
2004 MMC_RISR_RXBROADCASTFRAMES_G_LEN))
2005 stats->rxbroadcastframes_g +=
2006 xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
2007
2008 if (XLGMAC_GET_REG_BITS(mmc_isr,
2009 MMC_RISR_RXMULTICASTFRAMES_G_POS,
2010 MMC_RISR_RXMULTICASTFRAMES_G_LEN))
2011 stats->rxmulticastframes_g +=
2012 xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
2013
2014 if (XLGMAC_GET_REG_BITS(mmc_isr,
2015 MMC_RISR_RXCRCERROR_POS,
2016 MMC_RISR_RXCRCERROR_LEN))
2017 stats->rxcrcerror +=
2018 xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO);
2019
2020 if (XLGMAC_GET_REG_BITS(mmc_isr,
2021 MMC_RISR_RXRUNTERROR_POS,
2022 MMC_RISR_RXRUNTERROR_LEN))
2023 stats->rxrunterror +=
2024 xlgmac_mmc_read(pdata, MMC_RXRUNTERROR);
2025
2026 if (XLGMAC_GET_REG_BITS(mmc_isr,
2027 MMC_RISR_RXJABBERERROR_POS,
2028 MMC_RISR_RXJABBERERROR_LEN))
2029 stats->rxjabbererror +=
2030 xlgmac_mmc_read(pdata, MMC_RXJABBERERROR);
2031
2032 if (XLGMAC_GET_REG_BITS(mmc_isr,
2033 MMC_RISR_RXUNDERSIZE_G_POS,
2034 MMC_RISR_RXUNDERSIZE_G_LEN))
2035 stats->rxundersize_g +=
2036 xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G);
2037
2038 if (XLGMAC_GET_REG_BITS(mmc_isr,
2039 MMC_RISR_RXOVERSIZE_G_POS,
2040 MMC_RISR_RXOVERSIZE_G_LEN))
2041 stats->rxoversize_g +=
2042 xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G);
2043
2044 if (XLGMAC_GET_REG_BITS(mmc_isr,
2045 MMC_RISR_RX64OCTETS_GB_POS,
2046 MMC_RISR_RX64OCTETS_GB_LEN))
2047 stats->rx64octets_gb +=
2048 xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
2049
2050 if (XLGMAC_GET_REG_BITS(mmc_isr,
2051 MMC_RISR_RX65TO127OCTETS_GB_POS,
2052 MMC_RISR_RX65TO127OCTETS_GB_LEN))
2053 stats->rx65to127octets_gb +=
2054 xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
2055
2056 if (XLGMAC_GET_REG_BITS(mmc_isr,
2057 MMC_RISR_RX128TO255OCTETS_GB_POS,
2058 MMC_RISR_RX128TO255OCTETS_GB_LEN))
2059 stats->rx128to255octets_gb +=
2060 xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
2061
2062 if (XLGMAC_GET_REG_BITS(mmc_isr,
2063 MMC_RISR_RX256TO511OCTETS_GB_POS,
2064 MMC_RISR_RX256TO511OCTETS_GB_LEN))
2065 stats->rx256to511octets_gb +=
2066 xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
2067
2068 if (XLGMAC_GET_REG_BITS(mmc_isr,
2069 MMC_RISR_RX512TO1023OCTETS_GB_POS,
2070 MMC_RISR_RX512TO1023OCTETS_GB_LEN))
2071 stats->rx512to1023octets_gb +=
2072 xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
2073
2074 if (XLGMAC_GET_REG_BITS(mmc_isr,
2075 MMC_RISR_RX1024TOMAXOCTETS_GB_POS,
2076 MMC_RISR_RX1024TOMAXOCTETS_GB_LEN))
2077 stats->rx1024tomaxoctets_gb +=
2078 xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
2079
2080 if (XLGMAC_GET_REG_BITS(mmc_isr,
2081 MMC_RISR_RXUNICASTFRAMES_G_POS,
2082 MMC_RISR_RXUNICASTFRAMES_G_LEN))
2083 stats->rxunicastframes_g +=
2084 xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
2085
2086 if (XLGMAC_GET_REG_BITS(mmc_isr,
2087 MMC_RISR_RXLENGTHERROR_POS,
2088 MMC_RISR_RXLENGTHERROR_LEN))
2089 stats->rxlengtherror +=
2090 xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
2091
2092 if (XLGMAC_GET_REG_BITS(mmc_isr,
2093 MMC_RISR_RXOUTOFRANGETYPE_POS,
2094 MMC_RISR_RXOUTOFRANGETYPE_LEN))
2095 stats->rxoutofrangetype +=
2096 xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
2097
2098 if (XLGMAC_GET_REG_BITS(mmc_isr,
2099 MMC_RISR_RXPAUSEFRAMES_POS,
2100 MMC_RISR_RXPAUSEFRAMES_LEN))
2101 stats->rxpauseframes +=
2102 xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
2103
2104 if (XLGMAC_GET_REG_BITS(mmc_isr,
2105 MMC_RISR_RXFIFOOVERFLOW_POS,
2106 MMC_RISR_RXFIFOOVERFLOW_LEN))
2107 stats->rxfifooverflow +=
2108 xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
2109
2110 if (XLGMAC_GET_REG_BITS(mmc_isr,
2111 MMC_RISR_RXVLANFRAMES_GB_POS,
2112 MMC_RISR_RXVLANFRAMES_GB_LEN))
2113 stats->rxvlanframes_gb +=
2114 xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
2115
2116 if (XLGMAC_GET_REG_BITS(mmc_isr,
2117 MMC_RISR_RXWATCHDOGERROR_POS,
2118 MMC_RISR_RXWATCHDOGERROR_LEN))
2119 stats->rxwatchdogerror +=
2120 xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR);
2121}
2122
2123static void xlgmac_read_mmc_stats(struct xlgmac_pdata *pdata)
2124{
2125 struct xlgmac_stats *stats = &pdata->stats;
2126 u32 regval;
2127
2128 /* Freeze counters */
2129 regval = readl(pdata->mac_regs + MMC_CR);
2130 regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS,
2131 MMC_CR_MCF_LEN, 1);
2132 writel(regval, pdata->mac_regs + MMC_CR);
2133
2134 stats->txoctetcount_gb +=
2135 xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
2136
2137 stats->txframecount_gb +=
2138 xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
2139
2140 stats->txbroadcastframes_g +=
2141 xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
2142
2143 stats->txmulticastframes_g +=
2144 xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
2145
2146 stats->tx64octets_gb +=
2147 xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
2148
2149 stats->tx65to127octets_gb +=
2150 xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
2151
2152 stats->tx128to255octets_gb +=
2153 xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
2154
2155 stats->tx256to511octets_gb +=
2156 xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
2157
2158 stats->tx512to1023octets_gb +=
2159 xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
2160
2161 stats->tx1024tomaxoctets_gb +=
2162 xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
2163
2164 stats->txunicastframes_gb +=
2165 xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
2166
2167 stats->txmulticastframes_gb +=
2168 xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
2169
2170 stats->txbroadcastframes_g +=
2171 xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
2172
2173 stats->txunderflowerror +=
2174 xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
2175
2176 stats->txoctetcount_g +=
2177 xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
2178
2179 stats->txframecount_g +=
2180 xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
2181
2182 stats->txpauseframes +=
2183 xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
2184
2185 stats->txvlanframes_g +=
2186 xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
2187
2188 stats->rxframecount_gb +=
2189 xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
2190
2191 stats->rxoctetcount_gb +=
2192 xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
2193
2194 stats->rxoctetcount_g +=
2195 xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
2196
2197 stats->rxbroadcastframes_g +=
2198 xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
2199
2200 stats->rxmulticastframes_g +=
2201 xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
2202
2203 stats->rxcrcerror +=
2204 xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO);
2205
2206 stats->rxrunterror +=
2207 xlgmac_mmc_read(pdata, MMC_RXRUNTERROR);
2208
2209 stats->rxjabbererror +=
2210 xlgmac_mmc_read(pdata, MMC_RXJABBERERROR);
2211
2212 stats->rxundersize_g +=
2213 xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G);
2214
2215 stats->rxoversize_g +=
2216 xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G);
2217
2218 stats->rx64octets_gb +=
2219 xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
2220
2221 stats->rx65to127octets_gb +=
2222 xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
2223
2224 stats->rx128to255octets_gb +=
2225 xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
2226
2227 stats->rx256to511octets_gb +=
2228 xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
2229
2230 stats->rx512to1023octets_gb +=
2231 xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
2232
2233 stats->rx1024tomaxoctets_gb +=
2234 xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
2235
2236 stats->rxunicastframes_g +=
2237 xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
2238
2239 stats->rxlengtherror +=
2240 xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
2241
2242 stats->rxoutofrangetype +=
2243 xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
2244
2245 stats->rxpauseframes +=
2246 xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
2247
2248 stats->rxfifooverflow +=
2249 xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
2250
2251 stats->rxvlanframes_gb +=
2252 xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
2253
2254 stats->rxwatchdogerror +=
2255 xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR);
2256
2257 /* Un-freeze counters */
2258 regval = readl(pdata->mac_regs + MMC_CR);
2259 regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS,
2260 MMC_CR_MCF_LEN, 0);
2261 writel(regval, pdata->mac_regs + MMC_CR);
2262}
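/* Minimal sketch (illustrative, not part of this patch) of a stats path
 * consuming the snapshot taken above.  Because xlgmac_config_mmc() below
 * sets the counters to reset-on-read, each call only adds the delta since
 * the previous read to the software counters in pdata->stats.
 */
static void xlgmac_fill_stats_sketch(struct xlgmac_pdata *pdata,
				     struct rtnl_link_stats64 *s)
{
	struct xlgmac_stats *stats = &pdata->stats;

	xlgmac_read_mmc_stats(pdata);

	s->rx_packets = stats->rxframecount_gb;
	s->tx_packets = stats->txframecount_gb;
	s->rx_bytes = stats->rxoctetcount_gb;
	s->tx_bytes = stats->txoctetcount_gb;
	s->rx_crc_errors = stats->rxcrcerror;
	s->rx_length_errors = stats->rxlengtherror;
}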
2263
2264static void xlgmac_config_mmc(struct xlgmac_pdata *pdata)
2265{
2266 u32 regval;
2267
2268 regval = readl(pdata->mac_regs + MMC_CR);
2269 /* Set counters to reset on read */
2270 regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_ROR_POS,
2271 MMC_CR_ROR_LEN, 1);
2272 /* Reset the counters */
2273 regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_CR_POS,
2274 MMC_CR_CR_LEN, 1);
2275 writel(regval, pdata->mac_regs + MMC_CR);
2276}
2277
2278static int xlgmac_write_rss_reg(struct xlgmac_pdata *pdata, unsigned int type,
2279 unsigned int index, unsigned int val)
2280{
2281 unsigned int wait;
2282 int ret = 0;
2283 u32 regval;
2284
2285 mutex_lock(&pdata->rss_mutex);
2286
2287 regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR),
2288 MAC_RSSAR_OB_POS, MAC_RSSAR_OB_LEN);
2289 if (regval) {
2290 ret = -EBUSY;
2291 goto unlock;
2292 }
2293
2294 writel(val, pdata->mac_regs + MAC_RSSDR);
2295
2296 regval = readl(pdata->mac_regs + MAC_RSSAR);
2297 regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_RSSIA_POS,
2298 MAC_RSSAR_RSSIA_LEN, index);
2299 regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_ADDRT_POS,
2300 MAC_RSSAR_ADDRT_LEN, type);
2301 regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_CT_POS,
2302 MAC_RSSAR_CT_LEN, 0);
2303 regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_OB_POS,
2304 MAC_RSSAR_OB_LEN, 1);
2305 writel(regval, pdata->mac_regs + MAC_RSSAR);
2306
2307 wait = 1000;
2308 while (wait--) {
2309 regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR),
2310 MAC_RSSAR_OB_POS,
2311 MAC_RSSAR_OB_LEN);
2312 if (!regval)
2313 goto unlock;
2314
2315 usleep_range(1000, 1500);
2316 }
2317
2318 ret = -EBUSY;
2319
2320unlock:
2321 mutex_unlock(&pdata->rss_mutex);
2322
2323 return ret;
2324}
2325
2326static int xlgmac_write_rss_hash_key(struct xlgmac_pdata *pdata)
2327{
2328 unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
2329 unsigned int *key = (unsigned int *)&pdata->rss_key;
2330 int ret;
2331
2332 while (key_regs--) {
2333 ret = xlgmac_write_rss_reg(pdata, XLGMAC_RSS_HASH_KEY_TYPE,
2334 key_regs, *key++);
2335 if (ret)
2336 return ret;
2337 }
2338
2339 return 0;
2340}
2341
2342static int xlgmac_write_rss_lookup_table(struct xlgmac_pdata *pdata)
2343{
2344 unsigned int i;
2345 int ret;
2346
2347 for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
2348 ret = xlgmac_write_rss_reg(pdata,
2349 XLGMAC_RSS_LOOKUP_TABLE_TYPE, i,
2350 pdata->rss_table[i]);
2351 if (ret)
2352 return ret;
2353 }
2354
2355 return 0;
2356}
2357
2358static int xlgmac_set_rss_hash_key(struct xlgmac_pdata *pdata, const u8 *key)
2359{
2360 memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
2361
2362 return xlgmac_write_rss_hash_key(pdata);
2363}
2364
2365static int xlgmac_set_rss_lookup_table(struct xlgmac_pdata *pdata,
2366 const u32 *table)
2367{
2368 unsigned int i;
2369 u32 tval;
2370
2371 for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
2372 tval = table[i];
2373 pdata->rss_table[i] = XLGMAC_SET_REG_BITS(
2374 pdata->rss_table[i],
2375 MAC_RSSDR_DMCH_POS,
2376 MAC_RSSDR_DMCH_LEN,
2377 tval);
2378 }
2379
2380 return xlgmac_write_rss_lookup_table(pdata);
2381}
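/* Illustrative sketch of how an ethtool RXFH handler could sit on top of
 * the two setters above; the function name is made up, the setters and
 * pdata are the real ones from this file.
 */
static int xlgmac_set_rxfh_sketch(struct xlgmac_pdata *pdata,
				  const u32 *indir, const u8 *key)
{
	int ret = 0;

	if (key)
		ret = xlgmac_set_rss_hash_key(pdata, key);

	if (!ret && indir)
		ret = xlgmac_set_rss_lookup_table(pdata, indir);

	return ret;
}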
2382
2383static int xlgmac_enable_rss(struct xlgmac_pdata *pdata)
2384{
2385 u32 regval;
2386 int ret;
2387
2388 if (!pdata->hw_feat.rss)
2389 return -EOPNOTSUPP;
2390
2391 /* Program the hash key */
2392 ret = xlgmac_write_rss_hash_key(pdata);
2393 if (ret)
2394 return ret;
2395
2396 /* Program the lookup table */
2397 ret = xlgmac_write_rss_lookup_table(pdata);
2398 if (ret)
2399 return ret;
2400
2401 /* Set the RSS options */
2402 writel(pdata->rss_options, pdata->mac_regs + MAC_RSSCR);
2403
2404 /* Enable RSS */
2405 regval = readl(pdata->mac_regs + MAC_RSSCR);
2406 regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS,
2407 MAC_RSSCR_RSSE_LEN, 1);
2408 writel(regval, pdata->mac_regs + MAC_RSSCR);
2409
2410 return 0;
2411}
2412
2413static int xlgmac_disable_rss(struct xlgmac_pdata *pdata)
2414{
2415 u32 regval;
2416
2417 if (!pdata->hw_feat.rss)
2418 return -EOPNOTSUPP;
2419
2420 regval = readl(pdata->mac_regs + MAC_RSSCR);
2421 regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS,
2422 MAC_RSSCR_RSSE_LEN, 0);
2423 writel(regval, pdata->mac_regs + MAC_RSSCR);
2424
2425 return 0;
2426}
2427
2428static void xlgmac_config_rss(struct xlgmac_pdata *pdata)
2429{
2430 int ret;
2431
2432 if (!pdata->hw_feat.rss)
2433 return;
2434
2435 if (pdata->netdev->features & NETIF_F_RXHASH)
2436 ret = xlgmac_enable_rss(pdata);
2437 else
2438 ret = xlgmac_disable_rss(pdata);
2439
2440 if (ret)
2441 netdev_err(pdata->netdev,
2442 "error configuring RSS, RSS disabled\n");
2443}
2444
2445static void xlgmac_enable_dma_interrupts(struct xlgmac_pdata *pdata)
2446{
2447 unsigned int dma_ch_isr, dma_ch_ier;
2448 struct xlgmac_channel *channel;
2449 unsigned int i;
2450
2451 channel = pdata->channel_head;
2452 for (i = 0; i < pdata->channel_count; i++, channel++) {
2453 /* Clear all the interrupts which are set */
2454 dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
2455 writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
2456
2457 /* Clear all interrupt enable bits */
2458 dma_ch_ier = 0;
2459
2460 /* Enable the following interrupts
2461 * NIE - Normal Interrupt Summary Enable
2462 * AIE - Abnormal Interrupt Summary Enable
2463 * FBEE - Fatal Bus Error Enable
2464 */
2465 dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
2466 DMA_CH_IER_NIE_POS,
2467 DMA_CH_IER_NIE_LEN, 1);
2468 dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
2469 DMA_CH_IER_AIE_POS,
2470 DMA_CH_IER_AIE_LEN, 1);
2471 dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
2472 DMA_CH_IER_FBEE_POS,
2473 DMA_CH_IER_FBEE_LEN, 1);
2474
2475 if (channel->tx_ring) {
2476 /* Enable the following Tx interrupts
2477 * TIE - Transmit Interrupt Enable (unless using
2478 * per channel interrupts)
2479 */
2480 if (!pdata->per_channel_irq)
2481 dma_ch_ier = XLGMAC_SET_REG_BITS(
2482 dma_ch_ier,
2483 DMA_CH_IER_TIE_POS,
2484 DMA_CH_IER_TIE_LEN,
2485 1);
2486 }
2487 if (channel->rx_ring) {
2488 /* Enable the following Rx interrupts
2489 * RBUE - Receive Buffer Unavailable Enable
2490 * RIE - Receive Interrupt Enable (unless using
2491 * per channel interrupts)
2492 */
2493 dma_ch_ier = XLGMAC_SET_REG_BITS(
2494 dma_ch_ier,
2495 DMA_CH_IER_RBUE_POS,
2496 DMA_CH_IER_RBUE_LEN,
2497 1);
2498 if (!pdata->per_channel_irq)
2499 dma_ch_ier = XLGMAC_SET_REG_BITS(
2500 dma_ch_ier,
2501 DMA_CH_IER_RIE_POS,
2502 DMA_CH_IER_RIE_LEN,
2503 1);
2504 }
2505
2506 writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
2507 }
2508}
2509
2510static void xlgmac_enable_mtl_interrupts(struct xlgmac_pdata *pdata)
2511{
2512 unsigned int q_count, i;
2513 unsigned int mtl_q_isr;
2514
2515 q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
2516 for (i = 0; i < q_count; i++) {
2517 /* Clear all the interrupts which are set */
2518 mtl_q_isr = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR));
2519 writel(mtl_q_isr, XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR));
2520
2521 /* No MTL interrupts to be enabled */
2522 writel(0, XLGMAC_MTL_REG(pdata, i, MTL_Q_IER));
2523 }
2524}
2525
2526static void xlgmac_enable_mac_interrupts(struct xlgmac_pdata *pdata)
2527{
2528 unsigned int mac_ier = 0;
2529 u32 regval;
2530
2531 /* Enable Timestamp interrupt */
2532 mac_ier = XLGMAC_SET_REG_BITS(mac_ier, MAC_IER_TSIE_POS,
2533 MAC_IER_TSIE_LEN, 1);
2534
2535 writel(mac_ier, pdata->mac_regs + MAC_IER);
2536
2537 /* Enable all counter interrupts */
2538 regval = readl(pdata->mac_regs + MMC_RIER);
2539 regval = XLGMAC_SET_REG_BITS(regval, MMC_RIER_ALL_INTERRUPTS_POS,
2540 MMC_RIER_ALL_INTERRUPTS_LEN, 0xffffffff);
2541 writel(regval, pdata->mac_regs + MMC_RIER);
2542 regval = readl(pdata->mac_regs + MMC_TIER);
2543 regval = XLGMAC_SET_REG_BITS(regval, MMC_TIER_ALL_INTERRUPTS_POS,
2544 MMC_TIER_ALL_INTERRUPTS_LEN, 0xffffffff);
2545 writel(regval, pdata->mac_regs + MMC_TIER);
2546}
2547
2548static int xlgmac_set_xlgmii_25000_speed(struct xlgmac_pdata *pdata)
2549{
2550 u32 regval;
2551
2552 regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
2553 MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
2554 if (regval == 0x1)
2555 return 0;
2556
2557 regval = readl(pdata->mac_regs + MAC_TCR);
2558 regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
2559 MAC_TCR_SS_LEN, 0x1);
2560 writel(regval, pdata->mac_regs + MAC_TCR);
2561
2562 return 0;
2563}
2564
2565static int xlgmac_set_xlgmii_40000_speed(struct xlgmac_pdata *pdata)
2566{
2567 u32 regval;
2568
2569 regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
2570 MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
2571 if (regval == 0)
2572 return 0;
2573
2574 regval = readl(pdata->mac_regs + MAC_TCR);
2575 regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
2576 MAC_TCR_SS_LEN, 0);
2577 writel(regval, pdata->mac_regs + MAC_TCR);
2578
2579 return 0;
2580}
2581
2582static int xlgmac_set_xlgmii_50000_speed(struct xlgmac_pdata *pdata)
2583{
2584 u32 regval;
2585
2586 regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
2587 MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
2588 if (regval == 0x2)
2589 return 0;
2590
2591 regval = readl(pdata->mac_regs + MAC_TCR);
2592 regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
2593 MAC_TCR_SS_LEN, 0x2);
2594 writel(regval, pdata->mac_regs + MAC_TCR);
2595
2596 return 0;
2597}
2598
2599static int xlgmac_set_xlgmii_100000_speed(struct xlgmac_pdata *pdata)
2600{
2601 u32 regval;
2602
2603 regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
2604 MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
2605 if (regval == 0x3)
2606 return 0;
2607
2608 regval = readl(pdata->mac_regs + MAC_TCR);
2609 regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
2610 MAC_TCR_SS_LEN, 0x3);
2611 writel(regval, pdata->mac_regs + MAC_TCR);
2612
2613 return 0;
2614}
2615
2616static void xlgmac_config_mac_speed(struct xlgmac_pdata *pdata)
2617{
2618 switch (pdata->phy_speed) {
2619 case SPEED_100000:
2620 xlgmac_set_xlgmii_100000_speed(pdata);
2621 break;
2622
2623 case SPEED_50000:
2624 xlgmac_set_xlgmii_50000_speed(pdata);
2625 break;
2626
2627 case SPEED_40000:
2628 xlgmac_set_xlgmii_40000_speed(pdata);
2629 break;
2630
2631 case SPEED_25000:
2632 xlgmac_set_xlgmii_25000_speed(pdata);
2633 break;
2634 }
2635}
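/* Summary of the MAC_TCR speed-select (SS) encoding used by the four
 * helpers above: 0x0 = XLGMII 40 Gbps, 0x1 = 25 Gbps, 0x2 = 50 Gbps,
 * 0x3 = 100 Gbps.  xlgmac_config_mac_speed() simply selects one of them
 * from the PHY-reported speed.
 */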
2636
2637static int xlgmac_dev_read(struct xlgmac_channel *channel)
2638{
2639 struct xlgmac_pdata *pdata = channel->pdata;
2640 struct xlgmac_ring *ring = channel->rx_ring;
2641 struct net_device *netdev = pdata->netdev;
2642 struct xlgmac_desc_data *desc_data;
2643 struct xlgmac_dma_desc *dma_desc;
2644 struct xlgmac_pkt_info *pkt_info;
2645 unsigned int err, etlt, l34t;
2646
2647 desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
2648 dma_desc = desc_data->dma_desc;
2649 pkt_info = &ring->pkt_info;
2650
2651 /* Check for data availability */
2652 if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2653 RX_NORMAL_DESC3_OWN_POS,
2654 RX_NORMAL_DESC3_OWN_LEN))
2655 return 1;
2656
2657 /* Make sure descriptor fields are read after reading the OWN bit */
2658 dma_rmb();
2659
2660 if (netif_msg_rx_status(pdata))
2661 xlgmac_dump_rx_desc(pdata, ring, ring->cur);
2662
2663 if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2664 RX_NORMAL_DESC3_CTXT_POS,
2665 RX_NORMAL_DESC3_CTXT_LEN)) {
2666 /* Timestamp Context Descriptor */
2667 xlgmac_get_rx_tstamp(pkt_info, dma_desc);
2668
2669 pkt_info->attributes = XLGMAC_SET_REG_BITS(
2670 pkt_info->attributes,
2671 RX_PACKET_ATTRIBUTES_CONTEXT_POS,
2672 RX_PACKET_ATTRIBUTES_CONTEXT_LEN,
2673 1);
2674 pkt_info->attributes = XLGMAC_SET_REG_BITS(
2675 pkt_info->attributes,
2676 RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
2677 RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN,
2678 0);
2679 return 0;
2680 }
2681
2682 /* Normal Descriptor, be sure Context Descriptor bit is off */
2683 pkt_info->attributes = XLGMAC_SET_REG_BITS(
2684 pkt_info->attributes,
2685 RX_PACKET_ATTRIBUTES_CONTEXT_POS,
2686 RX_PACKET_ATTRIBUTES_CONTEXT_LEN,
2687 0);
2688
2689 /* Indicate if a Context Descriptor is next */
2690 if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2691 RX_NORMAL_DESC3_CDA_POS,
2692 RX_NORMAL_DESC3_CDA_LEN))
2693 pkt_info->attributes = XLGMAC_SET_REG_BITS(
2694 pkt_info->attributes,
2695 RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
2696 RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN,
2697 1);
2698
2699 /* Get the header length */
2700 if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2701 RX_NORMAL_DESC3_FD_POS,
2702 RX_NORMAL_DESC3_FD_LEN)) {
2703 desc_data->rx.hdr_len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc2,
2704 RX_NORMAL_DESC2_HL_POS,
2705 RX_NORMAL_DESC2_HL_LEN);
2706 if (desc_data->rx.hdr_len)
2707 pdata->stats.rx_split_header_packets++;
2708 }
2709
2710 /* Get the RSS hash */
2711 if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2712 RX_NORMAL_DESC3_RSV_POS,
2713 RX_NORMAL_DESC3_RSV_LEN)) {
2714 pkt_info->attributes = XLGMAC_SET_REG_BITS(
2715 pkt_info->attributes,
2716 RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
2717 RX_PACKET_ATTRIBUTES_RSS_HASH_LEN,
2718 1);
2719
2720 pkt_info->rss_hash = le32_to_cpu(dma_desc->desc1);
2721
2722 l34t = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2723 RX_NORMAL_DESC3_L34T_POS,
2724 RX_NORMAL_DESC3_L34T_LEN);
2725 switch (l34t) {
2726 case RX_DESC3_L34T_IPV4_TCP:
2727 case RX_DESC3_L34T_IPV4_UDP:
2728 case RX_DESC3_L34T_IPV6_TCP:
2729 case RX_DESC3_L34T_IPV6_UDP:
2730 pkt_info->rss_hash_type = PKT_HASH_TYPE_L4;
2731 break;
2732 default:
2733 pkt_info->rss_hash_type = PKT_HASH_TYPE_L3;
2734 }
2735 }
2736
2737 /* Get the pkt_info length */
2738 desc_data->rx.len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2739 RX_NORMAL_DESC3_PL_POS,
2740 RX_NORMAL_DESC3_PL_LEN);
2741
2742 if (!XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2743 RX_NORMAL_DESC3_LD_POS,
2744 RX_NORMAL_DESC3_LD_LEN)) {
2745 /* Not all the data has been transferred for this pkt_info */
2746 pkt_info->attributes = XLGMAC_SET_REG_BITS(
2747 pkt_info->attributes,
2748 RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
2749 RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN,
2750 1);
2751 return 0;
2752 }
2753
2754 /* This is the last of the data for this pkt_info */
2755 pkt_info->attributes = XLGMAC_SET_REG_BITS(
2756 pkt_info->attributes,
2757 RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
2758 RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN,
2759 0);
2760
2761 /* Set checksum done indicator as appropriate */
2762 if (netdev->features & NETIF_F_RXCSUM)
2763 pkt_info->attributes = XLGMAC_SET_REG_BITS(
2764 pkt_info->attributes,
2765 RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
2766 RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN,
2767 1);
2768
2769 /* Check for errors (only valid in last descriptor) */
2770 err = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2771 RX_NORMAL_DESC3_ES_POS,
2772 RX_NORMAL_DESC3_ES_LEN);
2773 etlt = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2774 RX_NORMAL_DESC3_ETLT_POS,
2775 RX_NORMAL_DESC3_ETLT_LEN);
2776 netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
2777
2778 if (!err || !etlt) {
2779 /* No error if err is 0 or etlt is 0 */
2780 if ((etlt == 0x09) &&
2781 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
2782 pkt_info->attributes = XLGMAC_SET_REG_BITS(
2783 pkt_info->attributes,
2784 RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
2785 RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
2786 1);
2787 pkt_info->vlan_ctag =
2788 XLGMAC_GET_REG_BITS_LE(dma_desc->desc0,
2789 RX_NORMAL_DESC0_OVT_POS,
2790 RX_NORMAL_DESC0_OVT_LEN);
2791 netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
2792 pkt_info->vlan_ctag);
2793 }
2794 } else {
2795 if ((etlt == 0x05) || (etlt == 0x06))
2796 pkt_info->attributes = XLGMAC_SET_REG_BITS(
2797 pkt_info->attributes,
2798 RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
2799 RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN,
2800 0);
2801 else
2802 pkt_info->errors = XLGMAC_SET_REG_BITS(
2803 pkt_info->errors,
2804 RX_PACKET_ERRORS_FRAME_POS,
2805 RX_PACKET_ERRORS_FRAME_LEN,
2806 1);
2807 }
2808
2809 XLGMAC_PR("%s - descriptor=%u (cur=%d)\n", channel->name,
2810 ring->cur & (ring->dma_desc_count - 1), ring->cur);
2811
2812 return 0;
2813}
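/* Return convention for xlgmac_dev_read() above: 1 means the descriptor is
 * still owned by the DMA engine and polling should stop; 0 means a
 * descriptor was consumed and pkt_info/desc_data were updated.  Whether the
 * packet is complete is signalled through the INCOMPLETE attribute bit, not
 * through the return value.
 */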
2814
2815static int xlgmac_enable_int(struct xlgmac_channel *channel,
2816 enum xlgmac_int int_id)
2817{
2818 unsigned int dma_ch_ier;
2819
2820 dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER));
2821
2822 switch (int_id) {
2823 case XLGMAC_INT_DMA_CH_SR_TI:
2824 dma_ch_ier = XLGMAC_SET_REG_BITS(
2825 dma_ch_ier, DMA_CH_IER_TIE_POS,
2826 DMA_CH_IER_TIE_LEN, 1);
2827 break;
2828 case XLGMAC_INT_DMA_CH_SR_TPS:
2829 dma_ch_ier = XLGMAC_SET_REG_BITS(
2830 dma_ch_ier, DMA_CH_IER_TXSE_POS,
2831 DMA_CH_IER_TXSE_LEN, 1);
2832 break;
2833 case XLGMAC_INT_DMA_CH_SR_TBU:
2834 dma_ch_ier = XLGMAC_SET_REG_BITS(
2835 dma_ch_ier, DMA_CH_IER_TBUE_POS,
2836 DMA_CH_IER_TBUE_LEN, 1);
2837 break;
2838 case XLGMAC_INT_DMA_CH_SR_RI:
2839 dma_ch_ier = XLGMAC_SET_REG_BITS(
2840 dma_ch_ier, DMA_CH_IER_RIE_POS,
2841 DMA_CH_IER_RIE_LEN, 1);
2842 break;
2843 case XLGMAC_INT_DMA_CH_SR_RBU:
2844 dma_ch_ier = XLGMAC_SET_REG_BITS(
2845 dma_ch_ier, DMA_CH_IER_RBUE_POS,
2846 DMA_CH_IER_RBUE_LEN, 1);
2847 break;
2848 case XLGMAC_INT_DMA_CH_SR_RPS:
2849 dma_ch_ier = XLGMAC_SET_REG_BITS(
2850 dma_ch_ier, DMA_CH_IER_RSE_POS,
2851 DMA_CH_IER_RSE_LEN, 1);
2852 break;
2853 case XLGMAC_INT_DMA_CH_SR_TI_RI:
2854 dma_ch_ier = XLGMAC_SET_REG_BITS(
2855 dma_ch_ier, DMA_CH_IER_TIE_POS,
2856 DMA_CH_IER_TIE_LEN, 1);
2857 dma_ch_ier = XLGMAC_SET_REG_BITS(
2858 dma_ch_ier, DMA_CH_IER_RIE_POS,
2859 DMA_CH_IER_RIE_LEN, 1);
2860 break;
2861 case XLGMAC_INT_DMA_CH_SR_FBE:
2862 dma_ch_ier = XLGMAC_SET_REG_BITS(
2863 dma_ch_ier, DMA_CH_IER_FBEE_POS,
2864 DMA_CH_IER_FBEE_LEN, 1);
2865 break;
2866 case XLGMAC_INT_DMA_ALL:
2867 dma_ch_ier |= channel->saved_ier;
2868 break;
2869 default:
2870 return -1;
2871 }
2872
2873 writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
2874
2875 return 0;
2876}
2877
2878static int xlgmac_disable_int(struct xlgmac_channel *channel,
2879 enum xlgmac_int int_id)
2880{
2881 unsigned int dma_ch_ier;
2882
2883 dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER));
2884
2885 switch (int_id) {
2886 case XLGMAC_INT_DMA_CH_SR_TI:
2887 dma_ch_ier = XLGMAC_SET_REG_BITS(
2888 dma_ch_ier, DMA_CH_IER_TIE_POS,
2889 DMA_CH_IER_TIE_LEN, 0);
2890 break;
2891 case XLGMAC_INT_DMA_CH_SR_TPS:
2892 dma_ch_ier = XLGMAC_SET_REG_BITS(
2893 dma_ch_ier, DMA_CH_IER_TXSE_POS,
2894 DMA_CH_IER_TXSE_LEN, 0);
2895 break;
2896 case XLGMAC_INT_DMA_CH_SR_TBU:
2897 dma_ch_ier = XLGMAC_SET_REG_BITS(
2898 dma_ch_ier, DMA_CH_IER_TBUE_POS,
2899 DMA_CH_IER_TBUE_LEN, 0);
2900 break;
2901 case XLGMAC_INT_DMA_CH_SR_RI:
2902 dma_ch_ier = XLGMAC_SET_REG_BITS(
2903 dma_ch_ier, DMA_CH_IER_RIE_POS,
2904 DMA_CH_IER_RIE_LEN, 0);
2905 break;
2906 case XLGMAC_INT_DMA_CH_SR_RBU:
2907 dma_ch_ier = XLGMAC_SET_REG_BITS(
2908 dma_ch_ier, DMA_CH_IER_RBUE_POS,
2909 DMA_CH_IER_RBUE_LEN, 0);
2910 break;
2911 case XLGMAC_INT_DMA_CH_SR_RPS:
2912 dma_ch_ier = XLGMAC_SET_REG_BITS(
2913 dma_ch_ier, DMA_CH_IER_RSE_POS,
2914 DMA_CH_IER_RSE_LEN, 0);
2915 break;
2916 case XLGMAC_INT_DMA_CH_SR_TI_RI:
2917 dma_ch_ier = XLGMAC_SET_REG_BITS(
2918 dma_ch_ier, DMA_CH_IER_TIE_POS,
2919 DMA_CH_IER_TIE_LEN, 0);
2920 dma_ch_ier = XLGMAC_SET_REG_BITS(
2921 dma_ch_ier, DMA_CH_IER_RIE_POS,
2922 DMA_CH_IER_RIE_LEN, 0);
2923 break;
2924 case XLGMAC_INT_DMA_CH_SR_FBE:
2925 dma_ch_ier = XLGMAC_SET_REG_BITS(
2926 dma_ch_ier, DMA_CH_IER_FBEE_POS,
2927 DMA_CH_IER_FBEE_LEN, 0);
2928 break;
2929 case XLGMAC_INT_DMA_ALL:
2930 channel->saved_ier = dma_ch_ier & XLGMAC_DMA_INTERRUPT_MASK;
2931 dma_ch_ier &= ~XLGMAC_DMA_INTERRUPT_MASK;
2932 break;
2933 default:
2934 return -1;
2935 }
2936
2937 writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
2938
2939 return 0;
2940}
2941
2942static int xlgmac_flush_tx_queues(struct xlgmac_pdata *pdata)
2943{
2944 unsigned int i, count;
2945 u32 regval;
2946
2947 for (i = 0; i < pdata->tx_q_count; i++) {
2948 regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
2949 regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS,
2950 MTL_Q_TQOMR_FTQ_LEN, 1);
2951 writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
2952 }
2953
2954 /* Poll until the Tx queue flush (FTQ bit) completes */
2955 for (i = 0; i < pdata->tx_q_count; i++) {
2956 count = 2000;
2957 regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
2958 regval = XLGMAC_GET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS,
2959 MTL_Q_TQOMR_FTQ_LEN);
2960 while (--count && regval)
2961 usleep_range(500, 600);
2962
2963 if (!count)
2964 return -EBUSY;
2965 }
2966
2967 return 0;
2968}
2969
2970static void xlgmac_config_dma_bus(struct xlgmac_pdata *pdata)
2971{
2972 u32 regval;
2973
2974 regval = readl(pdata->mac_regs + DMA_SBMR);
2975 /* Set enhanced addressing mode */
2976 regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_EAME_POS,
2977 DMA_SBMR_EAME_LEN, 1);
2978 /* Set the System Bus mode */
2979 regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_UNDEF_POS,
2980 DMA_SBMR_UNDEF_LEN, 1);
2981 regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_256_POS,
2982 DMA_SBMR_BLEN_256_LEN, 1);
2983 writel(regval, pdata->mac_regs + DMA_SBMR);
2984}
2985
2986static int xlgmac_hw_init(struct xlgmac_pdata *pdata)
2987{
2988 struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
2989 int ret;
2990
2991 /* Flush Tx queues */
2992 ret = xlgmac_flush_tx_queues(pdata);
2993 if (ret)
2994 return ret;
2995
2996 /* Initialize DMA related features */
2997 xlgmac_config_dma_bus(pdata);
2998 xlgmac_config_osp_mode(pdata);
2999 xlgmac_config_pblx8(pdata);
3000 xlgmac_config_tx_pbl_val(pdata);
3001 xlgmac_config_rx_pbl_val(pdata);
3002 xlgmac_config_rx_coalesce(pdata);
3003 xlgmac_config_tx_coalesce(pdata);
3004 xlgmac_config_rx_buffer_size(pdata);
3005 xlgmac_config_tso_mode(pdata);
3006 xlgmac_config_sph_mode(pdata);
3007 xlgmac_config_rss(pdata);
3008 desc_ops->tx_desc_init(pdata);
3009 desc_ops->rx_desc_init(pdata);
3010 xlgmac_enable_dma_interrupts(pdata);
3011
3012 /* Initialize MTL related features */
3013 xlgmac_config_mtl_mode(pdata);
3014 xlgmac_config_queue_mapping(pdata);
3015 xlgmac_config_tsf_mode(pdata, pdata->tx_sf_mode);
3016 xlgmac_config_rsf_mode(pdata, pdata->rx_sf_mode);
3017 xlgmac_config_tx_threshold(pdata, pdata->tx_threshold);
3018 xlgmac_config_rx_threshold(pdata, pdata->rx_threshold);
3019 xlgmac_config_tx_fifo_size(pdata);
3020 xlgmac_config_rx_fifo_size(pdata);
3021 xlgmac_config_flow_control_threshold(pdata);
3022 xlgmac_config_rx_fep_enable(pdata);
3023 xlgmac_config_rx_fup_enable(pdata);
3024 xlgmac_enable_mtl_interrupts(pdata);
3025
3026 /* Initialize MAC related features */
3027 xlgmac_config_mac_address(pdata);
3028 xlgmac_config_rx_mode(pdata);
3029 xlgmac_config_jumbo_enable(pdata);
3030 xlgmac_config_flow_control(pdata);
3031 xlgmac_config_mac_speed(pdata);
3032 xlgmac_config_checksum_offload(pdata);
3033 xlgmac_config_vlan_support(pdata);
3034 xlgmac_config_mmc(pdata);
3035 xlgmac_enable_mac_interrupts(pdata);
3036
3037 return 0;
3038}
3039
3040static int xlgmac_hw_exit(struct xlgmac_pdata *pdata)
3041{
3042 unsigned int count = 2000;
3043 u32 regval;
3044
3045 /* Issue a software reset */
3046 regval = readl(pdata->mac_regs + DMA_MR);
3047 regval = XLGMAC_SET_REG_BITS(regval, DMA_MR_SWR_POS,
3048 DMA_MR_SWR_LEN, 1);
3049 writel(regval, pdata->mac_regs + DMA_MR);
3050 usleep_range(10, 15);
3051
3052 /* Poll until the software reset (SWR bit) clears */
3053 while (--count &&
3054 XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + DMA_MR),
3055 DMA_MR_SWR_POS, DMA_MR_SWR_LEN))
3056 usleep_range(500, 600);
3057
3058 if (!count)
3059 return -EBUSY;
3060
3061 return 0;
3062}
3063
3064void xlgmac_init_hw_ops(struct xlgmac_hw_ops *hw_ops)
3065{
3066 hw_ops->init = xlgmac_hw_init;
3067 hw_ops->exit = xlgmac_hw_exit;
3068
3069 hw_ops->tx_complete = xlgmac_tx_complete;
3070
3071 hw_ops->enable_tx = xlgmac_enable_tx;
3072 hw_ops->disable_tx = xlgmac_disable_tx;
3073 hw_ops->enable_rx = xlgmac_enable_rx;
3074 hw_ops->disable_rx = xlgmac_disable_rx;
3075
3076 hw_ops->dev_xmit = xlgmac_dev_xmit;
3077 hw_ops->dev_read = xlgmac_dev_read;
3078 hw_ops->enable_int = xlgmac_enable_int;
3079 hw_ops->disable_int = xlgmac_disable_int;
3080
3081 hw_ops->set_mac_address = xlgmac_set_mac_address;
3082 hw_ops->config_rx_mode = xlgmac_config_rx_mode;
3083 hw_ops->enable_rx_csum = xlgmac_enable_rx_csum;
3084 hw_ops->disable_rx_csum = xlgmac_disable_rx_csum;
3085
3086 /* For MII speed configuration */
3087 hw_ops->set_xlgmii_25000_speed = xlgmac_set_xlgmii_25000_speed;
3088 hw_ops->set_xlgmii_40000_speed = xlgmac_set_xlgmii_40000_speed;
3089 hw_ops->set_xlgmii_50000_speed = xlgmac_set_xlgmii_50000_speed;
3090 hw_ops->set_xlgmii_100000_speed = xlgmac_set_xlgmii_100000_speed;
3091
3092 /* For descriptor related operation */
3093 hw_ops->tx_desc_init = xlgmac_tx_desc_init;
3094 hw_ops->rx_desc_init = xlgmac_rx_desc_init;
3095 hw_ops->tx_desc_reset = xlgmac_tx_desc_reset;
3096 hw_ops->rx_desc_reset = xlgmac_rx_desc_reset;
3097 hw_ops->is_last_desc = xlgmac_is_last_desc;
3098 hw_ops->is_context_desc = xlgmac_is_context_desc;
3099 hw_ops->tx_start_xmit = xlgmac_tx_start_xmit;
3100
3101 /* For Flow Control */
3102 hw_ops->config_tx_flow_control = xlgmac_config_tx_flow_control;
3103 hw_ops->config_rx_flow_control = xlgmac_config_rx_flow_control;
3104
3105 /* For Vlan related config */
3106 hw_ops->enable_rx_vlan_stripping = xlgmac_enable_rx_vlan_stripping;
3107 hw_ops->disable_rx_vlan_stripping = xlgmac_disable_rx_vlan_stripping;
3108 hw_ops->enable_rx_vlan_filtering = xlgmac_enable_rx_vlan_filtering;
3109 hw_ops->disable_rx_vlan_filtering = xlgmac_disable_rx_vlan_filtering;
3110 hw_ops->update_vlan_hash_table = xlgmac_update_vlan_hash_table;
3111
3112 /* For RX coalescing */
3113 hw_ops->config_rx_coalesce = xlgmac_config_rx_coalesce;
3114 hw_ops->config_tx_coalesce = xlgmac_config_tx_coalesce;
3115 hw_ops->usec_to_riwt = xlgmac_usec_to_riwt;
3116 hw_ops->riwt_to_usec = xlgmac_riwt_to_usec;
3117
3118 /* For RX and TX threshold config */
3119 hw_ops->config_rx_threshold = xlgmac_config_rx_threshold;
3120 hw_ops->config_tx_threshold = xlgmac_config_tx_threshold;
3121
3122 /* For RX and TX Store and Forward Mode config */
3123 hw_ops->config_rsf_mode = xlgmac_config_rsf_mode;
3124 hw_ops->config_tsf_mode = xlgmac_config_tsf_mode;
3125
3126 /* For TX DMA Operating on Second Frame config */
3127 hw_ops->config_osp_mode = xlgmac_config_osp_mode;
3128
3129 /* For RX and TX PBL config */
3130 hw_ops->config_rx_pbl_val = xlgmac_config_rx_pbl_val;
3131 hw_ops->get_rx_pbl_val = xlgmac_get_rx_pbl_val;
3132 hw_ops->config_tx_pbl_val = xlgmac_config_tx_pbl_val;
3133 hw_ops->get_tx_pbl_val = xlgmac_get_tx_pbl_val;
3134 hw_ops->config_pblx8 = xlgmac_config_pblx8;
3135
3136 /* For MMC statistics support */
3137 hw_ops->tx_mmc_int = xlgmac_tx_mmc_int;
3138 hw_ops->rx_mmc_int = xlgmac_rx_mmc_int;
3139 hw_ops->read_mmc_stats = xlgmac_read_mmc_stats;
3140
3141 /* For Receive Side Scaling */
3142 hw_ops->enable_rss = xlgmac_enable_rss;
3143 hw_ops->disable_rss = xlgmac_disable_rss;
3144 hw_ops->set_rss_hash_key = xlgmac_set_rss_hash_key;
3145 hw_ops->set_rss_lookup_table = xlgmac_set_rss_lookup_table;
3146}
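/* Minimal usage sketch for the ops table filled in above; everything except
 * xlgmac_init_hw_ops() and the hw_ops members is illustrative.
 */
static int xlgmac_hw_bringup_sketch(struct xlgmac_pdata *pdata)
{
	xlgmac_init_hw_ops(&pdata->hw_ops);

	/* hw_ops.init points at xlgmac_hw_init(), which flushes the Tx
	 * queues and then programs the DMA, MTL and MAC blocks in order.
	 */
	return pdata->hw_ops.init(pdata);
}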
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
new file mode 100644
index 000000000000..5e8428be3d66
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -0,0 +1,1334 @@
1/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
2 *
3 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * This Synopsys DWC XLGMAC software driver and associated documentation
11 * (hereinafter the "Software") is an unsupported proprietary work of
12 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
13 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
14 * Licensed Product under any End User Software License Agreement or
15 * Agreement for Licensed Products with Synopsys or any supplement thereto.
16 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
17 * in the SOFTWARE may be the trademarks of their respective owners.
18 */
19
20#include <linux/netdevice.h>
21#include <linux/tcp.h>
22
23#include "dwc-xlgmac.h"
24#include "dwc-xlgmac-reg.h"
25
26static int xlgmac_one_poll(struct napi_struct *, int);
27static int xlgmac_all_poll(struct napi_struct *, int);
28
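/* ring->cur and ring->dirty appear to be free-running unsigned indices
 * (masked when a descriptor is actually looked up), so the subtractions
 * below remain correct across wraparound. For example, with
 * dma_desc_count = 512, cur = 1030 and dirty = 1000, there are
 * 512 - 30 = 482 Tx descriptors still available.
 */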
29static inline unsigned int xlgmac_tx_avail_desc(struct xlgmac_ring *ring)
30{
31 return (ring->dma_desc_count - (ring->cur - ring->dirty));
32}
33
34static inline unsigned int xlgmac_rx_dirty_desc(struct xlgmac_ring *ring)
35{
36 return (ring->cur - ring->dirty);
37}
38
39static int xlgmac_maybe_stop_tx_queue(
40 struct xlgmac_channel *channel,
41 struct xlgmac_ring *ring,
42 unsigned int count)
43{
44 struct xlgmac_pdata *pdata = channel->pdata;
45
46 if (count > xlgmac_tx_avail_desc(ring)) {
47 netif_info(pdata, drv, pdata->netdev,
48 "Tx queue stopped, not enough descriptors available\n");
49 netif_stop_subqueue(pdata->netdev, channel->queue_index);
50 ring->tx.queue_stopped = 1;
51
52 /* If we haven't notified the hardware because of xmit_more
53 * support, tell it now
54 */
55 if (ring->tx.xmit_more)
56 pdata->hw_ops.tx_start_xmit(channel, ring);
57
58 return NETDEV_TX_BUSY;
59 }
60
61 return 0;
62}
63
64static void xlgmac_prep_vlan(struct sk_buff *skb,
65 struct xlgmac_pkt_info *pkt_info)
66{
67 if (skb_vlan_tag_present(skb))
68 pkt_info->vlan_ctag = skb_vlan_tag_get(skb);
69}
70
71static int xlgmac_prep_tso(struct sk_buff *skb,
72 struct xlgmac_pkt_info *pkt_info)
73{
74 int ret;
75
76 if (!XLGMAC_GET_REG_BITS(pkt_info->attributes,
77 TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
78 TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN))
79 return 0;
80
81 ret = skb_cow_head(skb, 0);
82 if (ret)
83 return ret;
84
85 pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
86 pkt_info->tcp_header_len = tcp_hdrlen(skb);
87 pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
88 pkt_info->mss = skb_shinfo(skb)->gso_size;
89
90 XLGMAC_PR("header_len=%u\n", pkt_info->header_len);
91 XLGMAC_PR("tcp_header_len=%u, tcp_payload_len=%u\n",
92 pkt_info->tcp_header_len, pkt_info->tcp_payload_len);
93 XLGMAC_PR("mss=%u\n", pkt_info->mss);
94
95 /* Update the number of packets that will ultimately be transmitted
96 * along with the extra bytes for each extra packet
97 */
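	/* For example, gso_segs = 4 with a 66-byte header adds 3 * 66 extra bytes */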
98 pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
99 pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len;
100
101 return 0;
102}
103
104static int xlgmac_is_tso(struct sk_buff *skb)
105{
106 if (skb->ip_summed != CHECKSUM_PARTIAL)
107 return 0;
108
109 if (!skb_is_gso(skb))
110 return 0;
111
112 return 1;
113}
114
115static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
116 struct xlgmac_ring *ring,
117 struct sk_buff *skb,
118 struct xlgmac_pkt_info *pkt_info)
119{
120 struct skb_frag_struct *frag;
121 unsigned int context_desc;
122 unsigned int len;
123 unsigned int i;
124
125 pkt_info->skb = skb;
126
127 context_desc = 0;
128 pkt_info->desc_count = 0;
129
130 pkt_info->tx_packets = 1;
131 pkt_info->tx_bytes = skb->len;
132
133 if (xlgmac_is_tso(skb)) {
134 /* TSO requires an extra descriptor if mss is different */
135 if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
136 context_desc = 1;
137 pkt_info->desc_count++;
138 }
139
140 /* TSO requires an extra descriptor for TSO header */
141 pkt_info->desc_count++;
142
143 pkt_info->attributes = XLGMAC_SET_REG_BITS(
144 pkt_info->attributes,
145 TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
146 TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN,
147 1);
148 pkt_info->attributes = XLGMAC_SET_REG_BITS(
149 pkt_info->attributes,
150 TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
151 TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
152 1);
153 } else if (skb->ip_summed == CHECKSUM_PARTIAL)
154 pkt_info->attributes = XLGMAC_SET_REG_BITS(
155 pkt_info->attributes,
156 TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
157 TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
158 1);
159
160 if (skb_vlan_tag_present(skb)) {
161 /* VLAN requires an extra descriptor if tag is different */
162 if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
163 /* We can share with the TSO context descriptor */
164 if (!context_desc) {
165 context_desc = 1;
166 pkt_info->desc_count++;
167 }
168
169 pkt_info->attributes = XLGMAC_SET_REG_BITS(
170 pkt_info->attributes,
171 TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
172 TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
173 1);
174 }
175
176 for (len = skb_headlen(skb); len;) {
177 pkt_info->desc_count++;
178 len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
179 }
180
181 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
182 frag = &skb_shinfo(skb)->frags[i];
183 for (len = skb_frag_size(frag); len; ) {
184 pkt_info->desc_count++;
185 len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
186 }
187 }
188}
189
190static int xlgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
191{
192 unsigned int rx_buf_size;
193
194 if (mtu > XLGMAC_JUMBO_PACKET_MTU) {
195 netdev_alert(netdev, "MTU exceeds maximum supported value\n");
196 return -EINVAL;
197 }
198
199 rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
200 rx_buf_size = clamp_val(rx_buf_size, XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE);
201
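	/* Round the buffer size up to the next XLGMAC_RX_BUF_ALIGN boundary */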
202 rx_buf_size = (rx_buf_size + XLGMAC_RX_BUF_ALIGN - 1) &
203 ~(XLGMAC_RX_BUF_ALIGN - 1);
204
205 return rx_buf_size;
206}
207
208static void xlgmac_enable_rx_tx_ints(struct xlgmac_pdata *pdata)
209{
210 struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
211 struct xlgmac_channel *channel;
212 enum xlgmac_int int_id;
213 unsigned int i;
214
215 channel = pdata->channel_head;
216 for (i = 0; i < pdata->channel_count; i++, channel++) {
217 if (channel->tx_ring && channel->rx_ring)
218 int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
219 else if (channel->tx_ring)
220 int_id = XLGMAC_INT_DMA_CH_SR_TI;
221 else if (channel->rx_ring)
222 int_id = XLGMAC_INT_DMA_CH_SR_RI;
223 else
224 continue;
225
226 hw_ops->enable_int(channel, int_id);
227 }
228}
229
230static void xlgmac_disable_rx_tx_ints(struct xlgmac_pdata *pdata)
231{
232 struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
233 struct xlgmac_channel *channel;
234 enum xlgmac_int int_id;
235 unsigned int i;
236
237 channel = pdata->channel_head;
238 for (i = 0; i < pdata->channel_count; i++, channel++) {
239 if (channel->tx_ring && channel->rx_ring)
240 int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
241 else if (channel->tx_ring)
242 int_id = XLGMAC_INT_DMA_CH_SR_TI;
243 else if (channel->rx_ring)
244 int_id = XLGMAC_INT_DMA_CH_SR_RI;
245 else
246 continue;
247
248 hw_ops->disable_int(channel, int_id);
249 }
250}
251
252static irqreturn_t xlgmac_isr(int irq, void *data)
253{
254 unsigned int dma_isr, dma_ch_isr, mac_isr;
255 struct xlgmac_pdata *pdata = data;
256 struct xlgmac_channel *channel;
257 struct xlgmac_hw_ops *hw_ops;
258 unsigned int i, ti, ri;
259
260 hw_ops = &pdata->hw_ops;
261
262 /* The DMA interrupt status register also reports MAC and MTL
263 * interrupts. So for polling mode, we just need to check for
264 * this register to be non-zero
265 */
266 dma_isr = readl(pdata->mac_regs + DMA_ISR);
267 if (!dma_isr)
268 return IRQ_HANDLED;
269
270 netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
271
272 for (i = 0; i < pdata->channel_count; i++) {
273 if (!(dma_isr & (1 << i)))
274 continue;
275
276 channel = pdata->channel_head + i;
277
278 dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
279 netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
280 i, dma_ch_isr);
281
282		/* The TI or RI interrupt bits may still be set even if using
283		 * per channel DMA interrupts. Only schedule the private data
284		 * napi structure when per channel interrupts are not in use.
285		 */
286 ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS,
287 DMA_CH_SR_TI_LEN);
288 ri = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS,
289 DMA_CH_SR_RI_LEN);
290 if (!pdata->per_channel_irq && (ti || ri)) {
291 if (napi_schedule_prep(&pdata->napi)) {
292 /* Disable Tx and Rx interrupts */
293 xlgmac_disable_rx_tx_ints(pdata);
294
295 /* Turn on polling */
296 __napi_schedule_irqoff(&pdata->napi);
297 }
298 }
299
300 if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS,
301 DMA_CH_SR_RBU_LEN))
302 pdata->stats.rx_buffer_unavailable++;
303
304 /* Restart the device on a Fatal Bus Error */
305 if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS,
306 DMA_CH_SR_FBE_LEN))
307 schedule_work(&pdata->restart_work);
308
309 /* Clear all interrupt signals */
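		/* (the status bits are presumably write-1-to-clear, so writing
		 * back the value we read acknowledges only the events handled
		 * above)
		 */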
310 writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
311 }
312
313 if (XLGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS,
314 DMA_ISR_MACIS_LEN)) {
315 mac_isr = readl(pdata->mac_regs + MAC_ISR);
316
317 if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS,
318 MAC_ISR_MMCTXIS_LEN))
319 hw_ops->tx_mmc_int(pdata);
320
321 if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS,
322 MAC_ISR_MMCRXIS_LEN))
323 hw_ops->rx_mmc_int(pdata);
324 }
325
326 return IRQ_HANDLED;
327}
328
329static irqreturn_t xlgmac_dma_isr(int irq, void *data)
330{
331 struct xlgmac_channel *channel = data;
332
333 /* Per channel DMA interrupts are enabled, so we use the per
334 * channel napi structure and not the private data napi structure
335 */
336 if (napi_schedule_prep(&channel->napi)) {
337 /* Disable Tx and Rx interrupts */
338 disable_irq_nosync(channel->dma_irq);
339
340 /* Turn on polling */
341 __napi_schedule_irqoff(&channel->napi);
342 }
343
344 return IRQ_HANDLED;
345}
346
347static void xlgmac_tx_timer(unsigned long data)
348{
349 struct xlgmac_channel *channel = (struct xlgmac_channel *)data;
350 struct xlgmac_pdata *pdata = channel->pdata;
351 struct napi_struct *napi;
352
353 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
354
355 if (napi_schedule_prep(napi)) {
356 /* Disable Tx and Rx interrupts */
357 if (pdata->per_channel_irq)
358 disable_irq_nosync(channel->dma_irq);
359 else
360 xlgmac_disable_rx_tx_ints(pdata);
361
362 /* Turn on polling */
363 __napi_schedule(napi);
364 }
365
366 channel->tx_timer_active = 0;
367}
368
369static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
370{
371 struct xlgmac_channel *channel;
372 unsigned int i;
373
374 channel = pdata->channel_head;
375 for (i = 0; i < pdata->channel_count; i++, channel++) {
376 if (!channel->tx_ring)
377 break;
378
379 setup_timer(&channel->tx_timer, xlgmac_tx_timer,
380 (unsigned long)channel);
381 }
382}
383
384static void xlgmac_stop_timers(struct xlgmac_pdata *pdata)
385{
386 struct xlgmac_channel *channel;
387 unsigned int i;
388
389 channel = pdata->channel_head;
390 for (i = 0; i < pdata->channel_count; i++, channel++) {
391 if (!channel->tx_ring)
392 break;
393
394 del_timer_sync(&channel->tx_timer);
395 }
396}
397
398static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add)
399{
400 struct xlgmac_channel *channel;
401 unsigned int i;
402
403 if (pdata->per_channel_irq) {
404 channel = pdata->channel_head;
405 for (i = 0; i < pdata->channel_count; i++, channel++) {
406 if (add)
407 netif_napi_add(pdata->netdev, &channel->napi,
408 xlgmac_one_poll,
409 NAPI_POLL_WEIGHT);
410
411 napi_enable(&channel->napi);
412 }
413 } else {
414 if (add)
415 netif_napi_add(pdata->netdev, &pdata->napi,
416 xlgmac_all_poll, NAPI_POLL_WEIGHT);
417
418 napi_enable(&pdata->napi);
419 }
420}
421
422static void xlgmac_napi_disable(struct xlgmac_pdata *pdata, unsigned int del)
423{
424 struct xlgmac_channel *channel;
425 unsigned int i;
426
427 if (pdata->per_channel_irq) {
428 channel = pdata->channel_head;
429 for (i = 0; i < pdata->channel_count; i++, channel++) {
430 napi_disable(&channel->napi);
431
432 if (del)
433 netif_napi_del(&channel->napi);
434 }
435 } else {
436 napi_disable(&pdata->napi);
437
438 if (del)
439 netif_napi_del(&pdata->napi);
440 }
441}
442
443static int xlgmac_request_irqs(struct xlgmac_pdata *pdata)
444{
445 struct net_device *netdev = pdata->netdev;
446 struct xlgmac_channel *channel;
447 unsigned int i;
448 int ret;
449
450 ret = devm_request_irq(pdata->dev, pdata->dev_irq, xlgmac_isr,
451 IRQF_SHARED, netdev->name, pdata);
452 if (ret) {
453 netdev_alert(netdev, "error requesting irq %d\n",
454 pdata->dev_irq);
455 return ret;
456 }
457
458 if (!pdata->per_channel_irq)
459 return 0;
460
461 channel = pdata->channel_head;
462 for (i = 0; i < pdata->channel_count; i++, channel++) {
463 snprintf(channel->dma_irq_name,
464 sizeof(channel->dma_irq_name) - 1,
465 "%s-TxRx-%u", netdev_name(netdev),
466 channel->queue_index);
467
468 ret = devm_request_irq(pdata->dev, channel->dma_irq,
469 xlgmac_dma_isr, 0,
470 channel->dma_irq_name, channel);
471 if (ret) {
472 netdev_alert(netdev, "error requesting irq %d\n",
473 channel->dma_irq);
474 goto err_irq;
475 }
476 }
477
478 return 0;
479
480err_irq:
481	/* 'i' is unsigned, so after 0 it wraps to UINT_MAX and the loop exits */
482 for (i--, channel--; i < pdata->channel_count; i--, channel--)
483 devm_free_irq(pdata->dev, channel->dma_irq, channel);
484
485 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
486
487 return ret;
488}
489
490static void xlgmac_free_irqs(struct xlgmac_pdata *pdata)
491{
492 struct xlgmac_channel *channel;
493 unsigned int i;
494
495 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
496
497 if (!pdata->per_channel_irq)
498 return;
499
500 channel = pdata->channel_head;
501 for (i = 0; i < pdata->channel_count; i++, channel++)
502 devm_free_irq(pdata->dev, channel->dma_irq, channel);
503}
504
505static void xlgmac_free_tx_data(struct xlgmac_pdata *pdata)
506{
507 struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
508 struct xlgmac_desc_data *desc_data;
509 struct xlgmac_channel *channel;
510 struct xlgmac_ring *ring;
511 unsigned int i, j;
512
513 channel = pdata->channel_head;
514 for (i = 0; i < pdata->channel_count; i++, channel++) {
515 ring = channel->tx_ring;
516 if (!ring)
517 break;
518
519 for (j = 0; j < ring->dma_desc_count; j++) {
520 desc_data = XLGMAC_GET_DESC_DATA(ring, j);
521 desc_ops->unmap_desc_data(pdata, desc_data);
522 }
523 }
524}
525
526static void xlgmac_free_rx_data(struct xlgmac_pdata *pdata)
527{
528 struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
529 struct xlgmac_desc_data *desc_data;
530 struct xlgmac_channel *channel;
531 struct xlgmac_ring *ring;
532 unsigned int i, j;
533
534 channel = pdata->channel_head;
535 for (i = 0; i < pdata->channel_count; i++, channel++) {
536 ring = channel->rx_ring;
537 if (!ring)
538 break;
539
540 for (j = 0; j < ring->dma_desc_count; j++) {
541 desc_data = XLGMAC_GET_DESC_DATA(ring, j);
542 desc_ops->unmap_desc_data(pdata, desc_data);
543 }
544 }
545}
546
547static int xlgmac_start(struct xlgmac_pdata *pdata)
548{
549 struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
550 struct net_device *netdev = pdata->netdev;
551 int ret;
552
553 hw_ops->init(pdata);
554 xlgmac_napi_enable(pdata, 1);
555
556 ret = xlgmac_request_irqs(pdata);
557 if (ret)
558 goto err_napi;
559
560 hw_ops->enable_tx(pdata);
561 hw_ops->enable_rx(pdata);
562 netif_tx_start_all_queues(netdev);
563
564 return 0;
565
566err_napi:
567 xlgmac_napi_disable(pdata, 1);
568 hw_ops->exit(pdata);
569
570 return ret;
571}
572
573static void xlgmac_stop(struct xlgmac_pdata *pdata)
574{
575 struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
576 struct net_device *netdev = pdata->netdev;
577 struct xlgmac_channel *channel;
578 struct netdev_queue *txq;
579 unsigned int i;
580
581 netif_tx_stop_all_queues(netdev);
582 xlgmac_stop_timers(pdata);
583 hw_ops->disable_tx(pdata);
584 hw_ops->disable_rx(pdata);
585 xlgmac_free_irqs(pdata);
586 xlgmac_napi_disable(pdata, 1);
587 hw_ops->exit(pdata);
588
589 channel = pdata->channel_head;
590 for (i = 0; i < pdata->channel_count; i++, channel++) {
591 if (!channel->tx_ring)
592 continue;
593
594 txq = netdev_get_tx_queue(netdev, channel->queue_index);
595 netdev_tx_reset_queue(txq);
596 }
597}
598
599static void xlgmac_restart_dev(struct xlgmac_pdata *pdata)
600{
601 /* If not running, "restart" will happen on open */
602 if (!netif_running(pdata->netdev))
603 return;
604
605 xlgmac_stop(pdata);
606
607 xlgmac_free_tx_data(pdata);
608 xlgmac_free_rx_data(pdata);
609
610 xlgmac_start(pdata);
611}
612
613static void xlgmac_restart(struct work_struct *work)
614{
615 struct xlgmac_pdata *pdata = container_of(work,
616 struct xlgmac_pdata,
617 restart_work);
618
619 rtnl_lock();
620
621 xlgmac_restart_dev(pdata);
622
623 rtnl_unlock();
624}
625
626static int xlgmac_open(struct net_device *netdev)
627{
628 struct xlgmac_pdata *pdata = netdev_priv(netdev);
629 struct xlgmac_desc_ops *desc_ops;
630 int ret;
631
632 desc_ops = &pdata->desc_ops;
633
634 /* TODO: Initialize the phy */
635
636 /* Calculate the Rx buffer size before allocating rings */
637 ret = xlgmac_calc_rx_buf_size(netdev, netdev->mtu);
638 if (ret < 0)
639 return ret;
640 pdata->rx_buf_size = ret;
641
642 /* Allocate the channels and rings */
643 ret = desc_ops->alloc_channles_and_rings(pdata);
644 if (ret)
645 return ret;
646
647 INIT_WORK(&pdata->restart_work, xlgmac_restart);
648 xlgmac_init_timers(pdata);
649
650 ret = xlgmac_start(pdata);
651 if (ret)
652 goto err_channels_and_rings;
653
654 return 0;
655
656err_channels_and_rings:
657 desc_ops->free_channels_and_rings(pdata);
658
659 return ret;
660}
661
662static int xlgmac_close(struct net_device *netdev)
663{
664 struct xlgmac_pdata *pdata = netdev_priv(netdev);
665 struct xlgmac_desc_ops *desc_ops;
666
667 desc_ops = &pdata->desc_ops;
668
669 /* Stop the device */
670 xlgmac_stop(pdata);
671
672 /* Free the channels and rings */
673 desc_ops->free_channels_and_rings(pdata);
674
675 return 0;
676}
677
678static void xlgmac_tx_timeout(struct net_device *netdev)
679{
680 struct xlgmac_pdata *pdata = netdev_priv(netdev);
681
682 netdev_warn(netdev, "tx timeout, device restarting\n");
683 schedule_work(&pdata->restart_work);
684}
685
686static int xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev)
687{
688 struct xlgmac_pdata *pdata = netdev_priv(netdev);
689 struct xlgmac_pkt_info *tx_pkt_info;
690 struct xlgmac_desc_ops *desc_ops;
691 struct xlgmac_channel *channel;
692 struct xlgmac_hw_ops *hw_ops;
693 struct netdev_queue *txq;
694 struct xlgmac_ring *ring;
695 int ret;
696
697 desc_ops = &pdata->desc_ops;
698 hw_ops = &pdata->hw_ops;
699
700 XLGMAC_PR("skb->len = %d\n", skb->len);
701
702 channel = pdata->channel_head + skb->queue_mapping;
703 txq = netdev_get_tx_queue(netdev, channel->queue_index);
704 ring = channel->tx_ring;
705 tx_pkt_info = &ring->pkt_info;
706
707 if (skb->len == 0) {
708 netif_err(pdata, tx_err, netdev,
709 "empty skb received from stack\n");
710 dev_kfree_skb_any(skb);
711 return NETDEV_TX_OK;
712 }
713
714 /* Prepare preliminary packet info for TX */
715 memset(tx_pkt_info, 0, sizeof(*tx_pkt_info));
716 xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info);
717
718 /* Check that there are enough descriptors available */
719 ret = xlgmac_maybe_stop_tx_queue(channel, ring,
720 tx_pkt_info->desc_count);
721 if (ret)
722 return ret;
723
724 ret = xlgmac_prep_tso(skb, tx_pkt_info);
725 if (ret) {
726 netif_err(pdata, tx_err, netdev,
727 "error processing TSO packet\n");
728 dev_kfree_skb_any(skb);
729 return ret;
730 }
731 xlgmac_prep_vlan(skb, tx_pkt_info);
732
733 if (!desc_ops->map_tx_skb(channel, skb)) {
734 dev_kfree_skb_any(skb);
735 return NETDEV_TX_OK;
736 }
737
738 /* Report on the actual number of bytes (to be) sent */
739 netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes);
740
741 /* Configure required descriptor fields for transmission */
742 hw_ops->dev_xmit(channel);
743
744 if (netif_msg_pktdata(pdata))
745 xlgmac_print_pkt(netdev, skb, true);
746
747 /* Stop the queue in advance if there may not be enough descriptors */
748 xlgmac_maybe_stop_tx_queue(channel, ring, XLGMAC_TX_MAX_DESC_NR);
749
750 return NETDEV_TX_OK;
751}
752
753static void xlgmac_get_stats64(struct net_device *netdev,
754 struct rtnl_link_stats64 *s)
755{
756 struct xlgmac_pdata *pdata = netdev_priv(netdev);
757 struct xlgmac_stats *pstats = &pdata->stats;
758
759 pdata->hw_ops.read_mmc_stats(pdata);
760
761 s->rx_packets = pstats->rxframecount_gb;
762 s->rx_bytes = pstats->rxoctetcount_gb;
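	/* rxframecount_gb counts good and bad frames, so subtracting the good
	 * unicast, multicast and broadcast counts leaves the errored frames
	 */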
763 s->rx_errors = pstats->rxframecount_gb -
764 pstats->rxbroadcastframes_g -
765 pstats->rxmulticastframes_g -
766 pstats->rxunicastframes_g;
767 s->multicast = pstats->rxmulticastframes_g;
768 s->rx_length_errors = pstats->rxlengtherror;
769 s->rx_crc_errors = pstats->rxcrcerror;
770 s->rx_fifo_errors = pstats->rxfifooverflow;
771
772 s->tx_packets = pstats->txframecount_gb;
773 s->tx_bytes = pstats->txoctetcount_gb;
774 s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
775 s->tx_dropped = netdev->stats.tx_dropped;
776}
777
778static int xlgmac_set_mac_address(struct net_device *netdev, void *addr)
779{
780 struct xlgmac_pdata *pdata = netdev_priv(netdev);
781 struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
782 struct sockaddr *saddr = addr;
783
784 if (!is_valid_ether_addr(saddr->sa_data))
785 return -EADDRNOTAVAIL;
786
787 memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
788
789 hw_ops->set_mac_address(pdata, netdev->dev_addr);
790
791 return 0;
792}
793
794static int xlgmac_ioctl(struct net_device *netdev,
795 struct ifreq *ifreq, int cmd)
796{
797 if (!netif_running(netdev))
798 return -ENODEV;
799
800 return 0;
801}
802
803static int xlgmac_change_mtu(struct net_device *netdev, int mtu)
804{
805 struct xlgmac_pdata *pdata = netdev_priv(netdev);
806 int ret;
807
808 ret = xlgmac_calc_rx_buf_size(netdev, mtu);
809 if (ret < 0)
810 return ret;
811
812 pdata->rx_buf_size = ret;
813 netdev->mtu = mtu;
814
815 xlgmac_restart_dev(pdata);
816
817 return 0;
818}
819
820static int xlgmac_vlan_rx_add_vid(struct net_device *netdev,
821 __be16 proto,
822 u16 vid)
823{
824 struct xlgmac_pdata *pdata = netdev_priv(netdev);
825 struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
826
827 set_bit(vid, pdata->active_vlans);
828 hw_ops->update_vlan_hash_table(pdata);
829
830 return 0;
831}
832
833static int xlgmac_vlan_rx_kill_vid(struct net_device *netdev,
834 __be16 proto,
835 u16 vid)
836{
837 struct xlgmac_pdata *pdata = netdev_priv(netdev);
838 struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
839
840 clear_bit(vid, pdata->active_vlans);
841 hw_ops->update_vlan_hash_table(pdata);
842
843 return 0;
844}
845
846#ifdef CONFIG_NET_POLL_CONTROLLER
847static void xlgmac_poll_controller(struct net_device *netdev)
848{
849 struct xlgmac_pdata *pdata = netdev_priv(netdev);
850 struct xlgmac_channel *channel;
851 unsigned int i;
852
853 if (pdata->per_channel_irq) {
854 channel = pdata->channel_head;
855 for (i = 0; i < pdata->channel_count; i++, channel++)
856 xlgmac_dma_isr(channel->dma_irq, channel);
857 } else {
858 disable_irq(pdata->dev_irq);
859 xlgmac_isr(pdata->dev_irq, pdata);
860 enable_irq(pdata->dev_irq);
861 }
862}
863#endif /* CONFIG_NET_POLL_CONTROLLER */
864
865static int xlgmac_set_features(struct net_device *netdev,
866 netdev_features_t features)
867{
868 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
869 struct xlgmac_pdata *pdata = netdev_priv(netdev);
870 struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
871 int ret = 0;
872
873 rxhash = pdata->netdev_features & NETIF_F_RXHASH;
874 rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
875 rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
876 rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
877
878 if ((features & NETIF_F_RXHASH) && !rxhash)
879 ret = hw_ops->enable_rss(pdata);
880 else if (!(features & NETIF_F_RXHASH) && rxhash)
881 ret = hw_ops->disable_rss(pdata);
882 if (ret)
883 return ret;
884
885 if ((features & NETIF_F_RXCSUM) && !rxcsum)
886 hw_ops->enable_rx_csum(pdata);
887 else if (!(features & NETIF_F_RXCSUM) && rxcsum)
888 hw_ops->disable_rx_csum(pdata);
889
890 if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
891 hw_ops->enable_rx_vlan_stripping(pdata);
892 else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
893 hw_ops->disable_rx_vlan_stripping(pdata);
894
895 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
896 hw_ops->enable_rx_vlan_filtering(pdata);
897 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
898 hw_ops->disable_rx_vlan_filtering(pdata);
899
900 pdata->netdev_features = features;
901
902 return 0;
903}
904
905static void xlgmac_set_rx_mode(struct net_device *netdev)
906{
907 struct xlgmac_pdata *pdata = netdev_priv(netdev);
908 struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
909
910 hw_ops->config_rx_mode(pdata);
911}
912
913static const struct net_device_ops xlgmac_netdev_ops = {
914 .ndo_open = xlgmac_open,
915 .ndo_stop = xlgmac_close,
916 .ndo_start_xmit = xlgmac_xmit,
917 .ndo_tx_timeout = xlgmac_tx_timeout,
918 .ndo_get_stats64 = xlgmac_get_stats64,
919 .ndo_change_mtu = xlgmac_change_mtu,
920 .ndo_set_mac_address = xlgmac_set_mac_address,
921 .ndo_validate_addr = eth_validate_addr,
922 .ndo_do_ioctl = xlgmac_ioctl,
923 .ndo_vlan_rx_add_vid = xlgmac_vlan_rx_add_vid,
924 .ndo_vlan_rx_kill_vid = xlgmac_vlan_rx_kill_vid,
925#ifdef CONFIG_NET_POLL_CONTROLLER
926 .ndo_poll_controller = xlgmac_poll_controller,
927#endif
928 .ndo_set_features = xlgmac_set_features,
929 .ndo_set_rx_mode = xlgmac_set_rx_mode,
930};
931
932const struct net_device_ops *xlgmac_get_netdev_ops(void)
933{
934 return &xlgmac_netdev_ops;
935}
936
937static void xlgmac_rx_refresh(struct xlgmac_channel *channel)
938{
939 struct xlgmac_pdata *pdata = channel->pdata;
940 struct xlgmac_ring *ring = channel->rx_ring;
941 struct xlgmac_desc_data *desc_data;
942 struct xlgmac_desc_ops *desc_ops;
943 struct xlgmac_hw_ops *hw_ops;
944
945 desc_ops = &pdata->desc_ops;
946 hw_ops = &pdata->hw_ops;
947
948 while (ring->dirty != ring->cur) {
949 desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
950
951 /* Reset desc_data values */
952 desc_ops->unmap_desc_data(pdata, desc_data);
953
954 if (desc_ops->map_rx_buffer(pdata, ring, desc_data))
955 break;
956
957 hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty);
958
959 ring->dirty++;
960 }
961
962 /* Make sure everything is written before the register write */
963 wmb();
964
965	/* Update the Rx Tail Pointer Register with the address of
966	 * the last cleaned entry
967 */
968 desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1);
969 writel(lower_32_bits(desc_data->dma_desc_addr),
970 XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
971}
972
973static struct sk_buff *xlgmac_create_skb(struct xlgmac_pdata *pdata,
974 struct napi_struct *napi,
975 struct xlgmac_desc_data *desc_data,
976 unsigned int len)
977{
978 unsigned int copy_len;
979 struct sk_buff *skb;
980 u8 *packet;
981
982 skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len);
983 if (!skb)
984 return NULL;
985
986 /* Start with the header buffer which may contain just the header
987 * or the header plus data
988 */
989 dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base,
990 desc_data->rx.hdr.dma_off,
991 desc_data->rx.hdr.dma_len,
992 DMA_FROM_DEVICE);
993
994 packet = page_address(desc_data->rx.hdr.pa.pages) +
995 desc_data->rx.hdr.pa.pages_offset;
996 copy_len = (desc_data->rx.hdr_len) ? desc_data->rx.hdr_len : len;
997 copy_len = min(desc_data->rx.hdr.dma_len, copy_len);
998 skb_copy_to_linear_data(skb, packet, copy_len);
999 skb_put(skb, copy_len);
1000
1001 len -= copy_len;
1002 if (len) {
1003 /* Add the remaining data as a frag */
1004 dma_sync_single_range_for_cpu(pdata->dev,
1005 desc_data->rx.buf.dma_base,
1006 desc_data->rx.buf.dma_off,
1007 desc_data->rx.buf.dma_len,
1008 DMA_FROM_DEVICE);
1009
1010 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1011 desc_data->rx.buf.pa.pages,
1012 desc_data->rx.buf.pa.pages_offset,
1013 len, desc_data->rx.buf.dma_len);
1014 desc_data->rx.buf.pa.pages = NULL;
1015 }
1016
1017 return skb;
1018}
1019
1020static int xlgmac_tx_poll(struct xlgmac_channel *channel)
1021{
1022 struct xlgmac_pdata *pdata = channel->pdata;
1023 struct xlgmac_ring *ring = channel->tx_ring;
1024 struct net_device *netdev = pdata->netdev;
1025 unsigned int tx_packets = 0, tx_bytes = 0;
1026 struct xlgmac_desc_data *desc_data;
1027 struct xlgmac_dma_desc *dma_desc;
1028 struct xlgmac_desc_ops *desc_ops;
1029 struct xlgmac_hw_ops *hw_ops;
1030 struct netdev_queue *txq;
1031 int processed = 0;
1032 unsigned int cur;
1033
1034 desc_ops = &pdata->desc_ops;
1035 hw_ops = &pdata->hw_ops;
1036
1037 /* Nothing to do if there isn't a Tx ring for this channel */
1038 if (!ring)
1039 return 0;
1040
1041 cur = ring->cur;
1042
1043 /* Be sure we get ring->cur before accessing descriptor data */
1044 smp_rmb();
1045
1046 txq = netdev_get_tx_queue(netdev, channel->queue_index);
1047
1048 while ((processed < XLGMAC_TX_DESC_MAX_PROC) &&
1049 (ring->dirty != cur)) {
1050 desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
1051 dma_desc = desc_data->dma_desc;
1052
1053 if (!hw_ops->tx_complete(dma_desc))
1054 break;
1055
1056 /* Make sure descriptor fields are read after reading
1057 * the OWN bit
1058 */
1059 dma_rmb();
1060
1061 if (netif_msg_tx_done(pdata))
1062 xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
1063
1064 if (hw_ops->is_last_desc(dma_desc)) {
1065 tx_packets += desc_data->tx.packets;
1066 tx_bytes += desc_data->tx.bytes;
1067 }
1068
1069 /* Free the SKB and reset the descriptor for re-use */
1070 desc_ops->unmap_desc_data(pdata, desc_data);
1071 hw_ops->tx_desc_reset(desc_data);
1072
1073 processed++;
1074 ring->dirty++;
1075 }
1076
1077 if (!processed)
1078 return 0;
1079
1080 netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
1081
1082 if ((ring->tx.queue_stopped == 1) &&
1083 (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) {
1084 ring->tx.queue_stopped = 0;
1085 netif_tx_wake_queue(txq);
1086 }
1087
1088 XLGMAC_PR("processed=%d\n", processed);
1089
1090 return processed;
1091}
1092
1093static int xlgmac_rx_poll(struct xlgmac_channel *channel, int budget)
1094{
1095 struct xlgmac_pdata *pdata = channel->pdata;
1096 struct xlgmac_ring *ring = channel->rx_ring;
1097 struct net_device *netdev = pdata->netdev;
1098 unsigned int len, dma_desc_len, max_len;
1099 unsigned int context_next, context;
1100 struct xlgmac_desc_data *desc_data;
1101 struct xlgmac_pkt_info *pkt_info;
1102 unsigned int incomplete, error;
1103 struct xlgmac_hw_ops *hw_ops;
1104 unsigned int received = 0;
1105 struct napi_struct *napi;
1106 struct sk_buff *skb;
1107 int packet_count = 0;
1108
1109 hw_ops = &pdata->hw_ops;
1110
1111 /* Nothing to do if there isn't a Rx ring for this channel */
1112 if (!ring)
1113 return 0;
1114
1115 incomplete = 0;
1116 context_next = 0;
1117
1118 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
1119
1120 desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
1121 pkt_info = &ring->pkt_info;
1122 while (packet_count < budget) {
1123		/* On the first pass through the loop, see if we need to restore state */
1124 if (!received && desc_data->state_saved) {
1125 skb = desc_data->state.skb;
1126 error = desc_data->state.error;
1127 len = desc_data->state.len;
1128 } else {
1129 memset(pkt_info, 0, sizeof(*pkt_info));
1130 skb = NULL;
1131 error = 0;
1132 len = 0;
1133 }
1134
1135read_again:
1136 desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
1137
1138 if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY)
1139 xlgmac_rx_refresh(channel);
1140
1141 if (hw_ops->dev_read(channel))
1142 break;
1143
1144 received++;
1145 ring->cur++;
1146
1147 incomplete = XLGMAC_GET_REG_BITS(
1148 pkt_info->attributes,
1149 RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
1150 RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN);
1151 context_next = XLGMAC_GET_REG_BITS(
1152 pkt_info->attributes,
1153 RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
1154 RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN);
1155 context = XLGMAC_GET_REG_BITS(
1156 pkt_info->attributes,
1157 RX_PACKET_ATTRIBUTES_CONTEXT_POS,
1158 RX_PACKET_ATTRIBUTES_CONTEXT_LEN);
1159
1160 /* Earlier error, just drain the remaining data */
1161 if ((incomplete || context_next) && error)
1162 goto read_again;
1163
1164 if (error || pkt_info->errors) {
1165 if (pkt_info->errors)
1166 netif_err(pdata, rx_err, netdev,
1167 "error in received packet\n");
1168 dev_kfree_skb(skb);
1169 goto next_packet;
1170 }
1171
1172 if (!context) {
1173 /* Length is cumulative, get this descriptor's length */
1174 dma_desc_len = desc_data->rx.len - len;
1175 len += dma_desc_len;
1176
1177 if (dma_desc_len && !skb) {
1178 skb = xlgmac_create_skb(pdata, napi, desc_data,
1179 dma_desc_len);
1180 if (!skb)
1181 error = 1;
1182 } else if (dma_desc_len) {
1183 dma_sync_single_range_for_cpu(
1184 pdata->dev,
1185 desc_data->rx.buf.dma_base,
1186 desc_data->rx.buf.dma_off,
1187 desc_data->rx.buf.dma_len,
1188 DMA_FROM_DEVICE);
1189
1190 skb_add_rx_frag(
1191 skb, skb_shinfo(skb)->nr_frags,
1192 desc_data->rx.buf.pa.pages,
1193 desc_data->rx.buf.pa.pages_offset,
1194 dma_desc_len,
1195 desc_data->rx.buf.dma_len);
1196 desc_data->rx.buf.pa.pages = NULL;
1197 }
1198 }
1199
1200 if (incomplete || context_next)
1201 goto read_again;
1202
1203 if (!skb)
1204 goto next_packet;
1205
1206 /* Be sure we don't exceed the configured MTU */
1207 max_len = netdev->mtu + ETH_HLEN;
1208 if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1209 (skb->protocol == htons(ETH_P_8021Q)))
1210 max_len += VLAN_HLEN;
1211
1212 if (skb->len > max_len) {
1213 netif_err(pdata, rx_err, netdev,
1214 "packet length exceeds configured MTU\n");
1215 dev_kfree_skb(skb);
1216 goto next_packet;
1217 }
1218
1219 if (netif_msg_pktdata(pdata))
1220 xlgmac_print_pkt(netdev, skb, false);
1221
1222 skb_checksum_none_assert(skb);
1223 if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
1224 RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
1225 RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN))
1226 skb->ip_summed = CHECKSUM_UNNECESSARY;
1227
1228 if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
1229 RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
1230 RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN))
1231 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1232 pkt_info->vlan_ctag);
1233
1234 if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
1235 RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
1236 RX_PACKET_ATTRIBUTES_RSS_HASH_LEN))
1237 skb_set_hash(skb, pkt_info->rss_hash,
1238 pkt_info->rss_hash_type);
1239
1240 skb->dev = netdev;
1241 skb->protocol = eth_type_trans(skb, netdev);
1242 skb_record_rx_queue(skb, channel->queue_index);
1243
1244 napi_gro_receive(napi, skb);
1245
1246next_packet:
1247 packet_count++;
1248 }
1249
1250 /* Check if we need to save state before leaving */
1251 if (received && (incomplete || context_next)) {
1252 desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
1253 desc_data->state_saved = 1;
1254 desc_data->state.skb = skb;
1255 desc_data->state.len = len;
1256 desc_data->state.error = error;
1257 }
1258
1259 XLGMAC_PR("packet_count = %d\n", packet_count);
1260
1261 return packet_count;
1262}
1263
1264static int xlgmac_one_poll(struct napi_struct *napi, int budget)
1265{
1266 struct xlgmac_channel *channel = container_of(napi,
1267 struct xlgmac_channel,
1268 napi);
1269 int processed = 0;
1270
1271 XLGMAC_PR("budget=%d\n", budget);
1272
1273 /* Cleanup Tx ring first */
1274 xlgmac_tx_poll(channel);
1275
1276 /* Process Rx ring next */
1277 processed = xlgmac_rx_poll(channel, budget);
1278
1279 /* If we processed everything, we are done */
1280 if (processed < budget) {
1281 /* Turn off polling */
1282 napi_complete_done(napi, processed);
1283
1284 /* Enable Tx and Rx interrupts */
1285 enable_irq(channel->dma_irq);
1286 }
1287
1288 XLGMAC_PR("received = %d\n", processed);
1289
1290 return processed;
1291}
1292
1293static int xlgmac_all_poll(struct napi_struct *napi, int budget)
1294{
1295 struct xlgmac_pdata *pdata = container_of(napi,
1296 struct xlgmac_pdata,
1297 napi);
1298 struct xlgmac_channel *channel;
1299 int processed, last_processed;
1300 int ring_budget;
1301 unsigned int i;
1302
1303 XLGMAC_PR("budget=%d\n", budget);
1304
1305 processed = 0;
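	/* Split the napi budget evenly across the Rx rings (e.g. a budget of
	 * 64 across 4 rings allows up to 16 packets per ring per pass); the
	 * loop below keeps iterating while progress is still being made
	 */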
1306 ring_budget = budget / pdata->rx_ring_count;
1307 do {
1308 last_processed = processed;
1309
1310 channel = pdata->channel_head;
1311 for (i = 0; i < pdata->channel_count; i++, channel++) {
1312 /* Cleanup Tx ring first */
1313 xlgmac_tx_poll(channel);
1314
1315 /* Process Rx ring next */
1316 if (ring_budget > (budget - processed))
1317 ring_budget = budget - processed;
1318 processed += xlgmac_rx_poll(channel, ring_budget);
1319 }
1320 } while ((processed < budget) && (processed != last_processed));
1321
1322 /* If we processed everything, we are done */
1323 if (processed < budget) {
1324 /* Turn off polling */
1325 napi_complete_done(napi, processed);
1326
1327 /* Enable Tx and Rx interrupts */
1328 xlgmac_enable_rx_tx_ints(pdata);
1329 }
1330
1331 XLGMAC_PR("received = %d\n", processed);
1332
1333 return processed;
1334}
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
new file mode 100644
index 000000000000..504e80de7bba
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
@@ -0,0 +1,80 @@
1/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
2 *
3 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * This Synopsys DWC XLGMAC software driver and associated documentation
11 * (hereinafter the "Software") is an unsupported proprietary work of
12 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
13 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
14 * Licensed Product under any End User Software License Agreement or
15 * Agreement for Licensed Products with Synopsys or any supplement thereto.
16 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
17 * in the SOFTWARE may be the trademarks of their respective owners.
18 */
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/pci.h>
23
24#include "dwc-xlgmac.h"
25#include "dwc-xlgmac-reg.h"
26
27static int xlgmac_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
28{
29 struct device *dev = &pcidev->dev;
30 struct xlgmac_resources res;
31 int i, ret;
32
33 ret = pcim_enable_device(pcidev);
34 if (ret) {
35 dev_err(dev, "ERROR: failed to enable device\n");
36 return ret;
37 }
38
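	/* Map the first BAR with a non-zero length; res.addr below points at
	 * that mapping
	 */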
39 for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
40 if (pci_resource_len(pcidev, i) == 0)
41 continue;
42 ret = pcim_iomap_regions(pcidev, BIT(i), XLGMAC_DRV_NAME);
43 if (ret)
44 return ret;
45 break;
46 }
47
48 pci_set_master(pcidev);
49
50 memset(&res, 0, sizeof(res));
51 res.irq = pcidev->irq;
52 res.addr = pcim_iomap_table(pcidev)[i];
53
54 return xlgmac_drv_probe(&pcidev->dev, &res);
55}
56
57static void xlgmac_remove(struct pci_dev *pcidev)
58{
59 xlgmac_drv_remove(&pcidev->dev);
60}
61
62static const struct pci_device_id xlgmac_pci_tbl[] = {
63 { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0x7302) },
64 { 0 }
65};
66MODULE_DEVICE_TABLE(pci, xlgmac_pci_tbl);
67
68static struct pci_driver xlgmac_pci_driver = {
69 .name = XLGMAC_DRV_NAME,
70 .id_table = xlgmac_pci_tbl,
71 .probe = xlgmac_probe,
72 .remove = xlgmac_remove,
73};
74
75module_pci_driver(xlgmac_pci_driver);
76
77MODULE_DESCRIPTION(XLGMAC_DRV_DESC);
78MODULE_VERSION(XLGMAC_DRV_VERSION);
79MODULE_AUTHOR("Jie Deng <jiedeng@synopsys.com>");
80MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h b/drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h
new file mode 100644
index 000000000000..782448128a89
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h
@@ -0,0 +1,746 @@
1/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
2 *
3 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * This Synopsys DWC XLGMAC software driver and associated documentation
11 * (hereinafter the "Software") is an unsupported proprietary work of
12 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
13 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
14 * Licensed Product under any End User Software License Agreement or
15 * Agreement for Licensed Products with Synopsys or any supplement thereto.
16 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
17 * in the SOFTWARE may be the trademarks of their respective owners.
18 */
19
20#ifndef __DWC_XLGMAC_REG_H__
21#define __DWC_XLGMAC_REG_H__
22
23/* MAC register offsets */
24#define MAC_TCR 0x0000
25#define MAC_RCR 0x0004
26#define MAC_PFR 0x0008
27#define MAC_HTR0 0x0010
28#define MAC_VLANTR 0x0050
29#define MAC_VLANHTR 0x0058
30#define MAC_VLANIR 0x0060
31#define MAC_Q0TFCR 0x0070
32#define MAC_RFCR 0x0090
33#define MAC_RQC0R 0x00a0
34#define MAC_RQC1R 0x00a4
35#define MAC_RQC2R 0x00a8
36#define MAC_RQC3R 0x00ac
37#define MAC_ISR 0x00b0
38#define MAC_IER 0x00b4
39#define MAC_VR 0x0110
40#define MAC_HWF0R 0x011c
41#define MAC_HWF1R 0x0120
42#define MAC_HWF2R 0x0124
43#define MAC_MACA0HR 0x0300
44#define MAC_MACA0LR 0x0304
45#define MAC_MACA1HR 0x0308
46#define MAC_MACA1LR 0x030c
47#define MAC_RSSCR 0x0c80
48#define MAC_RSSAR 0x0c88
49#define MAC_RSSDR 0x0c8c
50
51#define MAC_QTFCR_INC 4
52#define MAC_MACA_INC 4
53#define MAC_HTR_INC 4
54#define MAC_RQC2_INC 4
55#define MAC_RQC2_Q_PER_REG 4
56
57/* MAC register entry bit positions and sizes */
58#define MAC_HWF0R_ADDMACADRSEL_POS 18
59#define MAC_HWF0R_ADDMACADRSEL_LEN 5
60#define MAC_HWF0R_ARPOFFSEL_POS 9
61#define MAC_HWF0R_ARPOFFSEL_LEN 1
62#define MAC_HWF0R_EEESEL_POS 13
63#define MAC_HWF0R_EEESEL_LEN 1
64#define MAC_HWF0R_PHYIFSEL_POS 1
65#define MAC_HWF0R_PHYIFSEL_LEN 2
66#define MAC_HWF0R_MGKSEL_POS 7
67#define MAC_HWF0R_MGKSEL_LEN 1
68#define MAC_HWF0R_MMCSEL_POS 8
69#define MAC_HWF0R_MMCSEL_LEN 1
70#define MAC_HWF0R_RWKSEL_POS 6
71#define MAC_HWF0R_RWKSEL_LEN 1
72#define MAC_HWF0R_RXCOESEL_POS 16
73#define MAC_HWF0R_RXCOESEL_LEN 1
74#define MAC_HWF0R_SAVLANINS_POS 27
75#define MAC_HWF0R_SAVLANINS_LEN 1
76#define MAC_HWF0R_SMASEL_POS 5
77#define MAC_HWF0R_SMASEL_LEN 1
78#define MAC_HWF0R_TSSEL_POS 12
79#define MAC_HWF0R_TSSEL_LEN 1
80#define MAC_HWF0R_TSSTSSEL_POS 25
81#define MAC_HWF0R_TSSTSSEL_LEN 2
82#define MAC_HWF0R_TXCOESEL_POS 14
83#define MAC_HWF0R_TXCOESEL_LEN 1
84#define MAC_HWF0R_VLHASH_POS 4
85#define MAC_HWF0R_VLHASH_LEN 1
86#define MAC_HWF1R_ADDR64_POS 14
87#define MAC_HWF1R_ADDR64_LEN 2
88#define MAC_HWF1R_ADVTHWORD_POS 13
89#define MAC_HWF1R_ADVTHWORD_LEN 1
90#define MAC_HWF1R_DBGMEMA_POS 19
91#define MAC_HWF1R_DBGMEMA_LEN 1
92#define MAC_HWF1R_DCBEN_POS 16
93#define MAC_HWF1R_DCBEN_LEN 1
94#define MAC_HWF1R_HASHTBLSZ_POS 24
95#define MAC_HWF1R_HASHTBLSZ_LEN 3
96#define MAC_HWF1R_L3L4FNUM_POS 27
97#define MAC_HWF1R_L3L4FNUM_LEN 4
98#define MAC_HWF1R_NUMTC_POS 21
99#define MAC_HWF1R_NUMTC_LEN 3
100#define MAC_HWF1R_RSSEN_POS 20
101#define MAC_HWF1R_RSSEN_LEN 1
102#define MAC_HWF1R_RXFIFOSIZE_POS 0
103#define MAC_HWF1R_RXFIFOSIZE_LEN 5
104#define MAC_HWF1R_SPHEN_POS 17
105#define MAC_HWF1R_SPHEN_LEN 1
106#define MAC_HWF1R_TSOEN_POS 18
107#define MAC_HWF1R_TSOEN_LEN 1
108#define MAC_HWF1R_TXFIFOSIZE_POS 6
109#define MAC_HWF1R_TXFIFOSIZE_LEN 5
110#define MAC_HWF2R_AUXSNAPNUM_POS 28
111#define MAC_HWF2R_AUXSNAPNUM_LEN 3
112#define MAC_HWF2R_PPSOUTNUM_POS 24
113#define MAC_HWF2R_PPSOUTNUM_LEN 3
114#define MAC_HWF2R_RXCHCNT_POS 12
115#define MAC_HWF2R_RXCHCNT_LEN 4
116#define MAC_HWF2R_RXQCNT_POS 0
117#define MAC_HWF2R_RXQCNT_LEN 4
118#define MAC_HWF2R_TXCHCNT_POS 18
119#define MAC_HWF2R_TXCHCNT_LEN 4
120#define MAC_HWF2R_TXQCNT_POS 6
121#define MAC_HWF2R_TXQCNT_LEN 4
122#define MAC_IER_TSIE_POS 12
123#define MAC_IER_TSIE_LEN 1
124#define MAC_ISR_MMCRXIS_POS 9
125#define MAC_ISR_MMCRXIS_LEN 1
126#define MAC_ISR_MMCTXIS_POS 10
127#define MAC_ISR_MMCTXIS_LEN 1
128#define MAC_ISR_PMTIS_POS 4
129#define MAC_ISR_PMTIS_LEN 1
130#define MAC_ISR_TSIS_POS 12
131#define MAC_ISR_TSIS_LEN 1
132#define MAC_MACA1HR_AE_POS 31
133#define MAC_MACA1HR_AE_LEN 1
134#define MAC_PFR_HMC_POS 2
135#define MAC_PFR_HMC_LEN 1
136#define MAC_PFR_HPF_POS 10
137#define MAC_PFR_HPF_LEN 1
138#define MAC_PFR_HUC_POS 1
139#define MAC_PFR_HUC_LEN 1
140#define MAC_PFR_PM_POS 4
141#define MAC_PFR_PM_LEN 1
142#define MAC_PFR_PR_POS 0
143#define MAC_PFR_PR_LEN 1
144#define MAC_PFR_VTFE_POS 16
145#define MAC_PFR_VTFE_LEN 1
146#define MAC_Q0TFCR_PT_POS 16
147#define MAC_Q0TFCR_PT_LEN 16
148#define MAC_Q0TFCR_TFE_POS 1
149#define MAC_Q0TFCR_TFE_LEN 1
150#define MAC_RCR_ACS_POS 1
151#define MAC_RCR_ACS_LEN 1
152#define MAC_RCR_CST_POS 2
153#define MAC_RCR_CST_LEN 1
154#define MAC_RCR_DCRCC_POS 3
155#define MAC_RCR_DCRCC_LEN 1
156#define MAC_RCR_HDSMS_POS 12
157#define MAC_RCR_HDSMS_LEN 3
158#define MAC_RCR_IPC_POS 9
159#define MAC_RCR_IPC_LEN 1
160#define MAC_RCR_JE_POS 8
161#define MAC_RCR_JE_LEN 1
162#define MAC_RCR_LM_POS 10
163#define MAC_RCR_LM_LEN 1
164#define MAC_RCR_RE_POS 0
165#define MAC_RCR_RE_LEN 1
166#define MAC_RFCR_PFCE_POS 8
167#define MAC_RFCR_PFCE_LEN 1
168#define MAC_RFCR_RFE_POS 0
169#define MAC_RFCR_RFE_LEN 1
170#define MAC_RFCR_UP_POS 1
171#define MAC_RFCR_UP_LEN 1
172#define MAC_RQC0R_RXQ0EN_POS 0
173#define MAC_RQC0R_RXQ0EN_LEN 2
174#define MAC_RSSAR_ADDRT_POS 2
175#define MAC_RSSAR_ADDRT_LEN 1
176#define MAC_RSSAR_CT_POS 1
177#define MAC_RSSAR_CT_LEN 1
178#define MAC_RSSAR_OB_POS 0
179#define MAC_RSSAR_OB_LEN 1
180#define MAC_RSSAR_RSSIA_POS 8
181#define MAC_RSSAR_RSSIA_LEN 8
182#define MAC_RSSCR_IP2TE_POS 1
183#define MAC_RSSCR_IP2TE_LEN 1
184#define MAC_RSSCR_RSSE_POS 0
185#define MAC_RSSCR_RSSE_LEN 1
186#define MAC_RSSCR_TCP4TE_POS 2
187#define MAC_RSSCR_TCP4TE_LEN 1
188#define MAC_RSSCR_UDP4TE_POS 3
189#define MAC_RSSCR_UDP4TE_LEN 1
190#define MAC_RSSDR_DMCH_POS 0
191#define MAC_RSSDR_DMCH_LEN 4
192#define MAC_TCR_SS_POS 28
193#define MAC_TCR_SS_LEN 3
194#define MAC_TCR_TE_POS 0
195#define MAC_TCR_TE_LEN 1
196#define MAC_VLANHTR_VLHT_POS 0
197#define MAC_VLANHTR_VLHT_LEN 16
198#define MAC_VLANIR_VLTI_POS 20
199#define MAC_VLANIR_VLTI_LEN 1
200#define MAC_VLANIR_CSVL_POS 19
201#define MAC_VLANIR_CSVL_LEN 1
202#define MAC_VLANTR_DOVLTC_POS 20
203#define MAC_VLANTR_DOVLTC_LEN 1
204#define MAC_VLANTR_ERSVLM_POS 19
205#define MAC_VLANTR_ERSVLM_LEN 1
206#define MAC_VLANTR_ESVL_POS 18
207#define MAC_VLANTR_ESVL_LEN 1
208#define MAC_VLANTR_ETV_POS 16
209#define MAC_VLANTR_ETV_LEN 1
210#define MAC_VLANTR_EVLS_POS 21
211#define MAC_VLANTR_EVLS_LEN 2
212#define MAC_VLANTR_EVLRXS_POS 24
213#define MAC_VLANTR_EVLRXS_LEN 1
214#define MAC_VLANTR_VL_POS 0
215#define MAC_VLANTR_VL_LEN 16
216#define MAC_VLANTR_VTHM_POS 25
217#define MAC_VLANTR_VTHM_LEN 1
218#define MAC_VLANTR_VTIM_POS 17
219#define MAC_VLANTR_VTIM_LEN 1
220#define MAC_VR_DEVID_POS 8
221#define MAC_VR_DEVID_LEN 8
222#define MAC_VR_SNPSVER_POS 0
223#define MAC_VR_SNPSVER_LEN 8
224#define MAC_VR_USERVER_POS 16
225#define MAC_VR_USERVER_LEN 8
226
227/* MMC register offsets */
228#define MMC_CR 0x0800
229#define MMC_RISR 0x0804
230#define MMC_TISR 0x0808
231#define MMC_RIER 0x080c
232#define MMC_TIER 0x0810
233#define MMC_TXOCTETCOUNT_GB_LO 0x0814
234#define MMC_TXFRAMECOUNT_GB_LO 0x081c
235#define MMC_TXBROADCASTFRAMES_G_LO 0x0824
236#define MMC_TXMULTICASTFRAMES_G_LO 0x082c
237#define MMC_TX64OCTETS_GB_LO 0x0834
238#define MMC_TX65TO127OCTETS_GB_LO 0x083c
239#define MMC_TX128TO255OCTETS_GB_LO 0x0844
240#define MMC_TX256TO511OCTETS_GB_LO 0x084c
241#define MMC_TX512TO1023OCTETS_GB_LO 0x0854
242#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c
243#define MMC_TXUNICASTFRAMES_GB_LO 0x0864
244#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c
245#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874
246#define MMC_TXUNDERFLOWERROR_LO 0x087c
247#define MMC_TXOCTETCOUNT_G_LO 0x0884
248#define MMC_TXFRAMECOUNT_G_LO 0x088c
249#define MMC_TXPAUSEFRAMES_LO 0x0894
250#define MMC_TXVLANFRAMES_G_LO 0x089c
251#define MMC_RXFRAMECOUNT_GB_LO 0x0900
252#define MMC_RXOCTETCOUNT_GB_LO 0x0908
253#define MMC_RXOCTETCOUNT_G_LO 0x0910
254#define MMC_RXBROADCASTFRAMES_G_LO 0x0918
255#define MMC_RXMULTICASTFRAMES_G_LO 0x0920
256#define MMC_RXCRCERROR_LO 0x0928
257#define MMC_RXRUNTERROR 0x0930
258#define MMC_RXJABBERERROR 0x0934
259#define MMC_RXUNDERSIZE_G 0x0938
260#define MMC_RXOVERSIZE_G 0x093c
261#define MMC_RX64OCTETS_GB_LO 0x0940
262#define MMC_RX65TO127OCTETS_GB_LO 0x0948
263#define MMC_RX128TO255OCTETS_GB_LO 0x0950
264#define MMC_RX256TO511OCTETS_GB_LO 0x0958
265#define MMC_RX512TO1023OCTETS_GB_LO 0x0960
266#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968
267#define MMC_RXUNICASTFRAMES_G_LO 0x0970
268#define MMC_RXLENGTHERROR_LO 0x0978
269#define MMC_RXOUTOFRANGETYPE_LO 0x0980
270#define MMC_RXPAUSEFRAMES_LO 0x0988
271#define MMC_RXFIFOOVERFLOW_LO 0x0990
272#define MMC_RXVLANFRAMES_GB_LO 0x0998
273#define MMC_RXWATCHDOGERROR 0x09a0
274
275/* MMC register entry bit positions and sizes */
276#define MMC_CR_CR_POS 0
277#define MMC_CR_CR_LEN 1
278#define MMC_CR_CSR_POS 1
279#define MMC_CR_CSR_LEN 1
280#define MMC_CR_ROR_POS 2
281#define MMC_CR_ROR_LEN 1
282#define MMC_CR_MCF_POS 3
283#define MMC_CR_MCF_LEN 1
284#define MMC_CR_MCT_POS 4
285#define MMC_CR_MCT_LEN 2
286#define MMC_RIER_ALL_INTERRUPTS_POS 0
287#define MMC_RIER_ALL_INTERRUPTS_LEN 23
288#define MMC_RISR_RXFRAMECOUNT_GB_POS 0
289#define MMC_RISR_RXFRAMECOUNT_GB_LEN 1
290#define MMC_RISR_RXOCTETCOUNT_GB_POS 1
291#define MMC_RISR_RXOCTETCOUNT_GB_LEN 1
292#define MMC_RISR_RXOCTETCOUNT_G_POS 2
293#define MMC_RISR_RXOCTETCOUNT_G_LEN 1
294#define MMC_RISR_RXBROADCASTFRAMES_G_POS 3
295#define MMC_RISR_RXBROADCASTFRAMES_G_LEN 1
296#define MMC_RISR_RXMULTICASTFRAMES_G_POS 4
297#define MMC_RISR_RXMULTICASTFRAMES_G_LEN 1
298#define MMC_RISR_RXCRCERROR_POS 5
299#define MMC_RISR_RXCRCERROR_LEN 1
300#define MMC_RISR_RXRUNTERROR_POS 6
301#define MMC_RISR_RXRUNTERROR_LEN 1
302#define MMC_RISR_RXJABBERERROR_POS 7
303#define MMC_RISR_RXJABBERERROR_LEN 1
304#define MMC_RISR_RXUNDERSIZE_G_POS 8
305#define MMC_RISR_RXUNDERSIZE_G_LEN 1
306#define MMC_RISR_RXOVERSIZE_G_POS 9
307#define MMC_RISR_RXOVERSIZE_G_LEN 1
308#define MMC_RISR_RX64OCTETS_GB_POS 10
309#define MMC_RISR_RX64OCTETS_GB_LEN 1
310#define MMC_RISR_RX65TO127OCTETS_GB_POS 11
311#define MMC_RISR_RX65TO127OCTETS_GB_LEN 1
312#define MMC_RISR_RX128TO255OCTETS_GB_POS 12
313#define MMC_RISR_RX128TO255OCTETS_GB_LEN 1
314#define MMC_RISR_RX256TO511OCTETS_GB_POS 13
315#define MMC_RISR_RX256TO511OCTETS_GB_LEN 1
316#define MMC_RISR_RX512TO1023OCTETS_GB_POS 14
317#define MMC_RISR_RX512TO1023OCTETS_GB_LEN 1
318#define MMC_RISR_RX1024TOMAXOCTETS_GB_POS 15
319#define MMC_RISR_RX1024TOMAXOCTETS_GB_LEN 1
320#define MMC_RISR_RXUNICASTFRAMES_G_POS 16
321#define MMC_RISR_RXUNICASTFRAMES_G_LEN 1
322#define MMC_RISR_RXLENGTHERROR_POS 17
323#define MMC_RISR_RXLENGTHERROR_LEN 1
324#define MMC_RISR_RXOUTOFRANGETYPE_POS 18
325#define MMC_RISR_RXOUTOFRANGETYPE_LEN 1
326#define MMC_RISR_RXPAUSEFRAMES_POS 19
327#define MMC_RISR_RXPAUSEFRAMES_LEN 1
328#define MMC_RISR_RXFIFOOVERFLOW_POS 20
329#define MMC_RISR_RXFIFOOVERFLOW_LEN 1
330#define MMC_RISR_RXVLANFRAMES_GB_POS 21
331#define MMC_RISR_RXVLANFRAMES_GB_LEN 1
332#define MMC_RISR_RXWATCHDOGERROR_POS 22
333#define MMC_RISR_RXWATCHDOGERROR_LEN 1
334#define MMC_TIER_ALL_INTERRUPTS_POS 0
335#define MMC_TIER_ALL_INTERRUPTS_LEN 18
336#define MMC_TISR_TXOCTETCOUNT_GB_POS 0
337#define MMC_TISR_TXOCTETCOUNT_GB_LEN 1
338#define MMC_TISR_TXFRAMECOUNT_GB_POS 1
339#define MMC_TISR_TXFRAMECOUNT_GB_LEN 1
340#define MMC_TISR_TXBROADCASTFRAMES_G_POS 2
341#define MMC_TISR_TXBROADCASTFRAMES_G_LEN 1
342#define MMC_TISR_TXMULTICASTFRAMES_G_POS 3
343#define MMC_TISR_TXMULTICASTFRAMES_G_LEN 1
344#define MMC_TISR_TX64OCTETS_GB_POS 4
345#define MMC_TISR_TX64OCTETS_GB_LEN 1
346#define MMC_TISR_TX65TO127OCTETS_GB_POS 5
347#define MMC_TISR_TX65TO127OCTETS_GB_LEN 1
348#define MMC_TISR_TX128TO255OCTETS_GB_POS 6
349#define MMC_TISR_TX128TO255OCTETS_GB_LEN 1
350#define MMC_TISR_TX256TO511OCTETS_GB_POS 7
351#define MMC_TISR_TX256TO511OCTETS_GB_LEN 1
352#define MMC_TISR_TX512TO1023OCTETS_GB_POS 8
353#define MMC_TISR_TX512TO1023OCTETS_GB_LEN 1
354#define MMC_TISR_TX1024TOMAXOCTETS_GB_POS 9
355#define MMC_TISR_TX1024TOMAXOCTETS_GB_LEN 1
356#define MMC_TISR_TXUNICASTFRAMES_GB_POS 10
357#define MMC_TISR_TXUNICASTFRAMES_GB_LEN 1
358#define MMC_TISR_TXMULTICASTFRAMES_GB_POS 11
359#define MMC_TISR_TXMULTICASTFRAMES_GB_LEN 1
360#define MMC_TISR_TXBROADCASTFRAMES_GB_POS 12
361#define MMC_TISR_TXBROADCASTFRAMES_GB_LEN 1
362#define MMC_TISR_TXUNDERFLOWERROR_POS 13
363#define MMC_TISR_TXUNDERFLOWERROR_LEN 1
364#define MMC_TISR_TXOCTETCOUNT_G_POS 14
365#define MMC_TISR_TXOCTETCOUNT_G_LEN 1
366#define MMC_TISR_TXFRAMECOUNT_G_POS 15
367#define MMC_TISR_TXFRAMECOUNT_G_LEN 1
368#define MMC_TISR_TXPAUSEFRAMES_POS 16
369#define MMC_TISR_TXPAUSEFRAMES_LEN 1
370#define MMC_TISR_TXVLANFRAMES_G_POS 17
371#define MMC_TISR_TXVLANFRAMES_G_LEN 1
372
373/* MTL register offsets */
374#define MTL_OMR 0x1000
375#define MTL_FDDR 0x1010
376#define MTL_RQDCM0R 0x1030
377
378#define MTL_RQDCM_INC 4
379#define MTL_RQDCM_Q_PER_REG 4
380
381/* MTL register entry bit positions and sizes */
382#define MTL_OMR_ETSALG_POS 5
383#define MTL_OMR_ETSALG_LEN 2
384#define MTL_OMR_RAA_POS 2
385#define MTL_OMR_RAA_LEN 1
386
387/* MTL queue register offsets
388 * Multiple queues can be active. The first queue has registers
389 * that begin at 0x1100. Each subsequent queue has registers that
390 * are accessed using an offset of 0x80 from the previous queue.
391 */
392#define MTL_Q_BASE 0x1100
393#define MTL_Q_INC 0x80
394
395#define MTL_Q_TQOMR 0x00
396#define MTL_Q_RQOMR 0x40
397#define MTL_Q_RQDR 0x48
398#define MTL_Q_RQFCR 0x50
399#define MTL_Q_IER 0x70
400#define MTL_Q_ISR 0x74
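/* Per the layout described above, a queue register is addressed as
 * MTL_Q_BASE + (queue * MTL_Q_INC) + offset, e.g. queue 2's TQOMR is
 * 0x1100 + 2 * 0x80 + 0x00 = 0x1200
 */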
401
402/* MTL queue register entry bit positions and sizes */
403#define MTL_Q_RQDR_PRXQ_POS 16
404#define MTL_Q_RQDR_PRXQ_LEN 14
405#define MTL_Q_RQDR_RXQSTS_POS 4
406#define MTL_Q_RQDR_RXQSTS_LEN 2
407#define MTL_Q_RQFCR_RFA_POS 1
408#define MTL_Q_RQFCR_RFA_LEN 6
409#define MTL_Q_RQFCR_RFD_POS 17
410#define MTL_Q_RQFCR_RFD_LEN 6
411#define MTL_Q_RQOMR_EHFC_POS 7
412#define MTL_Q_RQOMR_EHFC_LEN 1
413#define MTL_Q_RQOMR_RQS_POS 16
414#define MTL_Q_RQOMR_RQS_LEN 9
415#define MTL_Q_RQOMR_RSF_POS 5
416#define MTL_Q_RQOMR_RSF_LEN 1
417#define MTL_Q_RQOMR_FEP_POS 4
418#define MTL_Q_RQOMR_FEP_LEN 1
419#define MTL_Q_RQOMR_FUP_POS 3
420#define MTL_Q_RQOMR_FUP_LEN 1
421#define MTL_Q_RQOMR_RTC_POS 0
422#define MTL_Q_RQOMR_RTC_LEN 2
423#define MTL_Q_TQOMR_FTQ_POS 0
424#define MTL_Q_TQOMR_FTQ_LEN 1
425#define MTL_Q_TQOMR_Q2TCMAP_POS 8
426#define MTL_Q_TQOMR_Q2TCMAP_LEN 3
427#define MTL_Q_TQOMR_TQS_POS 16
428#define MTL_Q_TQOMR_TQS_LEN 10
429#define MTL_Q_TQOMR_TSF_POS 1
430#define MTL_Q_TQOMR_TSF_LEN 1
431#define MTL_Q_TQOMR_TTC_POS 4
432#define MTL_Q_TQOMR_TTC_LEN 3
433#define MTL_Q_TQOMR_TXQEN_POS 2
434#define MTL_Q_TQOMR_TXQEN_LEN 2
435
436/* MTL queue register value */
437#define MTL_RSF_DISABLE 0x00
438#define MTL_RSF_ENABLE 0x01
439#define MTL_TSF_DISABLE 0x00
440#define MTL_TSF_ENABLE 0x01
441
442#define MTL_RX_THRESHOLD_64 0x00
443#define MTL_RX_THRESHOLD_96 0x02
444#define MTL_RX_THRESHOLD_128 0x03
445#define MTL_TX_THRESHOLD_64 0x00
446#define MTL_TX_THRESHOLD_96 0x02
447#define MTL_TX_THRESHOLD_128 0x03
448#define MTL_TX_THRESHOLD_192 0x04
449#define MTL_TX_THRESHOLD_256 0x05
450#define MTL_TX_THRESHOLD_384 0x06
451#define MTL_TX_THRESHOLD_512 0x07
452
453#define MTL_ETSALG_WRR 0x00
454#define MTL_ETSALG_WFQ 0x01
455#define MTL_ETSALG_DWRR 0x02
456#define MTL_RAA_SP 0x00
457#define MTL_RAA_WSP 0x01
458
459#define MTL_Q_DISABLED 0x00
460#define MTL_Q_ENABLED 0x02
461
462#define MTL_RQDCM0R_Q0MDMACH 0x0
463#define MTL_RQDCM0R_Q1MDMACH 0x00000100
464#define MTL_RQDCM0R_Q2MDMACH 0x00020000
465#define MTL_RQDCM0R_Q3MDMACH 0x03000000
466#define MTL_RQDCM1R_Q4MDMACH 0x00000004
467#define MTL_RQDCM1R_Q5MDMACH 0x00000500
468#define MTL_RQDCM1R_Q6MDMACH 0x00060000
469#define MTL_RQDCM1R_Q7MDMACH 0x07000000
470#define MTL_RQDCM2R_Q8MDMACH 0x00000008
471#define MTL_RQDCM2R_Q9MDMACH 0x00000900
472#define MTL_RQDCM2R_Q10MDMACH 0x000A0000
473#define MTL_RQDCM2R_Q11MDMACH 0x0B000000
474
475/* MTL traffic class register offsets
476 * Multiple traffic classes can be active. The first class has registers
477 * that begin at 0x1100. Each subsequent traffic class has registers that
478 * are accessed using an offset of 0x80 from the previous traffic class.
479 */
480#define MTL_TC_BASE MTL_Q_BASE
481#define MTL_TC_INC MTL_Q_INC
482
483#define MTL_TC_ETSCR 0x10
484#define MTL_TC_ETSSR 0x14
485#define MTL_TC_QWR 0x18
486
487/* MTL traffic class register entry bit positions and sizes */
488#define MTL_TC_ETSCR_TSA_POS 0
489#define MTL_TC_ETSCR_TSA_LEN 2
490#define MTL_TC_QWR_QW_POS 0
491#define MTL_TC_QWR_QW_LEN 21
492
493/* MTL traffic class register value */
494#define MTL_TSA_SP 0x00
495#define MTL_TSA_ETS 0x02
496
497/* DMA register offsets */
498#define DMA_MR 0x3000
499#define DMA_SBMR 0x3004
500#define DMA_ISR 0x3008
501#define DMA_DSR0 0x3020
502#define DMA_DSR1 0x3024
503
504/* DMA register entry bit positions and sizes */
505#define DMA_ISR_MACIS_POS 17
506#define DMA_ISR_MACIS_LEN 1
507#define DMA_ISR_MTLIS_POS 16
508#define DMA_ISR_MTLIS_LEN 1
509#define DMA_MR_SWR_POS 0
510#define DMA_MR_SWR_LEN 1
511#define DMA_SBMR_EAME_POS 11
512#define DMA_SBMR_EAME_LEN 1
513#define DMA_SBMR_BLEN_64_POS 5
514#define DMA_SBMR_BLEN_64_LEN 1
515#define DMA_SBMR_BLEN_128_POS 6
516#define DMA_SBMR_BLEN_128_LEN 1
517#define DMA_SBMR_BLEN_256_POS 7
518#define DMA_SBMR_BLEN_256_LEN 1
519#define DMA_SBMR_UNDEF_POS 0
520#define DMA_SBMR_UNDEF_LEN 1
521
522/* DMA register values */
523#define DMA_DSR_RPS_LEN 4
524#define DMA_DSR_TPS_LEN 4
525#define DMA_DSR_Q_LEN (DMA_DSR_RPS_LEN + DMA_DSR_TPS_LEN)
526#define DMA_DSR0_TPS_START 12
527#define DMA_DSRX_FIRST_QUEUE 3
528#define DMA_DSRX_INC 4
529#define DMA_DSRX_QPR 4
530#define DMA_DSRX_TPS_START 4
531#define DMA_TPS_STOPPED 0x00
532#define DMA_TPS_SUSPENDED 0x06
533
534/* DMA channel register offsets
535 * Multiple channels can be active. The first channel has registers
536 * that begin at 0x3100. Each subsequent channel has registers that
537 * are accessed using an offset of 0x80 from the previous channel.
538 */
539#define DMA_CH_BASE 0x3100
540#define DMA_CH_INC 0x80
541
542#define DMA_CH_CR 0x00
543#define DMA_CH_TCR 0x04
544#define DMA_CH_RCR 0x08
545#define DMA_CH_TDLR_HI 0x10
546#define DMA_CH_TDLR_LO 0x14
547#define DMA_CH_RDLR_HI 0x18
548#define DMA_CH_RDLR_LO 0x1c
549#define DMA_CH_TDTR_LO 0x24
550#define DMA_CH_RDTR_LO 0x2c
551#define DMA_CH_TDRLR 0x30
552#define DMA_CH_RDRLR 0x34
553#define DMA_CH_IER 0x38
554#define DMA_CH_RIWT 0x3c
555#define DMA_CH_SR 0x60
556
557/* DMA channel register entry bit positions and sizes */
558#define DMA_CH_CR_PBLX8_POS 16
559#define DMA_CH_CR_PBLX8_LEN 1
560#define DMA_CH_CR_SPH_POS 24
561#define DMA_CH_CR_SPH_LEN 1
562#define DMA_CH_IER_AIE_POS 15
563#define DMA_CH_IER_AIE_LEN 1
564#define DMA_CH_IER_FBEE_POS 12
565#define DMA_CH_IER_FBEE_LEN 1
566#define DMA_CH_IER_NIE_POS 16
567#define DMA_CH_IER_NIE_LEN 1
568#define DMA_CH_IER_RBUE_POS 7
569#define DMA_CH_IER_RBUE_LEN 1
570#define DMA_CH_IER_RIE_POS 6
571#define DMA_CH_IER_RIE_LEN 1
572#define DMA_CH_IER_RSE_POS 8
573#define DMA_CH_IER_RSE_LEN 1
574#define DMA_CH_IER_TBUE_POS 2
575#define DMA_CH_IER_TBUE_LEN 1
576#define DMA_CH_IER_TIE_POS 0
577#define DMA_CH_IER_TIE_LEN 1
578#define DMA_CH_IER_TXSE_POS 1
579#define DMA_CH_IER_TXSE_LEN 1
580#define DMA_CH_RCR_PBL_POS 16
581#define DMA_CH_RCR_PBL_LEN 6
582#define DMA_CH_RCR_RBSZ_POS 1
583#define DMA_CH_RCR_RBSZ_LEN 14
584#define DMA_CH_RCR_SR_POS 0
585#define DMA_CH_RCR_SR_LEN 1
586#define DMA_CH_RIWT_RWT_POS 0
587#define DMA_CH_RIWT_RWT_LEN 8
588#define DMA_CH_SR_FBE_POS 12
589#define DMA_CH_SR_FBE_LEN 1
590#define DMA_CH_SR_RBU_POS 7
591#define DMA_CH_SR_RBU_LEN 1
592#define DMA_CH_SR_RI_POS 6
593#define DMA_CH_SR_RI_LEN 1
594#define DMA_CH_SR_RPS_POS 8
595#define DMA_CH_SR_RPS_LEN 1
596#define DMA_CH_SR_TBU_POS 2
597#define DMA_CH_SR_TBU_LEN 1
598#define DMA_CH_SR_TI_POS 0
599#define DMA_CH_SR_TI_LEN 1
600#define DMA_CH_SR_TPS_POS 1
601#define DMA_CH_SR_TPS_LEN 1
602#define DMA_CH_TCR_OSP_POS 4
603#define DMA_CH_TCR_OSP_LEN 1
604#define DMA_CH_TCR_PBL_POS 16
605#define DMA_CH_TCR_PBL_LEN 6
606#define DMA_CH_TCR_ST_POS 0
607#define DMA_CH_TCR_ST_LEN 1
608#define DMA_CH_TCR_TSE_POS 12
609#define DMA_CH_TCR_TSE_LEN 1
610
611/* DMA channel register values */
612#define DMA_OSP_DISABLE 0x00
613#define DMA_OSP_ENABLE 0x01
614#define DMA_PBL_1 1
615#define DMA_PBL_2 2
616#define DMA_PBL_4 4
617#define DMA_PBL_8 8
618#define DMA_PBL_16 16
619#define DMA_PBL_32 32
620#define DMA_PBL_64 64
621#define DMA_PBL_128 128
622#define DMA_PBL_256 256
623#define DMA_PBL_X8_DISABLE 0x00
624#define DMA_PBL_X8_ENABLE 0x01
625
626/* Descriptor/Packet entry bit positions and sizes */
627#define RX_PACKET_ERRORS_CRC_POS 2
628#define RX_PACKET_ERRORS_CRC_LEN 1
629#define RX_PACKET_ERRORS_FRAME_POS 3
630#define RX_PACKET_ERRORS_FRAME_LEN 1
631#define RX_PACKET_ERRORS_LENGTH_POS 0
632#define RX_PACKET_ERRORS_LENGTH_LEN 1
633#define RX_PACKET_ERRORS_OVERRUN_POS 1
634#define RX_PACKET_ERRORS_OVERRUN_LEN 1
635
636#define RX_PACKET_ATTRIBUTES_CSUM_DONE_POS 0
637#define RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN 1
638#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS 1
639#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN 1
640#define RX_PACKET_ATTRIBUTES_INCOMPLETE_POS 2
641#define RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN 1
642#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS 3
643#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN 1
644#define RX_PACKET_ATTRIBUTES_CONTEXT_POS 4
645#define RX_PACKET_ATTRIBUTES_CONTEXT_LEN 1
646#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS 5
647#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN 1
648#define RX_PACKET_ATTRIBUTES_RSS_HASH_POS 6
649#define RX_PACKET_ATTRIBUTES_RSS_HASH_LEN 1
650
651#define RX_NORMAL_DESC0_OVT_POS 0
652#define RX_NORMAL_DESC0_OVT_LEN 16
653#define RX_NORMAL_DESC2_HL_POS 0
654#define RX_NORMAL_DESC2_HL_LEN 10
655#define RX_NORMAL_DESC3_CDA_POS 27
656#define RX_NORMAL_DESC3_CDA_LEN 1
657#define RX_NORMAL_DESC3_CTXT_POS 30
658#define RX_NORMAL_DESC3_CTXT_LEN 1
659#define RX_NORMAL_DESC3_ES_POS 15
660#define RX_NORMAL_DESC3_ES_LEN 1
661#define RX_NORMAL_DESC3_ETLT_POS 16
662#define RX_NORMAL_DESC3_ETLT_LEN 4
663#define RX_NORMAL_DESC3_FD_POS 29
664#define RX_NORMAL_DESC3_FD_LEN 1
665#define RX_NORMAL_DESC3_INTE_POS 30
666#define RX_NORMAL_DESC3_INTE_LEN 1
667#define RX_NORMAL_DESC3_L34T_POS 20
668#define RX_NORMAL_DESC3_L34T_LEN 4
669#define RX_NORMAL_DESC3_LD_POS 28
670#define RX_NORMAL_DESC3_LD_LEN 1
671#define RX_NORMAL_DESC3_OWN_POS 31
672#define RX_NORMAL_DESC3_OWN_LEN 1
673#define RX_NORMAL_DESC3_PL_POS 0
674#define RX_NORMAL_DESC3_PL_LEN 14
675#define RX_NORMAL_DESC3_RSV_POS 26
676#define RX_NORMAL_DESC3_RSV_LEN 1
677
678#define RX_DESC3_L34T_IPV4_TCP 1
679#define RX_DESC3_L34T_IPV4_UDP 2
680#define RX_DESC3_L34T_IPV4_ICMP 3
681#define RX_DESC3_L34T_IPV6_TCP 9
682#define RX_DESC3_L34T_IPV6_UDP 10
683#define RX_DESC3_L34T_IPV6_ICMP 11
684
685#define RX_CONTEXT_DESC3_TSA_POS 4
686#define RX_CONTEXT_DESC3_TSA_LEN 1
687#define RX_CONTEXT_DESC3_TSD_POS 6
688#define RX_CONTEXT_DESC3_TSD_LEN 1
689
690#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS 0
691#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN 1
692#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS 1
693#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN 1
694#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS 2
695#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN 1
696#define TX_PACKET_ATTRIBUTES_PTP_POS 3
697#define TX_PACKET_ATTRIBUTES_PTP_LEN 1
698
699#define TX_CONTEXT_DESC2_MSS_POS 0
700#define TX_CONTEXT_DESC2_MSS_LEN 15
701#define TX_CONTEXT_DESC3_CTXT_POS 30
702#define TX_CONTEXT_DESC3_CTXT_LEN 1
703#define TX_CONTEXT_DESC3_TCMSSV_POS 26
704#define TX_CONTEXT_DESC3_TCMSSV_LEN 1
705#define TX_CONTEXT_DESC3_VLTV_POS 16
706#define TX_CONTEXT_DESC3_VLTV_LEN 1
707#define TX_CONTEXT_DESC3_VT_POS 0
708#define TX_CONTEXT_DESC3_VT_LEN 16
709
710#define TX_NORMAL_DESC2_HL_B1L_POS 0
711#define TX_NORMAL_DESC2_HL_B1L_LEN 14
712#define TX_NORMAL_DESC2_IC_POS 31
713#define TX_NORMAL_DESC2_IC_LEN 1
714#define TX_NORMAL_DESC2_TTSE_POS 30
715#define TX_NORMAL_DESC2_TTSE_LEN 1
716#define TX_NORMAL_DESC2_VTIR_POS 14
717#define TX_NORMAL_DESC2_VTIR_LEN 2
718#define TX_NORMAL_DESC3_CIC_POS 16
719#define TX_NORMAL_DESC3_CIC_LEN 2
720#define TX_NORMAL_DESC3_CPC_POS 26
721#define TX_NORMAL_DESC3_CPC_LEN 2
722#define TX_NORMAL_DESC3_CTXT_POS 30
723#define TX_NORMAL_DESC3_CTXT_LEN 1
724#define TX_NORMAL_DESC3_FD_POS 29
725#define TX_NORMAL_DESC3_FD_LEN 1
726#define TX_NORMAL_DESC3_FL_POS 0
727#define TX_NORMAL_DESC3_FL_LEN 15
728#define TX_NORMAL_DESC3_LD_POS 28
729#define TX_NORMAL_DESC3_LD_LEN 1
730#define TX_NORMAL_DESC3_OWN_POS 31
731#define TX_NORMAL_DESC3_OWN_LEN 1
732#define TX_NORMAL_DESC3_TCPHDRLEN_POS 19
733#define TX_NORMAL_DESC3_TCPHDRLEN_LEN 4
734#define TX_NORMAL_DESC3_TCPPL_POS 0
735#define TX_NORMAL_DESC3_TCPPL_LEN 18
736#define TX_NORMAL_DESC3_TSE_POS 18
737#define TX_NORMAL_DESC3_TSE_LEN 1
738
739#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
740
741#define XLGMAC_MTL_REG(pdata, n, reg) \
742 ((pdata)->mac_regs + MTL_Q_BASE + ((n) * MTL_Q_INC) + (reg))
743
744#define XLGMAC_DMA_REG(channel, reg) ((channel)->dma_regs + (reg))
745
746#endif /* __DWC_XLGMAC_REG_H__ */
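
For orientation, a minimal sketch of how the per-queue layout above is typically exercised (assuming the kernel's readl()/writel() accessors and the XLGMAC_SET_REG_BITS helper from dwc-xlgmac.h further below; the function name here is invented for illustration). XLGMAC_MTL_REG(pdata, n, reg) resolves to pdata->mac_regs + 0x1100 + n * 0x80 + reg, so, for example, queue 2's TQOMR sits at offset 0x1200.

/* Illustrative only: request a flush of MTL Tx queue 'n' by setting FTQ
 * in that queue's TQOMR via a read-modify-write.
 */
static void xlgmac_flush_one_tx_queue(struct xlgmac_pdata *pdata,
				      unsigned int n)
{
	u32 regval;

	regval = readl(XLGMAC_MTL_REG(pdata, n, MTL_Q_TQOMR));
	regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS,
				     MTL_Q_TQOMR_FTQ_LEN, 1);
	writel(regval, XLGMAC_MTL_REG(pdata, n, MTL_Q_TQOMR));
}
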
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac.h b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
new file mode 100644
index 000000000000..7a4dc643b2b9
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
@@ -0,0 +1,651 @@
1/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
2 *
3 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * This Synopsys DWC XLGMAC software driver and associated documentation
11 * (hereinafter the "Software") is an unsupported proprietary work of
12 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
13 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
14 * Licensed Product under any End User Software License Agreement or
15 * Agreement for Licensed Products with Synopsys or any supplement thereto.
16 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
17 * in the SOFTWARE may be the trademarks of their respective owners.
18 */
19
20#ifndef __DWC_XLGMAC_H__
21#define __DWC_XLGMAC_H__
22
23#include <linux/dma-mapping.h>
24#include <linux/netdevice.h>
25#include <linux/workqueue.h>
26#include <linux/phy.h>
27#include <linux/if_vlan.h>
28#include <linux/bitops.h>
29#include <linux/timecounter.h>
30
31#define XLGMAC_DRV_NAME "dwc-xlgmac"
32#define XLGMAC_DRV_VERSION "1.0.0"
33#define XLGMAC_DRV_DESC "Synopsys DWC XLGMAC Driver"
34
35/* Descriptor related parameters */
36#define XLGMAC_TX_DESC_CNT 1024
37#define XLGMAC_TX_DESC_MIN_FREE (XLGMAC_TX_DESC_CNT >> 3)
38#define XLGMAC_TX_DESC_MAX_PROC (XLGMAC_TX_DESC_CNT >> 1)
39#define XLGMAC_RX_DESC_CNT 1024
40#define XLGMAC_RX_DESC_MAX_DIRTY (XLGMAC_RX_DESC_CNT >> 3)
41
42/* Descriptors required for maximum contiguous TSO/GSO packet */
43#define XLGMAC_TX_MAX_SPLIT ((GSO_MAX_SIZE / XLGMAC_TX_MAX_BUF_SIZE) + 1)
44
45/* Maximum possible descriptors needed for a SKB */
46#define XLGMAC_TX_MAX_DESC_NR (MAX_SKB_FRAGS + XLGMAC_TX_MAX_SPLIT + 2)
47
48#define XLGMAC_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
49#define XLGMAC_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
50#define XLGMAC_RX_BUF_ALIGN 64
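
Worked out with the then-current GSO_MAX_SIZE of 65536 and 4 KiB pages (an illustrative assumption, not something this header encodes): XLGMAC_TX_MAX_BUF_SIZE is 0x3fff rounded down to a 64-byte multiple, i.e. 16320; 65536 / 16320 truncates to 4, so XLGMAC_TX_MAX_SPLIT is 5, and with MAX_SKB_FRAGS = 17 the worst-case XLGMAC_TX_MAX_DESC_NR comes to 17 + 5 + 2 = 24 descriptors per skb.
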
51
52/* Maximum Size for Splitting the Header Data
53 * Keep in sync with SKB_ALLOC_SIZE
54 * 3'b000: 64 bytes, 3'b001: 128 bytes
55 * 3'b010: 256 bytes, 3'b011: 512 bytes
56 * 3'b100: 1023 bytes, 3'b101-3'b111: Reserved
57 */
58#define XLGMAC_SPH_HDSMS_SIZE 3
59#define XLGMAC_SKB_ALLOC_SIZE 512
60
61#define XLGMAC_MAX_FIFO 81920
62
63#define XLGMAC_MAX_DMA_CHANNELS 16
64#define XLGMAC_DMA_STOP_TIMEOUT 5
65#define XLGMAC_DMA_INTERRUPT_MASK 0x31c7
66
67/* Default coalescing parameters */
68#define XLGMAC_INIT_DMA_TX_USECS 1000
69#define XLGMAC_INIT_DMA_TX_FRAMES 25
70#define XLGMAC_INIT_DMA_RX_USECS 30
71#define XLGMAC_INIT_DMA_RX_FRAMES 25
72
73/* Flow control queue count */
74#define XLGMAC_MAX_FLOW_CONTROL_QUEUES 8
75
76/* System clock is 125 MHz */
77#define XLGMAC_SYSCLOCK 125000000
78
79/* Maximum MAC address hash table size (256 bits = 8 32-bit registers) */
80#define XLGMAC_MAC_HASH_TABLE_SIZE 8
81
82/* Receive Side Scaling */
83#define XLGMAC_RSS_HASH_KEY_SIZE 40
84#define XLGMAC_RSS_MAX_TABLE_SIZE 256
85#define XLGMAC_RSS_LOOKUP_TABLE_TYPE 0
86#define XLGMAC_RSS_HASH_KEY_TYPE 1
87
88#define XLGMAC_STD_PACKET_MTU 1500
89#define XLGMAC_JUMBO_PACKET_MTU 9000
90
91/* Helper macro for descriptor handling
92 * Always use XLGMAC_GET_DESC_DATA to access the descriptor data
93 */
94#define XLGMAC_GET_DESC_DATA(ring, idx) ({ \
95 typeof(ring) _ring = (ring); \
96 ((_ring)->desc_data_head + \
97 ((idx) & ((_ring)->dma_desc_count - 1))); \
98})
99
100#define XLGMAC_GET_REG_BITS(var, pos, len) ({ \
101 typeof(pos) _pos = (pos); \
102 typeof(len) _len = (len); \
103 ((var) & GENMASK(_pos + _len - 1, _pos)) >> (_pos); \
104})
105
106#define XLGMAC_GET_REG_BITS_LE(var, pos, len) ({ \
107 typeof(pos) _pos = (pos); \
108 typeof(len) _len = (len); \
109 typeof(var) _var = le32_to_cpu((var)); \
110 ((_var) & GENMASK(_pos + _len - 1, _pos)) >> (_pos); \
111})
112
113#define XLGMAC_SET_REG_BITS(var, pos, len, val) ({ \
114 typeof(var) _var = (var); \
115 typeof(pos) _pos = (pos); \
116 typeof(len) _len = (len); \
117 typeof(val) _val = (val); \
118 _val = (_val << _pos) & GENMASK(_pos + _len - 1, _pos); \
119 _var = (_var & ~GENMASK(_pos + _len - 1, _pos)) | _val; \
120})
121
122#define XLGMAC_SET_REG_BITS_LE(var, pos, len, val) ({ \
123 typeof(var) _var = (var); \
124 typeof(pos) _pos = (pos); \
125 typeof(len) _len = (len); \
126 typeof(val) _val = (val); \
127 _val = (_val << _pos) & GENMASK(_pos + _len - 1, _pos); \
128 _var = (_var & ~GENMASK(_pos + _len - 1, _pos)) | _val; \
129 cpu_to_le32(_var); \
130})
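
A brief worked illustration of the bit-field helpers above (values chosen purely for illustration, using the CIC field from dwc-xlgmac-reg.h): with pos = 16 and len = 2, GENMASK(17, 16) is 0x30000, so writing 0x3 into a zeroed descriptor word and reading it back round-trips cleanly.

	/* Illustrative only: round-trip the 2-bit CIC field of TX_NORMAL_DESC3
	 * (pos 16, len 2) through the accessors.
	 */
	u32 desc3 = 0;

	desc3 = XLGMAC_SET_REG_BITS(desc3, TX_NORMAL_DESC3_CIC_POS,
				    TX_NORMAL_DESC3_CIC_LEN, 0x3);
	/* desc3 is now 0x00030000; bits outside 17:16 are untouched. */
	WARN_ON(XLGMAC_GET_REG_BITS(desc3, TX_NORMAL_DESC3_CIC_POS,
				    TX_NORMAL_DESC3_CIC_LEN) != 0x3);
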
131
132struct xlgmac_pdata;
133
134enum xlgmac_int {
135 XLGMAC_INT_DMA_CH_SR_TI,
136 XLGMAC_INT_DMA_CH_SR_TPS,
137 XLGMAC_INT_DMA_CH_SR_TBU,
138 XLGMAC_INT_DMA_CH_SR_RI,
139 XLGMAC_INT_DMA_CH_SR_RBU,
140 XLGMAC_INT_DMA_CH_SR_RPS,
141 XLGMAC_INT_DMA_CH_SR_TI_RI,
142 XLGMAC_INT_DMA_CH_SR_FBE,
143 XLGMAC_INT_DMA_ALL,
144};
145
146struct xlgmac_stats {
147 /* MMC TX counters */
148 u64 txoctetcount_gb;
149 u64 txframecount_gb;
150 u64 txbroadcastframes_g;
151 u64 txmulticastframes_g;
152 u64 tx64octets_gb;
153 u64 tx65to127octets_gb;
154 u64 tx128to255octets_gb;
155 u64 tx256to511octets_gb;
156 u64 tx512to1023octets_gb;
157 u64 tx1024tomaxoctets_gb;
158 u64 txunicastframes_gb;
159 u64 txmulticastframes_gb;
160 u64 txbroadcastframes_gb;
161 u64 txunderflowerror;
162 u64 txoctetcount_g;
163 u64 txframecount_g;
164 u64 txpauseframes;
165 u64 txvlanframes_g;
166
167 /* MMC RX counters */
168 u64 rxframecount_gb;
169 u64 rxoctetcount_gb;
170 u64 rxoctetcount_g;
171 u64 rxbroadcastframes_g;
172 u64 rxmulticastframes_g;
173 u64 rxcrcerror;
174 u64 rxrunterror;
175 u64 rxjabbererror;
176 u64 rxundersize_g;
177 u64 rxoversize_g;
178 u64 rx64octets_gb;
179 u64 rx65to127octets_gb;
180 u64 rx128to255octets_gb;
181 u64 rx256to511octets_gb;
182 u64 rx512to1023octets_gb;
183 u64 rx1024tomaxoctets_gb;
184 u64 rxunicastframes_g;
185 u64 rxlengtherror;
186 u64 rxoutofrangetype;
187 u64 rxpauseframes;
188 u64 rxfifooverflow;
189 u64 rxvlanframes_gb;
190 u64 rxwatchdogerror;
191
192 /* Extra counters */
193 u64 tx_tso_packets;
194 u64 rx_split_header_packets;
195 u64 rx_buffer_unavailable;
196};
197
198struct xlgmac_ring_buf {
199 struct sk_buff *skb;
200 dma_addr_t skb_dma;
201 unsigned int skb_len;
202};
203
204/* Common Tx and Rx DMA hardware descriptor */
205struct xlgmac_dma_desc {
206 __le32 desc0;
207 __le32 desc1;
208 __le32 desc2;
209 __le32 desc3;
210};
211
212/* Page allocation related values */
213struct xlgmac_page_alloc {
214 struct page *pages;
215 unsigned int pages_len;
216 unsigned int pages_offset;
217
218 dma_addr_t pages_dma;
219};
220
221/* Ring entry buffer data */
222struct xlgmac_buffer_data {
223 struct xlgmac_page_alloc pa;
224 struct xlgmac_page_alloc pa_unmap;
225
226 dma_addr_t dma_base;
227 unsigned long dma_off;
228 unsigned int dma_len;
229};
230
231/* Tx-related desc data */
232struct xlgmac_tx_desc_data {
233 unsigned int packets; /* BQL packet count */
234 unsigned int bytes; /* BQL byte count */
235};
236
237/* Rx-related desc data */
238struct xlgmac_rx_desc_data {
239 struct xlgmac_buffer_data hdr; /* Header locations */
240 struct xlgmac_buffer_data buf; /* Payload locations */
241
242 unsigned short hdr_len; /* Length of received header */
243 unsigned short len; /* Length of received packet */
244};
245
246struct xlgmac_pkt_info {
247 struct sk_buff *skb;
248
249 unsigned int attributes;
250
251 unsigned int errors;
252
253 /* descriptors needed for this packet */
254 unsigned int desc_count;
255 unsigned int length;
256
257 unsigned int tx_packets;
258 unsigned int tx_bytes;
259
260 unsigned int header_len;
261 unsigned int tcp_header_len;
262 unsigned int tcp_payload_len;
263 unsigned short mss;
264
265 unsigned short vlan_ctag;
266
267 u64 rx_tstamp;
268
269 u32 rss_hash;
270 enum pkt_hash_types rss_hash_type;
271};
272
273struct xlgmac_desc_data {
274 /* dma_desc: Virtual address of descriptor
275 * dma_desc_addr: DMA address of descriptor
276 */
277 struct xlgmac_dma_desc *dma_desc;
278 dma_addr_t dma_desc_addr;
279
280 /* skb: Virtual address of SKB
281 * skb_dma: DMA address of SKB data
282 * skb_dma_len: Length of SKB DMA area
283 */
284 struct sk_buff *skb;
285 dma_addr_t skb_dma;
286 unsigned int skb_dma_len;
287
288 /* Tx/Rx -related data */
289 struct xlgmac_tx_desc_data tx;
290 struct xlgmac_rx_desc_data rx;
291
292 unsigned int mapped_as_page;
293
294 /* Incomplete receive save location. If the budget is exhausted
295 * or the last descriptor (last normal descriptor or a following
296	 * context descriptor) has not been DMA'd yet, the current state
297 * of the receive processing needs to be saved.
298 */
299 unsigned int state_saved;
300 struct {
301 struct sk_buff *skb;
302 unsigned int len;
303 unsigned int error;
304 } state;
305};
306
307struct xlgmac_ring {
308 /* Per packet related information */
309 struct xlgmac_pkt_info pkt_info;
310
311 /* Virtual/DMA addresses of DMA descriptor list and the total count */
312 struct xlgmac_dma_desc *dma_desc_head;
313 dma_addr_t dma_desc_head_addr;
314 unsigned int dma_desc_count;
315
316	/* Array of descriptor data corresponding to the DMA descriptors
317 * (always use the XLGMAC_GET_DESC_DATA macro to access this data)
318 */
319 struct xlgmac_desc_data *desc_data_head;
320
321 /* Page allocation for RX buffers */
322 struct xlgmac_page_alloc rx_hdr_pa;
323 struct xlgmac_page_alloc rx_buf_pa;
324
325 /* Ring index values
326 * cur - Tx: index of descriptor to be used for current transfer
327 * Rx: index of descriptor to check for packet availability
328 * dirty - Tx: index of descriptor to check for transfer complete
329 * Rx: index of descriptor to check for buffer reallocation
330 */
331 unsigned int cur;
332 unsigned int dirty;
333
334 /* Coalesce frame count used for interrupt bit setting */
335 unsigned int coalesce_count;
336
337 union {
338 struct {
339 unsigned int xmit_more;
340 unsigned int queue_stopped;
341 unsigned short cur_mss;
342 unsigned short cur_vlan_ctag;
343 } tx;
344 };
345} ____cacheline_aligned;
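
To make the cur/dirty convention above concrete, here is a hedged sketch (not part of the driver; the function name is invented) of a Tx reclaim loop: both indices run freely and only XLGMAC_GET_DESC_DATA() folds them back onto the ring, which works because dma_desc_count is a power of two.

static unsigned int xlgmac_tx_reclaim_sketch(struct xlgmac_pdata *pdata,
					     struct xlgmac_ring *ring)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	unsigned int processed = 0;

	/* 'dirty' chases 'cur'; stop at the first descriptor the hardware
	 * still owns.
	 */
	while (ring->dirty != ring->cur) {
		struct xlgmac_desc_data *desc_data =
			XLGMAC_GET_DESC_DATA(ring, ring->dirty);

		if (!hw_ops->tx_complete(desc_data->dma_desc))
			break;

		ring->dirty++;
		processed++;
	}

	return processed;
}
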
346
347struct xlgmac_channel {
348 char name[16];
349
350 /* Address of private data area for device */
351 struct xlgmac_pdata *pdata;
352
353 /* Queue index and base address of queue's DMA registers */
354 unsigned int queue_index;
355 void __iomem *dma_regs;
356
357	/* Per-channel interrupt (IRQ) number */
358 int dma_irq;
359 char dma_irq_name[IFNAMSIZ + 32];
360
361 /* Netdev related settings */
362 struct napi_struct napi;
363
364 unsigned int saved_ier;
365
366 unsigned int tx_timer_active;
367 struct timer_list tx_timer;
368
369 struct xlgmac_ring *tx_ring;
370 struct xlgmac_ring *rx_ring;
371} ____cacheline_aligned;
372
373struct xlgmac_desc_ops {
374 int (*alloc_channles_and_rings)(struct xlgmac_pdata *pdata);
375 void (*free_channels_and_rings)(struct xlgmac_pdata *pdata);
376 int (*map_tx_skb)(struct xlgmac_channel *channel,
377 struct sk_buff *skb);
378 int (*map_rx_buffer)(struct xlgmac_pdata *pdata,
379 struct xlgmac_ring *ring,
380 struct xlgmac_desc_data *desc_data);
381 void (*unmap_desc_data)(struct xlgmac_pdata *pdata,
382 struct xlgmac_desc_data *desc_data);
383 void (*tx_desc_init)(struct xlgmac_pdata *pdata);
384 void (*rx_desc_init)(struct xlgmac_pdata *pdata);
385};
386
387struct xlgmac_hw_ops {
388 int (*init)(struct xlgmac_pdata *pdata);
389 int (*exit)(struct xlgmac_pdata *pdata);
390
391 int (*tx_complete)(struct xlgmac_dma_desc *dma_desc);
392
393 void (*enable_tx)(struct xlgmac_pdata *pdata);
394 void (*disable_tx)(struct xlgmac_pdata *pdata);
395 void (*enable_rx)(struct xlgmac_pdata *pdata);
396 void (*disable_rx)(struct xlgmac_pdata *pdata);
397
398 int (*enable_int)(struct xlgmac_channel *channel,
399 enum xlgmac_int int_id);
400 int (*disable_int)(struct xlgmac_channel *channel,
401 enum xlgmac_int int_id);
402 void (*dev_xmit)(struct xlgmac_channel *channel);
403 int (*dev_read)(struct xlgmac_channel *channel);
404
405 int (*set_mac_address)(struct xlgmac_pdata *pdata, u8 *addr);
406 int (*config_rx_mode)(struct xlgmac_pdata *pdata);
407 int (*enable_rx_csum)(struct xlgmac_pdata *pdata);
408 int (*disable_rx_csum)(struct xlgmac_pdata *pdata);
409
410 /* For MII speed configuration */
411 int (*set_xlgmii_25000_speed)(struct xlgmac_pdata *pdata);
412 int (*set_xlgmii_40000_speed)(struct xlgmac_pdata *pdata);
413 int (*set_xlgmii_50000_speed)(struct xlgmac_pdata *pdata);
414 int (*set_xlgmii_100000_speed)(struct xlgmac_pdata *pdata);
415
416 /* For descriptor related operation */
417 void (*tx_desc_init)(struct xlgmac_channel *channel);
418 void (*rx_desc_init)(struct xlgmac_channel *channel);
419 void (*tx_desc_reset)(struct xlgmac_desc_data *desc_data);
420 void (*rx_desc_reset)(struct xlgmac_pdata *pdata,
421 struct xlgmac_desc_data *desc_data,
422 unsigned int index);
423 int (*is_last_desc)(struct xlgmac_dma_desc *dma_desc);
424 int (*is_context_desc)(struct xlgmac_dma_desc *dma_desc);
425 void (*tx_start_xmit)(struct xlgmac_channel *channel,
426 struct xlgmac_ring *ring);
427
428 /* For Flow Control */
429 int (*config_tx_flow_control)(struct xlgmac_pdata *pdata);
430 int (*config_rx_flow_control)(struct xlgmac_pdata *pdata);
431
432	/* For VLAN related config */
433 int (*enable_rx_vlan_stripping)(struct xlgmac_pdata *pdata);
434 int (*disable_rx_vlan_stripping)(struct xlgmac_pdata *pdata);
435 int (*enable_rx_vlan_filtering)(struct xlgmac_pdata *pdata);
436 int (*disable_rx_vlan_filtering)(struct xlgmac_pdata *pdata);
437 int (*update_vlan_hash_table)(struct xlgmac_pdata *pdata);
438
439	/* For RX and TX coalescing */
440 int (*config_rx_coalesce)(struct xlgmac_pdata *pdata);
441 int (*config_tx_coalesce)(struct xlgmac_pdata *pdata);
442 unsigned int (*usec_to_riwt)(struct xlgmac_pdata *pdata,
443 unsigned int usec);
444 unsigned int (*riwt_to_usec)(struct xlgmac_pdata *pdata,
445 unsigned int riwt);
446
447 /* For RX and TX threshold config */
448 int (*config_rx_threshold)(struct xlgmac_pdata *pdata,
449 unsigned int val);
450 int (*config_tx_threshold)(struct xlgmac_pdata *pdata,
451 unsigned int val);
452
453 /* For RX and TX Store and Forward Mode config */
454 int (*config_rsf_mode)(struct xlgmac_pdata *pdata,
455 unsigned int val);
456 int (*config_tsf_mode)(struct xlgmac_pdata *pdata,
457 unsigned int val);
458
459 /* For TX DMA Operate on Second Frame config */
460 int (*config_osp_mode)(struct xlgmac_pdata *pdata);
461
462 /* For RX and TX PBL config */
463 int (*config_rx_pbl_val)(struct xlgmac_pdata *pdata);
464 int (*get_rx_pbl_val)(struct xlgmac_pdata *pdata);
465 int (*config_tx_pbl_val)(struct xlgmac_pdata *pdata);
466 int (*get_tx_pbl_val)(struct xlgmac_pdata *pdata);
467 int (*config_pblx8)(struct xlgmac_pdata *pdata);
468
469 /* For MMC statistics */
470 void (*rx_mmc_int)(struct xlgmac_pdata *pdata);
471 void (*tx_mmc_int)(struct xlgmac_pdata *pdata);
472 void (*read_mmc_stats)(struct xlgmac_pdata *pdata);
473
474 /* For Receive Side Scaling */
475 int (*enable_rss)(struct xlgmac_pdata *pdata);
476 int (*disable_rss)(struct xlgmac_pdata *pdata);
477 int (*set_rss_hash_key)(struct xlgmac_pdata *pdata,
478 const u8 *key);
479 int (*set_rss_lookup_table)(struct xlgmac_pdata *pdata,
480 const u32 *table);
481};
482
483/* This structure contains flags that indicate what hardware features
484 * or configurations are present in the device.
485 */
486struct xlgmac_hw_features {
487 /* HW Version */
488 unsigned int version;
489
490 /* HW Feature Register0 */
491 unsigned int phyifsel; /* PHY interface support */
492 unsigned int vlhash; /* VLAN Hash Filter */
493 unsigned int sma; /* SMA(MDIO) Interface */
494 unsigned int rwk; /* PMT remote wake-up packet */
495 unsigned int mgk; /* PMT magic packet */
496 unsigned int mmc; /* RMON module */
497 unsigned int aoe; /* ARP Offload */
498 unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */
499 unsigned int eee; /* Energy Efficient Ethernet */
500 unsigned int tx_coe; /* Tx Checksum Offload */
501 unsigned int rx_coe; /* Rx Checksum Offload */
502 unsigned int addn_mac; /* Additional MAC Addresses */
503 unsigned int ts_src; /* Timestamp Source */
504 unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
505
506 /* HW Feature Register1 */
507 unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
508 unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
509	unsigned int adv_ts_hi;		/* Advanced Timestamping High Word */
510 unsigned int dma_width; /* DMA width */
511 unsigned int dcb; /* DCB Feature */
512 unsigned int sph; /* Split Header Feature */
513 unsigned int tso; /* TCP Segmentation Offload */
514 unsigned int dma_debug; /* DMA Debug Registers */
515 unsigned int rss; /* Receive Side Scaling */
516 unsigned int tc_cnt; /* Number of Traffic Classes */
517 unsigned int hash_table_size; /* Hash Table Size */
518 unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */
519
520 /* HW Feature Register2 */
521 unsigned int rx_q_cnt; /* Number of MTL Receive Queues */
522 unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */
523 unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */
524 unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */
525 unsigned int pps_out_num; /* Number of PPS outputs */
526 unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
527};
528
529struct xlgmac_resources {
530 void __iomem *addr;
531 int irq;
532};
533
534struct xlgmac_pdata {
535 struct net_device *netdev;
536 struct device *dev;
537
538 struct xlgmac_hw_ops hw_ops;
539 struct xlgmac_desc_ops desc_ops;
540
541 /* Device statistics */
542 struct xlgmac_stats stats;
543
544 u32 msg_enable;
545
546 /* MAC registers base */
547 void __iomem *mac_regs;
548
549 /* Hardware features of the device */
550 struct xlgmac_hw_features hw_feat;
551
552 struct work_struct restart_work;
553
554 /* Rings for Tx/Rx on a DMA channel */
555 struct xlgmac_channel *channel_head;
556 unsigned int channel_count;
557 unsigned int tx_ring_count;
558 unsigned int rx_ring_count;
559 unsigned int tx_desc_count;
560 unsigned int rx_desc_count;
561 unsigned int tx_q_count;
562 unsigned int rx_q_count;
563
564 /* Tx/Rx common settings */
565 unsigned int pblx8;
566
567 /* Tx settings */
568 unsigned int tx_sf_mode;
569 unsigned int tx_threshold;
570 unsigned int tx_pbl;
571 unsigned int tx_osp_mode;
572
573 /* Rx settings */
574 unsigned int rx_sf_mode;
575 unsigned int rx_threshold;
576 unsigned int rx_pbl;
577
578 /* Tx coalescing settings */
579 unsigned int tx_usecs;
580 unsigned int tx_frames;
581
582 /* Rx coalescing settings */
583 unsigned int rx_riwt;
584 unsigned int rx_usecs;
585 unsigned int rx_frames;
586
587 /* Current Rx buffer size */
588 unsigned int rx_buf_size;
589
590 /* Flow control settings */
591 unsigned int tx_pause;
592 unsigned int rx_pause;
593
594 /* Device interrupt number */
595 int dev_irq;
596 unsigned int per_channel_irq;
597 int channel_irq[XLGMAC_MAX_DMA_CHANNELS];
598
599 /* Netdev related settings */
600 unsigned char mac_addr[ETH_ALEN];
601 netdev_features_t netdev_features;
602 struct napi_struct napi;
603
604 /* Filtering support */
605 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
606
607 /* Device clocks */
608 unsigned long sysclk_rate;
609
610 /* RSS addressing mutex */
611 struct mutex rss_mutex;
612
613 /* Receive Side Scaling settings */
614 u8 rss_key[XLGMAC_RSS_HASH_KEY_SIZE];
615 u32 rss_table[XLGMAC_RSS_MAX_TABLE_SIZE];
616 u32 rss_options;
617
618 int phy_speed;
619
620 char drv_name[32];
621 char drv_ver[32];
622};
623
624void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops);
625void xlgmac_init_hw_ops(struct xlgmac_hw_ops *hw_ops);
626const struct net_device_ops *xlgmac_get_netdev_ops(void);
627void xlgmac_dump_tx_desc(struct xlgmac_pdata *pdata,
628 struct xlgmac_ring *ring,
629 unsigned int idx,
630 unsigned int count,
631 unsigned int flag);
632void xlgmac_dump_rx_desc(struct xlgmac_pdata *pdata,
633 struct xlgmac_ring *ring,
634 unsigned int idx);
635void xlgmac_print_pkt(struct net_device *netdev,
636 struct sk_buff *skb, bool tx_rx);
637void xlgmac_get_all_hw_features(struct xlgmac_pdata *pdata);
638void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata);
639int xlgmac_drv_probe(struct device *dev,
640 struct xlgmac_resources *res);
641int xlgmac_drv_remove(struct device *dev);
642
643/* For debug prints */
644#ifdef XLGMAC_DEBUG
645#define XLGMAC_PR(fmt, args...) \
646 pr_alert("[%s,%d]:" fmt, __func__, __LINE__, ## args)
647#else
648#define XLGMAC_PR(x...) do { } while (0)
649#endif
650
651#endif /* __DWC_XLGMAC_H__ */
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 7c7ae0890e90..9027c9c509b5 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1882,6 +1882,7 @@ static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
1882static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto, 1882static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
1883 struct tc_to_netdev *tc) 1883 struct tc_to_netdev *tc)
1884{ 1884{
1885 u8 num_tc;
1885 int i; 1886 int i;
1886 1887
1887 /* setup tc must be called under rtnl lock */ 1888 /* setup tc must be called under rtnl lock */
@@ -1890,15 +1891,18 @@ static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
1890 if (tc->type != TC_SETUP_MQPRIO) 1891 if (tc->type != TC_SETUP_MQPRIO)
1891 return -EINVAL; 1892 return -EINVAL;
1892 1893
1894 tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
1895 num_tc = tc->mqprio->num_tc;
1896
1893 /* Sanity-check the number of traffic classes requested */ 1897 /* Sanity-check the number of traffic classes requested */
1894 if ((dev->real_num_tx_queues <= 1) || 1898 if ((dev->real_num_tx_queues <= 1) ||
1895 (dev->real_num_tx_queues < tc->tc)) 1899 (dev->real_num_tx_queues < num_tc))
1896 return -EINVAL; 1900 return -EINVAL;
1897 1901
1898 /* Configure traffic class to queue mappings */ 1902 /* Configure traffic class to queue mappings */
1899 if (tc->tc) { 1903 if (num_tc) {
1900 netdev_set_num_tc(dev, tc->tc); 1904 netdev_set_num_tc(dev, num_tc);
1901 for (i = 0; i < tc->tc; i++) 1905 for (i = 0; i < num_tc; i++)
1902 netdev_set_tc_queue(dev, i, 1, i); 1906 netdev_set_tc_queue(dev, i, 1, i);
1903 } else { 1907 } else {
1904 netdev_reset_tc(dev); 1908 netdev_reset_tc(dev);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 72013314bba8..fa6a06571187 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1206,61 +1206,68 @@ void gelic_net_get_drvinfo(struct net_device *netdev,
1206 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 1206 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1207} 1207}
1208 1208
1209static int gelic_ether_get_settings(struct net_device *netdev, 1209static int gelic_ether_get_link_ksettings(struct net_device *netdev,
1210 struct ethtool_cmd *cmd) 1210 struct ethtool_link_ksettings *cmd)
1211{ 1211{
1212 struct gelic_card *card = netdev_card(netdev); 1212 struct gelic_card *card = netdev_card(netdev);
1213 u32 supported, advertising;
1213 1214
1214 gelic_card_get_ether_port_status(card, 0); 1215 gelic_card_get_ether_port_status(card, 0);
1215 1216
1216 if (card->ether_port_status & GELIC_LV1_ETHER_FULL_DUPLEX) 1217 if (card->ether_port_status & GELIC_LV1_ETHER_FULL_DUPLEX)
1217 cmd->duplex = DUPLEX_FULL; 1218 cmd->base.duplex = DUPLEX_FULL;
1218 else 1219 else
1219 cmd->duplex = DUPLEX_HALF; 1220 cmd->base.duplex = DUPLEX_HALF;
1220 1221
1221 switch (card->ether_port_status & GELIC_LV1_ETHER_SPEED_MASK) { 1222 switch (card->ether_port_status & GELIC_LV1_ETHER_SPEED_MASK) {
1222 case GELIC_LV1_ETHER_SPEED_10: 1223 case GELIC_LV1_ETHER_SPEED_10:
1223 ethtool_cmd_speed_set(cmd, SPEED_10); 1224 cmd->base.speed = SPEED_10;
1224 break; 1225 break;
1225 case GELIC_LV1_ETHER_SPEED_100: 1226 case GELIC_LV1_ETHER_SPEED_100:
1226 ethtool_cmd_speed_set(cmd, SPEED_100); 1227 cmd->base.speed = SPEED_100;
1227 break; 1228 break;
1228 case GELIC_LV1_ETHER_SPEED_1000: 1229 case GELIC_LV1_ETHER_SPEED_1000:
1229 ethtool_cmd_speed_set(cmd, SPEED_1000); 1230 cmd->base.speed = SPEED_1000;
1230 break; 1231 break;
1231 default: 1232 default:
1232 pr_info("%s: speed unknown\n", __func__); 1233 pr_info("%s: speed unknown\n", __func__);
1233 ethtool_cmd_speed_set(cmd, SPEED_10); 1234 cmd->base.speed = SPEED_10;
1234 break; 1235 break;
1235 } 1236 }
1236 1237
1237 cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | 1238 supported = SUPPORTED_TP | SUPPORTED_Autoneg |
1238 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 1239 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
1239 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 1240 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1240 SUPPORTED_1000baseT_Full; 1241 SUPPORTED_1000baseT_Full;
1241 cmd->advertising = cmd->supported; 1242 advertising = supported;
1242 if (card->link_mode & GELIC_LV1_ETHER_AUTO_NEG) { 1243 if (card->link_mode & GELIC_LV1_ETHER_AUTO_NEG) {
1243 cmd->autoneg = AUTONEG_ENABLE; 1244 cmd->base.autoneg = AUTONEG_ENABLE;
1244 } else { 1245 } else {
1245 cmd->autoneg = AUTONEG_DISABLE; 1246 cmd->base.autoneg = AUTONEG_DISABLE;
1246 cmd->advertising &= ~ADVERTISED_Autoneg; 1247 advertising &= ~ADVERTISED_Autoneg;
1247 } 1248 }
1248 cmd->port = PORT_TP; 1249 cmd->base.port = PORT_TP;
1250
1251 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1252 supported);
1253 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1254 advertising);
1249 1255
1250 return 0; 1256 return 0;
1251} 1257}
1252 1258
1253static int gelic_ether_set_settings(struct net_device *netdev, 1259static int
1254 struct ethtool_cmd *cmd) 1260gelic_ether_set_link_ksettings(struct net_device *netdev,
1261 const struct ethtool_link_ksettings *cmd)
1255{ 1262{
1256 struct gelic_card *card = netdev_card(netdev); 1263 struct gelic_card *card = netdev_card(netdev);
1257 u64 mode; 1264 u64 mode;
1258 int ret; 1265 int ret;
1259 1266
1260 if (cmd->autoneg == AUTONEG_ENABLE) { 1267 if (cmd->base.autoneg == AUTONEG_ENABLE) {
1261 mode = GELIC_LV1_ETHER_AUTO_NEG; 1268 mode = GELIC_LV1_ETHER_AUTO_NEG;
1262 } else { 1269 } else {
1263 switch (cmd->speed) { 1270 switch (cmd->base.speed) {
1264 case SPEED_10: 1271 case SPEED_10:
1265 mode = GELIC_LV1_ETHER_SPEED_10; 1272 mode = GELIC_LV1_ETHER_SPEED_10;
1266 break; 1273 break;
@@ -1273,9 +1280,9 @@ static int gelic_ether_set_settings(struct net_device *netdev,
1273 default: 1280 default:
1274 return -EINVAL; 1281 return -EINVAL;
1275 } 1282 }
1276 if (cmd->duplex == DUPLEX_FULL) 1283 if (cmd->base.duplex == DUPLEX_FULL) {
1277 mode |= GELIC_LV1_ETHER_FULL_DUPLEX; 1284 mode |= GELIC_LV1_ETHER_FULL_DUPLEX;
1278 else if (cmd->speed == SPEED_1000) { 1285 } else if (cmd->base.speed == SPEED_1000) {
1279 pr_info("1000 half duplex is not supported.\n"); 1286 pr_info("1000 half duplex is not supported.\n");
1280 return -EINVAL; 1287 return -EINVAL;
1281 } 1288 }
@@ -1370,11 +1377,11 @@ done:
1370 1377
1371static const struct ethtool_ops gelic_ether_ethtool_ops = { 1378static const struct ethtool_ops gelic_ether_ethtool_ops = {
1372 .get_drvinfo = gelic_net_get_drvinfo, 1379 .get_drvinfo = gelic_net_get_drvinfo,
1373 .get_settings = gelic_ether_get_settings,
1374 .set_settings = gelic_ether_set_settings,
1375 .get_link = ethtool_op_get_link, 1380 .get_link = ethtool_op_get_link,
1376 .get_wol = gelic_net_get_wol, 1381 .get_wol = gelic_net_get_wol,
1377 .set_wol = gelic_net_set_wol, 1382 .set_wol = gelic_net_set_wol,
1383 .get_link_ksettings = gelic_ether_get_link_ksettings,
1384 .set_link_ksettings = gelic_ether_set_link_ksettings,
1378}; 1385};
1379 1386
1380/** 1387/**
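
The gelic change above, and the spider_net, tsi108, via-rhine, via-velocity and fjes changes below, all follow the same conversion recipe: drop .get_settings/.set_settings, fill in cmd->base.speed/duplex/autoneg/port, and translate the legacy SUPPORTED_*/ADVERTISED_* bitmaps with ethtool_convert_legacy_u32_to_link_mode(). A minimal sketch of the pattern for a hypothetical driver (all foo_* names invented):

static int foo_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *cmd)
{
	u32 supported = SUPPORTED_TP | SUPPORTED_Autoneg |
			SUPPORTED_1000baseT_Full;
	u32 advertising = supported;

	/* These would normally be read back from the hardware. */
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = AUTONEG_ENABLE;
	cmd->base.port = PORT_TP;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= foo_get_link_ksettings,
};
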
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
index ffe519382e11..16bd036d0682 100644
--- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c
+++ b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
@@ -47,19 +47,23 @@ static struct {
47}; 47};
48 48
49static int 49static int
50spider_net_ethtool_get_settings(struct net_device *netdev, 50spider_net_ethtool_get_link_ksettings(struct net_device *netdev,
51 struct ethtool_cmd *cmd) 51 struct ethtool_link_ksettings *cmd)
52{ 52{
53 struct spider_net_card *card; 53 struct spider_net_card *card;
54 card = netdev_priv(netdev); 54 card = netdev_priv(netdev);
55 55
56 cmd->supported = (SUPPORTED_1000baseT_Full | 56 ethtool_link_ksettings_zero_link_mode(cmd, supported);
57 SUPPORTED_FIBRE); 57 ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
58 cmd->advertising = (ADVERTISED_1000baseT_Full | 58 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
59 ADVERTISED_FIBRE); 59
60 cmd->port = PORT_FIBRE; 60 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
61 ethtool_cmd_speed_set(cmd, card->phy.speed); 61 ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
62 cmd->duplex = DUPLEX_FULL; 62 ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
63
64 cmd->base.port = PORT_FIBRE;
65 cmd->base.speed = card->phy.speed;
66 cmd->base.duplex = DUPLEX_FULL;
63 67
64 return 0; 68 return 0;
65} 69}
@@ -166,7 +170,6 @@ static void spider_net_get_strings(struct net_device *netdev, u32 stringset,
166} 170}
167 171
168const struct ethtool_ops spider_net_ethtool_ops = { 172const struct ethtool_ops spider_net_ethtool_ops = {
169 .get_settings = spider_net_ethtool_get_settings,
170 .get_drvinfo = spider_net_ethtool_get_drvinfo, 173 .get_drvinfo = spider_net_ethtool_get_drvinfo,
171 .get_wol = spider_net_ethtool_get_wol, 174 .get_wol = spider_net_ethtool_get_wol,
172 .get_msglevel = spider_net_ethtool_get_msglevel, 175 .get_msglevel = spider_net_ethtool_get_msglevel,
@@ -177,5 +180,6 @@ const struct ethtool_ops spider_net_ethtool_ops = {
177 .get_strings = spider_net_get_strings, 180 .get_strings = spider_net_get_strings,
178 .get_sset_count = spider_net_get_sset_count, 181 .get_sset_count = spider_net_get_sset_count,
179 .get_ethtool_stats = spider_net_get_ethtool_stats, 182 .get_ethtool_stats = spider_net_get_ethtool_stats,
183 .get_link_ksettings = spider_net_ethtool_get_link_ksettings,
180}; 184};
181 185
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index c5583991da4a..5ac6eaa9e785 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1499,27 +1499,29 @@ static void tsi108_init_mac(struct net_device *dev)
1499 TSI_WRITE(TSI108_EC_INTMASK, ~0); 1499 TSI_WRITE(TSI108_EC_INTMASK, ~0);
1500} 1500}
1501 1501
1502static int tsi108_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1502static int tsi108_get_link_ksettings(struct net_device *dev,
1503 struct ethtool_link_ksettings *cmd)
1503{ 1504{
1504 struct tsi108_prv_data *data = netdev_priv(dev); 1505 struct tsi108_prv_data *data = netdev_priv(dev);
1505 unsigned long flags; 1506 unsigned long flags;
1506 int rc; 1507 int rc;
1507 1508
1508 spin_lock_irqsave(&data->txlock, flags); 1509 spin_lock_irqsave(&data->txlock, flags);
1509 rc = mii_ethtool_gset(&data->mii_if, cmd); 1510 rc = mii_ethtool_get_link_ksettings(&data->mii_if, cmd);
1510 spin_unlock_irqrestore(&data->txlock, flags); 1511 spin_unlock_irqrestore(&data->txlock, flags);
1511 1512
1512 return rc; 1513 return rc;
1513} 1514}
1514 1515
1515static int tsi108_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1516static int tsi108_set_link_ksettings(struct net_device *dev,
1517 const struct ethtool_link_ksettings *cmd)
1516{ 1518{
1517 struct tsi108_prv_data *data = netdev_priv(dev); 1519 struct tsi108_prv_data *data = netdev_priv(dev);
1518 unsigned long flags; 1520 unsigned long flags;
1519 int rc; 1521 int rc;
1520 1522
1521 spin_lock_irqsave(&data->txlock, flags); 1523 spin_lock_irqsave(&data->txlock, flags);
1522 rc = mii_ethtool_sset(&data->mii_if, cmd); 1524 rc = mii_ethtool_set_link_ksettings(&data->mii_if, cmd);
1523 spin_unlock_irqrestore(&data->txlock, flags); 1525 spin_unlock_irqrestore(&data->txlock, flags);
1524 1526
1525 return rc; 1527 return rc;
@@ -1535,8 +1537,8 @@ static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1535 1537
1536static const struct ethtool_ops tsi108_ethtool_ops = { 1538static const struct ethtool_ops tsi108_ethtool_ops = {
1537 .get_link = ethtool_op_get_link, 1539 .get_link = ethtool_op_get_link,
1538 .get_settings = tsi108_get_settings, 1540 .get_link_ksettings = tsi108_get_link_ksettings,
1539 .set_settings = tsi108_set_settings, 1541 .set_link_ksettings = tsi108_set_link_ksettings,
1540}; 1542};
1541 1543
1542static const struct net_device_ops tsi108_netdev_ops = { 1544static const struct net_device_ops tsi108_netdev_ops = {
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index c068c58428f7..4cf41f779d0e 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2303,25 +2303,27 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
2303 strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info)); 2303 strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
2304} 2304}
2305 2305
2306static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2306static int netdev_get_link_ksettings(struct net_device *dev,
2307 struct ethtool_link_ksettings *cmd)
2307{ 2308{
2308 struct rhine_private *rp = netdev_priv(dev); 2309 struct rhine_private *rp = netdev_priv(dev);
2309 int rc; 2310 int rc;
2310 2311
2311 mutex_lock(&rp->task_lock); 2312 mutex_lock(&rp->task_lock);
2312 rc = mii_ethtool_gset(&rp->mii_if, cmd); 2313 rc = mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
2313 mutex_unlock(&rp->task_lock); 2314 mutex_unlock(&rp->task_lock);
2314 2315
2315 return rc; 2316 return rc;
2316} 2317}
2317 2318
2318static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2319static int netdev_set_link_ksettings(struct net_device *dev,
2320 const struct ethtool_link_ksettings *cmd)
2319{ 2321{
2320 struct rhine_private *rp = netdev_priv(dev); 2322 struct rhine_private *rp = netdev_priv(dev);
2321 int rc; 2323 int rc;
2322 2324
2323 mutex_lock(&rp->task_lock); 2325 mutex_lock(&rp->task_lock);
2324 rc = mii_ethtool_sset(&rp->mii_if, cmd); 2326 rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
2325 rhine_set_carrier(&rp->mii_if); 2327 rhine_set_carrier(&rp->mii_if);
2326 mutex_unlock(&rp->task_lock); 2328 mutex_unlock(&rp->task_lock);
2327 2329
@@ -2391,14 +2393,14 @@ static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2391 2393
2392static const struct ethtool_ops netdev_ethtool_ops = { 2394static const struct ethtool_ops netdev_ethtool_ops = {
2393 .get_drvinfo = netdev_get_drvinfo, 2395 .get_drvinfo = netdev_get_drvinfo,
2394 .get_settings = netdev_get_settings,
2395 .set_settings = netdev_set_settings,
2396 .nway_reset = netdev_nway_reset, 2396 .nway_reset = netdev_nway_reset,
2397 .get_link = netdev_get_link, 2397 .get_link = netdev_get_link,
2398 .get_msglevel = netdev_get_msglevel, 2398 .get_msglevel = netdev_get_msglevel,
2399 .set_msglevel = netdev_set_msglevel, 2399 .set_msglevel = netdev_set_msglevel,
2400 .get_wol = rhine_get_wol, 2400 .get_wol = rhine_get_wol,
2401 .set_wol = rhine_set_wol, 2401 .set_wol = rhine_set_wol,
2402 .get_link_ksettings = netdev_get_link_ksettings,
2403 .set_link_ksettings = netdev_set_link_ksettings,
2402}; 2404};
2403 2405
2404static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2406static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index d088788b27a7..ef9538ee53d0 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -3291,15 +3291,17 @@ static void velocity_ethtool_down(struct net_device *dev)
3291 velocity_set_power_state(vptr, PCI_D3hot); 3291 velocity_set_power_state(vptr, PCI_D3hot);
3292} 3292}
3293 3293
3294static int velocity_get_settings(struct net_device *dev, 3294static int velocity_get_link_ksettings(struct net_device *dev,
3295 struct ethtool_cmd *cmd) 3295 struct ethtool_link_ksettings *cmd)
3296{ 3296{
3297 struct velocity_info *vptr = netdev_priv(dev); 3297 struct velocity_info *vptr = netdev_priv(dev);
3298 struct mac_regs __iomem *regs = vptr->mac_regs; 3298 struct mac_regs __iomem *regs = vptr->mac_regs;
3299 u32 status; 3299 u32 status;
3300 u32 supported, advertising;
3301
3300 status = check_connection_type(vptr->mac_regs); 3302 status = check_connection_type(vptr->mac_regs);
3301 3303
3302 cmd->supported = SUPPORTED_TP | 3304 supported = SUPPORTED_TP |
3303 SUPPORTED_Autoneg | 3305 SUPPORTED_Autoneg |
3304 SUPPORTED_10baseT_Half | 3306 SUPPORTED_10baseT_Half |
3305 SUPPORTED_10baseT_Full | 3307 SUPPORTED_10baseT_Full |
@@ -3308,9 +3310,9 @@ static int velocity_get_settings(struct net_device *dev,
3308 SUPPORTED_1000baseT_Half | 3310 SUPPORTED_1000baseT_Half |
3309 SUPPORTED_1000baseT_Full; 3311 SUPPORTED_1000baseT_Full;
3310 3312
3311 cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg; 3313 advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
3312 if (vptr->options.spd_dpx == SPD_DPX_AUTO) { 3314 if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
3313 cmd->advertising |= 3315 advertising |=
3314 ADVERTISED_10baseT_Half | 3316 ADVERTISED_10baseT_Half |
3315 ADVERTISED_10baseT_Full | 3317 ADVERTISED_10baseT_Full |
3316 ADVERTISED_100baseT_Half | 3318 ADVERTISED_100baseT_Half |
@@ -3320,19 +3322,19 @@ static int velocity_get_settings(struct net_device *dev,
3320 } else { 3322 } else {
3321 switch (vptr->options.spd_dpx) { 3323 switch (vptr->options.spd_dpx) {
3322 case SPD_DPX_1000_FULL: 3324 case SPD_DPX_1000_FULL:
3323 cmd->advertising |= ADVERTISED_1000baseT_Full; 3325 advertising |= ADVERTISED_1000baseT_Full;
3324 break; 3326 break;
3325 case SPD_DPX_100_HALF: 3327 case SPD_DPX_100_HALF:
3326 cmd->advertising |= ADVERTISED_100baseT_Half; 3328 advertising |= ADVERTISED_100baseT_Half;
3327 break; 3329 break;
3328 case SPD_DPX_100_FULL: 3330 case SPD_DPX_100_FULL:
3329 cmd->advertising |= ADVERTISED_100baseT_Full; 3331 advertising |= ADVERTISED_100baseT_Full;
3330 break; 3332 break;
3331 case SPD_DPX_10_HALF: 3333 case SPD_DPX_10_HALF:
3332 cmd->advertising |= ADVERTISED_10baseT_Half; 3334 advertising |= ADVERTISED_10baseT_Half;
3333 break; 3335 break;
3334 case SPD_DPX_10_FULL: 3336 case SPD_DPX_10_FULL:
3335 cmd->advertising |= ADVERTISED_10baseT_Full; 3337 advertising |= ADVERTISED_10baseT_Full;
3336 break; 3338 break;
3337 default: 3339 default:
3338 break; 3340 break;
@@ -3340,30 +3342,35 @@ static int velocity_get_settings(struct net_device *dev,
3340 } 3342 }
3341 3343
3342 if (status & VELOCITY_SPEED_1000) 3344 if (status & VELOCITY_SPEED_1000)
3343 ethtool_cmd_speed_set(cmd, SPEED_1000); 3345 cmd->base.speed = SPEED_1000;
3344 else if (status & VELOCITY_SPEED_100) 3346 else if (status & VELOCITY_SPEED_100)
3345 ethtool_cmd_speed_set(cmd, SPEED_100); 3347 cmd->base.speed = SPEED_100;
3346 else 3348 else
3347 ethtool_cmd_speed_set(cmd, SPEED_10); 3349 cmd->base.speed = SPEED_10;
3348 3350
3349 cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE; 3351 cmd->base.autoneg = (status & VELOCITY_AUTONEG_ENABLE) ?
3350 cmd->port = PORT_TP; 3352 AUTONEG_ENABLE : AUTONEG_DISABLE;
3351 cmd->transceiver = XCVR_INTERNAL; 3353 cmd->base.port = PORT_TP;
3352 cmd->phy_address = readb(&regs->MIIADR) & 0x1F; 3354 cmd->base.phy_address = readb(&regs->MIIADR) & 0x1F;
3353 3355
3354 if (status & VELOCITY_DUPLEX_FULL) 3356 if (status & VELOCITY_DUPLEX_FULL)
3355 cmd->duplex = DUPLEX_FULL; 3357 cmd->base.duplex = DUPLEX_FULL;
3356 else 3358 else
3357 cmd->duplex = DUPLEX_HALF; 3359 cmd->base.duplex = DUPLEX_HALF;
3360
3361 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3362 supported);
3363 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3364 advertising);
3358 3365
3359 return 0; 3366 return 0;
3360} 3367}
3361 3368
3362static int velocity_set_settings(struct net_device *dev, 3369static int velocity_set_link_ksettings(struct net_device *dev,
3363 struct ethtool_cmd *cmd) 3370 const struct ethtool_link_ksettings *cmd)
3364{ 3371{
3365 struct velocity_info *vptr = netdev_priv(dev); 3372 struct velocity_info *vptr = netdev_priv(dev);
3366 u32 speed = ethtool_cmd_speed(cmd); 3373 u32 speed = cmd->base.speed;
3367 u32 curr_status; 3374 u32 curr_status;
3368 u32 new_status = 0; 3375 u32 new_status = 0;
3369 int ret = 0; 3376 int ret = 0;
@@ -3371,11 +3378,12 @@ static int velocity_set_settings(struct net_device *dev,
3371 curr_status = check_connection_type(vptr->mac_regs); 3378 curr_status = check_connection_type(vptr->mac_regs);
3372 curr_status &= (~VELOCITY_LINK_FAIL); 3379 curr_status &= (~VELOCITY_LINK_FAIL);
3373 3380
3374 new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0); 3381 new_status |= ((cmd->base.autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3375 new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0); 3382 new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3376 new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0); 3383 new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3377 new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0); 3384 new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3378 new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0); 3385 new_status |= ((cmd->base.duplex == DUPLEX_FULL) ?
3386 VELOCITY_DUPLEX_FULL : 0);
3379 3387
3380 if ((new_status & VELOCITY_AUTONEG_ENABLE) && 3388 if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
3381 (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) { 3389 (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
@@ -3644,8 +3652,6 @@ static void velocity_get_ethtool_stats(struct net_device *dev,
3644} 3652}
3645 3653
3646static const struct ethtool_ops velocity_ethtool_ops = { 3654static const struct ethtool_ops velocity_ethtool_ops = {
3647 .get_settings = velocity_get_settings,
3648 .set_settings = velocity_set_settings,
3649 .get_drvinfo = velocity_get_drvinfo, 3655 .get_drvinfo = velocity_get_drvinfo,
3650 .get_wol = velocity_ethtool_get_wol, 3656 .get_wol = velocity_ethtool_get_wol,
3651 .set_wol = velocity_ethtool_set_wol, 3657 .set_wol = velocity_ethtool_set_wol,
@@ -3658,7 +3664,9 @@ static const struct ethtool_ops velocity_ethtool_ops = {
3658 .get_coalesce = velocity_get_coalesce, 3664 .get_coalesce = velocity_get_coalesce,
3659 .set_coalesce = velocity_set_coalesce, 3665 .set_coalesce = velocity_set_coalesce,
3660 .begin = velocity_ethtool_up, 3666 .begin = velocity_ethtool_up,
3661 .complete = velocity_ethtool_down 3667 .complete = velocity_ethtool_down,
3668 .get_link_ksettings = velocity_get_link_ksettings,
3669 .set_link_ksettings = velocity_set_link_ksettings,
3662}; 3670};
3663 3671
3664#if defined(CONFIG_PM) && defined(CONFIG_INET) 3672#if defined(CONFIG_PM) && defined(CONFIG_INET)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index b96e96919e31..33c595f4691d 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -301,7 +301,7 @@ static void axienet_set_mac_address(struct net_device *ndev,
301 if (address) 301 if (address)
302 memcpy(ndev->dev_addr, address, ETH_ALEN); 302 memcpy(ndev->dev_addr, address, ETH_ALEN);
303 if (!is_valid_ether_addr(ndev->dev_addr)) 303 if (!is_valid_ether_addr(ndev->dev_addr))
304 eth_random_addr(ndev->dev_addr); 304 eth_hw_addr_random(ndev);
305 305
306 /* Set up unicast MAC address filter set its mac address */ 306 /* Set up unicast MAC address filter set its mac address */
307 axienet_iow(lp, XAE_UAW0_OFFSET, 307 axienet_iow(lp, XAE_UAW0_OFFSET,
diff --git a/drivers/net/fjes/fjes_ethtool.c b/drivers/net/fjes/fjes_ethtool.c
index 6575f880f1be..7d101714c2ef 100644
--- a/drivers/net/fjes/fjes_ethtool.c
+++ b/drivers/net/fjes/fjes_ethtool.c
@@ -175,16 +175,15 @@ static void fjes_get_drvinfo(struct net_device *netdev,
175 "platform:%s", plat_dev->name); 175 "platform:%s", plat_dev->name);
176} 176}
177 177
178static int fjes_get_settings(struct net_device *netdev, 178static int fjes_get_link_ksettings(struct net_device *netdev,
179 struct ethtool_cmd *ecmd) 179 struct ethtool_link_ksettings *ecmd)
180{ 180{
181 ecmd->supported = 0; 181 ethtool_link_ksettings_zero_link_mode(ecmd, supported);
182 ecmd->advertising = 0; 182 ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
183 ecmd->duplex = DUPLEX_FULL; 183 ecmd->base.duplex = DUPLEX_FULL;
184 ecmd->autoneg = AUTONEG_DISABLE; 184 ecmd->base.autoneg = AUTONEG_DISABLE;
185 ecmd->transceiver = XCVR_DUMMY1; 185 ecmd->base.port = PORT_NONE;
186 ecmd->port = PORT_NONE; 186 ecmd->base.speed = 20000; /* 20Gb/s */
187 ethtool_cmd_speed_set(ecmd, 20000); /* 20Gb/s */
188 187
189 return 0; 188 return 0;
190} 189}
@@ -296,7 +295,6 @@ static int fjes_get_dump_data(struct net_device *netdev,
296} 295}
297 296
298static const struct ethtool_ops fjes_ethtool_ops = { 297static const struct ethtool_ops fjes_ethtool_ops = {
299 .get_settings = fjes_get_settings,
300 .get_drvinfo = fjes_get_drvinfo, 298 .get_drvinfo = fjes_get_drvinfo,
301 .get_ethtool_stats = fjes_get_ethtool_stats, 299 .get_ethtool_stats = fjes_get_ethtool_stats,
302 .get_strings = fjes_get_strings, 300 .get_strings = fjes_get_strings,
@@ -306,6 +304,7 @@ static const struct ethtool_ops fjes_ethtool_ops = {
306 .set_dump = fjes_set_dump, 304 .set_dump = fjes_set_dump,
307 .get_dump_flag = fjes_get_dump_flag, 305 .get_dump_flag = fjes_get_dump_flag,
308 .get_dump_data = fjes_get_dump_data, 306 .get_dump_data = fjes_get_dump_data,
307 .get_link_ksettings = fjes_get_link_ksettings,
309}; 308};
310 309
311void fjes_set_ethtool_ops(struct net_device *netdev) 310void fjes_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 89698741682f..3e1854f34420 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -58,6 +58,9 @@ struct pdp_ctx {
58 struct in_addr ms_addr_ip4; 58 struct in_addr ms_addr_ip4;
59 struct in_addr sgsn_addr_ip4; 59 struct in_addr sgsn_addr_ip4;
60 60
61 struct sock *sk;
62 struct net_device *dev;
63
61 atomic_t tx_seq; 64 atomic_t tx_seq;
62 struct rcu_head rcu_head; 65 struct rcu_head rcu_head;
63}; 66};
@@ -66,8 +69,8 @@ struct pdp_ctx {
66struct gtp_dev { 69struct gtp_dev {
67 struct list_head list; 70 struct list_head list;
68 71
69 struct socket *sock0; 72 struct sock *sk0;
70 struct socket *sock1u; 73 struct sock *sk1u;
71 74
72 struct net_device *dev; 75 struct net_device *dev;
73 76
@@ -84,6 +87,8 @@ struct gtp_net {
84 87
85static u32 gtp_h_initval; 88static u32 gtp_h_initval;
86 89
90static void pdp_context_delete(struct pdp_ctx *pctx);
91
87static inline u32 gtp0_hashfn(u64 tid) 92static inline u32 gtp0_hashfn(u64 tid)
88{ 93{
89 u32 *tid32 = (u32 *) &tid; 94 u32 *tid32 = (u32 *) &tid;
@@ -175,9 +180,42 @@ static bool gtp_check_src_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
175 return false; 180 return false;
176} 181}
177 182
183static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb, unsigned int hdrlen)
184{
185 struct pcpu_sw_netstats *stats;
186
187 if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
188 netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
189 return 1;
190 }
191
192 /* Get rid of the GTP + UDP headers. */
193 if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
194 !net_eq(sock_net(pctx->sk), dev_net(pctx->dev))))
195 return -1;
196
197 netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");
198
199 /* Now that the UDP and the GTP header have been removed, set up the
200 * new network header. This is required by the upper layer to
201 * calculate the transport header.
202 */
203 skb_reset_network_header(skb);
204
205 skb->dev = pctx->dev;
206
207 stats = this_cpu_ptr(pctx->dev->tstats);
208 u64_stats_update_begin(&stats->syncp);
209 stats->rx_packets++;
210 stats->rx_bytes += skb->len;
211 u64_stats_update_end(&stats->syncp);
212
213 netif_rx(skb);
214 return 0;
215}
216
178/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */ 217/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
179static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, 218static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
180 bool xnet)
181{ 219{
182 unsigned int hdrlen = sizeof(struct udphdr) + 220 unsigned int hdrlen = sizeof(struct udphdr) +
183 sizeof(struct gtp0_header); 221 sizeof(struct gtp0_header);
@@ -201,17 +239,10 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
201 return 1; 239 return 1;
202 } 240 }
203 241
204 if (!gtp_check_src_ms(skb, pctx, hdrlen)) { 242 return gtp_rx(pctx, skb, hdrlen);
205 netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
206 return 1;
207 }
208
209 /* Get rid of the GTP + UDP headers. */
210 return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
211} 243}
212 244
213static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, 245static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
214 bool xnet)
215{ 246{
216 unsigned int hdrlen = sizeof(struct udphdr) + 247 unsigned int hdrlen = sizeof(struct udphdr) +
217 sizeof(struct gtp1_header); 248 sizeof(struct gtp1_header);
@@ -250,37 +281,33 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
250 return 1; 281 return 1;
251 } 282 }
252 283
253 if (!gtp_check_src_ms(skb, pctx, hdrlen)) { 284 return gtp_rx(pctx, skb, hdrlen);
254 netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
255 return 1;
256 }
257
258 /* Get rid of the GTP + UDP headers. */
259 return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
260} 285}
261 286
262static void gtp_encap_disable(struct gtp_dev *gtp) 287static void gtp_encap_destroy(struct sock *sk)
263{ 288{
264 if (gtp->sock0 && gtp->sock0->sk) { 289 struct gtp_dev *gtp;
265 udp_sk(gtp->sock0->sk)->encap_type = 0;
266 rcu_assign_sk_user_data(gtp->sock0->sk, NULL);
267 }
268 if (gtp->sock1u && gtp->sock1u->sk) {
269 udp_sk(gtp->sock1u->sk)->encap_type = 0;
270 rcu_assign_sk_user_data(gtp->sock1u->sk, NULL);
271 }
272 290
273 gtp->sock0 = NULL; 291 gtp = rcu_dereference_sk_user_data(sk);
274 gtp->sock1u = NULL; 292 if (gtp) {
293 udp_sk(sk)->encap_type = 0;
294 rcu_assign_sk_user_data(sk, NULL);
295 sock_put(sk);
296 }
275} 297}
276 298
277static void gtp_encap_destroy(struct sock *sk) 299static void gtp_encap_disable_sock(struct sock *sk)
278{ 300{
279 struct gtp_dev *gtp; 301 if (!sk)
302 return;
280 303
281 gtp = rcu_dereference_sk_user_data(sk); 304 gtp_encap_destroy(sk);
282 if (gtp) 305}
283 gtp_encap_disable(gtp); 306
307static void gtp_encap_disable(struct gtp_dev *gtp)
308{
309 gtp_encap_disable_sock(gtp->sk0);
310 gtp_encap_disable_sock(gtp->sk1u);
284} 311}
285 312
286/* UDP encapsulation receive handler. See net/ipv4/udp.c. 313/* UDP encapsulation receive handler. See net/ipv4/udp.c.
@@ -288,10 +315,8 @@ static void gtp_encap_destroy(struct sock *sk)
288 */ 315 */
289static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb) 316static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
290{ 317{
291 struct pcpu_sw_netstats *stats;
292 struct gtp_dev *gtp; 318 struct gtp_dev *gtp;
293 bool xnet; 319 int ret = 0;
294 int ret;
295 320
296 gtp = rcu_dereference_sk_user_data(sk); 321 gtp = rcu_dereference_sk_user_data(sk);
297 if (!gtp) 322 if (!gtp)
@@ -299,16 +324,14 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
299 324
300 netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk); 325 netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
301 326
302 xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));
303
304 switch (udp_sk(sk)->encap_type) { 327 switch (udp_sk(sk)->encap_type) {
305 case UDP_ENCAP_GTP0: 328 case UDP_ENCAP_GTP0:
306 netdev_dbg(gtp->dev, "received GTP0 packet\n"); 329 netdev_dbg(gtp->dev, "received GTP0 packet\n");
307 ret = gtp0_udp_encap_recv(gtp, skb, xnet); 330 ret = gtp0_udp_encap_recv(gtp, skb);
308 break; 331 break;
309 case UDP_ENCAP_GTP1U: 332 case UDP_ENCAP_GTP1U:
310 netdev_dbg(gtp->dev, "received GTP1U packet\n"); 333 netdev_dbg(gtp->dev, "received GTP1U packet\n");
311 ret = gtp1u_udp_encap_recv(gtp, skb, xnet); 334 ret = gtp1u_udp_encap_recv(gtp, skb);
312 break; 335 break;
313 default: 336 default:
314 ret = -1; /* Shouldn't happen. */ 337 ret = -1; /* Shouldn't happen. */
@@ -317,33 +340,17 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
317 switch (ret) { 340 switch (ret) {
318 case 1: 341 case 1:
319 netdev_dbg(gtp->dev, "pass up to the process\n"); 342 netdev_dbg(gtp->dev, "pass up to the process\n");
320 return 1; 343 break;
321 case 0: 344 case 0:
322 netdev_dbg(gtp->dev, "forwarding packet from GGSN to uplink\n");
323 break; 345 break;
324 case -1: 346 case -1:
325 netdev_dbg(gtp->dev, "GTP packet has been dropped\n"); 347 netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
326 kfree_skb(skb); 348 kfree_skb(skb);
327 return 0; 349 ret = 0;
350 break;
328 } 351 }
329 352
330 /* Now that the UDP and the GTP header have been removed, set up the 353 return ret;
331 * new network header. This is required by the upper layer to
332 * calculate the transport header.
333 */
334 skb_reset_network_header(skb);
335
336 skb->dev = gtp->dev;
337
338 stats = this_cpu_ptr(gtp->dev->tstats);
339 u64_stats_update_begin(&stats->syncp);
340 stats->rx_packets++;
341 stats->rx_bytes += skb->len;
342 u64_stats_update_end(&stats->syncp);
343
344 netif_rx(skb);
345
346 return 0;
347} 354}
348 355
349static int gtp_dev_init(struct net_device *dev) 356static int gtp_dev_init(struct net_device *dev)
@@ -367,8 +374,9 @@ static void gtp_dev_uninit(struct net_device *dev)
367 free_percpu(dev->tstats); 374 free_percpu(dev->tstats);
368} 375}
369 376
370static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4, 377static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
371 const struct sock *sk, __be32 daddr) 378 const struct sock *sk,
379 __be32 daddr)
372{ 380{
373 memset(fl4, 0, sizeof(*fl4)); 381 memset(fl4, 0, sizeof(*fl4));
374 fl4->flowi4_oif = sk->sk_bound_dev_if; 382 fl4->flowi4_oif = sk->sk_bound_dev_if;
@@ -377,7 +385,7 @@ static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4,
377 fl4->flowi4_tos = RT_CONN_FLAGS(sk); 385 fl4->flowi4_tos = RT_CONN_FLAGS(sk);
378 fl4->flowi4_proto = sk->sk_protocol; 386 fl4->flowi4_proto = sk->sk_protocol;
379 387
380 return ip_route_output_key(net, fl4); 388 return ip_route_output_key(sock_net(sk), fl4);
381} 389}
382 390
383static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) 391static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
@@ -466,7 +474,6 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
466 struct rtable *rt; 474 struct rtable *rt;
467 struct flowi4 fl4; 475 struct flowi4 fl4;
468 struct iphdr *iph; 476 struct iphdr *iph;
469 struct sock *sk;
470 __be16 df; 477 __be16 df;
471 int mtu; 478 int mtu;
472 479
@@ -482,30 +489,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
482 } 489 }
483 netdev_dbg(dev, "found PDP context %p\n", pctx); 490 netdev_dbg(dev, "found PDP context %p\n", pctx);
484 491
485 switch (pctx->gtp_version) { 492 rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->sgsn_addr_ip4.s_addr);
486 case GTP_V0:
487 if (gtp->sock0)
488 sk = gtp->sock0->sk;
489 else
490 sk = NULL;
491 break;
492 case GTP_V1:
493 if (gtp->sock1u)
494 sk = gtp->sock1u->sk;
495 else
496 sk = NULL;
497 break;
498 default:
499 return -ENOENT;
500 }
501
502 if (!sk) {
503 netdev_dbg(dev, "no userspace socket is available, skip\n");
504 return -ENOENT;
505 }
506
507 rt = ip4_route_output_gtp(sock_net(sk), &fl4, gtp->sock0->sk,
508 pctx->sgsn_addr_ip4.s_addr);
509 if (IS_ERR(rt)) { 493 if (IS_ERR(rt)) {
510 netdev_dbg(dev, "no route to SSGN %pI4\n", 494 netdev_dbg(dev, "no route to SSGN %pI4\n",
511 &pctx->sgsn_addr_ip4.s_addr); 495 &pctx->sgsn_addr_ip4.s_addr);
@@ -550,7 +534,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
550 goto err_rt; 534 goto err_rt;
551 } 535 }
552 536
553 gtp_set_pktinfo_ipv4(pktinfo, sk, iph, pctx, rt, &fl4, dev); 537 gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
554 gtp_push_header(skb, pktinfo); 538 gtp_push_header(skb, pktinfo);
555 539
556 return 0; 540 return 0;
@@ -640,27 +624,23 @@ static void gtp_link_setup(struct net_device *dev)
640 624
641static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize); 625static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
642static void gtp_hashtable_free(struct gtp_dev *gtp); 626static void gtp_hashtable_free(struct gtp_dev *gtp);
643static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, 627static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
644 int fd_gtp0, int fd_gtp1);
645 628
646static int gtp_newlink(struct net *src_net, struct net_device *dev, 629static int gtp_newlink(struct net *src_net, struct net_device *dev,
647 struct nlattr *tb[], struct nlattr *data[]) 630 struct nlattr *tb[], struct nlattr *data[])
648{ 631{
649 int hashsize, err, fd0, fd1;
650 struct gtp_dev *gtp; 632 struct gtp_dev *gtp;
651 struct gtp_net *gn; 633 struct gtp_net *gn;
634 int hashsize, err;
652 635
653 if (!data[IFLA_GTP_FD0] || !data[IFLA_GTP_FD1]) 636 if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
654 return -EINVAL; 637 return -EINVAL;
655 638
656 gtp = netdev_priv(dev); 639 gtp = netdev_priv(dev);
657 640
658 fd0 = nla_get_u32(data[IFLA_GTP_FD0]); 641 err = gtp_encap_enable(gtp, data);
659 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
660
661 err = gtp_encap_enable(dev, gtp, fd0, fd1);
662 if (err < 0) 642 if (err < 0)
663 goto out_err; 643 return err;
664 644
665 if (!data[IFLA_GTP_PDP_HASHSIZE]) 645 if (!data[IFLA_GTP_PDP_HASHSIZE])
666 hashsize = 1024; 646 hashsize = 1024;
@@ -688,7 +668,6 @@ out_hashtable:
688 gtp_hashtable_free(gtp); 668 gtp_hashtable_free(gtp);
689out_encap: 669out_encap:
690 gtp_encap_disable(gtp); 670 gtp_encap_disable(gtp);
691out_err:
692 return err; 671 return err;
693} 672}
694 673
@@ -747,21 +726,6 @@ static struct rtnl_link_ops gtp_link_ops __read_mostly = {
747 .fill_info = gtp_fill_info, 726 .fill_info = gtp_fill_info,
748}; 727};
749 728
750static struct net *gtp_genl_get_net(struct net *src_net, struct nlattr *tb[])
751{
752 struct net *net;
753
754 /* Examine the link attributes and figure out which network namespace
755 * we are talking about.
756 */
757 if (tb[GTPA_NET_NS_FD])
758 net = get_net_ns_by_fd(nla_get_u32(tb[GTPA_NET_NS_FD]));
759 else
760 net = get_net(src_net);
761
762 return net;
763}
764
765static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize) 729static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
766{ 730{
767 int i; 731 int i;
@@ -791,85 +755,111 @@ static void gtp_hashtable_free(struct gtp_dev *gtp)
791 struct pdp_ctx *pctx; 755 struct pdp_ctx *pctx;
792 int i; 756 int i;
793 757
794 for (i = 0; i < gtp->hash_size; i++) { 758 for (i = 0; i < gtp->hash_size; i++)
795 hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) { 759 hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
796 hlist_del_rcu(&pctx->hlist_tid); 760 pdp_context_delete(pctx);
797 hlist_del_rcu(&pctx->hlist_addr); 761
798 kfree_rcu(pctx, rcu_head);
799 }
800 }
801 synchronize_rcu(); 762 synchronize_rcu();
802 kfree(gtp->addr_hash); 763 kfree(gtp->addr_hash);
803 kfree(gtp->tid_hash); 764 kfree(gtp->tid_hash);
804} 765}
805 766
806static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, 767static struct sock *gtp_encap_enable_socket(int fd, int type,
807 int fd_gtp0, int fd_gtp1) 768 struct gtp_dev *gtp)
808{ 769{
809 struct udp_tunnel_sock_cfg tuncfg = {NULL}; 770 struct udp_tunnel_sock_cfg tuncfg = {NULL};
810 struct socket *sock0, *sock1u; 771 struct socket *sock;
772 struct sock *sk;
811 int err; 773 int err;
812 774
813 netdev_dbg(dev, "enable gtp on %d, %d\n", fd_gtp0, fd_gtp1); 775 pr_debug("enable gtp on %d, %d\n", fd, type);
814 776
815 sock0 = sockfd_lookup(fd_gtp0, &err); 777 sock = sockfd_lookup(fd, &err);
816 if (sock0 == NULL) { 778 if (!sock) {
817 netdev_dbg(dev, "socket fd=%d not found (gtp0)\n", fd_gtp0); 779 pr_debug("gtp socket fd=%d not found\n", fd);
818 return -ENOENT; 780 return NULL;
819 } 781 }
820 782
821 if (sock0->sk->sk_protocol != IPPROTO_UDP) { 783 if (sock->sk->sk_protocol != IPPROTO_UDP) {
822 netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp0); 784 pr_debug("socket fd=%d not UDP\n", fd);
823 err = -EINVAL; 785 sk = ERR_PTR(-EINVAL);
824 goto err1; 786 goto out_sock;
825 }
826
827 sock1u = sockfd_lookup(fd_gtp1, &err);
828 if (sock1u == NULL) {
829 netdev_dbg(dev, "socket fd=%d not found (gtp1u)\n", fd_gtp1);
830 err = -ENOENT;
831 goto err1;
832 } 787 }
833 788
834 if (sock1u->sk->sk_protocol != IPPROTO_UDP) { 789 if (rcu_dereference_sk_user_data(sock->sk)) {
835 netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp1); 790 sk = ERR_PTR(-EBUSY);
836 err = -EINVAL; 791 goto out_sock;
837 goto err2;
838 } 792 }
839 793
840 netdev_dbg(dev, "enable gtp on %p, %p\n", sock0, sock1u); 794 sk = sock->sk;
841 795 sock_hold(sk);
842 gtp->sock0 = sock0;
843 gtp->sock1u = sock1u;
844 796
845 tuncfg.sk_user_data = gtp; 797 tuncfg.sk_user_data = gtp;
798 tuncfg.encap_type = type;
846 tuncfg.encap_rcv = gtp_encap_recv; 799 tuncfg.encap_rcv = gtp_encap_recv;
847 tuncfg.encap_destroy = gtp_encap_destroy; 800 tuncfg.encap_destroy = gtp_encap_destroy;
848 801
849 tuncfg.encap_type = UDP_ENCAP_GTP0; 802 setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
850 setup_udp_tunnel_sock(sock_net(gtp->sock0->sk), gtp->sock0, &tuncfg);
851
852 tuncfg.encap_type = UDP_ENCAP_GTP1U;
853 setup_udp_tunnel_sock(sock_net(gtp->sock1u->sk), gtp->sock1u, &tuncfg);
854 803
855 err = 0; 804out_sock:
856err2: 805 sockfd_put(sock);
857 sockfd_put(sock1u); 806 return sk;
858err1:
859 sockfd_put(sock0);
860 return err;
861} 807}
862 808
863static struct net_device *gtp_find_dev(struct net *net, int ifindex) 809static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
864{ 810{
865 struct gtp_net *gn = net_generic(net, gtp_net_id); 811 struct sock *sk1u = NULL;
866 struct gtp_dev *gtp; 812 struct sock *sk0 = NULL;
867 813
868 list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) { 814 if (data[IFLA_GTP_FD0]) {
869 if (ifindex == gtp->dev->ifindex) 815 u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
870 return gtp->dev; 816
817 sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
818 if (IS_ERR(sk0))
819 return PTR_ERR(sk0);
871 } 820 }
872 return NULL; 821
822 if (data[IFLA_GTP_FD1]) {
823 u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
824
825 sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
826 if (IS_ERR(sk1u)) {
827 if (sk0)
828 gtp_encap_disable_sock(sk0);
829 return PTR_ERR(sk1u);
830 }
831 }
832
833 gtp->sk0 = sk0;
834 gtp->sk1u = sk1u;
835
836 return 0;
837}
838
839static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
840{
841 struct gtp_dev *gtp = NULL;
842 struct net_device *dev;
843 struct net *net;
844
845 /* Examine the link attributes and figure out which network namespace
846 * we are talking about.
847 */
848 if (nla[GTPA_NET_NS_FD])
849 net = get_net_ns_by_fd(nla_get_u32(nla[GTPA_NET_NS_FD]));
850 else
851 net = get_net(src_net);
852
853 if (IS_ERR(net))
854 return NULL;
855
856 /* Check if there's an existing gtpX device to configure */
857 dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
858 if (dev->netdev_ops == &gtp_netdev_ops)
859 gtp = netdev_priv(dev);
860
861 put_net(net);
862 return gtp;
873} 863}
874 864
875static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) 865static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
@@ -899,9 +889,10 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
899 } 889 }
900} 890}
901 891
902static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info) 892static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
893 struct genl_info *info)
903{ 894{
904 struct gtp_dev *gtp = netdev_priv(dev); 895 struct net_device *dev = gtp->dev;
905 u32 hash_ms, hash_tid = 0; 896 u32 hash_ms, hash_tid = 0;
906 struct pdp_ctx *pctx; 897 struct pdp_ctx *pctx;
907 bool found = false; 898 bool found = false;
@@ -940,6 +931,9 @@ static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
940 if (pctx == NULL) 931 if (pctx == NULL)
941 return -ENOMEM; 932 return -ENOMEM;
942 933
934 sock_hold(sk);
935 pctx->sk = sk;
936 pctx->dev = gtp->dev;
943 ipv4_pdp_fill(pctx, info); 937 ipv4_pdp_fill(pctx, info);
944 atomic_set(&pctx->tx_seq, 0); 938 atomic_set(&pctx->tx_seq, 0);
945 939
@@ -976,10 +970,27 @@ static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
976 return 0; 970 return 0;
977} 971}
978 972
973static void pdp_context_free(struct rcu_head *head)
974{
975 struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head);
976
977 sock_put(pctx->sk);
978 kfree(pctx);
979}
980
981static void pdp_context_delete(struct pdp_ctx *pctx)
982{
983 hlist_del_rcu(&pctx->hlist_tid);
984 hlist_del_rcu(&pctx->hlist_addr);
985 call_rcu(&pctx->rcu_head, pdp_context_free);
986}
987
979static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info) 988static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
980{ 989{
981 struct net_device *dev; 990 unsigned int version;
982 struct net *net; 991 struct gtp_dev *gtp;
992 struct sock *sk;
993 int err;
983 994
984 if (!info->attrs[GTPA_VERSION] || 995 if (!info->attrs[GTPA_VERSION] ||
985 !info->attrs[GTPA_LINK] || 996 !info->attrs[GTPA_LINK] ||
@@ -987,7 +998,9 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
987 !info->attrs[GTPA_MS_ADDRESS]) 998 !info->attrs[GTPA_MS_ADDRESS])
988 return -EINVAL; 999 return -EINVAL;
989 1000
990 switch (nla_get_u32(info->attrs[GTPA_VERSION])) { 1001 version = nla_get_u32(info->attrs[GTPA_VERSION]);
1002
1003 switch (version) {
991 case GTP_V0: 1004 case GTP_V0:
992 if (!info->attrs[GTPA_TID] || 1005 if (!info->attrs[GTPA_TID] ||
993 !info->attrs[GTPA_FLOW]) 1006 !info->attrs[GTPA_FLOW])
@@ -1003,77 +1016,101 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
1003 return -EINVAL; 1016 return -EINVAL;
1004 } 1017 }
1005 1018
1006 net = gtp_genl_get_net(sock_net(skb->sk), info->attrs); 1019 rcu_read_lock();
1007 if (IS_ERR(net))
1008 return PTR_ERR(net);
1009 1020
1010 /* Check if there's an existing gtpX device to configure */ 1021 gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
1011 dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK])); 1022 if (!gtp) {
1012 if (dev == NULL) { 1023 err = -ENODEV;
1013 put_net(net); 1024 goto out_unlock;
1014 return -ENODEV; 1025 }
1026
1027 if (version == GTP_V0)
1028 sk = gtp->sk0;
1029 else if (version == GTP_V1)
1030 sk = gtp->sk1u;
1031 else
1032 sk = NULL;
1033
1034 if (!sk) {
1035 err = -ENODEV;
1036 goto out_unlock;
1015 } 1037 }
1016 put_net(net);
1017 1038
1018 return ipv4_pdp_add(dev, info); 1039 err = ipv4_pdp_add(gtp, sk, info);
1040
1041out_unlock:
1042 rcu_read_unlock();
1043 return err;
1019} 1044}
1020 1045
1021static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info) 1046static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
1047 struct nlattr *nla[])
1022{ 1048{
1023 struct net_device *dev;
1024 struct pdp_ctx *pctx;
1025 struct gtp_dev *gtp; 1049 struct gtp_dev *gtp;
1026 struct net *net;
1027 1050
1028 if (!info->attrs[GTPA_VERSION] || 1051 gtp = gtp_find_dev(net, nla);
1029 !info->attrs[GTPA_LINK]) 1052 if (!gtp)
1030 return -EINVAL; 1053 return ERR_PTR(-ENODEV);
1031 1054
1032 net = gtp_genl_get_net(sock_net(skb->sk), info->attrs); 1055 if (nla[GTPA_MS_ADDRESS]) {
1033 if (IS_ERR(net)) 1056 __be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]);
1034 return PTR_ERR(net);
1035 1057
1036 /* Check if there's an existing gtpX device to configure */ 1058 return ipv4_pdp_find(gtp, ip);
1037 dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK])); 1059 } else if (nla[GTPA_VERSION]) {
1038 if (dev == NULL) { 1060 u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]);
1039 put_net(net); 1061
1040 return -ENODEV; 1062 if (gtp_version == GTP_V0 && nla[GTPA_TID])
1063 return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]));
1064 else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI])
1065 return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]));
1041 } 1066 }
1042 put_net(net);
1043 1067
1044 gtp = netdev_priv(dev); 1068 return ERR_PTR(-EINVAL);
1069}
1045 1070
1046 switch (nla_get_u32(info->attrs[GTPA_VERSION])) { 1071static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[])
1047 case GTP_V0: 1072{
1048 if (!info->attrs[GTPA_TID]) 1073 struct pdp_ctx *pctx;
1049 return -EINVAL;
1050 pctx = gtp0_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_TID]));
1051 break;
1052 case GTP_V1:
1053 if (!info->attrs[GTPA_I_TEI])
1054 return -EINVAL;
1055 pctx = gtp1_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_I_TEI]));
1056 break;
1057 1074
1058 default: 1075 if (nla[GTPA_LINK])
1076 pctx = gtp_find_pdp_by_link(net, nla);
1077 else
1078 pctx = ERR_PTR(-EINVAL);
1079
1080 if (!pctx)
1081 pctx = ERR_PTR(-ENOENT);
1082
1083 return pctx;
1084}
1085
1086static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
1087{
1088 struct pdp_ctx *pctx;
1089 int err = 0;
1090
1091 if (!info->attrs[GTPA_VERSION])
1059 return -EINVAL; 1092 return -EINVAL;
1060 }
1061 1093
1062 if (pctx == NULL) 1094 rcu_read_lock();
1063 return -ENOENT; 1095
1096 pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
1097 if (IS_ERR(pctx)) {
1098 err = PTR_ERR(pctx);
1099 goto out_unlock;
1100 }
1064 1101
1065 if (pctx->gtp_version == GTP_V0) 1102 if (pctx->gtp_version == GTP_V0)
1066 netdev_dbg(dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n", 1103 netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
1067 pctx->u.v0.tid, pctx); 1104 pctx->u.v0.tid, pctx);
1068 else if (pctx->gtp_version == GTP_V1) 1105 else if (pctx->gtp_version == GTP_V1)
1069 netdev_dbg(dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n", 1106 netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
1070 pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx); 1107 pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
1071 1108
1072 hlist_del_rcu(&pctx->hlist_tid); 1109 pdp_context_delete(pctx);
1073 hlist_del_rcu(&pctx->hlist_addr);
1074 kfree_rcu(pctx, rcu_head);
1075 1110
1076 return 0; 1111out_unlock:
1112 rcu_read_unlock();
1113 return err;
1077} 1114}
1078 1115
1079static struct genl_family gtp_genl_family; 1116static struct genl_family gtp_genl_family;
@@ -1117,59 +1154,17 @@ nla_put_failure:
1117static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info) 1154static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
1118{ 1155{
1119 struct pdp_ctx *pctx = NULL; 1156 struct pdp_ctx *pctx = NULL;
1120 struct net_device *dev;
1121 struct sk_buff *skb2; 1157 struct sk_buff *skb2;
1122 struct gtp_dev *gtp;
1123 u32 gtp_version;
1124 struct net *net;
1125 int err; 1158 int err;
1126 1159
1127 if (!info->attrs[GTPA_VERSION] || 1160 if (!info->attrs[GTPA_VERSION])
1128 !info->attrs[GTPA_LINK])
1129 return -EINVAL; 1161 return -EINVAL;
1130 1162
1131 gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
1132 switch (gtp_version) {
1133 case GTP_V0:
1134 case GTP_V1:
1135 break;
1136 default:
1137 return -EINVAL;
1138 }
1139
1140 net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
1141 if (IS_ERR(net))
1142 return PTR_ERR(net);
1143
1144 /* Check if there's an existing gtpX device to configure */
1145 dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
1146 if (dev == NULL) {
1147 put_net(net);
1148 return -ENODEV;
1149 }
1150 put_net(net);
1151
1152 gtp = netdev_priv(dev);
1153
1154 rcu_read_lock(); 1163 rcu_read_lock();
1155 if (gtp_version == GTP_V0 &&
1156 info->attrs[GTPA_TID]) {
1157 u64 tid = nla_get_u64(info->attrs[GTPA_TID]);
1158
1159 pctx = gtp0_pdp_find(gtp, tid);
1160 } else if (gtp_version == GTP_V1 &&
1161 info->attrs[GTPA_I_TEI]) {
1162 u32 tid = nla_get_u32(info->attrs[GTPA_I_TEI]);
1163
1164 pctx = gtp1_pdp_find(gtp, tid);
1165 } else if (info->attrs[GTPA_MS_ADDRESS]) {
1166 __be32 ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
1167
1168 pctx = ipv4_pdp_find(gtp, ip);
1169 }
1170 1164
1171 if (pctx == NULL) { 1165 pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
1172 err = -ENOENT; 1166 if (IS_ERR(pctx)) {
1167 err = PTR_ERR(pctx);
1173 goto err_unlock; 1168 goto err_unlock;
1174 } 1169 }
1175 1170
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index f9f3dba7a588..6b5f75217694 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -196,6 +196,7 @@ int netvsc_recv_callback(struct net_device *net,
196 const struct ndis_tcp_ip_checksum_info *csum_info, 196 const struct ndis_tcp_ip_checksum_info *csum_info,
197 const struct ndis_pkt_8021q_info *vlan); 197 const struct ndis_pkt_8021q_info *vlan);
198void netvsc_channel_cb(void *context); 198void netvsc_channel_cb(void *context);
199int netvsc_poll(struct napi_struct *napi, int budget);
199int rndis_filter_open(struct netvsc_device *nvdev); 200int rndis_filter_open(struct netvsc_device *nvdev);
200int rndis_filter_close(struct netvsc_device *nvdev); 201int rndis_filter_close(struct netvsc_device *nvdev);
201int rndis_filter_device_add(struct hv_device *dev, 202int rndis_filter_device_add(struct hv_device *dev,
@@ -722,6 +723,7 @@ struct net_device_context {
722/* Per channel data */ 723/* Per channel data */
723struct netvsc_channel { 724struct netvsc_channel {
724 struct vmbus_channel *channel; 725 struct vmbus_channel *channel;
726 struct napi_struct napi;
725 struct multi_send_data msd; 727 struct multi_send_data msd;
726 struct multi_recv_comp mrc; 728 struct multi_recv_comp mrc;
727 atomic_t queue_sends; 729 atomic_t queue_sends;
@@ -1425,9 +1427,6 @@ struct rndis_message {
1425 ((void *) rndis_msg) 1427 ((void *) rndis_msg)
1426 1428
1427 1429
1428#define __struct_bcount(x)
1429
1430
1431 1430
1432#define RNDIS_HEADER_SIZE (sizeof(struct rndis_message) - \ 1431#define RNDIS_HEADER_SIZE (sizeof(struct rndis_message) - \
1433 sizeof(union rndis_message_container)) 1432 sizeof(union rndis_message_container))
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 4c1d8cca247b..989b7cd99380 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -91,15 +91,6 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
91} 91}
92 92
93 93
94static inline bool netvsc_channel_idle(const struct netvsc_device *net_device,
95 u16 q_idx)
96{
97 const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
98
99 return atomic_read(&net_device->num_outstanding_recvs) == 0 &&
100 atomic_read(&nvchan->queue_sends) == 0;
101}
102
103static struct netvsc_device *get_outbound_net_device(struct hv_device *device) 94static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
104{ 95{
105 struct netvsc_device *net_device = hv_device_to_netvsc_device(device); 96 struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
@@ -556,6 +547,7 @@ void netvsc_device_remove(struct hv_device *device)
556 struct net_device *ndev = hv_get_drvdata(device); 547 struct net_device *ndev = hv_get_drvdata(device);
557 struct net_device_context *net_device_ctx = netdev_priv(ndev); 548 struct net_device_context *net_device_ctx = netdev_priv(ndev);
558 struct netvsc_device *net_device = net_device_ctx->nvdev; 549 struct netvsc_device *net_device = net_device_ctx->nvdev;
550 int i;
559 551
560 netvsc_disconnect_vsp(device); 552 netvsc_disconnect_vsp(device);
561 553
@@ -570,6 +562,9 @@ void netvsc_device_remove(struct hv_device *device)
570 /* Now, we can close the channel safely */ 562 /* Now, we can close the channel safely */
571 vmbus_close(device->channel); 563 vmbus_close(device->channel);
572 564
565 for (i = 0; i < net_device->num_chn; i++)
566 napi_disable(&net_device->chan_table[i].napi);
567
573 /* Release all resources */ 568 /* Release all resources */
574 free_netvsc_device(net_device); 569 free_netvsc_device(net_device);
575} 570}
@@ -600,9 +595,9 @@ static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
600static void netvsc_send_tx_complete(struct netvsc_device *net_device, 595static void netvsc_send_tx_complete(struct netvsc_device *net_device,
601 struct vmbus_channel *incoming_channel, 596 struct vmbus_channel *incoming_channel,
602 struct hv_device *device, 597 struct hv_device *device,
603 struct vmpacket_descriptor *packet) 598 const struct vmpacket_descriptor *desc)
604{ 599{
605 struct sk_buff *skb = (struct sk_buff *)(unsigned long)packet->trans_id; 600 struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
606 struct net_device *ndev = hv_get_drvdata(device); 601 struct net_device *ndev = hv_get_drvdata(device);
607 struct net_device_context *net_device_ctx = netdev_priv(ndev); 602 struct net_device_context *net_device_ctx = netdev_priv(ndev);
608 struct vmbus_channel *channel = device->channel; 603 struct vmbus_channel *channel = device->channel;
@@ -647,14 +642,11 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
647static void netvsc_send_completion(struct netvsc_device *net_device, 642static void netvsc_send_completion(struct netvsc_device *net_device,
648 struct vmbus_channel *incoming_channel, 643 struct vmbus_channel *incoming_channel,
649 struct hv_device *device, 644 struct hv_device *device,
650 struct vmpacket_descriptor *packet) 645 const struct vmpacket_descriptor *desc)
651{ 646{
652 struct nvsp_message *nvsp_packet; 647 struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
653 struct net_device *ndev = hv_get_drvdata(device); 648 struct net_device *ndev = hv_get_drvdata(device);
654 649
655 nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
656 (packet->offset8 << 3));
657
658 switch (nvsp_packet->hdr.msg_type) { 650 switch (nvsp_packet->hdr.msg_type) {
659 case NVSP_MSG_TYPE_INIT_COMPLETE: 651 case NVSP_MSG_TYPE_INIT_COMPLETE:
660 case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE: 652 case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
@@ -668,7 +660,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
668 660
669 case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE: 661 case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
670 netvsc_send_tx_complete(net_device, incoming_channel, 662 netvsc_send_tx_complete(net_device, incoming_channel,
671 device, packet); 663 device, desc);
672 break; 664 break;
673 665
674 default: 666 default:
@@ -1066,28 +1058,29 @@ static inline struct recv_comp_data *get_recv_comp_slot(
1066 return rcd; 1058 return rcd;
1067} 1059}
1068 1060
1069static void netvsc_receive(struct net_device *ndev, 1061static int netvsc_receive(struct net_device *ndev,
1070 struct netvsc_device *net_device, 1062 struct netvsc_device *net_device,
1071 struct net_device_context *net_device_ctx, 1063 struct net_device_context *net_device_ctx,
1072 struct hv_device *device, 1064 struct hv_device *device,
1073 struct vmbus_channel *channel, 1065 struct vmbus_channel *channel,
1074 struct vmtransfer_page_packet_header *vmxferpage_packet, 1066 const struct vmpacket_descriptor *desc,
1075 struct nvsp_message *nvsp) 1067 struct nvsp_message *nvsp)
1076{ 1068{
1069 const struct vmtransfer_page_packet_header *vmxferpage_packet
1070 = container_of(desc, const struct vmtransfer_page_packet_header, d);
1071 u16 q_idx = channel->offermsg.offer.sub_channel_index;
1077 char *recv_buf = net_device->recv_buf; 1072 char *recv_buf = net_device->recv_buf;
1078 u32 status = NVSP_STAT_SUCCESS; 1073 u32 status = NVSP_STAT_SUCCESS;
1079 int i; 1074 int i;
1080 int count = 0; 1075 int count = 0;
1081 int ret; 1076 int ret;
1082 struct recv_comp_data *rcd;
1083 u16 q_idx = channel->offermsg.offer.sub_channel_index;
1084 1077
1085 /* Make sure this is a valid nvsp packet */ 1078 /* Make sure this is a valid nvsp packet */
1086 if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) { 1079 if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
1087 netif_err(net_device_ctx, rx_err, ndev, 1080 netif_err(net_device_ctx, rx_err, ndev,
1088 "Unknown nvsp packet type received %u\n", 1081 "Unknown nvsp packet type received %u\n",
1089 nvsp->hdr.msg_type); 1082 nvsp->hdr.msg_type);
1090 return; 1083 return 0;
1091 } 1084 }
1092 1085
1093 if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) { 1086 if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
@@ -1095,7 +1088,7 @@ static void netvsc_receive(struct net_device *ndev,
1095 "Invalid xfer page set id - expecting %x got %x\n", 1088 "Invalid xfer page set id - expecting %x got %x\n",
1096 NETVSC_RECEIVE_BUFFER_ID, 1089 NETVSC_RECEIVE_BUFFER_ID,
1097 vmxferpage_packet->xfer_pageset_id); 1090 vmxferpage_packet->xfer_pageset_id);
1098 return; 1091 return 0;
1099 } 1092 }
1100 1093
1101 count = vmxferpage_packet->range_cnt; 1094 count = vmxferpage_packet->range_cnt;
@@ -1111,26 +1104,26 @@ static void netvsc_receive(struct net_device *ndev,
1111 channel, data, buflen); 1104 channel, data, buflen);
1112 } 1105 }
1113 1106
1114 if (!net_device->chan_table[q_idx].mrc.buf) { 1107 if (net_device->chan_table[q_idx].mrc.buf) {
1108 struct recv_comp_data *rcd;
1109
1110 rcd = get_recv_comp_slot(net_device, channel, q_idx);
1111 if (rcd) {
1112 rcd->tid = vmxferpage_packet->d.trans_id;
1113 rcd->status = status;
1114 } else {
1115 netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
1116 q_idx, vmxferpage_packet->d.trans_id);
1117 }
1118 } else {
1115 ret = netvsc_send_recv_completion(channel, 1119 ret = netvsc_send_recv_completion(channel,
1116 vmxferpage_packet->d.trans_id, 1120 vmxferpage_packet->d.trans_id,
1117 status); 1121 status);
1118 if (ret) 1122 if (ret)
1119 netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n", 1123 netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n",
1120 q_idx, vmxferpage_packet->d.trans_id, ret); 1124 q_idx, vmxferpage_packet->d.trans_id, ret);
1121 return;
1122 }
1123
1124 rcd = get_recv_comp_slot(net_device, channel, q_idx);
1125
1126 if (!rcd) {
1127 netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
1128 q_idx, vmxferpage_packet->d.trans_id);
1129 return;
1130 } 1125 }
1131 1126 return count;
1132 rcd->tid = vmxferpage_packet->d.trans_id;
1133 rcd->status = status;
1134} 1127}
1135 1128
1136static void netvsc_send_table(struct hv_device *hdev, 1129static void netvsc_send_table(struct hv_device *hdev,
@@ -1176,17 +1169,15 @@ static inline void netvsc_receive_inband(struct hv_device *hdev,
1176 } 1169 }
1177} 1170}
1178 1171
1179static void netvsc_process_raw_pkt(struct hv_device *device, 1172static int netvsc_process_raw_pkt(struct hv_device *device,
1180 struct vmbus_channel *channel, 1173 struct vmbus_channel *channel,
1181 struct netvsc_device *net_device, 1174 struct netvsc_device *net_device,
1182 struct net_device *ndev, 1175 struct net_device *ndev,
1183 u64 request_id, 1176 u64 request_id,
1184 struct vmpacket_descriptor *desc) 1177 const struct vmpacket_descriptor *desc)
1185{ 1178{
1186 struct net_device_context *net_device_ctx = netdev_priv(ndev); 1179 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1187 struct nvsp_message *nvmsg 1180 struct nvsp_message *nvmsg = hv_pkt_data(desc);
1188 = (struct nvsp_message *)((unsigned long)desc
1189 + (desc->offset8 << 3));
1190 1181
1191 switch (desc->type) { 1182 switch (desc->type) {
1192 case VM_PKT_COMP: 1183 case VM_PKT_COMP:
@@ -1194,10 +1185,8 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
1194 break; 1185 break;
1195 1186
1196 case VM_PKT_DATA_USING_XFER_PAGES: 1187 case VM_PKT_DATA_USING_XFER_PAGES:
1197 netvsc_receive(ndev, net_device, net_device_ctx, 1188 return netvsc_receive(ndev, net_device, net_device_ctx,
1198 device, channel, 1189 device, channel, desc, nvmsg);
1199 (struct vmtransfer_page_packet_header *)desc,
1200 nvmsg);
1201 break; 1190 break;
1202 1191
1203 case VM_PKT_DATA_INBAND: 1192 case VM_PKT_DATA_INBAND:
@@ -1209,47 +1198,75 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
1209 desc->type, request_id); 1198 desc->type, request_id);
1210 break; 1199 break;
1211 } 1200 }
1201
1202 return 0;
1212} 1203}
1213 1204
1214void netvsc_channel_cb(void *context) 1205static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
1215{ 1206{
1216 struct vmbus_channel *channel = context; 1207 struct vmbus_channel *primary = channel->primary_channel;
1217 u16 q_idx = channel->offermsg.offer.sub_channel_index;
1218 struct hv_device *device;
1219 struct netvsc_device *net_device;
1220 struct vmpacket_descriptor *desc;
1221 struct net_device *ndev;
1222 bool need_to_commit = false;
1223 1208
1224 if (channel->primary_channel != NULL) 1209 return primary ? primary->device_obj : channel->device_obj;
1225 device = channel->primary_channel->device_obj; 1210}
1226 else
1227 device = channel->device_obj;
1228
1229 ndev = hv_get_drvdata(device);
1230 if (unlikely(!ndev))
1231 return;
1232 1211
1233 net_device = net_device_to_netvsc_device(ndev); 1212/* Network processing softirq
1234 if (unlikely(net_device->destroy) && 1213 * Process data in incoming ring buffer from host
1235 netvsc_channel_idle(net_device, q_idx)) 1214 * Stops when ring is empty or budget is met or exceeded.
1236 return; 1215 */
1216int netvsc_poll(struct napi_struct *napi, int budget)
1217{
1218 struct netvsc_channel *nvchan
1219 = container_of(napi, struct netvsc_channel, napi);
1220 struct vmbus_channel *channel = nvchan->channel;
1221 struct hv_device *device = netvsc_channel_to_device(channel);
1222 u16 q_idx = channel->offermsg.offer.sub_channel_index;
1223 struct net_device *ndev = hv_get_drvdata(device);
1224 struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
1225 const struct vmpacket_descriptor *desc;
1226 int work_done = 0;
1237 1227
1238 /* commit_rd_index() -> hv_signal_on_read() needs this. */ 1228 desc = hv_pkt_iter_first(channel);
1239 init_cached_read_index(channel); 1229 while (desc) {
1230 int count;
1240 1231
1241 while ((desc = get_next_pkt_raw(channel)) != NULL) { 1232 count = netvsc_process_raw_pkt(device, channel, net_device,
1242 netvsc_process_raw_pkt(device, channel, net_device, 1233 ndev, desc->trans_id, desc);
1243 ndev, desc->trans_id, desc); 1234 work_done += count;
1235 desc = __hv_pkt_iter_next(channel, desc);
1244 1236
1245 put_pkt_raw(channel, desc); 1237 /* If receive packet budget is exhausted, reschedule */
1246 need_to_commit = true; 1238 if (work_done >= budget) {
1239 work_done = budget;
1240 break;
1241 }
1247 } 1242 }
1243 hv_pkt_iter_close(channel);
1248 1244
1249 if (need_to_commit) 1245 /* If budget was not exhausted and
1250 commit_rd_index(channel); 1246 * not doing busy poll
1247 * then re-enable host interrupts
1248 * and reschedule if ring is not empty.
1249 */
1250 if (work_done < budget &&
1251 napi_complete_done(napi, work_done) &&
1252 hv_end_read(&channel->inbound) != 0)
1253 napi_reschedule(napi);
1251 1254
1252 netvsc_chk_recv_comp(net_device, channel, q_idx); 1255 netvsc_chk_recv_comp(net_device, channel, q_idx);
1256 return work_done;
1257}
1258
1259/* Call back when data is available in host ring buffer.
1260 * Processing is deferred until network softirq (NAPI)
1261 */
1262void netvsc_channel_cb(void *context)
1263{
1264 struct netvsc_channel *nvchan = context;
1265
 1266 /* disable interrupts from host */
1267 hv_begin_read(&nvchan->channel->inbound);
1268
1269 napi_schedule(&nvchan->napi);
1253} 1270}
1254 1271
1255/* 1272/*
@@ -1271,10 +1288,16 @@ int netvsc_device_add(struct hv_device *device,
1271 1288
1272 net_device->ring_size = ring_size; 1289 net_device->ring_size = ring_size;
1273 1290
1291 /* Because the device uses NAPI, all the interrupt batching and
1292 * control is done via Net softirq, not the channel handling
1293 */
1294 set_channel_read_mode(device->channel, HV_CALL_ISR);
1295
1274 /* Open the channel */ 1296 /* Open the channel */
1275 ret = vmbus_open(device->channel, ring_size * PAGE_SIZE, 1297 ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
1276 ring_size * PAGE_SIZE, NULL, 0, 1298 ring_size * PAGE_SIZE, NULL, 0,
1277 netvsc_channel_cb, device->channel); 1299 netvsc_channel_cb,
1300 net_device->chan_table);
1278 1301
1279 if (ret != 0) { 1302 if (ret != 0) {
1280 netdev_err(ndev, "unable to open channel: %d\n", ret); 1303 netdev_err(ndev, "unable to open channel: %d\n", ret);
@@ -1288,8 +1311,16 @@ int netvsc_device_add(struct hv_device *device,
1288 * chn_table with the default channel to use it before subchannels are 1311 * chn_table with the default channel to use it before subchannels are
1289 * opened. 1312 * opened.
1290 */ 1313 */
1291 for (i = 0; i < VRSS_CHANNEL_MAX; i++) 1314 for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
1292 net_device->chan_table[i].channel = device->channel; 1315 struct netvsc_channel *nvchan = &net_device->chan_table[i];
1316
1317 nvchan->channel = device->channel;
1318 netif_napi_add(ndev, &nvchan->napi,
1319 netvsc_poll, NAPI_POLL_WEIGHT);
1320 }
1321
1322 /* Enable NAPI handler for init callbacks */
1323 napi_enable(&net_device->chan_table[0].napi);
1293 1324
1294 /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is 1325 /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
1295 * populated. 1326 * populated.
@@ -1309,6 +1340,8 @@ int netvsc_device_add(struct hv_device *device,
1309 return ret; 1340 return ret;
1310 1341
1311close: 1342close:
1343 napi_disable(&net_device->chan_table[0].napi);
1344
1312 /* Now, we can close the channel safely */ 1345 /* Now, we can close the channel safely */
1313 vmbus_close(device->channel); 1346 vmbus_close(device->channel);
1314 1347
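The netvsc.c conversion above follows the canonical NAPI shape: the channel callback only masks host interrupts and schedules the poll routine, and the poll routine drains the ring under a budget, completing NAPI and re-arming interrupts only when it runs out of work. A stripped-down skeleton of that pattern, not the driver's exact code; the my_dev_* types and helpers are placeholders:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_dev_channel {
	struct napi_struct napi;
	/* driver-specific ring state would live here */
};

/* Placeholders for driver-specific ring handling. */
static void my_dev_mask_irq(struct my_dev_channel *chan);
static bool my_dev_process_one(struct my_dev_channel *chan);
static bool my_dev_unmask_irq_and_check_pending(struct my_dev_channel *chan);

static irqreturn_t my_dev_isr(int irq, void *data)
{
	struct my_dev_channel *chan = data;

	my_dev_mask_irq(chan);		/* like hv_begin_read() above */
	napi_schedule(&chan->napi);	/* defer the work to softirq  */
	return IRQ_HANDLED;
}

static int my_dev_poll(struct napi_struct *napi, int budget)
{
	struct my_dev_channel *chan =
		container_of(napi, struct my_dev_channel, napi);
	int work_done = 0;

	while (work_done < budget && my_dev_process_one(chan))
		work_done++;

	/* Re-enable interrupts only when the budget was not exhausted;
	 * if new work raced in while completing, poll again.  This is
	 * the hv_end_read()/napi_reschedule() dance in netvsc_poll().
	 */
	if (work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    my_dev_unmask_irq_and_check_pending(chan))
		napi_reschedule(napi);

	return work_done;
}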
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 5ede87f30463..191372486a87 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -584,13 +584,14 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
584} 584}
585 585
586static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, 586static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
587 struct napi_struct *napi,
587 const struct ndis_tcp_ip_checksum_info *csum_info, 588 const struct ndis_tcp_ip_checksum_info *csum_info,
588 const struct ndis_pkt_8021q_info *vlan, 589 const struct ndis_pkt_8021q_info *vlan,
589 void *data, u32 buflen) 590 void *data, u32 buflen)
590{ 591{
591 struct sk_buff *skb; 592 struct sk_buff *skb;
592 593
593 skb = netdev_alloc_skb_ip_align(net, buflen); 594 skb = napi_alloc_skb(napi, buflen);
594 if (!skb) 595 if (!skb)
595 return skb; 596 return skb;
596 597
@@ -637,11 +638,11 @@ int netvsc_recv_callback(struct net_device *net,
637{ 638{
638 struct net_device_context *net_device_ctx = netdev_priv(net); 639 struct net_device_context *net_device_ctx = netdev_priv(net);
639 struct netvsc_device *net_device = net_device_ctx->nvdev; 640 struct netvsc_device *net_device = net_device_ctx->nvdev;
641 u16 q_idx = channel->offermsg.offer.sub_channel_index;
642 struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
640 struct net_device *vf_netdev; 643 struct net_device *vf_netdev;
641 struct sk_buff *skb; 644 struct sk_buff *skb;
642 struct netvsc_stats *rx_stats; 645 struct netvsc_stats *rx_stats;
643 u16 q_idx = channel->offermsg.offer.sub_channel_index;
644
645 646
646 if (net->reg_state != NETREG_REGISTERED) 647 if (net->reg_state != NETREG_REGISTERED)
647 return NVSP_STAT_FAIL; 648 return NVSP_STAT_FAIL;
@@ -659,7 +660,8 @@ int netvsc_recv_callback(struct net_device *net,
659 net = vf_netdev; 660 net = vf_netdev;
660 661
661 /* Allocate a skb - TODO direct I/O to pages? */ 662 /* Allocate a skb - TODO direct I/O to pages? */
662 skb = netvsc_alloc_recv_skb(net, csum_info, vlan, data, len); 663 skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
664 csum_info, vlan, data, len);
663 if (unlikely(!skb)) { 665 if (unlikely(!skb)) {
664 ++net->stats.rx_dropped; 666 ++net->stats.rx_dropped;
665 rcu_read_unlock(); 667 rcu_read_unlock();
@@ -674,7 +676,7 @@ int netvsc_recv_callback(struct net_device *net,
674 * on the synthetic device because modifying the VF device 676 * on the synthetic device because modifying the VF device
675 * statistics will not work correctly. 677 * statistics will not work correctly.
676 */ 678 */
677 rx_stats = &net_device->chan_table[q_idx].rx_stats; 679 rx_stats = &nvchan->rx_stats;
678 u64_stats_update_begin(&rx_stats->syncp); 680 u64_stats_update_begin(&rx_stats->syncp);
679 rx_stats->packets++; 681 rx_stats->packets++;
680 rx_stats->bytes += len; 682 rx_stats->bytes += len;
@@ -685,12 +687,7 @@ int netvsc_recv_callback(struct net_device *net,
685 ++rx_stats->multicast; 687 ++rx_stats->multicast;
686 u64_stats_update_end(&rx_stats->syncp); 688 u64_stats_update_end(&rx_stats->syncp);
687 689
688 /* 690 napi_gro_receive(&nvchan->napi, skb);
689 * Pass the skb back up. Network stack will deallocate the skb when it
690 * is done.
691 * TODO - use NAPI?
692 */
693 netif_receive_skb(skb);
694 rcu_read_unlock(); 691 rcu_read_unlock();
695 692
696 return 0; 693 return 0;
@@ -787,18 +784,19 @@ static int netvsc_set_channels(struct net_device *net,
787 return ret; 784 return ret;
788} 785}
789 786
790static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd) 787static bool
788netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
791{ 789{
792 struct ethtool_cmd diff1 = *cmd; 790 struct ethtool_link_ksettings diff1 = *cmd;
793 struct ethtool_cmd diff2 = {}; 791 struct ethtool_link_ksettings diff2 = {};
794 792
795 ethtool_cmd_speed_set(&diff1, 0); 793 diff1.base.speed = 0;
796 diff1.duplex = 0; 794 diff1.base.duplex = 0;
797 /* advertising and cmd are usually set */ 795 /* advertising and cmd are usually set */
798 diff1.advertising = 0; 796 ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
799 diff1.cmd = 0; 797 diff1.base.cmd = 0;
800 /* We set port to PORT_OTHER */ 798 /* We set port to PORT_OTHER */
801 diff2.port = PORT_OTHER; 799 diff2.base.port = PORT_OTHER;
802 800
803 return !memcmp(&diff1, &diff2, sizeof(diff1)); 801 return !memcmp(&diff1, &diff2, sizeof(diff1));
804} 802}
@@ -811,30 +809,32 @@ static void netvsc_init_settings(struct net_device *dev)
811 ndc->duplex = DUPLEX_UNKNOWN; 809 ndc->duplex = DUPLEX_UNKNOWN;
812} 810}
813 811
814static int netvsc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 812static int netvsc_get_link_ksettings(struct net_device *dev,
813 struct ethtool_link_ksettings *cmd)
815{ 814{
816 struct net_device_context *ndc = netdev_priv(dev); 815 struct net_device_context *ndc = netdev_priv(dev);
817 816
818 ethtool_cmd_speed_set(cmd, ndc->speed); 817 cmd->base.speed = ndc->speed;
819 cmd->duplex = ndc->duplex; 818 cmd->base.duplex = ndc->duplex;
820 cmd->port = PORT_OTHER; 819 cmd->base.port = PORT_OTHER;
821 820
822 return 0; 821 return 0;
823} 822}
824 823
825static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 824static int netvsc_set_link_ksettings(struct net_device *dev,
825 const struct ethtool_link_ksettings *cmd)
826{ 826{
827 struct net_device_context *ndc = netdev_priv(dev); 827 struct net_device_context *ndc = netdev_priv(dev);
828 u32 speed; 828 u32 speed;
829 829
830 speed = ethtool_cmd_speed(cmd); 830 speed = cmd->base.speed;
831 if (!ethtool_validate_speed(speed) || 831 if (!ethtool_validate_speed(speed) ||
832 !ethtool_validate_duplex(cmd->duplex) || 832 !ethtool_validate_duplex(cmd->base.duplex) ||
833 !netvsc_validate_ethtool_ss_cmd(cmd)) 833 !netvsc_validate_ethtool_ss_cmd(cmd))
834 return -EINVAL; 834 return -EINVAL;
835 835
836 ndc->speed = speed; 836 ndc->speed = speed;
837 ndc->duplex = cmd->duplex; 837 ndc->duplex = cmd->base.duplex;
838 838
839 return 0; 839 return 0;
840} 840}
@@ -1168,13 +1168,13 @@ static const struct ethtool_ops ethtool_ops = {
1168 .get_channels = netvsc_get_channels, 1168 .get_channels = netvsc_get_channels,
1169 .set_channels = netvsc_set_channels, 1169 .set_channels = netvsc_set_channels,
1170 .get_ts_info = ethtool_op_get_ts_info, 1170 .get_ts_info = ethtool_op_get_ts_info,
1171 .get_settings = netvsc_get_settings,
1172 .set_settings = netvsc_set_settings,
1173 .get_rxnfc = netvsc_get_rxnfc, 1171 .get_rxnfc = netvsc_get_rxnfc,
1174 .get_rxfh_key_size = netvsc_get_rxfh_key_size, 1172 .get_rxfh_key_size = netvsc_get_rxfh_key_size,
1175 .get_rxfh_indir_size = netvsc_rss_indir_size, 1173 .get_rxfh_indir_size = netvsc_rss_indir_size,
1176 .get_rxfh = netvsc_get_rxfh, 1174 .get_rxfh = netvsc_get_rxfh,
1177 .set_rxfh = netvsc_set_rxfh, 1175 .set_rxfh = netvsc_set_rxfh,
1176 .get_link_ksettings = netvsc_get_link_ksettings,
1177 .set_link_ksettings = netvsc_set_link_ksettings,
1178}; 1178};
1179 1179
1180static const struct net_device_ops device_ops = { 1180static const struct net_device_ops device_ops = {
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 19356f56b7b1..382b9a62e3c4 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -996,21 +996,28 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
996 hv_get_drvdata(new_sc->primary_channel->device_obj); 996 hv_get_drvdata(new_sc->primary_channel->device_obj);
997 struct netvsc_device *nvscdev = net_device_to_netvsc_device(ndev); 997 struct netvsc_device *nvscdev = net_device_to_netvsc_device(ndev);
998 u16 chn_index = new_sc->offermsg.offer.sub_channel_index; 998 u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
999 int ret; 999 struct netvsc_channel *nvchan;
1000 unsigned long flags; 1000 unsigned long flags;
1001 int ret;
1001 1002
1002 if (chn_index >= nvscdev->num_chn) 1003 if (chn_index >= nvscdev->num_chn)
1003 return; 1004 return;
1004 1005
1005 nvscdev->chan_table[chn_index].mrc.buf 1006 nvchan = nvscdev->chan_table + chn_index;
1007 nvchan->mrc.buf
1006 = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data)); 1008 = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));
1007 1009
1010 if (!nvchan->mrc.buf)
1011 return;
1012
1008 ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE, 1013 ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
1009 nvscdev->ring_size * PAGE_SIZE, NULL, 0, 1014 nvscdev->ring_size * PAGE_SIZE, NULL, 0,
1010 netvsc_channel_cb, new_sc); 1015 netvsc_channel_cb, nvchan);
1011 1016
1012 if (ret == 0) 1017 if (ret == 0)
1013 nvscdev->chan_table[chn_index].channel = new_sc; 1018 nvchan->channel = new_sc;
1019
1020 napi_enable(&nvchan->napi);
1014 1021
1015 spin_lock_irqsave(&nvscdev->sc_lock, flags); 1022 spin_lock_irqsave(&nvscdev->sc_lock, flags);
1016 nvscdev->num_sc_offered--; 1023 nvscdev->num_sc_offered--;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index b23b71981fd5..224f65cb576b 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -13,7 +13,7 @@
13 * 13 *
14 * Alan Cox : Fixed oddments for NET3.014 14 * Alan Cox : Fixed oddments for NET3.014
15 * Alan Cox : Rejig for NET3.029 snap #3 15 * Alan Cox : Rejig for NET3.029 snap #3
16 * Alan Cox : Fixed NET3.029 bugs and sped up 16 * Alan Cox : Fixed NET3.029 bugs and sped up
17 * Larry McVoy : Tiny tweak to double performance 17 * Larry McVoy : Tiny tweak to double performance
18 * Alan Cox : Backed out LMV's tweak - the linux mm 18 * Alan Cox : Backed out LMV's tweak - the linux mm
19 * can't take it... 19 * can't take it...
@@ -41,7 +41,7 @@
41#include <linux/in.h> 41#include <linux/in.h>
42 42
43#include <linux/uaccess.h> 43#include <linux/uaccess.h>
44#include <asm/io.h> 44#include <linux/io.h>
45 45
46#include <linux/inet.h> 46#include <linux/inet.h>
47#include <linux/netdevice.h> 47#include <linux/netdevice.h>
@@ -55,6 +55,7 @@
55#include <linux/ip.h> 55#include <linux/ip.h>
56#include <linux/tcp.h> 56#include <linux/tcp.h>
57#include <linux/percpu.h> 57#include <linux/percpu.h>
58#include <linux/net_tstamp.h>
58#include <net/net_namespace.h> 59#include <net/net_namespace.h>
59#include <linux/u64_stats_sync.h> 60#include <linux/u64_stats_sync.h>
60 61
@@ -64,8 +65,7 @@ struct pcpu_lstats {
64 struct u64_stats_sync syncp; 65 struct u64_stats_sync syncp;
65}; 66};
66 67
67/* 68/* The higher levels take care of making this non-reentrant (it's
68 * The higher levels take care of making this non-reentrant (it's
69 * called with bh's disabled). 69 * called with bh's disabled).
70 */ 70 */
71static netdev_tx_t loopback_xmit(struct sk_buff *skb, 71static netdev_tx_t loopback_xmit(struct sk_buff *skb,
@@ -74,6 +74,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
74 struct pcpu_lstats *lb_stats; 74 struct pcpu_lstats *lb_stats;
75 int len; 75 int len;
76 76
77 skb_tx_timestamp(skb);
77 skb_orphan(skb); 78 skb_orphan(skb);
78 79
79 /* Before queueing this packet to netif_rx(), 80 /* Before queueing this packet to netif_rx(),
@@ -129,8 +130,21 @@ static u32 always_on(struct net_device *dev)
129 return 1; 130 return 1;
130} 131}
131 132
133static int loopback_get_ts_info(struct net_device *netdev,
134 struct ethtool_ts_info *ts_info)
135{
136 ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
137 SOF_TIMESTAMPING_RX_SOFTWARE |
138 SOF_TIMESTAMPING_SOFTWARE;
139
140 ts_info->phc_index = -1;
141
142 return 0;
143};
144
132static const struct ethtool_ops loopback_ethtool_ops = { 145static const struct ethtool_ops loopback_ethtool_ops = {
133 .get_link = always_on, 146 .get_link = always_on,
147 .get_ts_info = loopback_get_ts_info,
134}; 148};
135 149
136static int loopback_dev_init(struct net_device *dev) 150static int loopback_dev_init(struct net_device *dev)
@@ -149,14 +163,13 @@ static void loopback_dev_free(struct net_device *dev)
149} 163}
150 164
151static const struct net_device_ops loopback_ops = { 165static const struct net_device_ops loopback_ops = {
152 .ndo_init = loopback_dev_init, 166 .ndo_init = loopback_dev_init,
153 .ndo_start_xmit= loopback_xmit, 167 .ndo_start_xmit = loopback_xmit,
154 .ndo_get_stats64 = loopback_get_stats64, 168 .ndo_get_stats64 = loopback_get_stats64,
155 .ndo_set_mac_address = eth_mac_addr, 169 .ndo_set_mac_address = eth_mac_addr,
156}; 170};
157 171
158/* 172/* The loopback device is special. There is only one instance
159 * The loopback device is special. There is only one instance
160 * per network namespace. 173 * per network namespace.
161 */ 174 */
162static void loopback_setup(struct net_device *dev) 175static void loopback_setup(struct net_device *dev)
@@ -170,7 +183,7 @@ static void loopback_setup(struct net_device *dev)
170 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; 183 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
171 netif_keep_dst(dev); 184 netif_keep_dst(dev);
172 dev->hw_features = NETIF_F_GSO_SOFTWARE; 185 dev->hw_features = NETIF_F_GSO_SOFTWARE;
173 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST 186 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
174 | NETIF_F_GSO_SOFTWARE 187 | NETIF_F_GSO_SOFTWARE
175 | NETIF_F_HW_CSUM 188 | NETIF_F_HW_CSUM
176 | NETIF_F_RXCSUM 189 | NETIF_F_RXCSUM
@@ -206,7 +219,6 @@ static __net_init int loopback_net_init(struct net *net)
206 net->loopback_dev = dev; 219 net->loopback_dev = dev;
207 return 0; 220 return 0;
208 221
209
210out_free_netdev: 222out_free_netdev:
211 free_netdev(dev); 223 free_netdev(dev);
212out: 224out:
@@ -217,5 +229,5 @@ out:
217 229
218/* Registered in net/core/dev.c */ 230/* Registered in net/core/dev.c */
219struct pernet_operations __net_initdata loopback_net_ops = { 231struct pernet_operations __net_initdata loopback_net_ops = {
220 .init = loopback_net_init, 232 .init = loopback_net_init,
221}; 233};
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 36877ba65516..4daf3d0926a8 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -372,18 +372,19 @@ static void ntb_get_drvinfo(struct net_device *ndev,
372 strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info)); 372 strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
373} 373}
374 374
375static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 375static int ntb_get_link_ksettings(struct net_device *dev,
376 struct ethtool_link_ksettings *cmd)
376{ 377{
377 cmd->supported = SUPPORTED_Backplane; 378 ethtool_link_ksettings_zero_link_mode(cmd, supported);
378 cmd->advertising = ADVERTISED_Backplane; 379 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
379 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); 380 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
380 cmd->duplex = DUPLEX_FULL; 381 ethtool_link_ksettings_add_link_mode(cmd, advertising, Backplane);
381 cmd->port = PORT_OTHER; 382
382 cmd->phy_address = 0; 383 cmd->base.speed = SPEED_UNKNOWN;
383 cmd->transceiver = XCVR_DUMMY1; 384 cmd->base.duplex = DUPLEX_FULL;
384 cmd->autoneg = AUTONEG_ENABLE; 385 cmd->base.port = PORT_OTHER;
385 cmd->maxtxpkt = 0; 386 cmd->base.phy_address = 0;
386 cmd->maxrxpkt = 0; 387 cmd->base.autoneg = AUTONEG_ENABLE;
387 388
388 return 0; 389 return 0;
389} 390}
@@ -391,7 +392,7 @@ static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
391static const struct ethtool_ops ntb_ethtool_ops = { 392static const struct ethtool_ops ntb_ethtool_ops = {
392 .get_drvinfo = ntb_get_drvinfo, 393 .get_drvinfo = ntb_get_drvinfo,
393 .get_link = ethtool_op_get_link, 394 .get_link = ethtool_op_get_link,
394 .get_settings = ntb_get_settings, 395 .get_link_ksettings = ntb_get_link_ksettings,
395}; 396};
396 397
397static const struct ntb_queue_handlers ntb_netdev_handlers = { 398static const struct ntb_queue_handlers ntb_netdev_handlers = {
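The ntb_netdev hunk above is representative of the tree-wide ethtool conversion in this series: the legacy get_settings callback filling a struct ethtool_cmd is replaced by get_link_ksettings operating on struct ethtool_link_ksettings, where link modes become bitmaps handled through helper macros, scalar fields (speed, duplex, port, autoneg) move under cmd->base, and fields with no equivalent (transceiver, maxtxpkt, maxrxpkt) are dropped. A minimal sketch of the new-style callback for a hypothetical fixed-link driver; the foo_* names are illustrative, not from this patch set:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical driver with a fixed 1000 Mb/s full-duplex backplane link. */
static int foo_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	/* Link modes are bitmaps now: clear them, then set individual modes. */
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, Backplane);

	/* Scalar settings that used to live directly in ethtool_cmd
	 * now sit under cmd->base.
	 */
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_OTHER;
	cmd->base.autoneg = AUTONEG_DISABLE;

	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_link           = ethtool_op_get_link,
	.get_link_ksettings = foo_get_link_ksettings,
};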
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index ab9ad689617c..9656dbeb5de5 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2015 Broadcom Corporation 2 * Copyright (C) 2015-2017 Broadcom
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as 5 * modify it under the terms of the GNU General Public License as
@@ -221,9 +221,9 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
221 return val; 221 return val;
222 222
223 if (enable) 223 if (enable)
224 val |= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T); 224 val |= (MDIO_EEE_100TX | MDIO_EEE_1000T);
225 else 225 else
226 val &= ~(MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T); 226 val &= ~(MDIO_EEE_100TX | MDIO_EEE_1000T);
227 227
228 phy_write_mmd_indirect(phydev, BCM_CL45VEN_EEE_ADV, 228 phy_write_mmd_indirect(phydev, BCM_CL45VEN_EEE_ADV,
229 MDIO_MMD_AN, (u32)val); 229 MDIO_MMD_AN, (u32)val);
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index d1c2614dad3a..caa9f6e17f34 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Broadcom BCM7xxx internal transceivers support. 2 * Broadcom BCM7xxx internal transceivers support.
3 * 3 *
4 * Copyright (C) 2014, Broadcom Corporation 4 * Copyright (C) 2014-2017 Broadcom
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
@@ -19,7 +19,7 @@
19 19
20/* Broadcom BCM7xxx internal PHY registers */ 20/* Broadcom BCM7xxx internal PHY registers */
21 21
22/* 40nm only register definitions */ 22/* EPHY only register definitions */
23#define MII_BCM7XXX_100TX_AUX_CTL 0x10 23#define MII_BCM7XXX_100TX_AUX_CTL 0x10
24#define MII_BCM7XXX_100TX_FALSE_CAR 0x13 24#define MII_BCM7XXX_100TX_FALSE_CAR 0x13
25#define MII_BCM7XXX_100TX_DISC 0x14 25#define MII_BCM7XXX_100TX_DISC 0x14
@@ -27,6 +27,19 @@
27#define MII_BCM7XXX_64CLK_MDIO BIT(12) 27#define MII_BCM7XXX_64CLK_MDIO BIT(12)
28#define MII_BCM7XXX_TEST 0x1f 28#define MII_BCM7XXX_TEST 0x1f
29#define MII_BCM7XXX_SHD_MODE_2 BIT(2) 29#define MII_BCM7XXX_SHD_MODE_2 BIT(2)
30#define MII_BCM7XXX_SHD_2_ADDR_CTRL 0xe
31#define MII_BCM7XXX_SHD_2_CTRL_STAT 0xf
32#define MII_BCM7XXX_SHD_2_BIAS_TRIM 0x1a
33#define MII_BCM7XXX_SHD_3_AN_EEE_ADV 0x3
34#define MII_BCM7XXX_SHD_3_PCS_CTRL_2 0x6
35#define MII_BCM7XXX_PCS_CTRL_2_DEF 0x4400
36#define MII_BCM7XXX_SHD_3_AN_STAT 0xb
37#define MII_BCM7XXX_AN_NULL_MSG_EN BIT(0)
38#define MII_BCM7XXX_AN_EEE_EN BIT(1)
39#define MII_BCM7XXX_SHD_3_EEE_THRESH 0xe
40#define MII_BCM7XXX_EEE_THRESH_DEF 0x50
41#define MII_BCM7XXX_SHD_3_TL4 0x23
42#define MII_BCM7XXX_TL4_RST_MSK (BIT(2) | BIT(1))
30 43
31/* 28nm only register definitions */ 44/* 28nm only register definitions */
32#define MISC_ADDR(base, channel) base, channel 45#define MISC_ADDR(base, channel) base, channel
@@ -286,6 +299,181 @@ static int phy_set_clr_bits(struct phy_device *dev, int location,
286 return v; 299 return v;
287} 300}
288 301
302static int bcm7xxx_28nm_ephy_01_afe_config_init(struct phy_device *phydev)
303{
304 int ret;
305
306 /* set shadow mode 2 */
307 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
308 MII_BCM7XXX_SHD_MODE_2, 0);
309 if (ret < 0)
310 return ret;
311
312 /* Set current trim values INT_trim = -1, Ext_trim =0 */
313 ret = phy_write(phydev, MII_BCM7XXX_SHD_2_BIAS_TRIM, 0x3BE0);
314 if (ret < 0)
315 goto reset_shadow_mode;
316
317 /* Cal reset */
318 ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
319 MII_BCM7XXX_SHD_3_TL4);
320 if (ret < 0)
321 goto reset_shadow_mode;
322 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
323 MII_BCM7XXX_TL4_RST_MSK, 0);
324 if (ret < 0)
325 goto reset_shadow_mode;
326
327 /* Cal reset disable */
328 ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
329 MII_BCM7XXX_SHD_3_TL4);
330 if (ret < 0)
331 goto reset_shadow_mode;
332 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
333 0, MII_BCM7XXX_TL4_RST_MSK);
334 if (ret < 0)
335 goto reset_shadow_mode;
336
337reset_shadow_mode:
338 /* reset shadow mode 2 */
339 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
340 MII_BCM7XXX_SHD_MODE_2);
341 if (ret < 0)
342 return ret;
343
344 return 0;
345}
346
347/* The 28nm EPHY does not support Clause 45 (MMD) used by bcm-phy-lib */
348static int bcm7xxx_28nm_ephy_apd_enable(struct phy_device *phydev)
349{
350 int ret;
351
352 /* set shadow mode 1 */
353 ret = phy_set_clr_bits(phydev, MII_BRCM_FET_BRCMTEST,
354 MII_BRCM_FET_BT_SRE, 0);
355 if (ret < 0)
356 return ret;
357
358 /* Enable auto-power down */
359 ret = phy_set_clr_bits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2,
360 MII_BRCM_FET_SHDW_AS2_APDE, 0);
361 if (ret < 0)
362 return ret;
363
364 /* reset shadow mode 1 */
365 ret = phy_set_clr_bits(phydev, MII_BRCM_FET_BRCMTEST, 0,
366 MII_BRCM_FET_BT_SRE);
367 if (ret < 0)
368 return ret;
369
370 return 0;
371}
372
373static int bcm7xxx_28nm_ephy_eee_enable(struct phy_device *phydev)
374{
375 int ret;
376
377 /* set shadow mode 2 */
378 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
379 MII_BCM7XXX_SHD_MODE_2, 0);
380 if (ret < 0)
381 return ret;
382
383 /* Advertise supported modes */
384 ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
385 MII_BCM7XXX_SHD_3_AN_EEE_ADV);
386 if (ret < 0)
387 goto reset_shadow_mode;
388 ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
389 MDIO_EEE_100TX);
390 if (ret < 0)
391 goto reset_shadow_mode;
392
393 /* Restore Defaults */
394 ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
395 MII_BCM7XXX_SHD_3_PCS_CTRL_2);
396 if (ret < 0)
397 goto reset_shadow_mode;
398 ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
399 MII_BCM7XXX_PCS_CTRL_2_DEF);
400 if (ret < 0)
401 goto reset_shadow_mode;
402
403 ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
404 MII_BCM7XXX_SHD_3_EEE_THRESH);
405 if (ret < 0)
406 goto reset_shadow_mode;
407 ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
408 MII_BCM7XXX_EEE_THRESH_DEF);
409 if (ret < 0)
410 goto reset_shadow_mode;
411
412 /* Enable EEE autonegotiation */
413 ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
414 MII_BCM7XXX_SHD_3_AN_STAT);
415 if (ret < 0)
416 goto reset_shadow_mode;
417 ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
418 (MII_BCM7XXX_AN_NULL_MSG_EN | MII_BCM7XXX_AN_EEE_EN));
419 if (ret < 0)
420 goto reset_shadow_mode;
421
422reset_shadow_mode:
423 /* reset shadow mode 2 */
424 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
425 MII_BCM7XXX_SHD_MODE_2);
426 if (ret < 0)
427 return ret;
428
429 /* Restart autoneg */
430 phy_write(phydev, MII_BMCR,
431 (BMCR_SPEED100 | BMCR_ANENABLE | BMCR_ANRESTART));
432
433 return 0;
434}
435
436static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev)
437{
438 u8 rev = phydev->phy_id & ~phydev->drv->phy_id_mask;
439 int ret = 0;
440
441 pr_info_once("%s: %s PHY revision: 0x%02x\n",
442 phydev_name(phydev), phydev->drv->name, rev);
443
444 /* Dummy read to a register to work around a possible issue upon reset
445 * where the internal inverter may not allow the first MDIO transaction
446 * to pass the MDIO management controller and make us return 0xffff for
447 * such reads.
448 */
449 phy_read(phydev, MII_BMSR);
450
451 /* Apply AFE software work-around if necessary */
452 if (rev == 0x01) {
453 ret = bcm7xxx_28nm_ephy_01_afe_config_init(phydev);
454 if (ret)
455 return ret;
456 }
457
458 ret = bcm7xxx_28nm_ephy_eee_enable(phydev);
459 if (ret)
460 return ret;
461
462 return bcm7xxx_28nm_ephy_apd_enable(phydev);
463}
464
465static int bcm7xxx_28nm_ephy_resume(struct phy_device *phydev)
466{
467 int ret;
468
469 /* Re-apply workarounds when coming out of suspend/resume */
470 ret = bcm7xxx_28nm_ephy_config_init(phydev);
471 if (ret)
472 return ret;
473
474 return genphy_config_aneg(phydev);
475}
476
289static int bcm7xxx_config_init(struct phy_device *phydev) 477static int bcm7xxx_config_init(struct phy_device *phydev)
290{ 478{
291 int ret; 479 int ret;
@@ -434,6 +622,23 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
434 .probe = bcm7xxx_28nm_probe, \ 622 .probe = bcm7xxx_28nm_probe, \
435} 623}
436 624
625#define BCM7XXX_28NM_EPHY(_oui, _name) \
626{ \
627 .phy_id = (_oui), \
628 .phy_id_mask = 0xfffffff0, \
629 .name = _name, \
630 .features = PHY_BASIC_FEATURES, \
631 .flags = PHY_IS_INTERNAL, \
632 .config_init = bcm7xxx_28nm_ephy_config_init, \
633 .config_aneg = genphy_config_aneg, \
634 .read_status = genphy_read_status, \
635 .resume = bcm7xxx_28nm_ephy_resume, \
636 .get_sset_count = bcm_phy_get_sset_count, \
637 .get_strings = bcm_phy_get_strings, \
638 .get_stats = bcm7xxx_28nm_get_phy_stats, \
639 .probe = bcm7xxx_28nm_probe, \
640}
641
437#define BCM7XXX_40NM_EPHY(_oui, _name) \ 642#define BCM7XXX_40NM_EPHY(_oui, _name) \
438{ \ 643{ \
439 .phy_id = (_oui), \ 644 .phy_id = (_oui), \
@@ -450,6 +655,9 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
450 655
451static struct phy_driver bcm7xxx_driver[] = { 656static struct phy_driver bcm7xxx_driver[] = {
452 BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"), 657 BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
658 BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"),
659 BCM7XXX_28NM_EPHY(PHY_ID_BCM7268, "Broadcom BCM7268"),
660 BCM7XXX_28NM_EPHY(PHY_ID_BCM7271, "Broadcom BCM7271"),
453 BCM7XXX_28NM_GPHY(PHY_ID_BCM7278, "Broadcom BCM7278"), 661 BCM7XXX_28NM_GPHY(PHY_ID_BCM7278, "Broadcom BCM7278"),
454 BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"), 662 BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"),
455 BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"), 663 BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"),
@@ -466,6 +674,9 @@ static struct phy_driver bcm7xxx_driver[] = {
466 674
467static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { 675static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
468 { PHY_ID_BCM7250, 0xfffffff0, }, 676 { PHY_ID_BCM7250, 0xfffffff0, },
677 { PHY_ID_BCM7260, 0xfffffff0, },
678 { PHY_ID_BCM7268, 0xfffffff0, },
679 { PHY_ID_BCM7271, 0xfffffff0, },
469 { PHY_ID_BCM7278, 0xfffffff0, }, 680 { PHY_ID_BCM7278, 0xfffffff0, },
470 { PHY_ID_BCM7364, 0xfffffff0, }, 681 { PHY_ID_BCM7364, 0xfffffff0, },
471 { PHY_ID_BCM7366, 0xfffffff0, }, 682 { PHY_ID_BCM7366, 0xfffffff0, },
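All of the new 28nm EPHY helpers above follow the same indirect register access pattern: enter shadow mode 2 through the test register, select a "shadow 3" register via the address-control register, access it through the control/status register, then leave shadow mode again. A condensed, hypothetical helper showing that sequence with the register definitions added by this patch; the actual patch open-codes the sequence in each function and checks every intermediate return value:

/* Sketch only: write one "shadow 3" register on the 28nm EPHY, assuming the
 * bcm7xxx.c context (the local phy_set_clr_bits() helper and the
 * MII_BCM7XXX_* definitions above).
 */
static int bcm7xxx_28nm_ephy_shd3_write(struct phy_device *phydev,
					u16 shd3_reg, u16 val)
{
	int ret;

	/* Enter shadow mode 2 */
	ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
			       MII_BCM7XXX_SHD_MODE_2, 0);
	if (ret < 0)
		return ret;

	/* Select the target shadow-3 register ... */
	ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd3_reg);
	if (ret < 0)
		goto out;

	/* ... and write its value through the control/status register */
	ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, val);

out:
	/* Leave shadow mode 2 whether or not the accesses above succeeded */
	phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0, MII_BCM7XXX_SHD_MODE_2);
	return ret < 0 ? ret : 0;
}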
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 8c73b2e771dd..34395230ce70 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Broadcom UniMAC MDIO bus controller driver 2 * Broadcom UniMAC MDIO bus controller driver
3 * 3 *
4 * Copyright (C) 2014, Broadcom Corporation 4 * Copyright (C) 2014-2017 Broadcom
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -228,6 +228,7 @@ static int unimac_mdio_remove(struct platform_device *pdev)
228} 228}
229 229
230static const struct of_device_id unimac_mdio_ids[] = { 230static const struct of_device_id unimac_mdio_ids[] = {
231 { .compatible = "brcm,genet-mdio-v5", },
231 { .compatible = "brcm,genet-mdio-v4", }, 232 { .compatible = "brcm,genet-mdio-v4", },
232 { .compatible = "brcm,genet-mdio-v3", }, 233 { .compatible = "brcm,genet-mdio-v3", },
233 { .compatible = "brcm,genet-mdio-v2", }, 234 { .compatible = "brcm,genet-mdio-v2", },
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index f095051beb54..3e2ac07b6e37 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -229,7 +229,7 @@ static int xgene_xfi_mdio_write(struct mii_bus *bus, int phy_id,
229 229
230 val = SET_VAL(HSTPHYADX, phy_id) | SET_VAL(HSTREGADX, reg) | 230 val = SET_VAL(HSTPHYADX, phy_id) | SET_VAL(HSTREGADX, reg) |
231 SET_VAL(HSTMIIMWRDAT, data); 231 SET_VAL(HSTMIIMWRDAT, data);
232 xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, data); 232 xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, val);
233 233
234 val = HSTLDCMD | SET_VAL(HSTMIIMCMD, MIIM_CMD_LEGACY_WRITE); 234 val = HSTLDCMD | SET_VAL(HSTMIIMCMD, MIIM_CMD_LEGACY_WRITE);
235 xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, val); 235 xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, val);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 34cc3c590aa5..16dfb4cb1980 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2442,18 +2442,16 @@ static struct miscdevice tun_miscdev = {
2442 2442
2443/* ethtool interface */ 2443/* ethtool interface */
2444 2444
2445static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2445static int tun_get_link_ksettings(struct net_device *dev,
2446{ 2446 struct ethtool_link_ksettings *cmd)
2447 cmd->supported = 0; 2447{
2448 cmd->advertising = 0; 2448 ethtool_link_ksettings_zero_link_mode(cmd, supported);
2449 ethtool_cmd_speed_set(cmd, SPEED_10); 2449 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
2450 cmd->duplex = DUPLEX_FULL; 2450 cmd->base.speed = SPEED_10;
2451 cmd->port = PORT_TP; 2451 cmd->base.duplex = DUPLEX_FULL;
2452 cmd->phy_address = 0; 2452 cmd->base.port = PORT_TP;
2453 cmd->transceiver = XCVR_INTERNAL; 2453 cmd->base.phy_address = 0;
2454 cmd->autoneg = AUTONEG_DISABLE; 2454 cmd->base.autoneg = AUTONEG_DISABLE;
2455 cmd->maxtxpkt = 0;
2456 cmd->maxrxpkt = 0;
2457 return 0; 2455 return 0;
2458} 2456}
2459 2457
@@ -2516,7 +2514,6 @@ static int tun_set_coalesce(struct net_device *dev,
2516} 2514}
2517 2515
2518static const struct ethtool_ops tun_ethtool_ops = { 2516static const struct ethtool_ops tun_ethtool_ops = {
2519 .get_settings = tun_get_settings,
2520 .get_drvinfo = tun_get_drvinfo, 2517 .get_drvinfo = tun_get_drvinfo,
2521 .get_msglevel = tun_get_msglevel, 2518 .get_msglevel = tun_get_msglevel,
2522 .set_msglevel = tun_set_msglevel, 2519 .set_msglevel = tun_set_msglevel,
@@ -2524,6 +2521,7 @@ static const struct ethtool_ops tun_ethtool_ops = {
2524 .get_ts_info = ethtool_op_get_ts_info, 2521 .get_ts_info = ethtool_op_get_ts_info,
2525 .get_coalesce = tun_get_coalesce, 2522 .get_coalesce = tun_get_coalesce,
2526 .set_coalesce = tun_set_coalesce, 2523 .set_coalesce = tun_set_coalesce,
2524 .get_link_ksettings = tun_get_link_ksettings,
2527}; 2525};
2528 2526
2529static int tun_queue_resize(struct tun_struct *tun) 2527static int tun_queue_resize(struct tun_struct *tun)
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 0dd510604118..38456d0bcfd2 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -136,9 +136,9 @@ static const struct ethtool_ops ax88172_ethtool_ops = {
136 .get_eeprom_len = asix_get_eeprom_len, 136 .get_eeprom_len = asix_get_eeprom_len,
137 .get_eeprom = asix_get_eeprom, 137 .get_eeprom = asix_get_eeprom,
138 .set_eeprom = asix_set_eeprom, 138 .set_eeprom = asix_set_eeprom,
139 .get_settings = usbnet_get_settings,
140 .set_settings = usbnet_set_settings,
141 .nway_reset = usbnet_nway_reset, 139 .nway_reset = usbnet_nway_reset,
140 .get_link_ksettings = usbnet_get_link_ksettings,
141 .set_link_ksettings = usbnet_set_link_ksettings,
142}; 142};
143 143
144static void ax88172_set_multicast(struct net_device *net) 144static void ax88172_set_multicast(struct net_device *net)
@@ -301,9 +301,9 @@ static const struct ethtool_ops ax88772_ethtool_ops = {
301 .get_eeprom_len = asix_get_eeprom_len, 301 .get_eeprom_len = asix_get_eeprom_len,
302 .get_eeprom = asix_get_eeprom, 302 .get_eeprom = asix_get_eeprom,
303 .set_eeprom = asix_set_eeprom, 303 .set_eeprom = asix_set_eeprom,
304 .get_settings = usbnet_get_settings,
305 .set_settings = usbnet_set_settings,
306 .nway_reset = usbnet_nway_reset, 304 .nway_reset = usbnet_nway_reset,
305 .get_link_ksettings = usbnet_get_link_ksettings,
306 .set_link_ksettings = usbnet_set_link_ksettings,
307}; 307};
308 308
309static int ax88772_link_reset(struct usbnet *dev) 309static int ax88772_link_reset(struct usbnet *dev)
@@ -775,9 +775,9 @@ static const struct ethtool_ops ax88178_ethtool_ops = {
775 .get_eeprom_len = asix_get_eeprom_len, 775 .get_eeprom_len = asix_get_eeprom_len,
776 .get_eeprom = asix_get_eeprom, 776 .get_eeprom = asix_get_eeprom,
777 .set_eeprom = asix_set_eeprom, 777 .set_eeprom = asix_set_eeprom,
778 .get_settings = usbnet_get_settings,
779 .set_settings = usbnet_set_settings,
780 .nway_reset = usbnet_nway_reset, 778 .nway_reset = usbnet_nway_reset,
779 .get_link_ksettings = usbnet_get_link_ksettings,
780 .set_link_ksettings = usbnet_set_link_ksettings,
781}; 781};
782 782
783static int marvell_phy_init(struct usbnet *dev) 783static int marvell_phy_init(struct usbnet *dev)
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index a3a7db0702d8..4a0ae7ce83f6 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -620,16 +620,18 @@ ax88179_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
620 return 0; 620 return 0;
621} 621}
622 622
623static int ax88179_get_settings(struct net_device *net, struct ethtool_cmd *cmd) 623static int ax88179_get_link_ksettings(struct net_device *net,
624 struct ethtool_link_ksettings *cmd)
624{ 625{
625 struct usbnet *dev = netdev_priv(net); 626 struct usbnet *dev = netdev_priv(net);
626 return mii_ethtool_gset(&dev->mii, cmd); 627 return mii_ethtool_get_link_ksettings(&dev->mii, cmd);
627} 628}
628 629
629static int ax88179_set_settings(struct net_device *net, struct ethtool_cmd *cmd) 630static int ax88179_set_link_ksettings(struct net_device *net,
631 const struct ethtool_link_ksettings *cmd)
630{ 632{
631 struct usbnet *dev = netdev_priv(net); 633 struct usbnet *dev = netdev_priv(net);
632 return mii_ethtool_sset(&dev->mii, cmd); 634 return mii_ethtool_set_link_ksettings(&dev->mii, cmd);
633} 635}
634 636
635static int 637static int
@@ -826,11 +828,11 @@ static const struct ethtool_ops ax88179_ethtool_ops = {
826 .set_wol = ax88179_set_wol, 828 .set_wol = ax88179_set_wol,
827 .get_eeprom_len = ax88179_get_eeprom_len, 829 .get_eeprom_len = ax88179_get_eeprom_len,
828 .get_eeprom = ax88179_get_eeprom, 830 .get_eeprom = ax88179_get_eeprom,
829 .get_settings = ax88179_get_settings,
830 .set_settings = ax88179_set_settings,
831 .get_eee = ax88179_get_eee, 831 .get_eee = ax88179_get_eee,
832 .set_eee = ax88179_set_eee, 832 .set_eee = ax88179_set_eee,
833 .nway_reset = usbnet_nway_reset, 833 .nway_reset = usbnet_nway_reset,
834 .get_link_ksettings = ax88179_get_link_ksettings,
835 .set_link_ksettings = ax88179_set_link_ksettings,
834}; 836};
835 837
836static void ax88179_set_multicast(struct net_device *net) 838static void ax88179_set_multicast(struct net_device *net)
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 0acc9b640419..fce92f0e5abd 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -688,29 +688,34 @@ static void catc_get_drvinfo(struct net_device *dev,
688 usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info)); 688 usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info));
689} 689}
690 690
691static int catc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 691static int catc_get_link_ksettings(struct net_device *dev,
692 struct ethtool_link_ksettings *cmd)
692{ 693{
693 struct catc *catc = netdev_priv(dev); 694 struct catc *catc = netdev_priv(dev);
694 if (!catc->is_f5u011) 695 if (!catc->is_f5u011)
695 return -EOPNOTSUPP; 696 return -EOPNOTSUPP;
696 697
697 cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_TP; 698 ethtool_link_ksettings_zero_link_mode(cmd, supported);
698 cmd->advertising = ADVERTISED_10baseT_Half | ADVERTISED_TP; 699 ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
699 ethtool_cmd_speed_set(cmd, SPEED_10); 700 ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
700 cmd->duplex = DUPLEX_HALF; 701
701 cmd->port = PORT_TP; 702 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
702 cmd->phy_address = 0; 703 ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
703 cmd->transceiver = XCVR_INTERNAL; 704 ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
704 cmd->autoneg = AUTONEG_DISABLE; 705
705 cmd->maxtxpkt = 1; 706 cmd->base.speed = SPEED_10;
706 cmd->maxrxpkt = 1; 707 cmd->base.duplex = DUPLEX_HALF;
708 cmd->base.port = PORT_TP;
709 cmd->base.phy_address = 0;
710 cmd->base.autoneg = AUTONEG_DISABLE;
711
707 return 0; 712 return 0;
708} 713}
709 714
710static const struct ethtool_ops ops = { 715static const struct ethtool_ops ops = {
711 .get_drvinfo = catc_get_drvinfo, 716 .get_drvinfo = catc_get_drvinfo,
712 .get_settings = catc_get_settings, 717 .get_link = ethtool_op_get_link,
713 .get_link = ethtool_op_get_link 718 .get_link_ksettings = catc_get_link_ksettings,
714}; 719};
715 720
716/* 721/*
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index f317984f7536..b6c1d3abad96 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -131,8 +131,6 @@ static void cdc_ncm_get_strings(struct net_device __always_unused *netdev, u32 s
131static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx); 131static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx);
132 132
133static const struct ethtool_ops cdc_ncm_ethtool_ops = { 133static const struct ethtool_ops cdc_ncm_ethtool_ops = {
134 .get_settings = usbnet_get_settings,
135 .set_settings = usbnet_set_settings,
136 .get_link = usbnet_get_link, 134 .get_link = usbnet_get_link,
137 .nway_reset = usbnet_nway_reset, 135 .nway_reset = usbnet_nway_reset,
138 .get_drvinfo = usbnet_get_drvinfo, 136 .get_drvinfo = usbnet_get_drvinfo,
@@ -142,6 +140,8 @@ static const struct ethtool_ops cdc_ncm_ethtool_ops = {
142 .get_sset_count = cdc_ncm_get_sset_count, 140 .get_sset_count = cdc_ncm_get_sset_count,
143 .get_strings = cdc_ncm_get_strings, 141 .get_strings = cdc_ncm_get_strings,
144 .get_ethtool_stats = cdc_ncm_get_ethtool_stats, 142 .get_ethtool_stats = cdc_ncm_get_ethtool_stats,
143 .get_link_ksettings = usbnet_get_link_ksettings,
144 .set_link_ksettings = usbnet_set_link_ksettings,
145}; 145};
146 146
147static u32 cdc_ncm_check_rx_max(struct usbnet *dev, u32 new_rx) 147static u32 cdc_ncm_check_rx_max(struct usbnet *dev, u32 new_rx)
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 0b4bdd39106b..fea1b64ca26a 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -281,9 +281,9 @@ static const struct ethtool_ops dm9601_ethtool_ops = {
281 .set_msglevel = usbnet_set_msglevel, 281 .set_msglevel = usbnet_set_msglevel,
282 .get_eeprom_len = dm9601_get_eeprom_len, 282 .get_eeprom_len = dm9601_get_eeprom_len,
283 .get_eeprom = dm9601_get_eeprom, 283 .get_eeprom = dm9601_get_eeprom,
284 .get_settings = usbnet_get_settings,
285 .set_settings = usbnet_set_settings,
286 .nway_reset = usbnet_nway_reset, 284 .nway_reset = usbnet_nway_reset,
285 .get_link_ksettings = usbnet_get_link_ksettings,
286 .set_link_ksettings = usbnet_set_link_ksettings,
287}; 287};
288 288
289static void dm9601_set_multicast(struct net_device *net) 289static void dm9601_set_multicast(struct net_device *net)
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 4f345bd4e6e2..5771ff261fa8 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -464,9 +464,9 @@ static const struct ethtool_ops mcs7830_ethtool_ops = {
464 .get_link = usbnet_get_link, 464 .get_link = usbnet_get_link,
465 .get_msglevel = usbnet_get_msglevel, 465 .get_msglevel = usbnet_get_msglevel,
466 .set_msglevel = usbnet_set_msglevel, 466 .set_msglevel = usbnet_set_msglevel,
467 .get_settings = usbnet_get_settings,
468 .set_settings = usbnet_set_settings,
469 .nway_reset = usbnet_nway_reset, 467 .nway_reset = usbnet_nway_reset,
468 .get_link_ksettings = usbnet_get_link_ksettings,
469 .set_link_ksettings = usbnet_set_link_ksettings,
470}; 470};
471 471
472static const struct net_device_ops mcs7830_netdev_ops = { 472static const struct net_device_ops mcs7830_netdev_ops = {
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 36674484c6fb..321e059e13ae 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -953,20 +953,22 @@ static inline void pegasus_reset_wol(struct net_device *dev)
953} 953}
954 954
955static int 955static int
956pegasus_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 956pegasus_get_link_ksettings(struct net_device *dev,
957 struct ethtool_link_ksettings *ecmd)
957{ 958{
958 pegasus_t *pegasus; 959 pegasus_t *pegasus;
959 960
960 pegasus = netdev_priv(dev); 961 pegasus = netdev_priv(dev);
961 mii_ethtool_gset(&pegasus->mii, ecmd); 962 mii_ethtool_get_link_ksettings(&pegasus->mii, ecmd);
962 return 0; 963 return 0;
963} 964}
964 965
965static int 966static int
966pegasus_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 967pegasus_set_link_ksettings(struct net_device *dev,
968 const struct ethtool_link_ksettings *ecmd)
967{ 969{
968 pegasus_t *pegasus = netdev_priv(dev); 970 pegasus_t *pegasus = netdev_priv(dev);
969 return mii_ethtool_sset(&pegasus->mii, ecmd); 971 return mii_ethtool_set_link_ksettings(&pegasus->mii, ecmd);
970} 972}
971 973
972static int pegasus_nway_reset(struct net_device *dev) 974static int pegasus_nway_reset(struct net_device *dev)
@@ -995,14 +997,14 @@ static void pegasus_set_msglevel(struct net_device *dev, u32 v)
995 997
996static const struct ethtool_ops ops = { 998static const struct ethtool_ops ops = {
997 .get_drvinfo = pegasus_get_drvinfo, 999 .get_drvinfo = pegasus_get_drvinfo,
998 .get_settings = pegasus_get_settings,
999 .set_settings = pegasus_set_settings,
1000 .nway_reset = pegasus_nway_reset, 1000 .nway_reset = pegasus_nway_reset,
1001 .get_link = pegasus_get_link, 1001 .get_link = pegasus_get_link,
1002 .get_msglevel = pegasus_get_msglevel, 1002 .get_msglevel = pegasus_get_msglevel,
1003 .set_msglevel = pegasus_set_msglevel, 1003 .set_msglevel = pegasus_set_msglevel,
1004 .get_wol = pegasus_get_wol, 1004 .get_wol = pegasus_get_wol,
1005 .set_wol = pegasus_set_wol, 1005 .set_wol = pegasus_set_wol,
1006 .get_link_ksettings = pegasus_get_link_ksettings,
1007 .set_link_ksettings = pegasus_set_link_ksettings,
1006}; 1008};
1007 1009
1008static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd) 1010static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 986243c932cc..3262a326aae6 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1761,6 +1761,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
1761 unsigned long flags; 1761 unsigned long flags;
1762 struct list_head *cursor, *next, rx_queue; 1762 struct list_head *cursor, *next, rx_queue;
1763 int ret = 0, work_done = 0; 1763 int ret = 0, work_done = 0;
1764 struct napi_struct *napi = &tp->napi;
1764 1765
1765 if (!skb_queue_empty(&tp->rx_queue)) { 1766 if (!skb_queue_empty(&tp->rx_queue)) {
1766 while (work_done < budget) { 1767 while (work_done < budget) {
@@ -1773,7 +1774,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
1773 break; 1774 break;
1774 1775
1775 pkt_len = skb->len; 1776 pkt_len = skb->len;
1776 napi_gro_receive(&tp->napi, skb); 1777 napi_gro_receive(napi, skb);
1777 work_done++; 1778 work_done++;
1778 stats->rx_packets++; 1779 stats->rx_packets++;
1779 stats->rx_bytes += pkt_len; 1780 stats->rx_bytes += pkt_len;
@@ -1823,7 +1824,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
1823 pkt_len -= CRC_SIZE; 1824 pkt_len -= CRC_SIZE;
1824 rx_data += sizeof(struct rx_desc); 1825 rx_data += sizeof(struct rx_desc);
1825 1826
1826 skb = napi_alloc_skb(&tp->napi, pkt_len); 1827 skb = napi_alloc_skb(napi, pkt_len);
1827 if (!skb) { 1828 if (!skb) {
1828 stats->rx_dropped++; 1829 stats->rx_dropped++;
1829 goto find_next_rx; 1830 goto find_next_rx;
@@ -1835,7 +1836,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
1835 skb->protocol = eth_type_trans(skb, netdev); 1836 skb->protocol = eth_type_trans(skb, netdev);
1836 rtl_rx_vlan_tag(rx_desc, skb); 1837 rtl_rx_vlan_tag(rx_desc, skb);
1837 if (work_done < budget) { 1838 if (work_done < budget) {
1838 napi_gro_receive(&tp->napi, skb); 1839 napi_gro_receive(napi, skb);
1839 work_done++; 1840 work_done++;
1840 stats->rx_packets++; 1841 stats->rx_packets++;
1841 stats->rx_bytes += pkt_len; 1842 stats->rx_bytes += pkt_len;
@@ -3150,6 +3151,7 @@ static bool rtl8153_in_nway(struct r8152 *tp)
3150static void set_carrier(struct r8152 *tp) 3151static void set_carrier(struct r8152 *tp)
3151{ 3152{
3152 struct net_device *netdev = tp->netdev; 3153 struct net_device *netdev = tp->netdev;
3154 struct napi_struct *napi = &tp->napi;
3153 u8 speed; 3155 u8 speed;
3154 3156
3155 speed = rtl8152_get_speed(tp); 3157 speed = rtl8152_get_speed(tp);
@@ -3159,7 +3161,7 @@ static void set_carrier(struct r8152 *tp)
3159 tp->rtl_ops.enable(tp); 3161 tp->rtl_ops.enable(tp);
3160 set_bit(RTL8152_SET_RX_MODE, &tp->flags); 3162 set_bit(RTL8152_SET_RX_MODE, &tp->flags);
3161 netif_stop_queue(netdev); 3163 netif_stop_queue(netdev);
3162 napi_disable(&tp->napi); 3164 napi_disable(napi);
3163 netif_carrier_on(netdev); 3165 netif_carrier_on(netdev);
3164 rtl_start_rx(tp); 3166 rtl_start_rx(tp);
3165 napi_enable(&tp->napi); 3167 napi_enable(&tp->napi);
@@ -3169,9 +3171,9 @@ static void set_carrier(struct r8152 *tp)
3169 } else { 3171 } else {
3170 if (netif_carrier_ok(netdev)) { 3172 if (netif_carrier_ok(netdev)) {
3171 netif_carrier_off(netdev); 3173 netif_carrier_off(netdev);
3172 napi_disable(&tp->napi); 3174 napi_disable(napi);
3173 tp->rtl_ops.disable(tp); 3175 tp->rtl_ops.disable(tp);
3174 napi_enable(&tp->napi); 3176 napi_enable(napi);
3175 netif_info(tp, link, netdev, "carrier off\n"); 3177 netif_info(tp, link, netdev, "carrier off\n");
3176 } 3178 }
3177 } 3179 }
@@ -3633,11 +3635,13 @@ static int rtl8152_runtime_suspend(struct r8152 *tp)
3633 tp->rtl_ops.autosuspend_en(tp, true); 3635 tp->rtl_ops.autosuspend_en(tp, true);
3634 3636
3635 if (netif_carrier_ok(netdev)) { 3637 if (netif_carrier_ok(netdev)) {
3636 napi_disable(&tp->napi); 3638 struct napi_struct *napi = &tp->napi;
3639
3640 napi_disable(napi);
3637 rtl_stop_rx(tp); 3641 rtl_stop_rx(tp);
3638 rxdy_gated_en(tp, false); 3642 rxdy_gated_en(tp, false);
3639 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr); 3643 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
3640 napi_enable(&tp->napi); 3644 napi_enable(napi);
3641 } 3645 }
3642 } 3646 }
3643 3647
@@ -3653,12 +3657,14 @@ static int rtl8152_system_suspend(struct r8152 *tp)
3653 netif_device_detach(netdev); 3657 netif_device_detach(netdev);
3654 3658
3655 if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) { 3659 if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
3660 struct napi_struct *napi = &tp->napi;
3661
3656 clear_bit(WORK_ENABLE, &tp->flags); 3662 clear_bit(WORK_ENABLE, &tp->flags);
3657 usb_kill_urb(tp->intr_urb); 3663 usb_kill_urb(tp->intr_urb);
3658 napi_disable(&tp->napi); 3664 napi_disable(napi);
3659 cancel_delayed_work_sync(&tp->schedule); 3665 cancel_delayed_work_sync(&tp->schedule);
3660 tp->rtl_ops.down(tp); 3666 tp->rtl_ops.down(tp);
3661 napi_enable(&tp->napi); 3667 napi_enable(napi);
3662 } 3668 }
3663 3669
3664 return ret; 3670 return ret;
@@ -3684,35 +3690,38 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
3684static int rtl8152_resume(struct usb_interface *intf) 3690static int rtl8152_resume(struct usb_interface *intf)
3685{ 3691{
3686 struct r8152 *tp = usb_get_intfdata(intf); 3692 struct r8152 *tp = usb_get_intfdata(intf);
3693 struct net_device *netdev = tp->netdev;
3687 3694
3688 mutex_lock(&tp->control); 3695 mutex_lock(&tp->control);
3689 3696
3690 if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3697 if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3691 tp->rtl_ops.init(tp); 3698 tp->rtl_ops.init(tp);
3692 queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); 3699 queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
3693 netif_device_attach(tp->netdev); 3700 netif_device_attach(netdev);
3694 } 3701 }
3695 3702
3696 if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) { 3703 if (netif_running(netdev) && netdev->flags & IFF_UP) {
3697 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3704 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3705 struct napi_struct *napi = &tp->napi;
3706
3698 tp->rtl_ops.autosuspend_en(tp, false); 3707 tp->rtl_ops.autosuspend_en(tp, false);
3699 napi_disable(&tp->napi); 3708 napi_disable(napi);
3700 set_bit(WORK_ENABLE, &tp->flags); 3709 set_bit(WORK_ENABLE, &tp->flags);
3701 if (netif_carrier_ok(tp->netdev)) 3710 if (netif_carrier_ok(netdev))
3702 rtl_start_rx(tp); 3711 rtl_start_rx(tp);
3703 napi_enable(&tp->napi); 3712 napi_enable(napi);
3704 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3713 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3705 smp_mb__after_atomic(); 3714 smp_mb__after_atomic();
3706 if (!list_empty(&tp->rx_done)) 3715 if (!list_empty(&tp->rx_done))
3707 napi_schedule(&tp->napi); 3716 napi_schedule(&tp->napi);
3708 } else { 3717 } else {
3709 tp->rtl_ops.up(tp); 3718 tp->rtl_ops.up(tp);
3710 netif_carrier_off(tp->netdev); 3719 netif_carrier_off(netdev);
3711 set_bit(WORK_ENABLE, &tp->flags); 3720 set_bit(WORK_ENABLE, &tp->flags);
3712 } 3721 }
3713 usb_submit_urb(tp->intr_urb, GFP_KERNEL); 3722 usb_submit_urb(tp->intr_urb, GFP_KERNEL);
3714 } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3723 } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3715 if (tp->netdev->flags & IFF_UP) 3724 if (netdev->flags & IFF_UP)
3716 tp->rtl_ops.autosuspend_en(tp, false); 3725 tp->rtl_ops.autosuspend_en(tp, false);
3717 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3726 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3718 } 3727 }
@@ -3800,7 +3809,8 @@ static void rtl8152_get_drvinfo(struct net_device *netdev,
3800} 3809}
3801 3810
3802static 3811static
3803int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) 3812int rtl8152_get_link_ksettings(struct net_device *netdev,
3813 struct ethtool_link_ksettings *cmd)
3804{ 3814{
3805 struct r8152 *tp = netdev_priv(netdev); 3815 struct r8152 *tp = netdev_priv(netdev);
3806 int ret; 3816 int ret;
@@ -3814,7 +3824,7 @@ int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
3814 3824
3815 mutex_lock(&tp->control); 3825 mutex_lock(&tp->control);
3816 3826
3817 ret = mii_ethtool_gset(&tp->mii, cmd); 3827 ret = mii_ethtool_get_link_ksettings(&tp->mii, cmd);
3818 3828
3819 mutex_unlock(&tp->control); 3829 mutex_unlock(&tp->control);
3820 3830
@@ -3824,7 +3834,8 @@ out:
3824 return ret; 3834 return ret;
3825} 3835}
3826 3836
3827static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 3837static int rtl8152_set_link_ksettings(struct net_device *dev,
3838 const struct ethtool_link_ksettings *cmd)
3828{ 3839{
3829 struct r8152 *tp = netdev_priv(dev); 3840 struct r8152 *tp = netdev_priv(dev);
3830 int ret; 3841 int ret;
@@ -3835,11 +3846,12 @@ static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3835 3846
3836 mutex_lock(&tp->control); 3847 mutex_lock(&tp->control);
3837 3848
3838 ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex); 3849 ret = rtl8152_set_speed(tp, cmd->base.autoneg, cmd->base.speed,
3850 cmd->base.duplex);
3839 if (!ret) { 3851 if (!ret) {
3840 tp->autoneg = cmd->autoneg; 3852 tp->autoneg = cmd->base.autoneg;
3841 tp->speed = cmd->speed; 3853 tp->speed = cmd->base.speed;
3842 tp->duplex = cmd->duplex; 3854 tp->duplex = cmd->base.duplex;
3843 } 3855 }
3844 3856
3845 mutex_unlock(&tp->control); 3857 mutex_unlock(&tp->control);
@@ -4117,8 +4129,6 @@ static int rtl8152_set_coalesce(struct net_device *netdev,
4117 4129
4118static const struct ethtool_ops ops = { 4130static const struct ethtool_ops ops = {
4119 .get_drvinfo = rtl8152_get_drvinfo, 4131 .get_drvinfo = rtl8152_get_drvinfo,
4120 .get_settings = rtl8152_get_settings,
4121 .set_settings = rtl8152_set_settings,
4122 .get_link = ethtool_op_get_link, 4132 .get_link = ethtool_op_get_link,
4123 .nway_reset = rtl8152_nway_reset, 4133 .nway_reset = rtl8152_nway_reset,
4124 .get_msglevel = rtl8152_get_msglevel, 4134 .get_msglevel = rtl8152_get_msglevel,
@@ -4132,6 +4142,8 @@ static const struct ethtool_ops ops = {
4132 .set_coalesce = rtl8152_set_coalesce, 4142 .set_coalesce = rtl8152_set_coalesce,
4133 .get_eee = rtl_ethtool_get_eee, 4143 .get_eee = rtl_ethtool_get_eee,
4134 .set_eee = rtl_ethtool_set_eee, 4144 .set_eee = rtl_ethtool_set_eee,
4145 .get_link_ksettings = rtl8152_get_link_ksettings,
4146 .set_link_ksettings = rtl8152_set_link_ksettings,
4135}; 4147};
4136 4148
4137static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 4149static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -4224,44 +4236,6 @@ static const struct net_device_ops rtl8152_netdev_ops = {
4224 .ndo_features_check = rtl8152_features_check, 4236 .ndo_features_check = rtl8152_features_check,
4225}; 4237};
4226 4238
4227static void r8152b_get_version(struct r8152 *tp)
4228{
4229 u32 ocp_data;
4230 u16 version;
4231
4232 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR1);
4233 version = (u16)(ocp_data & VERSION_MASK);
4234
4235 switch (version) {
4236 case 0x4c00:
4237 tp->version = RTL_VER_01;
4238 break;
4239 case 0x4c10:
4240 tp->version = RTL_VER_02;
4241 break;
4242 case 0x5c00:
4243 tp->version = RTL_VER_03;
4244 tp->mii.supports_gmii = 1;
4245 break;
4246 case 0x5c10:
4247 tp->version = RTL_VER_04;
4248 tp->mii.supports_gmii = 1;
4249 break;
4250 case 0x5c20:
4251 tp->version = RTL_VER_05;
4252 tp->mii.supports_gmii = 1;
4253 break;
4254 case 0x5c30:
4255 tp->version = RTL_VER_06;
4256 tp->mii.supports_gmii = 1;
4257 break;
4258 default:
4259 netif_info(tp, probe, tp->netdev,
4260 "Unknown version 0x%04x\n", version);
4261 break;
4262 }
4263}
4264
4265static void rtl8152_unload(struct r8152 *tp) 4239static void rtl8152_unload(struct r8152 *tp)
4266{ 4240{
4267 if (test_bit(RTL8152_UNPLUG, &tp->flags)) 4241 if (test_bit(RTL8152_UNPLUG, &tp->flags))
@@ -4326,14 +4300,66 @@ static int rtl_ops_init(struct r8152 *tp)
4326 return ret; 4300 return ret;
4327} 4301}
4328 4302
4303static u8 rtl_get_version(struct usb_interface *intf)
4304{
4305 struct usb_device *udev = interface_to_usbdev(intf);
4306 u32 ocp_data = 0;
4307 __le32 *tmp;
4308 u8 version;
4309 int ret;
4310
4311 tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
4312 if (!tmp)
4313 return 0;
4314
4315 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
4316 RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
4317 PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
4318 if (ret > 0)
4319 ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK;
4320
4321 kfree(tmp);
4322
4323 switch (ocp_data) {
4324 case 0x4c00:
4325 version = RTL_VER_01;
4326 break;
4327 case 0x4c10:
4328 version = RTL_VER_02;
4329 break;
4330 case 0x5c00:
4331 version = RTL_VER_03;
4332 break;
4333 case 0x5c10:
4334 version = RTL_VER_04;
4335 break;
4336 case 0x5c20:
4337 version = RTL_VER_05;
4338 break;
4339 case 0x5c30:
4340 version = RTL_VER_06;
4341 break;
4342 default:
4343 version = RTL_VER_UNKNOWN;
4344 dev_info(&intf->dev, "Unknown version 0x%04x\n", ocp_data);
4345 break;
4346 }
4347
4348 return version;
4349}
4350
4329static int rtl8152_probe(struct usb_interface *intf, 4351static int rtl8152_probe(struct usb_interface *intf,
4330 const struct usb_device_id *id) 4352 const struct usb_device_id *id)
4331{ 4353{
4332 struct usb_device *udev = interface_to_usbdev(intf); 4354 struct usb_device *udev = interface_to_usbdev(intf);
4355 u8 version = rtl_get_version(intf);
4333 struct r8152 *tp; 4356 struct r8152 *tp;
4334 struct net_device *netdev; 4357 struct net_device *netdev;
4335 int ret; 4358 int ret;
4336 4359
4360 if (version == RTL_VER_UNKNOWN)
4361 return -ENODEV;
4362
4337 if (udev->actconfig->desc.bConfigurationValue != 1) { 4363 if (udev->actconfig->desc.bConfigurationValue != 1) {
4338 usb_driver_set_configuration(udev, 1); 4364 usb_driver_set_configuration(udev, 1);
4339 return -ENODEV; 4365 return -ENODEV;
@@ -4353,8 +4379,18 @@ static int rtl8152_probe(struct usb_interface *intf,
4353 tp->udev = udev; 4379 tp->udev = udev;
4354 tp->netdev = netdev; 4380 tp->netdev = netdev;
4355 tp->intf = intf; 4381 tp->intf = intf;
4382 tp->version = version;
4383
4384 switch (version) {
4385 case RTL_VER_01:
4386 case RTL_VER_02:
4387 tp->mii.supports_gmii = 0;
4388 break;
4389 default:
4390 tp->mii.supports_gmii = 1;
4391 break;
4392 }
4356 4393
4357 r8152b_get_version(tp);
4358 ret = rtl_ops_init(tp); 4394 ret = rtl_ops_init(tp);
4359 if (ret) 4395 if (ret)
4360 goto out; 4396 goto out;
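Two details worth noting in the r8152 rework above: version detection now happens in rtl8152_probe() before the net_device is allocated, so unknown hardware is rejected with -ENODEV up front, and the chip register is read through a raw vendor control transfer into a kmalloc'ed buffer, since usb_control_msg() requires a DMA-safe buffer rather than a stack variable. A stripped-down sketch of that read, assuming the driver's own constants (RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, PLA_TCR0, MCU_TYPE_PLA) from r8152.c; the helper name is illustrative:

/* Sketch: read the PLA_TCR0 register over the default control endpoint. */
static int example_read_pla_tcr0(struct usb_device *udev, u32 *out)
{
	__le32 *buf = kmalloc(sizeof(*buf), GFP_KERNEL);	/* DMA-safe */
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
			      PLA_TCR0, MCU_TYPE_PLA, buf, sizeof(*buf), 500);
	if (ret > 0) {
		*out = __le32_to_cpu(*buf);
		ret = 0;
	} else if (ret == 0) {
		ret = -EIO;	/* short/empty transfer */
	}

	kfree(buf);
	return ret;
}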
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index c81c79110cef..daaa88a66f40 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -791,47 +791,52 @@ static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinf
791 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info)); 791 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
792} 792}
793 793
794static int rtl8150_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 794static int rtl8150_get_link_ksettings(struct net_device *netdev,
795 struct ethtool_link_ksettings *ecmd)
795{ 796{
796 rtl8150_t *dev = netdev_priv(netdev); 797 rtl8150_t *dev = netdev_priv(netdev);
797 short lpa, bmcr; 798 short lpa, bmcr;
799 u32 supported;
798 800
799 ecmd->supported = (SUPPORTED_10baseT_Half | 801 supported = (SUPPORTED_10baseT_Half |
800 SUPPORTED_10baseT_Full | 802 SUPPORTED_10baseT_Full |
801 SUPPORTED_100baseT_Half | 803 SUPPORTED_100baseT_Half |
802 SUPPORTED_100baseT_Full | 804 SUPPORTED_100baseT_Full |
803 SUPPORTED_Autoneg | 805 SUPPORTED_Autoneg |
804 SUPPORTED_TP | SUPPORTED_MII); 806 SUPPORTED_TP | SUPPORTED_MII);
805 ecmd->port = PORT_TP; 807 ecmd->base.port = PORT_TP;
806 ecmd->transceiver = XCVR_INTERNAL; 808 ecmd->base.phy_address = dev->phy;
807 ecmd->phy_address = dev->phy;
808 get_registers(dev, BMCR, 2, &bmcr); 809 get_registers(dev, BMCR, 2, &bmcr);
809 get_registers(dev, ANLP, 2, &lpa); 810 get_registers(dev, ANLP, 2, &lpa);
810 if (bmcr & BMCR_ANENABLE) { 811 if (bmcr & BMCR_ANENABLE) {
811 u32 speed = ((lpa & (LPA_100HALF | LPA_100FULL)) ? 812 u32 speed = ((lpa & (LPA_100HALF | LPA_100FULL)) ?
812 SPEED_100 : SPEED_10); 813 SPEED_100 : SPEED_10);
813 ethtool_cmd_speed_set(ecmd, speed); 814 ecmd->base.speed = speed;
814 ecmd->autoneg = AUTONEG_ENABLE; 815 ecmd->base.autoneg = AUTONEG_ENABLE;
815 if (speed == SPEED_100) 816 if (speed == SPEED_100)
816 ecmd->duplex = (lpa & LPA_100FULL) ? 817 ecmd->base.duplex = (lpa & LPA_100FULL) ?
817 DUPLEX_FULL : DUPLEX_HALF; 818 DUPLEX_FULL : DUPLEX_HALF;
818 else 819 else
819 ecmd->duplex = (lpa & LPA_10FULL) ? 820 ecmd->base.duplex = (lpa & LPA_10FULL) ?
820 DUPLEX_FULL : DUPLEX_HALF; 821 DUPLEX_FULL : DUPLEX_HALF;
821 } else { 822 } else {
822 ecmd->autoneg = AUTONEG_DISABLE; 823 ecmd->base.autoneg = AUTONEG_DISABLE;
823 ethtool_cmd_speed_set(ecmd, ((bmcr & BMCR_SPEED100) ? 824 ecmd->base.speed = ((bmcr & BMCR_SPEED100) ?
824 SPEED_100 : SPEED_10)); 825 SPEED_100 : SPEED_10);
825 ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? 826 ecmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
826 DUPLEX_FULL : DUPLEX_HALF; 827 DUPLEX_FULL : DUPLEX_HALF;
827 } 828 }
829
830 ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
831 supported);
832
828 return 0; 833 return 0;
829} 834}
830 835
831static const struct ethtool_ops ops = { 836static const struct ethtool_ops ops = {
832 .get_drvinfo = rtl8150_get_drvinfo, 837 .get_drvinfo = rtl8150_get_drvinfo,
833 .get_settings = rtl8150_get_settings, 838 .get_link = ethtool_op_get_link,
834 .get_link = ethtool_op_get_link 839 .get_link_ksettings = rtl8150_get_link_ksettings,
835}; 840};
836 841
837static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 842static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index ac69f28d92d2..c8f60b887c22 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -648,9 +648,9 @@ static const struct ethtool_ops sierra_net_ethtool_ops = {
648 .get_link = sierra_net_get_link, 648 .get_link = sierra_net_get_link,
649 .get_msglevel = usbnet_get_msglevel, 649 .get_msglevel = usbnet_get_msglevel,
650 .set_msglevel = usbnet_set_msglevel, 650 .set_msglevel = usbnet_set_msglevel,
651 .get_settings = usbnet_get_settings,
652 .set_settings = usbnet_set_settings,
653 .nway_reset = usbnet_nway_reset, 651 .nway_reset = usbnet_nway_reset,
652 .get_link_ksettings = usbnet_get_link_ksettings,
653 .set_link_ksettings = usbnet_set_link_ksettings,
654}; 654};
655 655
656static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap) 656static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 0b17b40d7a4f..1ab0ff43c6a2 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -743,13 +743,13 @@ static const struct ethtool_ops smsc75xx_ethtool_ops = {
743 .get_drvinfo = usbnet_get_drvinfo, 743 .get_drvinfo = usbnet_get_drvinfo,
744 .get_msglevel = usbnet_get_msglevel, 744 .get_msglevel = usbnet_get_msglevel,
745 .set_msglevel = usbnet_set_msglevel, 745 .set_msglevel = usbnet_set_msglevel,
746 .get_settings = usbnet_get_settings,
747 .set_settings = usbnet_set_settings,
748 .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len, 746 .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len,
749 .get_eeprom = smsc75xx_ethtool_get_eeprom, 747 .get_eeprom = smsc75xx_ethtool_get_eeprom,
750 .set_eeprom = smsc75xx_ethtool_set_eeprom, 748 .set_eeprom = smsc75xx_ethtool_set_eeprom,
751 .get_wol = smsc75xx_ethtool_get_wol, 749 .get_wol = smsc75xx_ethtool_get_wol,
752 .set_wol = smsc75xx_ethtool_set_wol, 750 .set_wol = smsc75xx_ethtool_set_wol,
751 .get_link_ksettings = usbnet_get_link_ksettings,
752 .set_link_ksettings = usbnet_set_link_ksettings,
753}; 753};
754 754
755static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 755static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 831aa33d078a..4a8bf960cbb9 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -853,32 +853,32 @@ static void set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
853 pdata->mdix_ctrl = mdix_ctrl; 853 pdata->mdix_ctrl = mdix_ctrl;
854} 854}
855 855
856static int smsc95xx_get_settings(struct net_device *net, 856static int smsc95xx_get_link_ksettings(struct net_device *net,
857 struct ethtool_cmd *cmd) 857 struct ethtool_link_ksettings *cmd)
858{ 858{
859 struct usbnet *dev = netdev_priv(net); 859 struct usbnet *dev = netdev_priv(net);
860 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 860 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
861 int retval; 861 int retval;
862 862
863 retval = usbnet_get_settings(net, cmd); 863 retval = usbnet_get_link_ksettings(net, cmd);
864 864
865 cmd->eth_tp_mdix = pdata->mdix_ctrl; 865 cmd->base.eth_tp_mdix = pdata->mdix_ctrl;
866 cmd->eth_tp_mdix_ctrl = pdata->mdix_ctrl; 866 cmd->base.eth_tp_mdix_ctrl = pdata->mdix_ctrl;
867 867
868 return retval; 868 return retval;
869} 869}
870 870
871static int smsc95xx_set_settings(struct net_device *net, 871static int smsc95xx_set_link_ksettings(struct net_device *net,
872 struct ethtool_cmd *cmd) 872 const struct ethtool_link_ksettings *cmd)
873{ 873{
874 struct usbnet *dev = netdev_priv(net); 874 struct usbnet *dev = netdev_priv(net);
875 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 875 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
876 int retval; 876 int retval;
877 877
878 if (pdata->mdix_ctrl != cmd->eth_tp_mdix_ctrl) 878 if (pdata->mdix_ctrl != cmd->base.eth_tp_mdix_ctrl)
879 set_mdix_status(net, cmd->eth_tp_mdix_ctrl); 879 set_mdix_status(net, cmd->base.eth_tp_mdix_ctrl);
880 880
881 retval = usbnet_set_settings(net, cmd); 881 retval = usbnet_set_link_ksettings(net, cmd);
882 882
883 return retval; 883 return retval;
884} 884}
@@ -889,8 +889,6 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
889 .get_drvinfo = usbnet_get_drvinfo, 889 .get_drvinfo = usbnet_get_drvinfo,
890 .get_msglevel = usbnet_get_msglevel, 890 .get_msglevel = usbnet_get_msglevel,
891 .set_msglevel = usbnet_set_msglevel, 891 .set_msglevel = usbnet_set_msglevel,
892 .get_settings = smsc95xx_get_settings,
893 .set_settings = smsc95xx_set_settings,
894 .get_eeprom_len = smsc95xx_ethtool_get_eeprom_len, 892 .get_eeprom_len = smsc95xx_ethtool_get_eeprom_len,
895 .get_eeprom = smsc95xx_ethtool_get_eeprom, 893 .get_eeprom = smsc95xx_ethtool_get_eeprom,
896 .set_eeprom = smsc95xx_ethtool_set_eeprom, 894 .set_eeprom = smsc95xx_ethtool_set_eeprom,
@@ -898,6 +896,8 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
898 .get_regs = smsc95xx_ethtool_getregs, 896 .get_regs = smsc95xx_ethtool_getregs,
899 .get_wol = smsc95xx_ethtool_get_wol, 897 .get_wol = smsc95xx_ethtool_get_wol,
900 .set_wol = smsc95xx_ethtool_set_wol, 898 .set_wol = smsc95xx_ethtool_set_wol,
899 .get_link_ksettings = smsc95xx_get_link_ksettings,
900 .set_link_ksettings = smsc95xx_set_link_ksettings,
901}; 901};
902 902
903static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 903static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 4a1e9c489f1f..950a3a9466bd 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -249,9 +249,9 @@ static const struct ethtool_ops sr9700_ethtool_ops = {
249 .set_msglevel = usbnet_set_msglevel, 249 .set_msglevel = usbnet_set_msglevel,
250 .get_eeprom_len = sr9700_get_eeprom_len, 250 .get_eeprom_len = sr9700_get_eeprom_len,
251 .get_eeprom = sr9700_get_eeprom, 251 .get_eeprom = sr9700_get_eeprom,
252 .get_settings = usbnet_get_settings,
253 .set_settings = usbnet_set_settings,
254 .nway_reset = usbnet_nway_reset, 252 .nway_reset = usbnet_nway_reset,
253 .get_link_ksettings = usbnet_get_link_ksettings,
254 .set_link_ksettings = usbnet_set_link_ksettings,
255}; 255};
256 256
257static void sr9700_set_multicast(struct net_device *netdev) 257static void sr9700_set_multicast(struct net_device *netdev)
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index a50df0d8fb9a..a696b628782c 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -524,9 +524,9 @@ static const struct ethtool_ops sr9800_ethtool_ops = {
524 .set_wol = sr_set_wol, 524 .set_wol = sr_set_wol,
525 .get_eeprom_len = sr_get_eeprom_len, 525 .get_eeprom_len = sr_get_eeprom_len,
526 .get_eeprom = sr_get_eeprom, 526 .get_eeprom = sr_get_eeprom,
527 .get_settings = usbnet_get_settings,
528 .set_settings = usbnet_set_settings,
529 .nway_reset = usbnet_nway_reset, 527 .nway_reset = usbnet_nway_reset,
528 .get_link_ksettings = usbnet_get_link_ksettings,
529 .set_link_ksettings = usbnet_set_link_ksettings,
530}; 530};
531 531
532static int sr9800_link_reset(struct usbnet *dev) 532static int sr9800_link_reset(struct usbnet *dev)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 3de65ea6531a..13d4ec5f6f34 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -947,18 +947,20 @@ EXPORT_SYMBOL_GPL(usbnet_open);
947 * they'll probably want to use this base set. 947 * they'll probably want to use this base set.
948 */ 948 */
949 949
950int usbnet_get_settings (struct net_device *net, struct ethtool_cmd *cmd) 950int usbnet_get_link_ksettings(struct net_device *net,
951 struct ethtool_link_ksettings *cmd)
951{ 952{
952 struct usbnet *dev = netdev_priv(net); 953 struct usbnet *dev = netdev_priv(net);
953 954
954 if (!dev->mii.mdio_read) 955 if (!dev->mii.mdio_read)
955 return -EOPNOTSUPP; 956 return -EOPNOTSUPP;
956 957
957 return mii_ethtool_gset(&dev->mii, cmd); 958 return mii_ethtool_get_link_ksettings(&dev->mii, cmd);
958} 959}
959EXPORT_SYMBOL_GPL(usbnet_get_settings); 960EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings);
960 961
961int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd) 962int usbnet_set_link_ksettings(struct net_device *net,
963 const struct ethtool_link_ksettings *cmd)
962{ 964{
963 struct usbnet *dev = netdev_priv(net); 965 struct usbnet *dev = netdev_priv(net);
964 int retval; 966 int retval;
@@ -966,7 +968,7 @@ int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
966 if (!dev->mii.mdio_write) 968 if (!dev->mii.mdio_write)
967 return -EOPNOTSUPP; 969 return -EOPNOTSUPP;
968 970
969 retval = mii_ethtool_sset(&dev->mii, cmd); 971 retval = mii_ethtool_set_link_ksettings(&dev->mii, cmd);
970 972
971 /* link speed/duplex might have changed */ 973 /* link speed/duplex might have changed */
972 if (dev->driver_info->link_reset) 974 if (dev->driver_info->link_reset)
@@ -976,9 +978,8 @@ int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
976 usbnet_update_max_qlen(dev); 978 usbnet_update_max_qlen(dev);
977 979
978 return retval; 980 return retval;
979
980} 981}
981EXPORT_SYMBOL_GPL(usbnet_set_settings); 982EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings);
982 983
983u32 usbnet_get_link (struct net_device *net) 984u32 usbnet_get_link (struct net_device *net)
984{ 985{
@@ -1038,14 +1039,14 @@ EXPORT_SYMBOL_GPL(usbnet_set_msglevel);
1038 1039
1039/* drivers may override default ethtool_ops in their bind() routine */ 1040/* drivers may override default ethtool_ops in their bind() routine */
1040static const struct ethtool_ops usbnet_ethtool_ops = { 1041static const struct ethtool_ops usbnet_ethtool_ops = {
1041 .get_settings = usbnet_get_settings,
1042 .set_settings = usbnet_set_settings,
1043 .get_link = usbnet_get_link, 1042 .get_link = usbnet_get_link,
1044 .nway_reset = usbnet_nway_reset, 1043 .nway_reset = usbnet_nway_reset,
1045 .get_drvinfo = usbnet_get_drvinfo, 1044 .get_drvinfo = usbnet_get_drvinfo,
1046 .get_msglevel = usbnet_get_msglevel, 1045 .get_msglevel = usbnet_get_msglevel,
1047 .set_msglevel = usbnet_set_msglevel, 1046 .set_msglevel = usbnet_set_msglevel,
1048 .get_ts_info = ethtool_op_get_ts_info, 1047 .get_ts_info = ethtool_op_get_ts_info,
1048 .get_link_ksettings = usbnet_get_link_ksettings,
1049 .set_link_ksettings = usbnet_set_link_ksettings,
1049}; 1050};
1050 1051
1051/*-------------------------------------------------------------------------*/ 1052/*-------------------------------------------------------------------------*/
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index fea687f35b5a..7f28021d9d93 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -747,14 +747,18 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
747{ 747{
748 int ret; 748 int ret;
749 749
750 port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
750 ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL); 751 ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
751 if (ret < 0) 752 if (ret < 0)
752 return ret; 753 goto err;
753 754
754 port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
755 cycle_netdev(port_dev); 755 cycle_netdev(port_dev);
756 756
757 return 0; 757 return 0;
758
759err:
760 port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
761 return ret;
758} 762}
759 763
760static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev) 764static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index bdb6ae16d4a8..09855be219e9 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -276,9 +276,9 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
276 send_eth = send_ip = true; 276 send_eth = send_ip = true;
277 277
278 if (type == RTM_GETNEIGH) { 278 if (type == RTM_GETNEIGH) {
279 ndm->ndm_family = AF_INET;
280 send_ip = !vxlan_addr_any(&rdst->remote_ip); 279 send_ip = !vxlan_addr_any(&rdst->remote_ip);
281 send_eth = !is_zero_ether_addr(fdb->eth_addr); 280 send_eth = !is_zero_ether_addr(fdb->eth_addr);
281 ndm->ndm_family = send_ip ? rdst->remote_ip.sa.sa_family : AF_INET;
282 } else 282 } else
283 ndm->ndm_family = AF_BRIDGE; 283 ndm->ndm_family = AF_BRIDGE;
284 ndm->ndm_state = fdb->state; 284 ndm->ndm_state = fdb->state;
diff --git a/drivers/scsi/qedf/Makefile b/drivers/scsi/qedf/Makefile
index 64e9f507ce32..414f2a772a5f 100644
--- a/drivers/scsi/qedf/Makefile
+++ b/drivers/scsi/qedf/Makefile
@@ -1,5 +1,5 @@
1obj-$(CONFIG_QEDF) := qedf.o 1obj-$(CONFIG_QEDF) := qedf.o
2qedf-y = qedf_dbg.o qedf_main.o qedf_io.o qedf_fip.o \ 2qedf-y = qedf_dbg.o qedf_main.o qedf_io.o qedf_fip.o \
3 qedf_attr.o qedf_els.o 3 qedf_attr.o qedf_els.o drv_scsi_fw_funcs.o drv_fcoe_fw_funcs.o
4 4
5qedf-$(CONFIG_DEBUG_FS) += qedf_debugfs.o 5qedf-$(CONFIG_DEBUG_FS) += qedf_debugfs.o
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
new file mode 100644
index 000000000000..bb812db48da6
--- /dev/null
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
@@ -0,0 +1,190 @@
1/* QLogic FCoE Offload Driver
2 * Copyright (c) 2016 Cavium Inc.
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8#include "drv_fcoe_fw_funcs.h"
9#include "drv_scsi_fw_funcs.h"
10
11#define FCOE_RX_ID ((u32)0x0000FFFF)
12
13static inline void init_common_sqe(struct fcoe_task_params *task_params,
14 enum fcoe_sqe_request_type request_type)
15{
16 memset(task_params->sqe, 0, sizeof(*(task_params->sqe)));
17 SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE,
18 request_type);
19 task_params->sqe->task_id = task_params->itid;
20}
21
22int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
23 struct scsi_sgl_task_params *sgl_task_params,
24 struct regpair sense_data_buffer_phys_addr,
25 u32 task_retry_id,
26 u8 fcp_cmd_payload[32])
27{
28 struct fcoe_task_context *ctx = task_params->context;
29 struct ystorm_fcoe_task_st_ctx *y_st_ctx;
30 struct tstorm_fcoe_task_st_ctx *t_st_ctx;
31 struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
32 struct mstorm_fcoe_task_st_ctx *m_st_ctx;
33 u32 io_size, val;
34 bool slow_sgl;
35
36 memset(ctx, 0, sizeof(*(ctx)));
37 slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges,
38 sgl_task_params->small_mid_sge);
39 io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ?
40 task_params->tx_io_size : task_params->rx_io_size);
41
42 /* Ystorm ctx */
43 y_st_ctx = &ctx->ystorm_st_context;
44 y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
45 y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id);
46 y_st_ctx->task_type = task_params->task_type;
47 memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload,
48 fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload));
49
50 /* Tstorm ctx */
51 t_st_ctx = &ctx->tstorm_st_context;
52 t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ?
53 FCOE_TASK_DEV_TYPE_TAPE :
54 FCOE_TASK_DEV_TYPE_DISK);
55 t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
56 val = cpu_to_le32(task_params->cq_rss_number);
57 t_st_ctx->read_only.glbl_q_num = val;
58 t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size);
59 t_st_ctx->read_only.task_type = task_params->task_type;
60 SET_FIELD(t_st_ctx->read_write.flags,
61 FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
62 t_st_ctx->read_write.rx_id = cpu_to_le32(FCOE_RX_ID);
63
64 /* Ustorm ctx */
65 u_ag_ctx = &ctx->ustorm_ag_context;
66 u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number);
67
68 /* Mstorm buffer for sense/rsp data placement */
69 m_st_ctx = &ctx->mstorm_st_context;
70 val = cpu_to_le32(sense_data_buffer_phys_addr.hi);
71 m_st_ctx->rsp_buf_addr.hi = val;
72 val = cpu_to_le32(sense_data_buffer_phys_addr.lo);
73 m_st_ctx->rsp_buf_addr.lo = val;
74
75 if (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
76 /* Ystorm ctx */
77 y_st_ctx->expect_first_xfer = 1;
78
79 /* Set the amount of super SGEs. Can be up to 4. */
80 SET_FIELD(y_st_ctx->sgl_mode,
81 YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
82 (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
83 init_scsi_sgl_context(&y_st_ctx->sgl_params,
84 &y_st_ctx->data_desc,
85 sgl_task_params);
86
87 /* Mstorm ctx */
88 SET_FIELD(m_st_ctx->flags,
89 MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
90 (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
91 } else {
92 /* Tstorm ctx */
93 SET_FIELD(t_st_ctx->read_write.flags,
94 FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
95 (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
96
97 /* Mstorm ctx */
98 m_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
99 init_scsi_sgl_context(&m_st_ctx->sgl_params,
100 &m_st_ctx->data_desc,
101 sgl_task_params);
102 }
103
104 init_common_sqe(task_params, SEND_FCOE_CMD);
105 return 0;
106}
107
108int init_initiator_midpath_unsolicited_fcoe_task(
109 struct fcoe_task_params *task_params,
110 struct fcoe_tx_mid_path_params *mid_path_fc_header,
111 struct scsi_sgl_task_params *tx_sgl_task_params,
112 struct scsi_sgl_task_params *rx_sgl_task_params,
113 u8 fw_to_place_fc_header)
114{
115 struct fcoe_task_context *ctx = task_params->context;
116 struct ystorm_fcoe_task_st_ctx *y_st_ctx;
117 struct tstorm_fcoe_task_st_ctx *t_st_ctx;
118 struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
119 struct mstorm_fcoe_task_st_ctx *m_st_ctx;
120 u32 val;
121
122 memset(ctx, 0, sizeof(*(ctx)));
123
124 /* Init Ystorm */
125 y_st_ctx = &ctx->ystorm_st_context;
126 init_scsi_sgl_context(&y_st_ctx->sgl_params,
127 &y_st_ctx->data_desc,
128 tx_sgl_task_params);
129 SET_FIELD(y_st_ctx->sgl_mode,
130 YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL);
131 y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size);
132 y_st_ctx->task_type = task_params->task_type;
133 memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path,
134 mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params));
135
136 /* Init Mstorm */
137 m_st_ctx = &ctx->mstorm_st_context;
138 init_scsi_sgl_context(&m_st_ctx->sgl_params,
139 &m_st_ctx->data_desc,
140 rx_sgl_task_params);
141 SET_FIELD(m_st_ctx->flags,
142 MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER,
143 fw_to_place_fc_header);
144 m_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->rx_io_size);
145
146 /* Init Tstorm */
147 t_st_ctx = &ctx->tstorm_st_context;
148 t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
149 val = cpu_to_le32(task_params->cq_rss_number);
150 t_st_ctx->read_only.glbl_q_num = val;
151 t_st_ctx->read_only.task_type = task_params->task_type;
152 SET_FIELD(t_st_ctx->read_write.flags,
153 FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
154 t_st_ctx->read_write.rx_id = cpu_to_le32(FCOE_RX_ID);
155
156 /* Init Ustorm */
157 u_ag_ctx = &ctx->ustorm_ag_context;
158 u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number);
159
160 /* Init SQE */
161 init_common_sqe(task_params, SEND_FCOE_MIDPATH);
162 task_params->sqe->additional_info_union.burst_length =
163 tx_sgl_task_params->total_buffer_size;
164 SET_FIELD(task_params->sqe->flags,
165 FCOE_WQE_NUM_SGES, tx_sgl_task_params->num_sges);
166 SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE,
167 SCSI_FAST_SGL);
168
169 return 0;
170}
171
172int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params)
173{
174 init_common_sqe(task_params, SEND_FCOE_ABTS_REQUEST);
175 return 0;
176}
177
178int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params)
179{
180 init_common_sqe(task_params, FCOE_EXCHANGE_CLEANUP);
181 return 0;
182}
183
184int init_initiator_sequence_recovery_fcoe_task(
185 struct fcoe_task_params *task_params, u32 off)
186{
187 init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY);
188 task_params->sqe->additional_info_union.seq_rec_updated_offset = off;
189 return 0;
190}
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
new file mode 100644
index 000000000000..617529b058f4
--- /dev/null
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
@@ -0,0 +1,93 @@
1/* QLogic FCoE Offload Driver
2 * Copyright (c) 2016 Cavium Inc.
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8#ifndef _FCOE_FW_FUNCS_H
9#define _FCOE_FW_FUNCS_H
10#include "drv_scsi_fw_funcs.h"
11#include "qedf_hsi.h"
12#include <linux/qed/qed_if.h>
13
14struct fcoe_task_params {
15 /* Output parameter [set/filled by the HSI function] */
16 struct fcoe_task_context *context;
17
18 /* Output parameter [set/filled by the HSI function] */
19 struct fcoe_wqe *sqe;
20 enum fcoe_task_type task_type;
21 u32 tx_io_size; /* in bytes */
22 u32 rx_io_size; /* in bytes */
23 u32 conn_cid;
24 u16 itid;
25 u8 cq_rss_number;
26
27 /* Whether it's Tape device or not (0=Disk, 1=Tape) */
28 u8 is_tape_device;
29};
30
31/**
32 * @brief init_initiator_rw_fcoe_task - Initializes FCoE task context for
33 * read/write task types and init fcoe_sqe
34 *
35 * @param task_params - Pointer to task parameters struct
36 * @param sgl_task_params - Pointer to SGL task params
 37 * @param sense_data_buffer_phys_addr - Physical address of the sense data buffer
38 * @param task_retry_id - retry identification - Used only for Tape device
 39 * @param fcp_cmd_payload - FCP CMD payload
40 */
41int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
42 struct scsi_sgl_task_params *sgl_task_params,
43 struct regpair sense_data_buffer_phys_addr,
44 u32 task_retry_id,
45 u8 fcp_cmd_payload[32]);
46
47/**
 48 * @brief init_initiator_midpath_unsolicited_fcoe_task - Initializes FCoE task
 49 * context for midpath/unsolicited task types and init fcoe_sqe
50 *
51 * @param task_params - Pointer to task parameters struct
52 * @param mid_path_fc_header - FC header
53 * @param tx_sgl_task_params - Pointer to Tx SGL task params
54 * @param rx_sgl_task_params - Pointer to Rx SGL task params
 55 * @param fw_to_place_fc_header - Indication whether the FW will place the FC
 56 * header in addition to the data that arrives.
57 */
58int init_initiator_midpath_unsolicited_fcoe_task(
59 struct fcoe_task_params *task_params,
60 struct fcoe_tx_mid_path_params *mid_path_fc_header,
61 struct scsi_sgl_task_params *tx_sgl_task_params,
62 struct scsi_sgl_task_params *rx_sgl_task_params,
63 u8 fw_to_place_fc_header);
64
65/**
66 * @brief init_initiator_abort_fcoe_task - Initializes FCoE task context for
67 * abort task types and init fcoe_sqe
68 *
69 * @param task_params - Pointer to task parameters struct
70 */
71int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params);
72
73/**
74 * @brief init_initiator_cleanup_fcoe_task - Initializes FCoE task context for
75 * cleanup task types and init fcoe_sqe
76 *
77 *
78 * @param task_params - Pointer to task parameters struct
79 */
80int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params);
81
82/**
 83 * @brief init_initiator_sequence_recovery_fcoe_task - Initializes FCoE task
 84 * context for sequence recovery task types and init fcoe_sqe
85 *
86 *
87 * @param task_params - Pointer to task parameters struct
 88 * @param desired_offset - The desired offset the task will be re-sent from
89 */
90int init_initiator_sequence_recovery_fcoe_task(
91 struct fcoe_task_params *task_params,
92 u32 desired_offset);
93#endif
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.c b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
new file mode 100644
index 000000000000..11e0cc082ec0
--- /dev/null
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
@@ -0,0 +1,44 @@
1/* QLogic FCoE Offload Driver
2 * Copyright (c) 2016 Cavium Inc.
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8#include "drv_scsi_fw_funcs.h"
9
10#define SCSI_NUM_SGES_IN_CACHE 0x4
11
12bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
13{
14 return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
15}
16
17void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
18 struct scsi_cached_sges *ctx_data_desc,
19 struct scsi_sgl_task_params *sgl_task_params)
20{
21 /* no need to check for sgl_task_params->sgl validity */
22 u8 num_sges_to_init = sgl_task_params->num_sges >
23 SCSI_NUM_SGES_IN_CACHE ? SCSI_NUM_SGES_IN_CACHE :
24 sgl_task_params->num_sges;
25 u8 sge_index;
26 u32 val;
27
28 val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
29 ctx_sgl_params->sgl_addr.lo = val;
30 val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
31 ctx_sgl_params->sgl_addr.hi = val;
32 val = cpu_to_le32(sgl_task_params->total_buffer_size);
33 ctx_sgl_params->sgl_total_length = val;
34 ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);
35
36 for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) {
37 val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
38 ctx_data_desc->sge[sge_index].sge_addr.lo = val;
39 val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
40 ctx_data_desc->sge[sge_index].sge_addr.hi = val;
41 val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
42 ctx_data_desc->sge[sge_index].sge_len = val;
43 }
44}
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.h b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
new file mode 100644
index 000000000000..9cb45410bc45
--- /dev/null
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
@@ -0,0 +1,85 @@
1/* QLogic FCoE Offload Driver
2 * Copyright (c) 2016 Cavium Inc.
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8#ifndef _SCSI_FW_FUNCS_H
9#define _SCSI_FW_FUNCS_H
10#include <linux/qed/common_hsi.h>
11#include <linux/qed/storage_common.h>
12#include <linux/qed/fcoe_common.h>
13
14struct scsi_sgl_task_params {
15 struct scsi_sge *sgl;
16 struct regpair sgl_phys_addr;
17 u32 total_buffer_size;
18 u16 num_sges;
19
 20 /* true if SGL contains a small (< 4KB) SGE in the middle (not 1st or last)
21 * -> relevant for tx only
22 */
23 bool small_mid_sge;
24};
25
26struct scsi_dif_task_params {
27 u32 initial_ref_tag;
28 bool initial_ref_tag_is_valid;
29 u16 application_tag;
30 u16 application_tag_mask;
31 u16 dif_block_size_log;
32 bool dif_on_network;
33 bool dif_on_host;
34 u8 host_guard_type;
35 u8 protection_type;
36 u8 ref_tag_mask;
37 bool crc_seed;
38
39 /* Enable Connection error upon DIF error (segments with DIF errors are
40 * dropped)
41 */
42 bool tx_dif_conn_err_en;
43 bool ignore_app_tag;
44 bool keep_ref_tag_const;
45 bool validate_guard;
46 bool validate_app_tag;
47 bool validate_ref_tag;
48 bool forward_guard;
49 bool forward_app_tag;
50 bool forward_ref_tag;
51 bool forward_app_tag_with_mask;
52 bool forward_ref_tag_with_mask;
53};
54
55struct scsi_initiator_cmd_params {
56 /* for cdb_size > default CDB size (extended CDB > 16 bytes) ->
57 * pointer to the CDB buffer SGE
58 */
59 struct scsi_sge extended_cdb_sge;
60
 61 /* Physical address of sense data buffer - 256B buffer */
62 struct regpair sense_data_buffer_phys_addr;
63};
64
65/**
66 * @brief scsi_is_slow_sgl - checks for slow SGL
67 *
68 * @param num_sges - number of sges in SGL
 69 * @param small_mid_sge - True if the SGL contains an SGE which is smaller than
 70 * 4KB and is not the 1st or last SGE in the SGL
71 */
72bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge);
73
74/**
75 * @brief init_scsi_sgl_context - initializes SGL task context
76 *
77 * @param sgl_params - SGL context parameters to initialize (output parameter)
78 * @param data_desc - context struct containing SGEs array to set (output
79 * parameter)
80 * @param sgl_task_params - SGL parameters (input)
81 */
82void init_scsi_sgl_context(struct scsi_sgl_params *sgl_params,
83 struct scsi_cached_sges *ctx_data_desc,
84 struct scsi_sgl_task_params *sgl_task_params);
85#endif
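
For orientation (again not part of the patch), the initiator paths above combine these helpers as follows: the caller fills a scsi_sgl_task_params from its BD table and lets scsi_is_slow_sgl() choose between fast and slow transmit SGL modes. A minimal sketch, assuming the SCSI_TX_SLOW_SGL/SCSI_FAST_SGL values come from the qed headers pulled in by drv_scsi_fw_funcs.h; the example_* name is hypothetical:

#include "drv_scsi_fw_funcs.h"

/* Hypothetical sketch: pick the Tx SGL mode for a populated SGL descriptor. */
static u8 example_pick_tx_sgl_mode(const struct scsi_sgl_task_params *p)
{
	/* Slow SGL is used when a small (< 4KB) SGE sits in the middle of a
	 * long SGE list; otherwise the fast path is used.
	 */
	return scsi_is_slow_sgl(p->num_sges, p->small_mid_sge) ?
		SCSI_TX_SLOW_SGL : SCSI_FAST_SGL;
}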
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 96346a1b1515..40aeb6bb96a2 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -26,6 +26,7 @@
26#include <linux/qed/qed_ll2_if.h> 26#include <linux/qed/qed_ll2_if.h>
27#include "qedf_version.h" 27#include "qedf_version.h"
28#include "qedf_dbg.h" 28#include "qedf_dbg.h"
29#include "drv_fcoe_fw_funcs.h"
29 30
30/* Helpers to extract upper and lower 32-bits of pointer */ 31/* Helpers to extract upper and lower 32-bits of pointer */
31#define U64_HI(val) ((u32)(((u64)(val)) >> 32)) 32#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
@@ -59,19 +60,17 @@
59#define UPSTREAM_KEEP 1 60#define UPSTREAM_KEEP 1
60 61
61struct qedf_mp_req { 62struct qedf_mp_req {
62 uint8_t tm_flags;
63
64 uint32_t req_len; 63 uint32_t req_len;
65 void *req_buf; 64 void *req_buf;
66 dma_addr_t req_buf_dma; 65 dma_addr_t req_buf_dma;
67 struct fcoe_sge *mp_req_bd; 66 struct scsi_sge *mp_req_bd;
68 dma_addr_t mp_req_bd_dma; 67 dma_addr_t mp_req_bd_dma;
69 struct fc_frame_header req_fc_hdr; 68 struct fc_frame_header req_fc_hdr;
70 69
71 uint32_t resp_len; 70 uint32_t resp_len;
72 void *resp_buf; 71 void *resp_buf;
73 dma_addr_t resp_buf_dma; 72 dma_addr_t resp_buf_dma;
74 struct fcoe_sge *mp_resp_bd; 73 struct scsi_sge *mp_resp_bd;
75 dma_addr_t mp_resp_bd_dma; 74 dma_addr_t mp_resp_bd_dma;
76 struct fc_frame_header resp_fc_hdr; 75 struct fc_frame_header resp_fc_hdr;
77}; 76};
@@ -119,6 +118,7 @@ struct qedf_ioreq {
119#define QEDF_CMD_IN_CLEANUP 0x2 118#define QEDF_CMD_IN_CLEANUP 0x2
120#define QEDF_CMD_SRR_SENT 0x3 119#define QEDF_CMD_SRR_SENT 0x3
121 u8 io_req_flags; 120 u8 io_req_flags;
121 uint8_t tm_flags;
122 struct qedf_rport *fcport; 122 struct qedf_rport *fcport;
123 unsigned long flags; 123 unsigned long flags;
124 enum qedf_ioreq_event event; 124 enum qedf_ioreq_event event;
@@ -130,6 +130,8 @@ struct qedf_ioreq {
130 struct completion tm_done; 130 struct completion tm_done;
131 struct completion abts_done; 131 struct completion abts_done;
132 struct fcoe_task_context *task; 132 struct fcoe_task_context *task;
133 struct fcoe_task_params *task_params;
134 struct scsi_sgl_task_params *sgl_task_params;
133 int idx; 135 int idx;
134/* 136/*
135 * Need to allocate enough room for both sense data and FCP response data 137 * Need to allocate enough room for both sense data and FCP response data
@@ -199,8 +201,8 @@ struct qedf_rport {
199 dma_addr_t sq_pbl_dma; 201 dma_addr_t sq_pbl_dma;
200 u32 sq_pbl_size; 202 u32 sq_pbl_size;
201 u32 sid; 203 u32 sid;
202#define QEDF_RPORT_TYPE_DISK 1 204#define QEDF_RPORT_TYPE_DISK 0
203#define QEDF_RPORT_TYPE_TAPE 2 205#define QEDF_RPORT_TYPE_TAPE 1
204 uint dev_type; /* Disk or tape */ 206 uint dev_type; /* Disk or tape */
205 struct list_head peers; 207 struct list_head peers;
206}; 208};
@@ -391,7 +393,7 @@ struct qedf_ctx {
391 393
392struct io_bdt { 394struct io_bdt {
393 struct qedf_ioreq *io_req; 395 struct qedf_ioreq *io_req;
394 struct fcoe_sge *bd_tbl; 396 struct scsi_sge *bd_tbl;
395 dma_addr_t bd_tbl_dma; 397 dma_addr_t bd_tbl_dma;
396 u16 bd_valid; 398 u16 bd_valid;
397}; 399};
@@ -400,7 +402,7 @@ struct qedf_cmd_mgr {
400 struct qedf_ctx *qedf; 402 struct qedf_ctx *qedf;
401 u16 idx; 403 u16 idx;
402 struct io_bdt **io_bdt_pool; 404 struct io_bdt **io_bdt_pool;
403#define FCOE_PARAMS_NUM_TASKS 4096 405#define FCOE_PARAMS_NUM_TASKS 2048
404 struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS]; 406 struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS];
405 spinlock_t lock; 407 spinlock_t lock;
406 atomic_t free_list_cnt; 408 atomic_t free_list_cnt;
@@ -465,9 +467,8 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
465 unsigned int timer_msec); 467 unsigned int timer_msec);
466extern int qedf_init_mp_req(struct qedf_ioreq *io_req); 468extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
467extern void qedf_init_mp_task(struct qedf_ioreq *io_req, 469extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
468 struct fcoe_task_context *task_ctx); 470 struct fcoe_task_context *task_ctx, struct fcoe_wqe *wqe);
469extern void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid, 471extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
470 u32 ptu_invalidate, enum fcoe_task_type req_type, u32 offset);
471extern void qedf_ring_doorbell(struct qedf_rport *fcport); 472extern void qedf_ring_doorbell(struct qedf_rport *fcport);
472extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, 473extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
473 struct qedf_ioreq *els_req); 474 struct qedf_ioreq *els_req);
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 59f3e5c73a13..c505d41f6dc8 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -25,6 +25,9 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
25 uint16_t xid; 25 uint16_t xid;
26 uint32_t start_time = jiffies / HZ; 26 uint32_t start_time = jiffies / HZ;
27 uint32_t current_time; 27 uint32_t current_time;
28 struct fcoe_wqe *sqe;
29 unsigned long flags;
30 u16 sqe_idx;
28 31
29 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n"); 32 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");
30 33
@@ -113,20 +116,25 @@ retry_els:
113 /* Obtain exchange id */ 116 /* Obtain exchange id */
114 xid = els_req->xid; 117 xid = els_req->xid;
115 118
119 spin_lock_irqsave(&fcport->rport_lock, flags);
120
121 sqe_idx = qedf_get_sqe_idx(fcport);
122 sqe = &fcport->sq[sqe_idx];
123 memset(sqe, 0, sizeof(struct fcoe_wqe));
124
116 /* Initialize task context for this IO request */ 125 /* Initialize task context for this IO request */
117 task = qedf_get_task_mem(&qedf->tasks, xid); 126 task = qedf_get_task_mem(&qedf->tasks, xid);
118 qedf_init_mp_task(els_req, task); 127 qedf_init_mp_task(els_req, task, sqe);
119 128
120 /* Put timer on original I/O request */ 129 /* Put timer on original I/O request */
121 if (timer_msec) 130 if (timer_msec)
122 qedf_cmd_timer_set(qedf, els_req, timer_msec); 131 qedf_cmd_timer_set(qedf, els_req, timer_msec);
123 132
124 qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
125
126 /* Ring doorbell */ 133 /* Ring doorbell */
127 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS " 134 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
128 "req\n"); 135 "req\n");
129 qedf_ring_doorbell(fcport); 136 qedf_ring_doorbell(fcport);
137 spin_unlock_irqrestore(&fcport->rport_lock, flags);
130els_err: 138els_err:
131 return rc; 139 return rc;
132} 140}
@@ -604,6 +612,8 @@ static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
604 struct qedf_rport *fcport; 612 struct qedf_rport *fcport;
605 unsigned long flags; 613 unsigned long flags;
606 struct qedf_els_cb_arg *cb_arg; 614 struct qedf_els_cb_arg *cb_arg;
615 struct fcoe_wqe *sqe;
616 u16 sqe_idx;
607 617
608 fcport = orig_io_req->fcport; 618 fcport = orig_io_req->fcport;
609 619
@@ -631,8 +641,13 @@ static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
631 641
632 spin_lock_irqsave(&fcport->rport_lock, flags); 642 spin_lock_irqsave(&fcport->rport_lock, flags);
633 643
634 qedf_add_to_sq(fcport, orig_io_req->xid, 0, 644 sqe_idx = qedf_get_sqe_idx(fcport);
635 FCOE_TASK_TYPE_SEQUENCE_CLEANUP, offset); 645 sqe = &fcport->sq[sqe_idx];
646 memset(sqe, 0, sizeof(struct fcoe_wqe));
647 orig_io_req->task_params->sqe = sqe;
648
649 init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
650 offset);
636 qedf_ring_doorbell(fcport); 651 qedf_ring_doorbell(fcport);
637 652
638 spin_unlock_irqrestore(&fcport->rport_lock, flags); 653 spin_unlock_irqrestore(&fcport->rport_lock, flags);
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 46debe5034af..1d7f90d0adc1 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -96,7 +96,7 @@ void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
96 if (!cmgr->io_bdt_pool) 96 if (!cmgr->io_bdt_pool)
97 goto free_cmd_pool; 97 goto free_cmd_pool;
98 98
99 bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge); 99 bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
100 for (i = 0; i < num_ios; i++) { 100 for (i = 0; i < num_ios; i++) {
101 bdt_info = cmgr->io_bdt_pool[i]; 101 bdt_info = cmgr->io_bdt_pool[i];
102 if (bdt_info->bd_tbl) { 102 if (bdt_info->bd_tbl) {
@@ -119,6 +119,8 @@ free_cmd_pool:
119 119
120 for (i = 0; i < num_ios; i++) { 120 for (i = 0; i < num_ios; i++) {
121 io_req = &cmgr->cmds[i]; 121 io_req = &cmgr->cmds[i];
122 kfree(io_req->sgl_task_params);
123 kfree(io_req->task_params);
122 /* Make sure we free per command sense buffer */ 124 /* Make sure we free per command sense buffer */
123 if (io_req->sense_buffer) 125 if (io_req->sense_buffer)
124 dma_free_coherent(&qedf->pdev->dev, 126 dma_free_coherent(&qedf->pdev->dev,
@@ -178,7 +180,7 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
178 spin_lock_init(&cmgr->lock); 180 spin_lock_init(&cmgr->lock);
179 181
180 /* 182 /*
181 * Initialize list of qedf_ioreq. 183 * Initialize I/O request fields.
182 */ 184 */
183 xid = QEDF_MIN_XID; 185 xid = QEDF_MIN_XID;
184 186
@@ -196,6 +198,29 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
196 GFP_KERNEL); 198 GFP_KERNEL);
197 if (!io_req->sense_buffer) 199 if (!io_req->sense_buffer)
198 goto mem_err; 200 goto mem_err;
201
 202 /* Allocate task parameters to pass to f/w init functions */
203 io_req->task_params = kzalloc(sizeof(*io_req->task_params),
204 GFP_KERNEL);
205 if (!io_req->task_params) {
206 QEDF_ERR(&(qedf->dbg_ctx),
207 "Failed to allocate task_params for xid=0x%x\n",
208 i);
209 goto mem_err;
210 }
211
212 /*
213 * Allocate scatter/gather list info to pass to f/w init
214 * functions.
215 */
216 io_req->sgl_task_params = kzalloc(
217 sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
218 if (!io_req->sgl_task_params) {
219 QEDF_ERR(&(qedf->dbg_ctx),
220 "Failed to allocate sgl_task_params for xid=0x%x\n",
221 i);
222 goto mem_err;
223 }
199 } 224 }
200 225
201 /* Allocate pool of io_bdts - one for each qedf_ioreq */ 226 /* Allocate pool of io_bdts - one for each qedf_ioreq */
@@ -211,8 +236,8 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
211 cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt), 236 cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
212 GFP_KERNEL); 237 GFP_KERNEL);
213 if (!cmgr->io_bdt_pool[i]) { 238 if (!cmgr->io_bdt_pool[i]) {
214 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc " 239 QEDF_WARN(&(qedf->dbg_ctx),
215 "io_bdt_pool[%d].\n", i); 240 "Failed to alloc io_bdt_pool[%d].\n", i);
216 goto mem_err; 241 goto mem_err;
217 } 242 }
218 } 243 }
@@ -220,11 +245,11 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
220 for (i = 0; i < num_ios; i++) { 245 for (i = 0; i < num_ios; i++) {
221 bdt_info = cmgr->io_bdt_pool[i]; 246 bdt_info = cmgr->io_bdt_pool[i];
222 bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev, 247 bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
223 QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge), 248 QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
224 &bdt_info->bd_tbl_dma, GFP_KERNEL); 249 &bdt_info->bd_tbl_dma, GFP_KERNEL);
225 if (!bdt_info->bd_tbl) { 250 if (!bdt_info->bd_tbl) {
226 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc " 251 QEDF_WARN(&(qedf->dbg_ctx),
227 "bdt_tbl[%d].\n", i); 252 "Failed to alloc bdt_tbl[%d].\n", i);
228 goto mem_err; 253 goto mem_err;
229 } 254 }
230 } 255 }
@@ -318,6 +343,7 @@ struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
318 } 343 }
319 bd_tbl->io_req = io_req; 344 bd_tbl->io_req = io_req;
320 io_req->cmd_type = cmd_type; 345 io_req->cmd_type = cmd_type;
346 io_req->tm_flags = 0;
321 347
322 /* Reset sequence offset data */ 348 /* Reset sequence offset data */
323 io_req->rx_buf_off = 0; 349 io_req->rx_buf_off = 0;
@@ -336,10 +362,9 @@ static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
336{ 362{
337 struct qedf_mp_req *mp_req = &(io_req->mp_req); 363 struct qedf_mp_req *mp_req = &(io_req->mp_req);
338 struct qedf_ctx *qedf = io_req->fcport->qedf; 364 struct qedf_ctx *qedf = io_req->fcport->qedf;
339 uint64_t sz = sizeof(struct fcoe_sge); 365 uint64_t sz = sizeof(struct scsi_sge);
340 366
341 /* clear tm flags */ 367 /* clear tm flags */
342 mp_req->tm_flags = 0;
343 if (mp_req->mp_req_bd) { 368 if (mp_req->mp_req_bd) {
344 dma_free_coherent(&qedf->pdev->dev, sz, 369 dma_free_coherent(&qedf->pdev->dev, sz,
345 mp_req->mp_req_bd, mp_req->mp_req_bd_dma); 370 mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
@@ -387,7 +412,7 @@ void qedf_release_cmd(struct kref *ref)
387static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len, 412static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
388 int bd_index) 413 int bd_index)
389{ 414{
390 struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl; 415 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
391 int frag_size, sg_frags; 416 int frag_size, sg_frags;
392 417
393 sg_frags = 0; 418 sg_frags = 0;
@@ -398,7 +423,7 @@ static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
398 frag_size = sg_len; 423 frag_size = sg_len;
399 bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr); 424 bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
400 bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr); 425 bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
401 bd[bd_index + sg_frags].size = (uint16_t)frag_size; 426 bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;
402 427
403 addr += (u64)frag_size; 428 addr += (u64)frag_size;
404 sg_frags++; 429 sg_frags++;
@@ -413,7 +438,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
413 struct Scsi_Host *host = sc->device->host; 438 struct Scsi_Host *host = sc->device->host;
414 struct fc_lport *lport = shost_priv(host); 439 struct fc_lport *lport = shost_priv(host);
415 struct qedf_ctx *qedf = lport_priv(lport); 440 struct qedf_ctx *qedf = lport_priv(lport);
416 struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl; 441 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
417 struct scatterlist *sg; 442 struct scatterlist *sg;
418 int byte_count = 0; 443 int byte_count = 0;
419 int sg_count = 0; 444 int sg_count = 0;
@@ -439,7 +464,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
439 464
440 bd[bd_count].sge_addr.lo = (addr & 0xffffffff); 465 bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
441 bd[bd_count].sge_addr.hi = (addr >> 32); 466 bd[bd_count].sge_addr.hi = (addr >> 32);
442 bd[bd_count].size = (u16)sg_len; 467 bd[bd_count].sge_len = (u16)sg_len;
443 468
444 return ++bd_count; 469 return ++bd_count;
445 } 470 }
@@ -480,7 +505,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
480 sg_frags = 1; 505 sg_frags = 1;
481 bd[bd_count].sge_addr.lo = U64_LO(addr); 506 bd[bd_count].sge_addr.lo = U64_LO(addr);
482 bd[bd_count].sge_addr.hi = U64_HI(addr); 507 bd[bd_count].sge_addr.hi = U64_HI(addr);
483 bd[bd_count].size = (uint16_t)sg_len; 508 bd[bd_count].sge_len = (uint16_t)sg_len;
484 } 509 }
485 510
486 bd_count += sg_frags; 511 bd_count += sg_frags;
@@ -498,7 +523,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
498static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req) 523static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
499{ 524{
500 struct scsi_cmnd *sc = io_req->sc_cmd; 525 struct scsi_cmnd *sc = io_req->sc_cmd;
501 struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl; 526 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
502 int bd_count; 527 int bd_count;
503 528
504 if (scsi_sg_count(sc)) { 529 if (scsi_sg_count(sc)) {
@@ -508,7 +533,7 @@ static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
508 } else { 533 } else {
509 bd_count = 0; 534 bd_count = 0;
510 bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0; 535 bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
511 bd[0].size = 0; 536 bd[0].sge_len = 0;
512 } 537 }
513 io_req->bd_tbl->bd_valid = bd_count; 538 io_req->bd_tbl->bd_valid = bd_count;
514 539
@@ -529,430 +554,223 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
529 554
530 /* 4 bytes: flag info */ 555 /* 4 bytes: flag info */
531 fcp_cmnd->fc_pri_ta = 0; 556 fcp_cmnd->fc_pri_ta = 0;
532 fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags; 557 fcp_cmnd->fc_tm_flags = io_req->tm_flags;
533 fcp_cmnd->fc_flags = io_req->io_req_flags; 558 fcp_cmnd->fc_flags = io_req->io_req_flags;
534 fcp_cmnd->fc_cmdref = 0; 559 fcp_cmnd->fc_cmdref = 0;
535 560
536 /* Populate data direction */ 561 /* Populate data direction */
537 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) 562 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
538 fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
539 else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
540 fcp_cmnd->fc_flags |= FCP_CFL_RDDATA; 563 fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
564 } else {
565 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
566 fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
567 else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
568 fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
569 }
541 570
542 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; 571 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
543 572
544 /* 16 bytes: CDB information */ 573 /* 16 bytes: CDB information */
545 memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len); 574 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
575 memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
546 576
547 /* 4 bytes: FCP data length */ 577 /* 4 bytes: FCP data length */
548 fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len); 578 fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
549
550} 579}
551 580
552static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport, 581static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
553 struct qedf_ioreq *io_req, u32 *ptu_invalidate, 582 struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
554 struct fcoe_task_context *task_ctx) 583 struct fcoe_wqe *sqe)
555{ 584{
556 enum fcoe_task_type task_type; 585 enum fcoe_task_type task_type;
557 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 586 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
558 struct io_bdt *bd_tbl = io_req->bd_tbl; 587 struct io_bdt *bd_tbl = io_req->bd_tbl;
559 union fcoe_data_desc_ctx *data_desc; 588 u8 fcp_cmnd[32];
560 u32 *fcp_cmnd;
561 u32 tmp_fcp_cmnd[8]; 589 u32 tmp_fcp_cmnd[8];
562 int cnt, i; 590 int bd_count = 0;
563 int bd_count;
564 struct qedf_ctx *qedf = fcport->qedf; 591 struct qedf_ctx *qedf = fcport->qedf;
565 uint16_t cq_idx = smp_processor_id() % qedf->num_queues; 592 uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
566 u8 tmp_sgl_mode = 0; 593 struct regpair sense_data_buffer_phys_addr;
567 u8 mst_sgl_mode = 0; 594 u32 tx_io_size = 0;
595 u32 rx_io_size = 0;
596 int i, cnt;
568 597
569 memset(task_ctx, 0, sizeof(struct fcoe_task_context)); 598 /* Note init_initiator_rw_fcoe_task memsets the task context */
570 io_req->task = task_ctx; 599 io_req->task = task_ctx;
600 memset(task_ctx, 0, sizeof(struct fcoe_task_context));
601 memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
602 memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
571 603
 572 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) 604 /* Set task type based on DMA direction of command */
573 task_type = FCOE_TASK_TYPE_WRITE_INITIATOR; 605 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
574 else
575 task_type = FCOE_TASK_TYPE_READ_INITIATOR; 606 task_type = FCOE_TASK_TYPE_READ_INITIATOR;
576
577 /* Y Storm context */
578 task_ctx->ystorm_st_context.expect_first_xfer = 1;
579 task_ctx->ystorm_st_context.data_2_trns_rem = io_req->data_xfer_len;
580 /* Check if this is required */
581 task_ctx->ystorm_st_context.ox_id = io_req->xid;
582 task_ctx->ystorm_st_context.task_rety_identifier =
583 io_req->task_retry_identifier;
584
585 /* T Storm ag context */
586 SET_FIELD(task_ctx->tstorm_ag_context.flags0,
587 TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, PROTOCOLID_FCOE);
588 task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
589
590 /* T Storm st context */
591 SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
592 FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
593 1);
594 task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
595
596 task_ctx->tstorm_st_context.read_only.dev_type =
597 FCOE_TASK_DEV_TYPE_DISK;
598 task_ctx->tstorm_st_context.read_only.conf_supported = 0;
599 task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
600
601 /* Completion queue for response. */
602 task_ctx->tstorm_st_context.read_only.glbl_q_num = cq_idx;
603 task_ctx->tstorm_st_context.read_only.fcp_cmd_trns_size =
604 io_req->data_xfer_len;
605 task_ctx->tstorm_st_context.read_write.e_d_tov_exp_timeout_val =
606 lport->e_d_tov;
607
608 task_ctx->ustorm_ag_context.global_cq_num = cq_idx;
609 io_req->fp_idx = cq_idx;
610
611 bd_count = bd_tbl->bd_valid;
612 if (task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
613 /* Setup WRITE task */
614 struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
615
616 task_ctx->ystorm_st_context.task_type =
617 FCOE_TASK_TYPE_WRITE_INITIATOR;
618 data_desc = &task_ctx->ystorm_st_context.data_desc;
619
620 if (io_req->use_slowpath) {
621 SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
622 YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
623 FCOE_SLOW_SGL);
624 data_desc->slow.base_sgl_addr.lo =
625 U64_LO(bd_tbl->bd_tbl_dma);
626 data_desc->slow.base_sgl_addr.hi =
627 U64_HI(bd_tbl->bd_tbl_dma);
628 data_desc->slow.remainder_num_sges = bd_count;
629 data_desc->slow.curr_sge_off = 0;
630 data_desc->slow.curr_sgl_index = 0;
631 qedf->slow_sge_ios++;
632 io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
633 } else {
634 SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
635 YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
636 (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
637 FCOE_MUL_FAST_SGES);
638
639 if (bd_count == 1) {
640 data_desc->single_sge.sge_addr.lo =
641 fcoe_bd_tbl->sge_addr.lo;
642 data_desc->single_sge.sge_addr.hi =
643 fcoe_bd_tbl->sge_addr.hi;
644 data_desc->single_sge.size =
645 fcoe_bd_tbl->size;
646 data_desc->single_sge.is_valid_sge = 0;
647 qedf->single_sge_ios++;
648 io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
649 } else {
650 data_desc->fast.sgl_start_addr.lo =
651 U64_LO(bd_tbl->bd_tbl_dma);
652 data_desc->fast.sgl_start_addr.hi =
653 U64_HI(bd_tbl->bd_tbl_dma);
654 data_desc->fast.sgl_byte_offset =
655 data_desc->fast.sgl_start_addr.lo &
656 (QEDF_PAGE_SIZE - 1);
657 if (data_desc->fast.sgl_byte_offset > 0)
658 QEDF_ERR(&(qedf->dbg_ctx),
659 "byte_offset=%u for xid=0x%x.\n",
660 io_req->xid,
661 data_desc->fast.sgl_byte_offset);
662 data_desc->fast.task_reuse_cnt =
663 io_req->reuse_count;
664 io_req->reuse_count++;
665 if (io_req->reuse_count == QEDF_MAX_REUSE) {
666 *ptu_invalidate = 1;
667 io_req->reuse_count = 0;
668 }
669 qedf->fast_sge_ios++;
670 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
671 }
672 }
673
674 /* T Storm context */
675 task_ctx->tstorm_st_context.read_only.task_type =
676 FCOE_TASK_TYPE_WRITE_INITIATOR;
677
678 /* M Storm context */
679 tmp_sgl_mode = GET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
680 YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE);
681 SET_FIELD(task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
682 FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE,
683 tmp_sgl_mode);
684
685 } else { 607 } else {
686 /* Setup READ task */ 608 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
687 609 task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
688 /* M Storm context */ 610 tx_io_size = io_req->data_xfer_len;
689 struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
690
691 data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
692 task_ctx->mstorm_st_context.fp.data_2_trns_rem =
693 io_req->data_xfer_len;
694
695 if (io_req->use_slowpath) {
696 SET_FIELD(
697 task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
698 FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
699 FCOE_SLOW_SGL);
700 data_desc->slow.base_sgl_addr.lo =
701 U64_LO(bd_tbl->bd_tbl_dma);
702 data_desc->slow.base_sgl_addr.hi =
703 U64_HI(bd_tbl->bd_tbl_dma);
704 data_desc->slow.remainder_num_sges =
705 bd_count;
706 data_desc->slow.curr_sge_off = 0;
707 data_desc->slow.curr_sgl_index = 0;
708 qedf->slow_sge_ios++;
709 io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
710 } else { 611 } else {
711 SET_FIELD( 612 task_type = FCOE_TASK_TYPE_READ_INITIATOR;
712 task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode, 613 rx_io_size = io_req->data_xfer_len;
713 FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
714 (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
715 FCOE_MUL_FAST_SGES);
716
717 if (bd_count == 1) {
718 data_desc->single_sge.sge_addr.lo =
719 fcoe_bd_tbl->sge_addr.lo;
720 data_desc->single_sge.sge_addr.hi =
721 fcoe_bd_tbl->sge_addr.hi;
722 data_desc->single_sge.size =
723 fcoe_bd_tbl->size;
724 data_desc->single_sge.is_valid_sge = 0;
725 qedf->single_sge_ios++;
726 io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
727 } else {
728 data_desc->fast.sgl_start_addr.lo =
729 U64_LO(bd_tbl->bd_tbl_dma);
730 data_desc->fast.sgl_start_addr.hi =
731 U64_HI(bd_tbl->bd_tbl_dma);
732 data_desc->fast.sgl_byte_offset = 0;
733 data_desc->fast.task_reuse_cnt =
734 io_req->reuse_count;
735 io_req->reuse_count++;
736 if (io_req->reuse_count == QEDF_MAX_REUSE) {
737 *ptu_invalidate = 1;
738 io_req->reuse_count = 0;
739 }
740 qedf->fast_sge_ios++;
741 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
742 }
743 } 614 }
744
745 /* Y Storm context */
746 task_ctx->ystorm_st_context.expect_first_xfer = 0;
747 task_ctx->ystorm_st_context.task_type =
748 FCOE_TASK_TYPE_READ_INITIATOR;
749
750 /* T Storm context */
751 task_ctx->tstorm_st_context.read_only.task_type =
752 FCOE_TASK_TYPE_READ_INITIATOR;
753 mst_sgl_mode = GET_FIELD(
754 task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
755 FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE);
756 SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
757 FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
758 mst_sgl_mode);
759 } 615 }
760 616
617 /* Setup the fields for fcoe_task_params */
618 io_req->task_params->context = task_ctx;
619 io_req->task_params->sqe = sqe;
620 io_req->task_params->task_type = task_type;
621 io_req->task_params->tx_io_size = tx_io_size;
622 io_req->task_params->rx_io_size = rx_io_size;
623 io_req->task_params->conn_cid = fcport->fw_cid;
624 io_req->task_params->itid = io_req->xid;
625 io_req->task_params->cq_rss_number = cq_idx;
626 io_req->task_params->is_tape_device = fcport->dev_type;
627
628 /* Fill in information for scatter/gather list */
629 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
630 bd_count = bd_tbl->bd_valid;
631 io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
632 io_req->sgl_task_params->sgl_phys_addr.lo =
633 U64_LO(bd_tbl->bd_tbl_dma);
634 io_req->sgl_task_params->sgl_phys_addr.hi =
635 U64_HI(bd_tbl->bd_tbl_dma);
636 io_req->sgl_task_params->num_sges = bd_count;
637 io_req->sgl_task_params->total_buffer_size =
638 scsi_bufflen(io_req->sc_cmd);
639 io_req->sgl_task_params->small_mid_sge =
640 io_req->use_slowpath;
641 }
642
643 /* Fill in physical address of sense buffer */
644 sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
645 sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
646
761 /* fill FCP_CMND IU */ 647 /* fill FCP_CMND IU */
762 fcp_cmnd = (u32 *)task_ctx->ystorm_st_context.tx_info_union.fcp_cmd_payload.opaque; 648 qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
763 qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
764 649
765 /* Swap fcp_cmnd since FC is big endian */ 650 /* Swap fcp_cmnd since FC is big endian */
766 cnt = sizeof(struct fcp_cmnd) / sizeof(u32); 651 cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
767
768 for (i = 0; i < cnt; i++) { 652 for (i = 0; i < cnt; i++) {
769 *fcp_cmnd = cpu_to_be32(tmp_fcp_cmnd[i]); 653 tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
770 fcp_cmnd++; 654 }
655 memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
656
657 init_initiator_rw_fcoe_task(io_req->task_params,
658 io_req->sgl_task_params,
659 sense_data_buffer_phys_addr,
660 io_req->task_retry_identifier, fcp_cmnd);
661
662 /* Increment SGL type counters */
663 if (bd_count == 1) {
664 qedf->single_sge_ios++;
665 io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
666 } else if (io_req->use_slowpath) {
667 qedf->slow_sge_ios++;
668 io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
669 } else {
670 qedf->fast_sge_ios++;
671 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
771 } 672 }
772
773 /* M Storm context - Sense buffer */
774 task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
775 U64_LO(io_req->sense_buffer_dma);
776 task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
777 U64_HI(io_req->sense_buffer_dma);
778} 673}
779 674
780void qedf_init_mp_task(struct qedf_ioreq *io_req, 675void qedf_init_mp_task(struct qedf_ioreq *io_req,
781 struct fcoe_task_context *task_ctx) 676 struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
782{ 677{
783 struct qedf_mp_req *mp_req = &(io_req->mp_req); 678 struct qedf_mp_req *mp_req = &(io_req->mp_req);
784 struct qedf_rport *fcport = io_req->fcport; 679 struct qedf_rport *fcport = io_req->fcport;
785 struct qedf_ctx *qedf = io_req->fcport->qedf; 680 struct qedf_ctx *qedf = io_req->fcport->qedf;
786 struct fc_frame_header *fc_hdr; 681 struct fc_frame_header *fc_hdr;
787 enum fcoe_task_type task_type = 0; 682 struct fcoe_tx_mid_path_params task_fc_hdr;
788 union fcoe_data_desc_ctx *data_desc; 683 struct scsi_sgl_task_params tx_sgl_task_params;
684 struct scsi_sgl_task_params rx_sgl_task_params;
789 685
790 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Initializing MP task " 686 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
791 "for cmd_type = %d\n", io_req->cmd_type); 687 "Initializing MP task for cmd_type=%d\n",
688 io_req->cmd_type);
792 689
793 qedf->control_requests++; 690 qedf->control_requests++;
794 691
795 /* Obtain task_type */ 692 memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
796 if ((io_req->cmd_type == QEDF_TASK_MGMT_CMD) || 693 memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
797 (io_req->cmd_type == QEDF_ELS)) {
798 task_type = FCOE_TASK_TYPE_MIDPATH;
799 } else if (io_req->cmd_type == QEDF_ABTS) {
800 task_type = FCOE_TASK_TYPE_ABTS;
801 }
802
803 memset(task_ctx, 0, sizeof(struct fcoe_task_context)); 694 memset(task_ctx, 0, sizeof(struct fcoe_task_context));
695 memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
804 696
805 /* Setup the task from io_req for easy reference */ 697 /* Setup the task from io_req for easy reference */
806 io_req->task = task_ctx; 698 io_req->task = task_ctx;
807 699
808 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "task type = %d\n", 700 /* Setup the fields for fcoe_task_params */
809 task_type); 701 io_req->task_params->context = task_ctx;
810 702 io_req->task_params->sqe = sqe;
811 /* YSTORM only */ 703 io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
812 { 704 io_req->task_params->tx_io_size = io_req->data_xfer_len;
813 /* Initialize YSTORM task context */ 705 /* rx_io_size tells the f/w how large a response buffer we have */
814 struct fcoe_tx_mid_path_params *task_fc_hdr = 706 io_req->task_params->rx_io_size = PAGE_SIZE;
815 &task_ctx->ystorm_st_context.tx_info_union.tx_params.mid_path; 707 io_req->task_params->conn_cid = fcport->fw_cid;
816 memset(task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params)); 708 io_req->task_params->itid = io_req->xid;
817 task_ctx->ystorm_st_context.task_rety_identifier = 709 /* Return middle path commands on CQ 0 */
818 io_req->task_retry_identifier; 710 io_req->task_params->cq_rss_number = 0;
819 711 io_req->task_params->is_tape_device = fcport->dev_type;
820 /* Init SGL parameters */ 712
821 if ((task_type == FCOE_TASK_TYPE_MIDPATH) || 713 fc_hdr = &(mp_req->req_fc_hdr);
822 (task_type == FCOE_TASK_TYPE_UNSOLICITED)) { 714 /* Set OX_ID and RX_ID based on driver task id */
823 data_desc = &task_ctx->ystorm_st_context.data_desc; 715 fc_hdr->fh_ox_id = io_req->xid;
824 data_desc->slow.base_sgl_addr.lo = 716 fc_hdr->fh_rx_id = htons(0xffff);
825 U64_LO(mp_req->mp_req_bd_dma); 717
826 data_desc->slow.base_sgl_addr.hi = 718 /* Set up FC header information */
827 U64_HI(mp_req->mp_req_bd_dma); 719 task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
828 data_desc->slow.remainder_num_sges = 1; 720 task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
829 data_desc->slow.curr_sge_off = 0; 721 task_fc_hdr.type = fc_hdr->fh_type;
830 data_desc->slow.curr_sgl_index = 0; 722 task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
831 } 723 task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
832 724 task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
833 fc_hdr = &(mp_req->req_fc_hdr); 725 task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
834 if (task_type == FCOE_TASK_TYPE_MIDPATH) { 726
835 fc_hdr->fh_ox_id = io_req->xid; 727 /* Set up s/g list parameters for request buffer */
836 fc_hdr->fh_rx_id = htons(0xffff); 728 tx_sgl_task_params.sgl = mp_req->mp_req_bd;
837 } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) { 729 tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
838 fc_hdr->fh_rx_id = io_req->xid; 730 tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
839 } 731 tx_sgl_task_params.num_sges = 1;
732 /* Set PAGE_SIZE for now since sg element is that size ??? */
733 tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
734 tx_sgl_task_params.small_mid_sge = 0;
735
736 /* Set up s/g list parameters for request buffer */
737 rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
738 rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
739 rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
740 rx_sgl_task_params.num_sges = 1;
741 /* Set PAGE_SIZE for now since sg element is that size ??? */
742 rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
743 rx_sgl_task_params.small_mid_sge = 0;
840 744
841 /* Fill FC Header into middle path buffer */
842 task_fc_hdr->parameter = fc_hdr->fh_parm_offset;
843 task_fc_hdr->r_ctl = fc_hdr->fh_r_ctl;
844 task_fc_hdr->type = fc_hdr->fh_type;
845 task_fc_hdr->cs_ctl = fc_hdr->fh_cs_ctl;
846 task_fc_hdr->df_ctl = fc_hdr->fh_df_ctl;
847 task_fc_hdr->rx_id = fc_hdr->fh_rx_id;
848 task_fc_hdr->ox_id = fc_hdr->fh_ox_id;
849
850 task_ctx->ystorm_st_context.data_2_trns_rem =
851 io_req->data_xfer_len;
852 task_ctx->ystorm_st_context.task_type = task_type;
853 }
854
855 /* TSTORM ONLY */
856 {
857 task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
858 task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
859 /* Always send middle-path repsonses on CQ #0 */
860 task_ctx->tstorm_st_context.read_only.glbl_q_num = 0;
861 io_req->fp_idx = 0;
862 SET_FIELD(task_ctx->tstorm_ag_context.flags0,
863 TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE,
864 PROTOCOLID_FCOE);
865 task_ctx->tstorm_st_context.read_only.task_type = task_type;
866 SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
867 FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
868 1);
869 task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
870 }
871
872 /* MSTORM only */
873 {
874 if (task_type == FCOE_TASK_TYPE_MIDPATH) {
875 /* Initialize task context */
876 data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
877
878 /* Set cache sges address and length */
879 data_desc->slow.base_sgl_addr.lo =
880 U64_LO(mp_req->mp_resp_bd_dma);
881 data_desc->slow.base_sgl_addr.hi =
882 U64_HI(mp_req->mp_resp_bd_dma);
883 data_desc->slow.remainder_num_sges = 1;
884 data_desc->slow.curr_sge_off = 0;
885 data_desc->slow.curr_sgl_index = 0;
886 745
887 /* 746 /*
 888 * Also need to fill in non-fastpath response address 747 * Last arg is 0 as the previous code did not indicate that we
 889 * for middle path commands. 748 * wanted the fc header information.
890 */ 749 */
891 task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo = 750 init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
892 U64_LO(mp_req->mp_resp_bd_dma); 751 &task_fc_hdr,
893 task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi = 752 &tx_sgl_task_params,
894 U64_HI(mp_req->mp_resp_bd_dma); 753 &rx_sgl_task_params, 0);
895 }
896 }
897
898 /* USTORM ONLY */
899 {
900 task_ctx->ustorm_ag_context.global_cq_num = 0;
901 }
902 754
903 /* I/O stats. Middle path commands always use slow SGEs */ 755 /* Midpath requests always consume 1 SGE */
904 qedf->slow_sge_ios++; 756 qedf->single_sge_ios++;
905 io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
906} 757}
907 758
908void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid, u32 ptu_invalidate, 759/* Presumed that fcport->rport_lock is held */
909 enum fcoe_task_type req_type, u32 offset) 760u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
910{ 761{
911 struct fcoe_wqe *sqe;
912 uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe)); 762 uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
763 u16 rval;
913 764
914 sqe = &fcport->sq[fcport->sq_prod_idx]; 765 rval = fcport->sq_prod_idx;
915 766
767 /* Adjust ring index */
916 fcport->sq_prod_idx++; 768 fcport->sq_prod_idx++;
917 fcport->fw_sq_prod_idx++; 769 fcport->fw_sq_prod_idx++;
918 if (fcport->sq_prod_idx == total_sqe) 770 if (fcport->sq_prod_idx == total_sqe)
919 fcport->sq_prod_idx = 0; 771 fcport->sq_prod_idx = 0;
920 772
921 switch (req_type) { 773 return rval;
922 case FCOE_TASK_TYPE_WRITE_INITIATOR:
923 case FCOE_TASK_TYPE_READ_INITIATOR:
924 SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_CMD);
925 if (ptu_invalidate)
926 SET_FIELD(sqe->flags, FCOE_WQE_INVALIDATE_PTU, 1);
927 break;
928 case FCOE_TASK_TYPE_MIDPATH:
929 SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_MIDPATH);
930 break;
931 case FCOE_TASK_TYPE_ABTS:
932 SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
933 SEND_FCOE_ABTS_REQUEST);
934 break;
935 case FCOE_TASK_TYPE_EXCHANGE_CLEANUP:
936 SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
937 FCOE_EXCHANGE_CLEANUP);
938 break;
939 case FCOE_TASK_TYPE_SEQUENCE_CLEANUP:
940 SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
941 FCOE_SEQUENCE_RECOVERY);
942 /* NOTE: offset param only used for sequence recovery */
943 sqe->additional_info_union.seq_rec_updated_offset = offset;
944 break;
945 case FCOE_TASK_TYPE_UNSOLICITED:
946 break;
947 default:
948 break;
949 }
950
951 sqe->task_id = xid;
952
953 /* Make sure SQ data is coherent */
954 wmb();
955
956} 774}
957 775
958void qedf_ring_doorbell(struct qedf_rport *fcport) 776void qedf_ring_doorbell(struct qedf_rport *fcport)
@@ -1029,7 +847,8 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
1029 struct fcoe_task_context *task_ctx; 847 struct fcoe_task_context *task_ctx;
1030 u16 xid; 848 u16 xid;
1031 enum fcoe_task_type req_type = 0; 849 enum fcoe_task_type req_type = 0;
1032 u32 ptu_invalidate = 0; 850 struct fcoe_wqe *sqe;
851 u16 sqe_idx;
1033 852
 1034 /* Initialize rest of io_req fields */ 853 /* Initialize rest of io_req fields */
1035 io_req->data_xfer_len = scsi_bufflen(sc_cmd); 854 io_req->data_xfer_len = scsi_bufflen(sc_cmd);
@@ -1061,6 +880,16 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
1061 return -EAGAIN; 880 return -EAGAIN;
1062 } 881 }
1063 882
883 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
884 QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
885 kref_put(&io_req->refcount, qedf_release_cmd);
886 }
887
888 /* Obtain free SQE */
889 sqe_idx = qedf_get_sqe_idx(fcport);
890 sqe = &fcport->sq[sqe_idx];
891 memset(sqe, 0, sizeof(struct fcoe_wqe));
892
1064 /* Get the task context */ 893 /* Get the task context */
1065 task_ctx = qedf_get_task_mem(&qedf->tasks, xid); 894 task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
1066 if (!task_ctx) { 895 if (!task_ctx) {
@@ -1070,15 +899,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
1070 return -EINVAL; 899 return -EINVAL;
1071 } 900 }
1072 901
1073 qedf_init_task(fcport, lport, io_req, &ptu_invalidate, task_ctx); 902 qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
1074
1075 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1076 QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
1077 kref_put(&io_req->refcount, qedf_release_cmd);
1078 }
1079
1080 /* Obtain free SQ entry */
1081 qedf_add_to_sq(fcport, xid, ptu_invalidate, req_type, 0);
1082 903
1083 /* Ring doorbell */ 904 /* Ring doorbell */
1084 qedf_ring_doorbell(fcport); 905 qedf_ring_doorbell(fcport);
@@ -1661,6 +1482,8 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1661 u32 r_a_tov = 0; 1482 u32 r_a_tov = 0;
1662 int rc = 0; 1483 int rc = 0;
1663 unsigned long flags; 1484 unsigned long flags;
1485 struct fcoe_wqe *sqe;
1486 u16 sqe_idx;
1664 1487
1665 r_a_tov = rdata->r_a_tov; 1488 r_a_tov = rdata->r_a_tov;
1666 lport = qedf->lport; 1489 lport = qedf->lport;
@@ -1712,10 +1535,12 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1712 1535
1713 spin_lock_irqsave(&fcport->rport_lock, flags); 1536 spin_lock_irqsave(&fcport->rport_lock, flags);
1714 1537
1715 /* Add ABTS to send queue */ 1538 sqe_idx = qedf_get_sqe_idx(fcport);
1716 qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_ABTS, 0); 1539 sqe = &fcport->sq[sqe_idx];
1540 memset(sqe, 0, sizeof(struct fcoe_wqe));
1541 io_req->task_params->sqe = sqe;
1717 1542
1718 /* Ring doorbell */ 1543 init_initiator_abort_fcoe_task(io_req->task_params);
1719 qedf_ring_doorbell(fcport); 1544 qedf_ring_doorbell(fcport);
1720 1545
1721 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1546 spin_unlock_irqrestore(&fcport->rport_lock, flags);
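With the WQE builders gone, each submission path now follows the same sequence under the rport lock: reserve a slot, zero it, point the task parameters at it, let the firmware init helper fill it, then ring the doorbell. A compact userspace model of that sequence, assuming nothing beyond what the diff shows (a mutex stands in for the irqsave spinlock; all names are illustrative):

#include <pthread.h>
#include <stdint.h>
#include <string.h>

struct wqe_model { uint32_t dw[8]; };            /* stand-in for struct fcoe_wqe */

struct port_model {
	pthread_mutex_t lock;                    /* models fcport->rport_lock */
	struct wqe_model sq[64];
	uint16_t prod;
};

struct task_params_model { struct wqe_model *sqe; };

static uint16_t reserve_slot(struct port_model *p)
{
	uint16_t idx = p->prod;

	if (++p->prod == 64)
		p->prod = 0;
	return idx;
}

static void submit(struct port_model *p, struct task_params_model *tp,
		   void (*init_task)(struct task_params_model *),
		   void (*ring_doorbell)(struct port_model *))
{
	pthread_mutex_lock(&p->lock);
	tp->sqe = &p->sq[reserve_slot(p)];       /* reserve and address the slot */
	memset(tp->sqe, 0, sizeof(*tp->sqe));    /* stale contents must not reach firmware */
	init_task(tp);                           /* init helper fills the WQE */
	ring_doorbell(p);                        /* publish the new producer index */
	pthread_mutex_unlock(&p->lock);
}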
@@ -1784,8 +1609,8 @@ void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1784int qedf_init_mp_req(struct qedf_ioreq *io_req) 1609int qedf_init_mp_req(struct qedf_ioreq *io_req)
1785{ 1610{
1786 struct qedf_mp_req *mp_req; 1611 struct qedf_mp_req *mp_req;
1787 struct fcoe_sge *mp_req_bd; 1612 struct scsi_sge *mp_req_bd;
1788 struct fcoe_sge *mp_resp_bd; 1613 struct scsi_sge *mp_resp_bd;
1789 struct qedf_ctx *qedf = io_req->fcport->qedf; 1614 struct qedf_ctx *qedf = io_req->fcport->qedf;
1790 dma_addr_t addr; 1615 dma_addr_t addr;
1791 uint64_t sz; 1616 uint64_t sz;
@@ -1819,7 +1644,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req)
1819 } 1644 }
1820 1645
1821 /* Allocate and map mp_req_bd and mp_resp_bd */ 1646 /* Allocate and map mp_req_bd and mp_resp_bd */
1822 sz = sizeof(struct fcoe_sge); 1647 sz = sizeof(struct scsi_sge);
1823 mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz, 1648 mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
1824 &mp_req->mp_req_bd_dma, GFP_KERNEL); 1649 &mp_req->mp_req_bd_dma, GFP_KERNEL);
1825 if (!mp_req->mp_req_bd) { 1650 if (!mp_req->mp_req_bd) {
@@ -1841,7 +1666,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req)
1841 mp_req_bd = mp_req->mp_req_bd; 1666 mp_req_bd = mp_req->mp_req_bd;
1842 mp_req_bd->sge_addr.lo = U64_LO(addr); 1667 mp_req_bd->sge_addr.lo = U64_LO(addr);
1843 mp_req_bd->sge_addr.hi = U64_HI(addr); 1668 mp_req_bd->sge_addr.hi = U64_HI(addr);
1844 mp_req_bd->size = QEDF_PAGE_SIZE; 1669 mp_req_bd->sge_len = QEDF_PAGE_SIZE;
1845 1670
1846 /* 1671 /*
1847 * MP buffer is either a task mgmt command or an ELS. 1672 * MP buffer is either a task mgmt command or an ELS.
@@ -1852,7 +1677,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req)
1852 addr = mp_req->resp_buf_dma; 1677 addr = mp_req->resp_buf_dma;
1853 mp_resp_bd->sge_addr.lo = U64_LO(addr); 1678 mp_resp_bd->sge_addr.lo = U64_LO(addr);
1854 mp_resp_bd->sge_addr.hi = U64_HI(addr); 1679 mp_resp_bd->sge_addr.hi = U64_HI(addr);
1855 mp_resp_bd->size = QEDF_PAGE_SIZE; 1680 mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
1856 1681
1857 return 0; 1682 return 0;
1858} 1683}
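The hunks above track a firmware interface rename: the FCoE-specific struct fcoe_sge becomes the shared struct scsi_sge, and its ->size field becomes ->sge_len. The way a 64-bit DMA address is split into the descriptor is unchanged; a small illustrative model of that fill (field names mirror the diff, not the real qed headers):

#include <stdint.h>

struct scsi_sge_model {
	struct { uint32_t lo, hi; } sge_addr;
	uint32_t sge_len;                        /* was ->size before the rename */
};

static void fill_sge(struct scsi_sge_model *sge, uint64_t dma_addr, uint32_t len)
{
	sge->sge_addr.lo = (uint32_t)dma_addr;          /* U64_LO(addr) in the driver */
	sge->sge_addr.hi = (uint32_t)(dma_addr >> 32);  /* U64_HI(addr) in the driver */
	sge->sge_len = len;
}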
@@ -1895,6 +1720,8 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
1895 int tmo = 0; 1720 int tmo = 0;
1896 int rc = SUCCESS; 1721 int rc = SUCCESS;
1897 unsigned long flags; 1722 unsigned long flags;
1723 struct fcoe_wqe *sqe;
1724 u16 sqe_idx;
1898 1725
1899 fcport = io_req->fcport; 1726 fcport = io_req->fcport;
1900 if (!fcport) { 1727 if (!fcport) {
@@ -1940,12 +1767,16 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
1940 1767
1941 init_completion(&io_req->tm_done); 1768 init_completion(&io_req->tm_done);
1942 1769
1943 /* Obtain free SQ entry */
1944 spin_lock_irqsave(&fcport->rport_lock, flags); 1770 spin_lock_irqsave(&fcport->rport_lock, flags);
1945 qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_EXCHANGE_CLEANUP, 0);
1946 1771
1947 /* Ring doorbell */ 1772 sqe_idx = qedf_get_sqe_idx(fcport);
1773 sqe = &fcport->sq[sqe_idx];
1774 memset(sqe, 0, sizeof(struct fcoe_wqe));
1775 io_req->task_params->sqe = sqe;
1776
1777 init_initiator_cleanup_fcoe_task(io_req->task_params);
1948 qedf_ring_doorbell(fcport); 1778 qedf_ring_doorbell(fcport);
1779
1949 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1780 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1950 1781
1951 tmo = wait_for_completion_timeout(&io_req->tm_done, 1782 tmo = wait_for_completion_timeout(&io_req->tm_done,
@@ -1991,16 +1822,15 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
1991 uint8_t tm_flags) 1822 uint8_t tm_flags)
1992{ 1823{
1993 struct qedf_ioreq *io_req; 1824 struct qedf_ioreq *io_req;
1994 struct qedf_mp_req *tm_req;
1995 struct fcoe_task_context *task; 1825 struct fcoe_task_context *task;
1996 struct fc_frame_header *fc_hdr;
1997 struct fcp_cmnd *fcp_cmnd;
1998 struct qedf_ctx *qedf = fcport->qedf; 1826 struct qedf_ctx *qedf = fcport->qedf;
1827 struct fc_lport *lport = qedf->lport;
1999 int rc = 0; 1828 int rc = 0;
2000 uint16_t xid; 1829 uint16_t xid;
2001 uint32_t sid, did;
2002 int tmo = 0; 1830 int tmo = 0;
2003 unsigned long flags; 1831 unsigned long flags;
1832 struct fcoe_wqe *sqe;
1833 u16 sqe_idx;
2004 1834
2005 if (!sc_cmd) { 1835 if (!sc_cmd) {
2006 QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n"); 1836 QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
@@ -2031,36 +1861,14 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
2031 /* Set the return CPU to be the same as the request one */ 1861 /* Set the return CPU to be the same as the request one */
2032 io_req->cpu = smp_processor_id(); 1862 io_req->cpu = smp_processor_id();
2033 1863
2034 tm_req = (struct qedf_mp_req *)&(io_req->mp_req);
2035
2036 rc = qedf_init_mp_req(io_req);
2037 if (rc == FAILED) {
2038 QEDF_ERR(&(qedf->dbg_ctx), "Task mgmt MP request init "
2039 "failed\n");
2040 kref_put(&io_req->refcount, qedf_release_cmd);
2041 goto reset_tmf_err;
2042 }
2043
2044 /* Set TM flags */ 1864 /* Set TM flags */
2045 io_req->io_req_flags = 0; 1865 io_req->io_req_flags = QEDF_READ;
2046 tm_req->tm_flags = tm_flags; 1866 io_req->data_xfer_len = 0;
1867 io_req->tm_flags = tm_flags;
2047 1868
2048 /* Default is to return a SCSI command when an error occurs */ 1869 /* Default is to return a SCSI command when an error occurs */
2049 io_req->return_scsi_cmd_on_abts = true; 1870 io_req->return_scsi_cmd_on_abts = true;
2050 1871
2051 /* Fill FCP_CMND */
2052 qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
2053 fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
2054 memset(fcp_cmnd->fc_cdb, 0, FCP_CMND_LEN);
2055 fcp_cmnd->fc_dl = 0;
2056
2057 /* Fill FC header */
2058 fc_hdr = &(tm_req->req_fc_hdr);
2059 sid = fcport->sid;
2060 did = fcport->rdata->ids.port_id;
2061 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, sid, did,
2062 FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
2063 FC_FC_SEQ_INIT, 0);
2064 /* Obtain exchange id */ 1872 /* Obtain exchange id */
2065 xid = io_req->xid; 1873 xid = io_req->xid;
2066 1874
@@ -2069,16 +1877,18 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
2069 1877
2070 /* Initialize task context for this IO request */ 1878 /* Initialize task context for this IO request */
2071 task = qedf_get_task_mem(&qedf->tasks, xid); 1879 task = qedf_get_task_mem(&qedf->tasks, xid);
2072 qedf_init_mp_task(io_req, task);
2073 1880
2074 init_completion(&io_req->tm_done); 1881 init_completion(&io_req->tm_done);
2075 1882
2076 /* Obtain free SQ entry */
2077 spin_lock_irqsave(&fcport->rport_lock, flags); 1883 spin_lock_irqsave(&fcport->rport_lock, flags);
2078 qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
2079 1884
2080 /* Ring doorbell */ 1885 sqe_idx = qedf_get_sqe_idx(fcport);
1886 sqe = &fcport->sq[sqe_idx];
1887 memset(sqe, 0, sizeof(struct fcoe_wqe));
1888
1889 qedf_init_task(fcport, lport, io_req, task, sqe);
2081 qedf_ring_doorbell(fcport); 1890 qedf_ring_doorbell(fcport);
1891
2082 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1892 spin_unlock_irqrestore(&fcport->rport_lock, flags);
2083 1893
2084 tmo = wait_for_completion_timeout(&io_req->tm_done, 1894 tmo = wait_for_completion_timeout(&io_req->tm_done,
@@ -2162,14 +1972,6 @@ void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2162 struct qedf_ioreq *io_req) 1972 struct qedf_ioreq *io_req)
2163{ 1973{
2164 struct fcoe_cqe_rsp_info *fcp_rsp; 1974 struct fcoe_cqe_rsp_info *fcp_rsp;
2165 struct fcoe_cqe_midpath_info *mp_info;
2166
2167
2168 /* Get TMF response length from CQE */
2169 mp_info = &cqe->cqe_info.midpath_info;
2170 io_req->mp_req.resp_len = mp_info->data_placement_size;
2171 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
2172 "Response len is %d.\n", io_req->mp_req.resp_len);
2173 1975
2174 fcp_rsp = &cqe->cqe_info.rsp_info; 1976 fcp_rsp = &cqe->cqe_info.rsp_info;
2175 qedf_parse_fcp_rsp(io_req, fcp_rsp); 1977 qedf_parse_fcp_rsp(io_req, fcp_rsp);
diff --git a/drivers/scsi/qedi/Makefile b/drivers/scsi/qedi/Makefile
index 2b3e16b24299..90a6925577cc 100644
--- a/drivers/scsi/qedi/Makefile
+++ b/drivers/scsi/qedi/Makefile
@@ -1,5 +1,5 @@
1obj-$(CONFIG_QEDI) := qedi.o 1obj-$(CONFIG_QEDI) := qedi.o
2qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \ 2qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \
3 qedi_dbg.o 3 qedi_dbg.o qedi_fw_api.o
4 4
5qedi-$(CONFIG_DEBUG_FS) += qedi_debugfs.o 5qedi-$(CONFIG_DEBUG_FS) += qedi_debugfs.o
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 2bce3efc66a4..d6978cbc56f0 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -14,6 +14,8 @@
14#include "qedi.h" 14#include "qedi.h"
15#include "qedi_iscsi.h" 15#include "qedi_iscsi.h"
16#include "qedi_gbl.h" 16#include "qedi_gbl.h"
17#include "qedi_fw_iscsi.h"
18#include "qedi_fw_scsi.h"
17 19
18static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, 20static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
19 struct iscsi_task *mtask); 21 struct iscsi_task *mtask);
@@ -53,8 +55,8 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
53 resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn); 55 resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn);
54 resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn); 56 resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn);
55 57
56 resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time2wait); 58 resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time_2_wait);
57 resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time2retain); 59 resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time_2_retain);
58 60
59 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, 61 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
60 "Freeing tid=0x%x for cid=0x%x\n", 62 "Freeing tid=0x%x for cid=0x%x\n",
@@ -975,81 +977,6 @@ exit_fp_process:
975 return; 977 return;
976} 978}
977 979
978static void qedi_add_to_sq(struct qedi_conn *qedi_conn, struct iscsi_task *task,
979 u16 tid, uint16_t ptu_invalidate, int is_cleanup)
980{
981 struct iscsi_wqe *wqe;
982 struct iscsi_wqe_field *cont_field;
983 struct qedi_endpoint *ep;
984 struct scsi_cmnd *sc = task->sc;
985 struct iscsi_login_req *login_hdr;
986 struct qedi_cmd *cmd = task->dd_data;
987
988 login_hdr = (struct iscsi_login_req *)task->hdr;
989 ep = qedi_conn->ep;
990 wqe = &ep->sq[ep->sq_prod_idx];
991
992 memset(wqe, 0, sizeof(*wqe));
993
994 ep->sq_prod_idx++;
995 ep->fw_sq_prod_idx++;
996 if (ep->sq_prod_idx == QEDI_SQ_SIZE)
997 ep->sq_prod_idx = 0;
998
999 if (is_cleanup) {
1000 SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
1001 ISCSI_WQE_TYPE_TASK_CLEANUP);
1002 wqe->task_id = tid;
1003 return;
1004 }
1005
1006 if (ptu_invalidate) {
1007 SET_FIELD(wqe->flags, ISCSI_WQE_PTU_INVALIDATE,
1008 ISCSI_WQE_SET_PTU_INVALIDATE);
1009 }
1010
1011 cont_field = &wqe->cont_prevtid_union.cont_field;
1012
1013 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
1014 case ISCSI_OP_LOGIN:
1015 case ISCSI_OP_TEXT:
1016 SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
1017 ISCSI_WQE_TYPE_MIDDLE_PATH);
1018 SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
1019 1);
1020 cont_field->contlen_cdbsize_field = ntoh24(login_hdr->dlength);
1021 break;
1022 case ISCSI_OP_LOGOUT:
1023 case ISCSI_OP_NOOP_OUT:
1024 case ISCSI_OP_SCSI_TMFUNC:
1025 SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
1026 ISCSI_WQE_TYPE_NORMAL);
1027 break;
1028 default:
1029 if (!sc)
1030 break;
1031
1032 SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
1033 ISCSI_WQE_TYPE_NORMAL);
1034 cont_field->contlen_cdbsize_field =
1035 (sc->sc_data_direction == DMA_TO_DEVICE) ?
1036 scsi_bufflen(sc) : 0;
1037 if (cmd->use_slowpath)
1038 SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, 0);
1039 else
1040 SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
1041 (sc->sc_data_direction ==
1042 DMA_TO_DEVICE) ?
1043 min((u16)QEDI_FAST_SGE_COUNT,
1044 (u16)cmd->io_tbl.sge_valid) : 0);
1045 break;
1046 }
1047
1048 wqe->task_id = tid;
1049 /* Make sure SQ data is coherent */
1050 wmb();
1051}
1052
1053static void qedi_ring_doorbell(struct qedi_conn *qedi_conn) 980static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
1054{ 981{
1055 struct iscsi_db_data dbell = { 0 }; 982 struct iscsi_db_data dbell = { 0 };
@@ -1076,96 +1003,116 @@ static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
1076 qedi_conn->iscsi_conn_id); 1003 qedi_conn->iscsi_conn_id);
1077} 1004}
1078 1005
1006static u16 qedi_get_wqe_idx(struct qedi_conn *qedi_conn)
1007{
1008 struct qedi_endpoint *ep;
1009 u16 rval;
1010
1011 ep = qedi_conn->ep;
1012 rval = ep->sq_prod_idx;
1013
1014 /* Increament SQ index */
1015 ep->sq_prod_idx++;
1016 ep->fw_sq_prod_idx++;
1017 if (ep->sq_prod_idx == QEDI_SQ_SIZE)
1018 ep->sq_prod_idx = 0;
1019
1020 return rval;
1021}
1022
1079int qedi_send_iscsi_login(struct qedi_conn *qedi_conn, 1023int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
1080 struct iscsi_task *task) 1024 struct iscsi_task *task)
1081{ 1025{
1082 struct qedi_ctx *qedi = qedi_conn->qedi; 1026 struct iscsi_login_req_hdr login_req_pdu_header;
1027 struct scsi_sgl_task_params tx_sgl_task_params;
1028 struct scsi_sgl_task_params rx_sgl_task_params;
1029 struct iscsi_task_params task_params;
1083 struct iscsi_task_context *fw_task_ctx; 1030 struct iscsi_task_context *fw_task_ctx;
1031 struct qedi_ctx *qedi = qedi_conn->qedi;
1084 struct iscsi_login_req *login_hdr; 1032 struct iscsi_login_req *login_hdr;
1085 struct iscsi_login_req_hdr *fw_login_req = NULL; 1033 struct scsi_sge *req_sge = NULL;
1086 struct iscsi_cached_sge_ctx *cached_sge = NULL; 1034 struct scsi_sge *resp_sge = NULL;
1087 struct iscsi_sge *single_sge = NULL;
1088 struct iscsi_sge *req_sge = NULL;
1089 struct iscsi_sge *resp_sge = NULL;
1090 struct qedi_cmd *qedi_cmd; 1035 struct qedi_cmd *qedi_cmd;
1091 s16 ptu_invalidate = 0; 1036 struct qedi_endpoint *ep;
1092 s16 tid = 0; 1037 s16 tid = 0;
1038 u16 sq_idx = 0;
1039 int rval = 0;
1093 1040
1094 req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; 1041 req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1095 resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; 1042 resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1096 qedi_cmd = (struct qedi_cmd *)task->dd_data; 1043 qedi_cmd = (struct qedi_cmd *)task->dd_data;
1044 ep = qedi_conn->ep;
1097 login_hdr = (struct iscsi_login_req *)task->hdr; 1045 login_hdr = (struct iscsi_login_req *)task->hdr;
1098 1046
1099 tid = qedi_get_task_idx(qedi); 1047 tid = qedi_get_task_idx(qedi);
1100 if (tid == -1) 1048 if (tid == -1)
1101 return -ENOMEM; 1049 return -ENOMEM;
1102 1050
1103 fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid); 1051 fw_task_ctx =
1052 (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
1104 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); 1053 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
1105 1054
1106 qedi_cmd->task_id = tid; 1055 qedi_cmd->task_id = tid;
1107 1056
1108 /* Ystorm context */ 1057 memset(&task_params, 0, sizeof(task_params));
1109 fw_login_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.login_req; 1058 memset(&login_req_pdu_header, 0, sizeof(login_req_pdu_header));
1110 fw_login_req->opcode = login_hdr->opcode; 1059 memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
1111 fw_login_req->version_min = login_hdr->min_version; 1060 memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
1112 fw_login_req->version_max = login_hdr->max_version; 1061 /* Update header info */
1113 fw_login_req->flags_attr = login_hdr->flags; 1062 login_req_pdu_header.opcode = login_hdr->opcode;
1114 fw_login_req->isid_tabc = *((u16 *)login_hdr->isid + 2); 1063 login_req_pdu_header.version_min = login_hdr->min_version;
1115 fw_login_req->isid_d = *((u32 *)login_hdr->isid); 1064 login_req_pdu_header.version_max = login_hdr->max_version;
1116 fw_login_req->tsih = login_hdr->tsih; 1065 login_req_pdu_header.flags_attr = login_hdr->flags;
1117 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); 1066 login_req_pdu_header.isid_tabc = swab32p((u32 *)login_hdr->isid);
1118 fw_login_req->itt = qedi_set_itt(tid, get_itt(task->itt)); 1067 login_req_pdu_header.isid_d = swab16p((u16 *)&login_hdr->isid[4]);
1119 fw_login_req->cid = qedi_conn->iscsi_conn_id; 1068
1120 fw_login_req->cmd_sn = be32_to_cpu(login_hdr->cmdsn); 1069 login_req_pdu_header.tsih = login_hdr->tsih;
1121 fw_login_req->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn); 1070 login_req_pdu_header.hdr_second_dword = ntoh24(login_hdr->dlength);
1122 fw_login_req->exp_stat_sn = 0;
1123
1124 if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
1125 ptu_invalidate = 1;
1126 qedi->tid_reuse_count[tid] = 0;
1127 }
1128 1071
1129 fw_task_ctx->ystorm_st_context.state.reuse_count = 1072 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
1130 qedi->tid_reuse_count[tid]; 1073 login_req_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
1131 fw_task_ctx->mstorm_st_context.reuse_count = 1074 login_req_pdu_header.cid = qedi_conn->iscsi_conn_id;
1132 qedi->tid_reuse_count[tid]++; 1075 login_req_pdu_header.cmd_sn = be32_to_cpu(login_hdr->cmdsn);
1133 cached_sge = 1076 login_req_pdu_header.exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
1134 &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge; 1077 login_req_pdu_header.exp_stat_sn = 0;
1135 cached_sge->sge.sge_len = req_sge->sge_len; 1078
1136 cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr); 1079 /* Fill tx AHS and rx buffer */
1137 cached_sge->sge.sge_addr.hi = 1080 tx_sgl_task_params.sgl =
1138 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); 1081 (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1139 1082 tx_sgl_task_params.sgl_phys_addr.lo =
1140 /* Mstorm context */ 1083 (u32)(qedi_conn->gen_pdu.req_dma_addr);
1141 single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge; 1084 tx_sgl_task_params.sgl_phys_addr.hi =
1142 fw_task_ctx->mstorm_st_context.task_type = 0x2; 1085 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
1143 fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id; 1086 tx_sgl_task_params.total_buffer_size = ntoh24(login_hdr->dlength);
1144 single_sge->sge_addr.lo = resp_sge->sge_addr.lo; 1087 tx_sgl_task_params.num_sges = 1;
1145 single_sge->sge_addr.hi = resp_sge->sge_addr.hi; 1088
1146 single_sge->sge_len = resp_sge->sge_len; 1089 rx_sgl_task_params.sgl =
1147 1090 (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1148 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, 1091 rx_sgl_task_params.sgl_phys_addr.lo =
1149 ISCSI_MFLAGS_SINGLE_SGE, 1); 1092 (u32)(qedi_conn->gen_pdu.resp_dma_addr);
1150 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, 1093 rx_sgl_task_params.sgl_phys_addr.hi =
1151 ISCSI_MFLAGS_SLOW_IO, 0); 1094 (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
1152 fw_task_ctx->mstorm_st_context.sgl_size = 1; 1095 rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
1153 fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len; 1096 rx_sgl_task_params.num_sges = 1;
1154 1097
1155 /* Ustorm context */ 1098 /* Fill fw input params */
1156 fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len; 1099 task_params.context = fw_task_ctx;
1157 fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 1100 task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
1158 ntoh24(login_hdr->dlength); 1101 task_params.itid = tid;
1159 fw_task_ctx->ustorm_st_context.exp_data_sn = 0; 1102 task_params.cq_rss_number = 0;
1160 fw_task_ctx->ustorm_st_context.cq_rss_number = 0; 1103 task_params.tx_io_size = ntoh24(login_hdr->dlength);
1161 fw_task_ctx->ustorm_st_context.task_type = 0x2; 1104 task_params.rx_io_size = resp_sge->sge_len;
1162 fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id; 1105
1163 fw_task_ctx->ustorm_ag_context.exp_data_acked = 1106 sq_idx = qedi_get_wqe_idx(qedi_conn);
1164 ntoh24(login_hdr->dlength); 1107 task_params.sqe = &ep->sq[sq_idx];
1165 SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1, 1108
1166 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); 1109 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
1167 SET_FIELD(fw_task_ctx->ustorm_st_context.flags, 1110 rval = init_initiator_login_request_task(&task_params,
1168 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0); 1111 &login_req_pdu_header,
1112 &tx_sgl_task_params,
1113 &rx_sgl_task_params);
1114 if (rval)
1115 return -1;
1169 1116
1170 spin_lock(&qedi_conn->list_lock); 1117 spin_lock(&qedi_conn->list_lock);
1171 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); 1118 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1173,7 +1120,6 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
1173 qedi_conn->active_cmd_count++; 1120 qedi_conn->active_cmd_count++;
1174 spin_unlock(&qedi_conn->list_lock); 1121 spin_unlock(&qedi_conn->list_lock);
1175 1122
1176 qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
1177 qedi_ring_doorbell(qedi_conn); 1123 qedi_ring_doorbell(qedi_conn);
1178 return 0; 1124 return 0;
1179} 1125}
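The rewritten login path is the template for every qedi PDU type that follows (logout, TMF, text, NOP-Out, SCSI command): per-storm context programming is replaced by filling three kinds of descriptors (an iscsi_task_params, a copy of the PDU header, and a scsi_sgl_task_params per direction) and handing them to the matching init_initiator_*_task() helper from the new qedi_fw_api code. A sketch of the shared single-SGE preparation step, using illustrative stand-in types rather than the real qed headers:

#include <stdint.h>
#include <string.h>

struct regpair_model { uint32_t lo, hi; };

struct sgl_task_params_model {
	void *sgl;                               /* virtual address of the BD table */
	struct regpair_model sgl_phys_addr;      /* DMA address of the BD table */
	uint32_t total_buffer_size;
	uint16_t num_sges;
};

/* One buffer described by a single SGE, as the login/text/NOP paths do. */
static void fill_single_sge_sgl(struct sgl_task_params_model *p,
				void *bd_tbl, uint64_t bd_tbl_dma,
				uint32_t buf_len)
{
	memset(p, 0, sizeof(*p));
	p->sgl = bd_tbl;
	p->sgl_phys_addr.lo = (uint32_t)bd_tbl_dma;
	p->sgl_phys_addr.hi = (uint32_t)(bd_tbl_dma >> 32);
	p->total_buffer_size = buf_len;
	p->num_sges = 1;
}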
@@ -1181,65 +1127,64 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
1181int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn, 1127int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
1182 struct iscsi_task *task) 1128 struct iscsi_task *task)
1183{ 1129{
1184 struct qedi_ctx *qedi = qedi_conn->qedi; 1130 struct iscsi_logout_req_hdr logout_pdu_header;
1185 struct iscsi_logout_req_hdr *fw_logout_req = NULL; 1131 struct scsi_sgl_task_params tx_sgl_task_params;
1186 struct iscsi_task_context *fw_task_ctx = NULL; 1132 struct scsi_sgl_task_params rx_sgl_task_params;
1133 struct iscsi_task_params task_params;
1134 struct iscsi_task_context *fw_task_ctx;
1187 struct iscsi_logout *logout_hdr = NULL; 1135 struct iscsi_logout *logout_hdr = NULL;
1188 struct qedi_cmd *qedi_cmd = NULL; 1136 struct qedi_ctx *qedi = qedi_conn->qedi;
1189 s16 tid = 0; 1137 struct qedi_cmd *qedi_cmd;
1190 s16 ptu_invalidate = 0; 1138 struct qedi_endpoint *ep;
1139 s16 tid = 0;
1140 u16 sq_idx = 0;
1141 int rval = 0;
1191 1142
1192 qedi_cmd = (struct qedi_cmd *)task->dd_data; 1143 qedi_cmd = (struct qedi_cmd *)task->dd_data;
1193 logout_hdr = (struct iscsi_logout *)task->hdr; 1144 logout_hdr = (struct iscsi_logout *)task->hdr;
1145 ep = qedi_conn->ep;
1194 1146
1195 tid = qedi_get_task_idx(qedi); 1147 tid = qedi_get_task_idx(qedi);
1196 if (tid == -1) 1148 if (tid == -1)
1197 return -ENOMEM; 1149 return -ENOMEM;
1198 1150
1199 fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid); 1151 fw_task_ctx =
1200 1152 (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
1201 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); 1153 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
1154
1202 qedi_cmd->task_id = tid; 1155 qedi_cmd->task_id = tid;
1203 1156
1204 /* Ystorm context */ 1157 memset(&task_params, 0, sizeof(task_params));
1205 fw_logout_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.logout_req; 1158 memset(&logout_pdu_header, 0, sizeof(logout_pdu_header));
1206 fw_logout_req->opcode = ISCSI_OPCODE_LOGOUT_REQUEST; 1159 memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
1207 fw_logout_req->reason_code = 0x80 | logout_hdr->flags; 1160 memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
1208 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
1209 fw_logout_req->itt = qedi_set_itt(tid, get_itt(task->itt));
1210 fw_logout_req->exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
1211 fw_logout_req->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
1212 1161
1213 if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) { 1162 /* Update header info */
1214 ptu_invalidate = 1; 1163 logout_pdu_header.opcode = logout_hdr->opcode;
1215 qedi->tid_reuse_count[tid] = 0; 1164 logout_pdu_header.reason_code = 0x80 | logout_hdr->flags;
1216 } 1165 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
1217 fw_task_ctx->ystorm_st_context.state.reuse_count = 1166 logout_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
1218 qedi->tid_reuse_count[tid]; 1167 logout_pdu_header.exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
1219 fw_task_ctx->mstorm_st_context.reuse_count = 1168 logout_pdu_header.cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
1220 qedi->tid_reuse_count[tid]++; 1169 logout_pdu_header.cid = qedi_conn->iscsi_conn_id;
1221 fw_logout_req->cid = qedi_conn->iscsi_conn_id; 1170
1222 fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0; 1171 /* Fill fw input params */
1223 1172 task_params.context = fw_task_ctx;
1224 /* Mstorm context */ 1173 task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
1225 fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH; 1174 task_params.itid = tid;
1226 fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id; 1175 task_params.cq_rss_number = 0;
1227 1176 task_params.tx_io_size = 0;
1228 /* Ustorm context */ 1177 task_params.rx_io_size = 0;
1229 fw_task_ctx->ustorm_st_context.rem_rcv_len = 0; 1178
1230 fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0; 1179 sq_idx = qedi_get_wqe_idx(qedi_conn);
1231 fw_task_ctx->ustorm_st_context.exp_data_sn = 0; 1180 task_params.sqe = &ep->sq[sq_idx];
1232 fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH; 1181 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
1233 fw_task_ctx->ustorm_st_context.cq_rss_number = 0; 1182
1234 1183 rval = init_initiator_logout_request_task(&task_params,
1235 SET_FIELD(fw_task_ctx->ustorm_st_context.flags, 1184 &logout_pdu_header,
1236 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0); 1185 NULL, NULL);
1237 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map, 1186 if (rval)
1238 ISCSI_REG1_NUM_FAST_SGES, 0); 1187 return -1;
1239
1240 fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
1241 SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
1242 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
1243 1188
1244 spin_lock(&qedi_conn->list_lock); 1189 spin_lock(&qedi_conn->list_lock);
1245 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); 1190 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1247,9 +1192,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
1247 qedi_conn->active_cmd_count++; 1192 qedi_conn->active_cmd_count++;
1248 spin_unlock(&qedi_conn->list_lock); 1193 spin_unlock(&qedi_conn->list_lock);
1249 1194
1250 qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
1251 qedi_ring_doorbell(qedi_conn); 1195 qedi_ring_doorbell(qedi_conn);
1252
1253 return 0; 1196 return 0;
1254} 1197}
1255 1198
@@ -1533,47 +1476,46 @@ ldel_exit:
1533static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, 1476static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
1534 struct iscsi_task *mtask) 1477 struct iscsi_task *mtask)
1535{ 1478{
1536 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; 1479 struct iscsi_tmf_request_hdr tmf_pdu_header;
1480 struct iscsi_task_params task_params;
1537 struct qedi_ctx *qedi = qedi_conn->qedi; 1481 struct qedi_ctx *qedi = qedi_conn->qedi;
1538 struct iscsi_task_context *fw_task_ctx; 1482 struct iscsi_task_context *fw_task_ctx;
1539 struct iscsi_tmf_request_hdr *fw_tmf_request; 1483 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
1540 struct iscsi_sge *single_sge;
1541 struct qedi_cmd *qedi_cmd;
1542 struct qedi_cmd *cmd;
1543 struct iscsi_task *ctask; 1484 struct iscsi_task *ctask;
1544 struct iscsi_tm *tmf_hdr; 1485 struct iscsi_tm *tmf_hdr;
1545 struct iscsi_sge *req_sge; 1486 struct qedi_cmd *qedi_cmd;
1546 struct iscsi_sge *resp_sge; 1487 struct qedi_cmd *cmd;
1547 u32 lun[2]; 1488 struct qedi_endpoint *ep;
1548 s16 tid = 0, ptu_invalidate = 0; 1489 u32 scsi_lun[2];
1490 s16 tid = 0;
1491 u16 sq_idx = 0;
1492 int rval = 0;
1549 1493
1550 req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1551 resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1552 qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
1553 tmf_hdr = (struct iscsi_tm *)mtask->hdr; 1494 tmf_hdr = (struct iscsi_tm *)mtask->hdr;
1495 qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
1496 ep = qedi_conn->ep;
1554 1497
1555 tid = qedi_cmd->task_id; 1498 tid = qedi_get_task_idx(qedi);
1556 qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd); 1499 if (tid == -1)
1500 return -ENOMEM;
1557 1501
1558 fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid); 1502 fw_task_ctx =
1503 (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
1559 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); 1504 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
1560 1505
1561 fw_tmf_request = &fw_task_ctx->ystorm_st_context.pdu_hdr.tmf_request; 1506 qedi_cmd->task_id = tid;
1562 fw_tmf_request->itt = qedi_set_itt(tid, get_itt(mtask->itt));
1563 fw_tmf_request->cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
1564 1507
1565 memcpy(lun, &tmf_hdr->lun, sizeof(struct scsi_lun)); 1508 memset(&task_params, 0, sizeof(task_params));
1566 fw_tmf_request->lun.lo = be32_to_cpu(lun[0]); 1509 memset(&tmf_pdu_header, 0, sizeof(tmf_pdu_header));
1567 fw_tmf_request->lun.hi = be32_to_cpu(lun[1]);
1568 1510
1569 if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) { 1511 /* Update header info */
1570 ptu_invalidate = 1; 1512 qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
1571 qedi->tid_reuse_count[tid] = 0; 1513 tmf_pdu_header.itt = qedi_set_itt(tid, get_itt(mtask->itt));
1572 } 1514 tmf_pdu_header.cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
1573 fw_task_ctx->ystorm_st_context.state.reuse_count = 1515
1574 qedi->tid_reuse_count[tid]; 1516 memcpy(scsi_lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
1575 fw_task_ctx->mstorm_st_context.reuse_count = 1517 tmf_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
1576 qedi->tid_reuse_count[tid]++; 1518 tmf_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
1577 1519
1578 if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == 1520 if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
1579 ISCSI_TM_FUNC_ABORT_TASK) { 1521 ISCSI_TM_FUNC_ABORT_TASK) {
@@ -1584,53 +1526,34 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
1584 return 0; 1526 return 0;
1585 } 1527 }
1586 cmd = (struct qedi_cmd *)ctask->dd_data; 1528 cmd = (struct qedi_cmd *)ctask->dd_data;
1587 fw_tmf_request->rtt = 1529 tmf_pdu_header.rtt =
1588 qedi_set_itt(cmd->task_id, 1530 qedi_set_itt(cmd->task_id,
1589 get_itt(tmf_hdr->rtt)); 1531 get_itt(tmf_hdr->rtt));
1590 } else { 1532 } else {
1591 fw_tmf_request->rtt = ISCSI_RESERVED_TAG; 1533 tmf_pdu_header.rtt = ISCSI_RESERVED_TAG;
1592 } 1534 }
1593 1535
1594 fw_tmf_request->opcode = tmf_hdr->opcode; 1536 tmf_pdu_header.opcode = tmf_hdr->opcode;
1595 fw_tmf_request->function = tmf_hdr->flags; 1537 tmf_pdu_header.function = tmf_hdr->flags;
1596 fw_tmf_request->hdr_second_dword = ntoh24(tmf_hdr->dlength); 1538 tmf_pdu_header.hdr_second_dword = ntoh24(tmf_hdr->dlength);
1597 fw_tmf_request->ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn); 1539 tmf_pdu_header.ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
1598
1599 single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
1600 fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
1601 fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
1602 single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
1603 single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
1604 single_sge->sge_len = resp_sge->sge_len;
1605
1606 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
1607 ISCSI_MFLAGS_SINGLE_SGE, 1);
1608 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
1609 ISCSI_MFLAGS_SLOW_IO, 0);
1610 fw_task_ctx->mstorm_st_context.sgl_size = 1;
1611 fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
1612
1613 /* Ustorm context */
1614 fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
1615 fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
1616 fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
1617 fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
1618 fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
1619
1620 SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
1621 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
1622 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
1623 ISCSI_REG1_NUM_FAST_SGES, 0);
1624
1625 fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
1626 SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
1627 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
1628 fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
1629 fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
1630 1540
1631 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, 1541 /* Fill fw input params */
1632 "Add TMF to SQ, tmf tid=0x%x, itt=0x%x, cid=0x%x\n", 1542 task_params.context = fw_task_ctx;
1633 tid, mtask->itt, qedi_conn->iscsi_conn_id); 1543 task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
1544 task_params.itid = tid;
1545 task_params.cq_rss_number = 0;
1546 task_params.tx_io_size = 0;
1547 task_params.rx_io_size = 0;
1548
1549 sq_idx = qedi_get_wqe_idx(qedi_conn);
1550 task_params.sqe = &ep->sq[sq_idx];
1551
1552 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
1553 rval = init_initiator_tmf_request_task(&task_params,
1554 &tmf_pdu_header);
1555 if (rval)
1556 return -1;
1634 1557
1635 spin_lock(&qedi_conn->list_lock); 1558 spin_lock(&qedi_conn->list_lock);
1636 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); 1559 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1638,7 +1561,6 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
1638 qedi_conn->active_cmd_count++; 1561 qedi_conn->active_cmd_count++;
1639 spin_unlock(&qedi_conn->list_lock); 1562 spin_unlock(&qedi_conn->list_lock);
1640 1563
1641 qedi_add_to_sq(qedi_conn, mtask, tid, ptu_invalidate, false);
1642 qedi_ring_doorbell(qedi_conn); 1564 qedi_ring_doorbell(qedi_conn);
1643 return 0; 1565 return 0;
1644} 1566}
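The TMF path, like NOP-Out and the SCSI command path further below, converts the 8-byte SCSI LUN into the firmware's lo/hi pair by copying it into two 32-bit words and byte-swapping each one. A standalone model of that conversion (ntohl() plays the role of be32_to_cpu() in userspace):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* View an 8-byte SCSI LUN as two big-endian 32-bit words, as the diff does
 * with memcpy(scsi_lun, &hdr->lun, ...) followed by be32_to_cpu(). */
static void lun_to_lo_hi(const uint8_t lun[8], uint32_t *lo, uint32_t *hi)
{
	uint32_t words[2];

	memcpy(words, lun, sizeof(words));       /* avoids unaligned access on the header */
	*lo = ntohl(words[0]);
	*hi = ntohl(words[1]);
}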
@@ -1689,101 +1611,98 @@ int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
1689int qedi_send_iscsi_text(struct qedi_conn *qedi_conn, 1611int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
1690 struct iscsi_task *task) 1612 struct iscsi_task *task)
1691{ 1613{
1692 struct qedi_ctx *qedi = qedi_conn->qedi; 1614 struct iscsi_text_request_hdr text_request_pdu_header;
1615 struct scsi_sgl_task_params tx_sgl_task_params;
1616 struct scsi_sgl_task_params rx_sgl_task_params;
1617 struct iscsi_task_params task_params;
1693 struct iscsi_task_context *fw_task_ctx; 1618 struct iscsi_task_context *fw_task_ctx;
1694 struct iscsi_text_request_hdr *fw_text_request; 1619 struct qedi_ctx *qedi = qedi_conn->qedi;
1695 struct iscsi_cached_sge_ctx *cached_sge;
1696 struct iscsi_sge *single_sge;
1697 struct qedi_cmd *qedi_cmd;
1698 /* For 6.5 hdr iscsi_hdr */
1699 struct iscsi_text *text_hdr; 1620 struct iscsi_text *text_hdr;
1700 struct iscsi_sge *req_sge; 1621 struct scsi_sge *req_sge = NULL;
1701 struct iscsi_sge *resp_sge; 1622 struct scsi_sge *resp_sge = NULL;
1702 s16 ptu_invalidate = 0; 1623 struct qedi_cmd *qedi_cmd;
1624 struct qedi_endpoint *ep;
1703 s16 tid = 0; 1625 s16 tid = 0;
1626 u16 sq_idx = 0;
1627 int rval = 0;
1704 1628
1705 req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; 1629 req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1706 resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; 1630 resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1707 qedi_cmd = (struct qedi_cmd *)task->dd_data; 1631 qedi_cmd = (struct qedi_cmd *)task->dd_data;
1708 text_hdr = (struct iscsi_text *)task->hdr; 1632 text_hdr = (struct iscsi_text *)task->hdr;
1633 ep = qedi_conn->ep;
1709 1634
1710 tid = qedi_get_task_idx(qedi); 1635 tid = qedi_get_task_idx(qedi);
1711 if (tid == -1) 1636 if (tid == -1)
1712 return -ENOMEM; 1637 return -ENOMEM;
1713 1638
1714 fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid); 1639 fw_task_ctx =
1640 (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
1715 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); 1641 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
1716 1642
1717 qedi_cmd->task_id = tid; 1643 qedi_cmd->task_id = tid;
1718 1644
1719 /* Ystorm context */ 1645 memset(&task_params, 0, sizeof(task_params));
1720 fw_text_request = 1646 memset(&text_request_pdu_header, 0, sizeof(text_request_pdu_header));
1721 &fw_task_ctx->ystorm_st_context.pdu_hdr.text_request; 1647 memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
1722 fw_text_request->opcode = text_hdr->opcode; 1648 memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
1723 fw_text_request->flags_attr = text_hdr->flags; 1649
1650 /* Update header info */
1651 text_request_pdu_header.opcode = text_hdr->opcode;
1652 text_request_pdu_header.flags_attr = text_hdr->flags;
1724 1653
1725 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); 1654 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
1726 fw_text_request->itt = qedi_set_itt(tid, get_itt(task->itt)); 1655 text_request_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
1727 fw_text_request->ttt = text_hdr->ttt; 1656 text_request_pdu_header.ttt = text_hdr->ttt;
1728 fw_text_request->cmd_sn = be32_to_cpu(text_hdr->cmdsn); 1657 text_request_pdu_header.cmd_sn = be32_to_cpu(text_hdr->cmdsn);
1729 fw_text_request->exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn); 1658 text_request_pdu_header.exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
1730 fw_text_request->hdr_second_dword = ntoh24(text_hdr->dlength); 1659 text_request_pdu_header.hdr_second_dword = ntoh24(text_hdr->dlength);
1731 1660
1732 if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) { 1661 /* Fill tx AHS and rx buffer */
1733 ptu_invalidate = 1; 1662 tx_sgl_task_params.sgl =
1734 qedi->tid_reuse_count[tid] = 0; 1663 (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1735 } 1664 tx_sgl_task_params.sgl_phys_addr.lo =
1736 fw_task_ctx->ystorm_st_context.state.reuse_count = 1665 (u32)(qedi_conn->gen_pdu.req_dma_addr);
1737 qedi->tid_reuse_count[tid]; 1666 tx_sgl_task_params.sgl_phys_addr.hi =
1738 fw_task_ctx->mstorm_st_context.reuse_count =
1739 qedi->tid_reuse_count[tid]++;
1740
1741 cached_sge =
1742 &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
1743 cached_sge->sge.sge_len = req_sge->sge_len;
1744 cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
1745 cached_sge->sge.sge_addr.hi =
1746 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); 1667 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
1668 tx_sgl_task_params.total_buffer_size = req_sge->sge_len;
1669 tx_sgl_task_params.num_sges = 1;
1670
1671 rx_sgl_task_params.sgl =
1672 (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1673 rx_sgl_task_params.sgl_phys_addr.lo =
1674 (u32)(qedi_conn->gen_pdu.resp_dma_addr);
1675 rx_sgl_task_params.sgl_phys_addr.hi =
1676 (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
1677 rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
1678 rx_sgl_task_params.num_sges = 1;
1679
1680 /* Fill fw input params */
1681 task_params.context = fw_task_ctx;
1682 task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
1683 task_params.itid = tid;
1684 task_params.cq_rss_number = 0;
1685 task_params.tx_io_size = ntoh24(text_hdr->dlength);
1686 task_params.rx_io_size = resp_sge->sge_len;
1687
1688 sq_idx = qedi_get_wqe_idx(qedi_conn);
1689 task_params.sqe = &ep->sq[sq_idx];
1690
1691 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
1692 rval = init_initiator_text_request_task(&task_params,
1693 &text_request_pdu_header,
1694 &tx_sgl_task_params,
1695 &rx_sgl_task_params);
1696 if (rval)
1697 return -1;
1747 1698
1748 /* Mstorm context */
1749 single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
1750 fw_task_ctx->mstorm_st_context.task_type = 0x2;
1751 fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
1752 single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
1753 single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
1754 single_sge->sge_len = resp_sge->sge_len;
1755
1756 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
1757 ISCSI_MFLAGS_SINGLE_SGE, 1);
1758 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
1759 ISCSI_MFLAGS_SLOW_IO, 0);
1760 fw_task_ctx->mstorm_st_context.sgl_size = 1;
1761 fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
1762
1763 /* Ustorm context */
1764 fw_task_ctx->ustorm_ag_context.exp_data_acked =
1765 ntoh24(text_hdr->dlength);
1766 fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
1767 fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
1768 ntoh24(text_hdr->dlength);
1769 fw_task_ctx->ustorm_st_context.exp_data_sn =
1770 be32_to_cpu(text_hdr->exp_statsn);
1771 fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
1772 fw_task_ctx->ustorm_st_context.task_type = 0x2;
1773 fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
1774 SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
1775 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
1776
1777 /* Add command in active command list */
1778 spin_lock(&qedi_conn->list_lock); 1699 spin_lock(&qedi_conn->list_lock);
1779 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); 1700 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
1780 qedi_cmd->io_cmd_in_list = true; 1701 qedi_cmd->io_cmd_in_list = true;
1781 qedi_conn->active_cmd_count++; 1702 qedi_conn->active_cmd_count++;
1782 spin_unlock(&qedi_conn->list_lock); 1703 spin_unlock(&qedi_conn->list_lock);
1783 1704
1784 qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
1785 qedi_ring_doorbell(qedi_conn); 1705 qedi_ring_doorbell(qedi_conn);
1786
1787 return 0; 1706 return 0;
1788} 1707}
1789 1708
@@ -1791,58 +1710,62 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
1791 struct iscsi_task *task, 1710 struct iscsi_task *task,
1792 char *datap, int data_len, int unsol) 1711 char *datap, int data_len, int unsol)
1793{ 1712{
1713 struct iscsi_nop_out_hdr nop_out_pdu_header;
1714 struct scsi_sgl_task_params tx_sgl_task_params;
1715 struct scsi_sgl_task_params rx_sgl_task_params;
1716 struct iscsi_task_params task_params;
1794 struct qedi_ctx *qedi = qedi_conn->qedi; 1717 struct qedi_ctx *qedi = qedi_conn->qedi;
1795 struct iscsi_task_context *fw_task_ctx; 1718 struct iscsi_task_context *fw_task_ctx;
1796 struct iscsi_nop_out_hdr *fw_nop_out;
1797 struct qedi_cmd *qedi_cmd;
1798 /* For 6.5 hdr iscsi_hdr */
1799 struct iscsi_nopout *nopout_hdr; 1719 struct iscsi_nopout *nopout_hdr;
1800 struct iscsi_cached_sge_ctx *cached_sge; 1720 struct scsi_sge *req_sge = NULL;
1801 struct iscsi_sge *single_sge; 1721 struct scsi_sge *resp_sge = NULL;
1802 struct iscsi_sge *req_sge; 1722 struct qedi_cmd *qedi_cmd;
1803 struct iscsi_sge *resp_sge; 1723 struct qedi_endpoint *ep;
1804 u32 lun[2]; 1724 u32 scsi_lun[2];
1805 s16 ptu_invalidate = 0;
1806 s16 tid = 0; 1725 s16 tid = 0;
1726 u16 sq_idx = 0;
1727 int rval = 0;
1807 1728
1808 req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; 1729 req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1809 resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; 1730 resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1810 qedi_cmd = (struct qedi_cmd *)task->dd_data; 1731 qedi_cmd = (struct qedi_cmd *)task->dd_data;
1811 nopout_hdr = (struct iscsi_nopout *)task->hdr; 1732 nopout_hdr = (struct iscsi_nopout *)task->hdr;
1733 ep = qedi_conn->ep;
1812 1734
1813 tid = qedi_get_task_idx(qedi); 1735 tid = qedi_get_task_idx(qedi);
1814 if (tid == -1) { 1736 if (tid == -1)
1815 QEDI_WARN(&qedi->dbg_ctx, "Invalid tid\n");
1816 return -ENOMEM; 1737 return -ENOMEM;
1817 }
1818
1819 fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
1820 1738
1739 fw_task_ctx =
1740 (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
1821 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); 1741 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
1742
1822 qedi_cmd->task_id = tid; 1743 qedi_cmd->task_id = tid;
1823 1744
1824 /* Ystorm context */ 1745 memset(&task_params, 0, sizeof(task_params));
1825 fw_nop_out = &fw_task_ctx->ystorm_st_context.pdu_hdr.nop_out; 1746 memset(&nop_out_pdu_header, 0, sizeof(nop_out_pdu_header));
1826 SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1); 1747 memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
1827 SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0); 1748 memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
1749
1750 /* Update header info */
1751 nop_out_pdu_header.opcode = nopout_hdr->opcode;
1752 SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
1753 SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
1828 1754
1829 memcpy(lun, &nopout_hdr->lun, sizeof(struct scsi_lun)); 1755 memcpy(scsi_lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
1830 fw_nop_out->lun.lo = be32_to_cpu(lun[0]); 1756 nop_out_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
1831 fw_nop_out->lun.hi = be32_to_cpu(lun[1]); 1757 nop_out_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
1758 nop_out_pdu_header.cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
1759 nop_out_pdu_header.exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
1832 1760
1833 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); 1761 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
1834 1762
1835 if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) { 1763 if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) {
1836 fw_nop_out->itt = be32_to_cpu(nopout_hdr->itt); 1764 nop_out_pdu_header.itt = be32_to_cpu(nopout_hdr->itt);
1837 fw_nop_out->ttt = be32_to_cpu(nopout_hdr->ttt); 1765 nop_out_pdu_header.ttt = be32_to_cpu(nopout_hdr->ttt);
1838 fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
1839 fw_task_ctx->ystorm_st_context.state.local_comp = 1;
1840 SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
1841 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
1842 } else { 1766 } else {
1843 fw_nop_out->itt = qedi_set_itt(tid, get_itt(task->itt)); 1767 nop_out_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
1844 fw_nop_out->ttt = ISCSI_TTT_ALL_ONES; 1768 nop_out_pdu_header.ttt = ISCSI_TTT_ALL_ONES;
1845 fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
1846 1769
1847 spin_lock(&qedi_conn->list_lock); 1770 spin_lock(&qedi_conn->list_lock);
1848 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); 1771 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1851,53 +1774,46 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
1851 spin_unlock(&qedi_conn->list_lock); 1774 spin_unlock(&qedi_conn->list_lock);
1852 } 1775 }
1853 1776
1854 fw_nop_out->opcode = ISCSI_OPCODE_NOP_OUT; 1777 /* Fill tx AHS and rx buffer */
1855 fw_nop_out->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn); 1778 if (data_len) {
1856 fw_nop_out->exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn); 1779 tx_sgl_task_params.sgl =
1857 1780 (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1858 cached_sge = 1781 tx_sgl_task_params.sgl_phys_addr.lo =
1859 &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge; 1782 (u32)(qedi_conn->gen_pdu.req_dma_addr);
1860 cached_sge->sge.sge_len = req_sge->sge_len; 1783 tx_sgl_task_params.sgl_phys_addr.hi =
1861 cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr); 1784 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
1862 cached_sge->sge.sge_addr.hi = 1785 tx_sgl_task_params.total_buffer_size = data_len;
1863 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); 1786 tx_sgl_task_params.num_sges = 1;
1864 1787
1865 /* Mstorm context */ 1788 rx_sgl_task_params.sgl =
1866 fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH; 1789 (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1867 fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id; 1790 rx_sgl_task_params.sgl_phys_addr.lo =
1868 1791 (u32)(qedi_conn->gen_pdu.resp_dma_addr);
1869 single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge; 1792 rx_sgl_task_params.sgl_phys_addr.hi =
1870 single_sge->sge_addr.lo = resp_sge->sge_addr.lo; 1793 (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
1871 single_sge->sge_addr.hi = resp_sge->sge_addr.hi; 1794 rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
1872 single_sge->sge_len = resp_sge->sge_len; 1795 rx_sgl_task_params.num_sges = 1;
1873 fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len; 1796 }
1874 1797
1875 if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) { 1798 /* Fill fw input params */
1876 ptu_invalidate = 1; 1799 task_params.context = fw_task_ctx;
1877 qedi->tid_reuse_count[tid] = 0; 1800 task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
1878 } 1801 task_params.itid = tid;
1879 fw_task_ctx->ystorm_st_context.state.reuse_count = 1802 task_params.cq_rss_number = 0;
1880 qedi->tid_reuse_count[tid]; 1803 task_params.tx_io_size = data_len;
1881 fw_task_ctx->mstorm_st_context.reuse_count = 1804 task_params.rx_io_size = resp_sge->sge_len;
1882 qedi->tid_reuse_count[tid]++; 1805
1883 /* Ustorm context */ 1806 sq_idx = qedi_get_wqe_idx(qedi_conn);
1884 fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len; 1807 task_params.sqe = &ep->sq[sq_idx];
1885 fw_task_ctx->ustorm_st_context.exp_data_transfer_len = data_len; 1808
1886 fw_task_ctx->ustorm_st_context.exp_data_sn = 0; 1809 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
1887 fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH; 1810 rval = init_initiator_nop_out_task(&task_params,
1888 fw_task_ctx->ustorm_st_context.cq_rss_number = 0; 1811 &nop_out_pdu_header,
1889 1812 &tx_sgl_task_params,
1890 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map, 1813 &rx_sgl_task_params);
1891 ISCSI_REG1_NUM_FAST_SGES, 0); 1814 if (rval)
1892 1815 return -1;
1893 fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id; 1816
1894 SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
1895 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
1896
1897 fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
1898 fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
1899
1900 qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
1901 qedi_ring_doorbell(qedi_conn); 1817 qedi_ring_doorbell(qedi_conn);
1902 return 0; 1818 return 0;
1903} 1819}
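Unlike the login and text paths above, NOP-Out only fills in its SGL descriptors when the PDU actually carries ping data (data_len != 0), while tx_io_size still carries that length either way. A small model of the conditional setup, with illustrative names rather than the driver structures:

#include <stdint.h>
#include <string.h>

struct sgl_model {
	void *sgl;
	uint32_t total_buffer_size;
	uint16_t num_sges;
};

static void setup_nop_out_buffers(struct sgl_model *tx, struct sgl_model *rx,
				  void *req_tbl, void *resp_tbl,
				  uint32_t data_len, uint32_t resp_len,
				  uint32_t *tx_io_size, uint32_t *rx_io_size)
{
	memset(tx, 0, sizeof(*tx));
	memset(rx, 0, sizeof(*rx));

	if (data_len) {                          /* ping data present */
		tx->sgl = req_tbl;
		tx->total_buffer_size = data_len;
		tx->num_sges = 1;

		rx->sgl = resp_tbl;
		rx->total_buffer_size = resp_len;
		rx->num_sges = 1;
	}

	*tx_io_size = data_len;                  /* zero when there is no payload */
	*rx_io_size = resp_len;
}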
@@ -1905,7 +1821,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
1905static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len, 1821static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
1906 int bd_index) 1822 int bd_index)
1907{ 1823{
1908 struct iscsi_sge *bd = cmd->io_tbl.sge_tbl; 1824 struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
1909 int frag_size, sg_frags; 1825 int frag_size, sg_frags;
1910 1826
1911 sg_frags = 0; 1827 sg_frags = 0;
@@ -1938,7 +1854,7 @@ static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
1938static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd) 1854static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
1939{ 1855{
1940 struct scsi_cmnd *sc = cmd->scsi_cmd; 1856 struct scsi_cmnd *sc = cmd->scsi_cmd;
1941 struct iscsi_sge *bd = cmd->io_tbl.sge_tbl; 1857 struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
1942 struct scatterlist *sg; 1858 struct scatterlist *sg;
1943 int byte_count = 0; 1859 int byte_count = 0;
1944 int bd_count = 0; 1860 int bd_count = 0;
@@ -2040,7 +1956,7 @@ static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd)
2040 if (bd_count == 0) 1956 if (bd_count == 0)
2041 return; 1957 return;
2042 } else { 1958 } else {
2043 struct iscsi_sge *bd = cmd->io_tbl.sge_tbl; 1959 struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
2044 1960
2045 bd[0].sge_addr.lo = 0; 1961 bd[0].sge_addr.lo = 0;
2046 bd[0].sge_addr.hi = 0; 1962 bd[0].sge_addr.hi = 0;
@@ -2136,244 +2052,182 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
2136 struct qedi_conn *qedi_conn = conn->dd_data; 2052 struct qedi_conn *qedi_conn = conn->dd_data;
2137 struct qedi_cmd *cmd = task->dd_data; 2053 struct qedi_cmd *cmd = task->dd_data;
2138 struct scsi_cmnd *sc = task->sc; 2054 struct scsi_cmnd *sc = task->sc;
2055 struct iscsi_cmd_hdr cmd_pdu_header;
2056 struct scsi_sgl_task_params tx_sgl_task_params;
2057 struct scsi_sgl_task_params rx_sgl_task_params;
2058 struct scsi_sgl_task_params *prx_sgl = NULL;
2059 struct scsi_sgl_task_params *ptx_sgl = NULL;
2060 struct iscsi_task_params task_params;
2061 struct iscsi_conn_params conn_params;
2062 struct scsi_initiator_cmd_params cmd_params;
2139 struct iscsi_task_context *fw_task_ctx; 2063 struct iscsi_task_context *fw_task_ctx;
2140 struct iscsi_cached_sge_ctx *cached_sge; 2064 struct iscsi_cls_conn *cls_conn;
2141 struct iscsi_phys_sgl_ctx *phys_sgl;
2142 struct iscsi_virt_sgl_ctx *virt_sgl;
2143 struct ystorm_iscsi_task_st_ctx *yst_cxt;
2144 struct mstorm_iscsi_task_st_ctx *mst_cxt;
2145 struct iscsi_sgl *sgl_struct;
2146 struct iscsi_sge *single_sge;
2147 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; 2065 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
2148 struct iscsi_sge *bd = cmd->io_tbl.sge_tbl; 2066 enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
2149 enum iscsi_task_type task_type; 2067 struct qedi_endpoint *ep;
2150 struct iscsi_cmd_hdr *fw_cmd; 2068 u32 scsi_lun[2];
2151 u32 lun[2];
2152 u32 exp_data;
2153 u16 cq_idx = smp_processor_id() % qedi->num_queues;
2154 s16 ptu_invalidate = 0;
2155 s16 tid = 0; 2069 s16 tid = 0;
2156 u8 num_fast_sgs; 2070 u16 sq_idx = 0;
2071 u16 cq_idx;
2072 int rval = 0;
2157 2073
2158 tid = qedi_get_task_idx(qedi); 2074 ep = qedi_conn->ep;
2159 if (tid == -1) 2075 cls_conn = qedi_conn->cls_conn;
2160 return -ENOMEM; 2076 conn = cls_conn->dd_data;
2161 2077
2162 qedi_iscsi_map_sg_list(cmd); 2078 qedi_iscsi_map_sg_list(cmd);
2079 int_to_scsilun(sc->device->lun, (struct scsi_lun *)scsi_lun);
2163 2080
2164 int_to_scsilun(sc->device->lun, (struct scsi_lun *)lun); 2081 tid = qedi_get_task_idx(qedi);
2165 fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid); 2082 if (tid == -1)
2083 return -ENOMEM;
2166 2084
2085 fw_task_ctx =
2086 (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
2167 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); 2087 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
2168 cmd->task_id = tid;
2169 2088
2170 /* Ystorm context */ 2089 cmd->task_id = tid;
2171 fw_cmd = &fw_task_ctx->ystorm_st_context.pdu_hdr.cmd;
2172 SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE);
2173 2090
2091 memset(&task_params, 0, sizeof(task_params));
2092 memset(&cmd_pdu_header, 0, sizeof(cmd_pdu_header));
2093 memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
2094 memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
2095 memset(&conn_params, 0, sizeof(conn_params));
2096 memset(&cmd_params, 0, sizeof(cmd_params));
2097
2098 cq_idx = smp_processor_id() % qedi->num_queues;
2099 /* Update header info */
2100 SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_ATTR,
2101 ISCSI_ATTR_SIMPLE);
2174 if (sc->sc_data_direction == DMA_TO_DEVICE) { 2102 if (sc->sc_data_direction == DMA_TO_DEVICE) {
2175 if (conn->session->initial_r2t_en) { 2103 SET_FIELD(cmd_pdu_header.flags_attr,
2176 exp_data = min((conn->session->imm_data_en * 2104 ISCSI_CMD_HDR_WRITE, 1);
2177 conn->max_xmit_dlength),
2178 conn->session->first_burst);
2179 exp_data = min(exp_data, scsi_bufflen(sc));
2180 fw_task_ctx->ustorm_ag_context.exp_data_acked =
2181 cpu_to_le32(exp_data);
2182 } else {
2183 fw_task_ctx->ustorm_ag_context.exp_data_acked =
2184 min(conn->session->first_burst, scsi_bufflen(sc));
2185 }
2186
2187 SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_WRITE, 1);
2188 task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE; 2105 task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
2189 } else { 2106 } else {
2190 if (scsi_bufflen(sc)) 2107 SET_FIELD(cmd_pdu_header.flags_attr,
2191 SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_READ, 1); 2108 ISCSI_CMD_HDR_READ, 1);
2192 task_type = ISCSI_TASK_TYPE_INITIATOR_READ; 2109 task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
2193 } 2110 }
2194 2111
2195 fw_cmd->lun.lo = be32_to_cpu(lun[0]); 2112 cmd_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
2196 fw_cmd->lun.hi = be32_to_cpu(lun[1]); 2113 cmd_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
2197 2114
2198 qedi_update_itt_map(qedi, tid, task->itt, cmd); 2115 qedi_update_itt_map(qedi, tid, task->itt, cmd);
2199 fw_cmd->itt = qedi_set_itt(tid, get_itt(task->itt)); 2116 cmd_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
2200 fw_cmd->expected_transfer_length = scsi_bufflen(sc); 2117 cmd_pdu_header.expected_transfer_length = cpu_to_be32(hdr->data_length);
2201 fw_cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); 2118 cmd_pdu_header.hdr_second_dword = ntoh24(hdr->dlength);
2202 fw_cmd->opcode = hdr->opcode; 2119 cmd_pdu_header.cmd_sn = be32_to_cpu(hdr->cmdsn);
2203 qedi_cpy_scsi_cdb(sc, (u32 *)fw_cmd->cdb); 2120 cmd_pdu_header.opcode = hdr->opcode;
2204 2121 qedi_cpy_scsi_cdb(sc, (u32 *)cmd_pdu_header.cdb);
2205 /* Mstorm context */ 2122
2206 fw_task_ctx->mstorm_st_context.sense_db.lo = (u32)cmd->sense_buffer_dma; 2123 /* Fill tx AHS and rx buffer */
2207 fw_task_ctx->mstorm_st_context.sense_db.hi =
2208 (u32)((u64)cmd->sense_buffer_dma >> 32);
2209 fw_task_ctx->mstorm_ag_context.task_cid = qedi_conn->iscsi_conn_id;
2210 fw_task_ctx->mstorm_st_context.task_type = task_type;
2211
2212 if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
2213 ptu_invalidate = 1;
2214 qedi->tid_reuse_count[tid] = 0;
2215 }
2216 fw_task_ctx->ystorm_st_context.state.reuse_count =
2217 qedi->tid_reuse_count[tid];
2218 fw_task_ctx->mstorm_st_context.reuse_count =
2219 qedi->tid_reuse_count[tid]++;
2220
2221 /* Ustorm context */
2222 fw_task_ctx->ustorm_st_context.rem_rcv_len = scsi_bufflen(sc);
2223 fw_task_ctx->ustorm_st_context.exp_data_transfer_len = scsi_bufflen(sc);
2224 fw_task_ctx->ustorm_st_context.exp_data_sn =
2225 be32_to_cpu(hdr->exp_statsn);
2226 fw_task_ctx->ustorm_st_context.task_type = task_type;
2227 fw_task_ctx->ustorm_st_context.cq_rss_number = cq_idx;
2228 fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
2229
2230 SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
2231 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
2232 SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
2233 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
2234
2235 num_fast_sgs = (cmd->io_tbl.sge_valid ?
2236 min((u16)QEDI_FAST_SGE_COUNT,
2237 (u16)cmd->io_tbl.sge_valid) : 0);
2238 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
2239 ISCSI_REG1_NUM_FAST_SGES, num_fast_sgs);
2240
2241 fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
2242 fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
2243
2244 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "Total sge count [%d]\n",
2245 cmd->io_tbl.sge_valid);
2246
2247 yst_cxt = &fw_task_ctx->ystorm_st_context;
2248 mst_cxt = &fw_task_ctx->mstorm_st_context;
2249 /* Tx path */
2250 if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) { 2124 if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
2251 /* not considering superIO or FastIO */ 2125 tx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
2252 if (cmd->io_tbl.sge_valid == 1) { 2126 tx_sgl_task_params.sgl_phys_addr.lo =
2253 cached_sge = &yst_cxt->state.sgl_ctx_union.cached_sge; 2127 (u32)(cmd->io_tbl.sge_tbl_dma);
2254 cached_sge->sge.sge_addr.lo = bd[0].sge_addr.lo; 2128 tx_sgl_task_params.sgl_phys_addr.hi =
2255 cached_sge->sge.sge_addr.hi = bd[0].sge_addr.hi;
2256 cached_sge->sge.sge_len = bd[0].sge_len;
2257 qedi->cached_sgls++;
2258 } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
2259 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
2260 ISCSI_MFLAGS_SLOW_IO, 1);
2261 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
2262 ISCSI_REG1_NUM_FAST_SGES, 0);
2263 phys_sgl = &yst_cxt->state.sgl_ctx_union.phys_sgl;
2264 phys_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
2265 phys_sgl->sgl_base.hi =
2266 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
2267 phys_sgl->sgl_size = cmd->io_tbl.sge_valid;
2268 qedi->slow_sgls++;
2269 } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
2270 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
2271 ISCSI_MFLAGS_SLOW_IO, 0);
2272 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
2273 ISCSI_REG1_NUM_FAST_SGES,
2274 min((u16)QEDI_FAST_SGE_COUNT,
2275 (u16)cmd->io_tbl.sge_valid));
2276 virt_sgl = &yst_cxt->state.sgl_ctx_union.virt_sgl;
2277 virt_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
2278 virt_sgl->sgl_base.hi =
2279 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32); 2129 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
2280 virt_sgl->sgl_initial_offset = 2130 tx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
2281 (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1); 2131 tx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
2282 qedi->fast_sgls++;
2283 }
2284 fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
2285 fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
2286 } else {
2287 /* Rx path */
2288 if (cmd->io_tbl.sge_valid == 1) {
2289 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
2290 ISCSI_MFLAGS_SLOW_IO, 0);
2291 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
2292 ISCSI_MFLAGS_SINGLE_SGE, 1);
2293 single_sge = &mst_cxt->sgl_union.single_sge;
2294 single_sge->sge_addr.lo = bd[0].sge_addr.lo;
2295 single_sge->sge_addr.hi = bd[0].sge_addr.hi;
2296 single_sge->sge_len = bd[0].sge_len;
2297 qedi->cached_sgls++;
2298 } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
2299 sgl_struct = &mst_cxt->sgl_union.sgl_struct;
2300 sgl_struct->sgl_addr.lo =
2301 (u32)(cmd->io_tbl.sge_tbl_dma);
2302 sgl_struct->sgl_addr.hi =
2303 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
2304 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
2305 ISCSI_MFLAGS_SLOW_IO, 1);
2306 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
2307 ISCSI_REG1_NUM_FAST_SGES, 0);
2308 sgl_struct->updated_sge_size = 0;
2309 sgl_struct->updated_sge_offset = 0;
2310 qedi->slow_sgls++;
2311 } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
2312 sgl_struct = &mst_cxt->sgl_union.sgl_struct;
2313 sgl_struct->sgl_addr.lo =
2314 (u32)(cmd->io_tbl.sge_tbl_dma);
2315 sgl_struct->sgl_addr.hi =
2316 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
2317 sgl_struct->byte_offset =
2318 (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
2319 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
2320 ISCSI_MFLAGS_SLOW_IO, 0);
2321 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
2322 ISCSI_REG1_NUM_FAST_SGES, 0);
2323 sgl_struct->updated_sge_size = 0;
2324 sgl_struct->updated_sge_offset = 0;
2325 qedi->fast_sgls++;
2326 }
2327 fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
2328 fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
2329 }
2330
2331 if (cmd->io_tbl.sge_valid == 1)
2332 /* Singel-SGL */
2333 qedi->use_cached_sge = true;
2334 else {
2335 if (cmd->use_slowpath) 2132 if (cmd->use_slowpath)
2336 qedi->use_slow_sge = true; 2133 tx_sgl_task_params.small_mid_sge = true;
2337 else 2134 } else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
2338 qedi->use_fast_sge = true; 2135 rx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
2339 } 2136 rx_sgl_task_params.sgl_phys_addr.lo =
2137 (u32)(cmd->io_tbl.sge_tbl_dma);
2138 rx_sgl_task_params.sgl_phys_addr.hi =
2139 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
2140 rx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
2141 rx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
2142 }
2143
2144 /* Add conn param */
2145 conn_params.first_burst_length = conn->session->first_burst;
2146 conn_params.max_send_pdu_length = conn->max_xmit_dlength;
2147 conn_params.max_burst_length = conn->session->max_burst;
2148 if (conn->session->initial_r2t_en)
2149 conn_params.initial_r2t = true;
2150 if (conn->session->imm_data_en)
2151 conn_params.immediate_data = true;
2152
2153 /* Add cmd params */
2154 cmd_params.sense_data_buffer_phys_addr.lo = (u32)cmd->sense_buffer_dma;
2155 cmd_params.sense_data_buffer_phys_addr.hi =
2156 (u32)((u64)cmd->sense_buffer_dma >> 32);
2157 /* Fill fw input params */
2158 task_params.context = fw_task_ctx;
2159 task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
2160 task_params.itid = tid;
2161 task_params.cq_rss_number = cq_idx;
2162 if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE)
2163 task_params.tx_io_size = scsi_bufflen(sc);
2164 else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ)
2165 task_params.rx_io_size = scsi_bufflen(sc);
2166
2167 sq_idx = qedi_get_wqe_idx(qedi_conn);
2168 task_params.sqe = &ep->sq[sq_idx];
2169
2340 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, 2170 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
2341 "%s: %s-SGL: num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x", 2171 "%s: %s-SGL: sg_len=0x%x num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x\n",
2342 (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ? 2172 (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ?
2343 "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ? 2173 "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ?
2344 "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"), 2174 "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"),
2345 (u16)cmd->io_tbl.sge_valid, (u32)(cmd->io_tbl.sge_tbl_dma), 2175 (u16)cmd->io_tbl.sge_valid, scsi_bufflen(sc),
2176 (u32)(cmd->io_tbl.sge_tbl_dma),
2346 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32)); 2177 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32));
2347 2178
2348 /* Add command in active command list */ 2179 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
2180
2181 if (task_params.tx_io_size != 0)
2182 ptx_sgl = &tx_sgl_task_params;
2183 if (task_params.rx_io_size != 0)
2184 prx_sgl = &rx_sgl_task_params;
2185
2186 rval = init_initiator_rw_iscsi_task(&task_params, &conn_params,
2187 &cmd_params, &cmd_pdu_header,
2188 ptx_sgl, prx_sgl,
2189 NULL);
2190 if (rval)
2191 return -1;
2192
2349 spin_lock(&qedi_conn->list_lock); 2193 spin_lock(&qedi_conn->list_lock);
2350 list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list); 2194 list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list);
2351 cmd->io_cmd_in_list = true; 2195 cmd->io_cmd_in_list = true;
2352 qedi_conn->active_cmd_count++; 2196 qedi_conn->active_cmd_count++;
2353 spin_unlock(&qedi_conn->list_lock); 2197 spin_unlock(&qedi_conn->list_lock);
2354 2198
2355 qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
2356 qedi_ring_doorbell(qedi_conn); 2199 qedi_ring_doorbell(qedi_conn);
2357 if (qedi_io_tracing)
2358 qedi_trace_io(qedi, task, tid, QEDI_IO_TRACE_REQ);
2359
2360 return 0; 2200 return 0;
2361} 2201}
2362 2202
2363int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted) 2203int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
2364{ 2204{
2205 struct iscsi_task_params task_params;
2206 struct qedi_endpoint *ep;
2365 struct iscsi_conn *conn = task->conn; 2207 struct iscsi_conn *conn = task->conn;
2366 struct qedi_conn *qedi_conn = conn->dd_data; 2208 struct qedi_conn *qedi_conn = conn->dd_data;
2367 struct qedi_cmd *cmd = task->dd_data; 2209 struct qedi_cmd *cmd = task->dd_data;
2368 s16 ptu_invalidate = 0; 2210 u16 sq_idx = 0;
2211 int rval = 0;
2369 2212
2370 QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM, 2213 QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
2371 "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0%x cid=0x%x\n", 2214 "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0%x cid=0x%x\n",
2372 cmd->task_id, get_itt(task->itt), task->state, 2215 cmd->task_id, get_itt(task->itt), task->state,
2373 cmd->state, qedi_conn->iscsi_conn_id); 2216 cmd->state, qedi_conn->iscsi_conn_id);
2374 2217
2375 qedi_add_to_sq(qedi_conn, task, cmd->task_id, ptu_invalidate, true); 2218 memset(&task_params, 0, sizeof(task_params));
2376 qedi_ring_doorbell(qedi_conn); 2219 ep = qedi_conn->ep;
2220
2221 sq_idx = qedi_get_wqe_idx(qedi_conn);
2222
2223 task_params.sqe = &ep->sq[sq_idx];
2224 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
2225 task_params.itid = cmd->task_id;
2377 2226
2227 rval = init_cleanup_task(&task_params);
2228 if (rval)
2229 return rval;
2230
2231 qedi_ring_doorbell(qedi_conn);
2378 return 0; 2232 return 0;
2379} 2233}
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
new file mode 100644
index 000000000000..fd354d4e03eb
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -0,0 +1,781 @@
1/* QLogic iSCSI Offload Driver
2 * Copyright (c) 2016 Cavium Inc.
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8
9#include <linux/types.h>
10#include <asm/byteorder.h>
11#include "qedi_hsi.h"
12#include <linux/qed/qed_if.h>
13
14#include "qedi_fw_iscsi.h"
15#include "qedi_fw_scsi.h"
16
17#define SCSI_NUM_SGES_IN_CACHE 0x4
18
19static bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
20{
21 return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
22}
23
24static
25void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
26 struct scsi_cached_sges *ctx_data_desc,
27 struct scsi_sgl_task_params *sgl_task_params)
28{
29 u8 sge_index;
30 u8 num_sges;
31 u32 val;
32
33 num_sges = (sgl_task_params->num_sges > SCSI_NUM_SGES_IN_CACHE) ?
34 SCSI_NUM_SGES_IN_CACHE : sgl_task_params->num_sges;
35
36 /* sgl params */
37 val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
38 ctx_sgl_params->sgl_addr.lo = val;
39 val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
40 ctx_sgl_params->sgl_addr.hi = val;
41 val = cpu_to_le32(sgl_task_params->total_buffer_size);
42 ctx_sgl_params->sgl_total_length = val;
43 ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);
44
45 for (sge_index = 0; sge_index < num_sges; sge_index++) {
46 val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
47 ctx_data_desc->sge[sge_index].sge_addr.lo = val;
48 val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
49 ctx_data_desc->sge[sge_index].sge_addr.hi = val;
50 val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
51 ctx_data_desc->sge[sge_index].sge_len = val;
52 }
53}
54
55static u32 calc_rw_task_size(struct iscsi_task_params *task_params,
56 enum iscsi_task_type task_type,
57 struct scsi_sgl_task_params *sgl_task_params,
58 struct scsi_dif_task_params *dif_task_params)
59{
60 u32 io_size;
61
62 if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
63 task_type == ISCSI_TASK_TYPE_TARGET_READ)
64 io_size = task_params->tx_io_size;
65 else
66 io_size = task_params->rx_io_size;
67
68 if (!io_size)
69 return 0;
70
71 if (!dif_task_params)
72 return io_size;
73
74 return !dif_task_params->dif_on_network ?
75 io_size : sgl_task_params->total_buffer_size;
76}
77
78static void
79init_dif_context_flags(struct iscsi_dif_flags *ctx_dif_flags,
80 struct scsi_dif_task_params *dif_task_params)
81{
82 if (!dif_task_params)
83 return;
84
85 SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG,
86 dif_task_params->dif_block_size_log);
87 SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_DIF_TO_PEER,
88 dif_task_params->dif_on_network ? 1 : 0);
89 SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_HOST_INTERFACE,
90 dif_task_params->dif_on_host ? 1 : 0);
91}
92
93static void init_sqe(struct iscsi_task_params *task_params,
94 struct scsi_sgl_task_params *sgl_task_params,
95 struct scsi_dif_task_params *dif_task_params,
96 struct iscsi_common_hdr *pdu_header,
97 struct scsi_initiator_cmd_params *cmd_params,
98 enum iscsi_task_type task_type,
99 bool is_cleanup)
100{
101 if (!task_params->sqe)
102 return;
103
104 memset(task_params->sqe, 0, sizeof(*task_params->sqe));
105 task_params->sqe->task_id = cpu_to_le16(task_params->itid);
106 if (is_cleanup) {
107 SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
108 ISCSI_WQE_TYPE_TASK_CLEANUP);
109 return;
110 }
111
112 switch (task_type) {
113 case ISCSI_TASK_TYPE_INITIATOR_WRITE:
114 {
115 u32 buf_size = 0;
116 u32 num_sges = 0;
117
118 init_dif_context_flags(&task_params->sqe->prot_flags,
119 dif_task_params);
120
121 SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
122 ISCSI_WQE_TYPE_NORMAL);
123
124 if (task_params->tx_io_size) {
125 buf_size = calc_rw_task_size(task_params, task_type,
126 sgl_task_params,
127 dif_task_params);
128
129 if (scsi_is_slow_sgl(sgl_task_params->num_sges,
130 sgl_task_params->small_mid_sge))
131 num_sges = ISCSI_WQE_NUM_SGES_SLOWIO;
132 else
133 num_sges = min(sgl_task_params->num_sges,
134 (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
135 }
136
137 SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, num_sges);
138 SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
139 buf_size);
140
141 if (GET_FIELD(pdu_header->hdr_second_dword,
142 ISCSI_CMD_HDR_TOTAL_AHS_LEN))
143 SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CDB_SIZE,
144 cmd_params->extended_cdb_sge.sge_len);
145 }
146 break;
147 case ISCSI_TASK_TYPE_INITIATOR_READ:
148 SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
149 ISCSI_WQE_TYPE_NORMAL);
150
151 if (GET_FIELD(pdu_header->hdr_second_dword,
152 ISCSI_CMD_HDR_TOTAL_AHS_LEN))
153 SET_FIELD(task_params->sqe->contlen_cdbsize,
154 ISCSI_WQE_CDB_SIZE,
155 cmd_params->extended_cdb_sge.sge_len);
156 break;
157 case ISCSI_TASK_TYPE_LOGIN_RESPONSE:
158 case ISCSI_TASK_TYPE_MIDPATH:
159 {
160 bool advance_statsn = true;
161
162 if (task_type == ISCSI_TASK_TYPE_LOGIN_RESPONSE)
163 SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
164 ISCSI_WQE_TYPE_LOGIN);
165 else
166 SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
167 ISCSI_WQE_TYPE_MIDDLE_PATH);
168
169 if (task_type == ISCSI_TASK_TYPE_MIDPATH) {
170 u8 opcode = GET_FIELD(pdu_header->hdr_first_byte,
171 ISCSI_COMMON_HDR_OPCODE);
172
173 if (opcode != ISCSI_OPCODE_TEXT_RESPONSE &&
174 (opcode != ISCSI_OPCODE_NOP_IN ||
175 pdu_header->itt == ISCSI_TTT_ALL_ONES))
176 advance_statsn = false;
177 }
178
179 SET_FIELD(task_params->sqe->flags, ISCSI_WQE_RESPONSE,
180 advance_statsn ? 1 : 0);
181
182 if (task_params->tx_io_size) {
183 SET_FIELD(task_params->sqe->contlen_cdbsize,
184 ISCSI_WQE_CONT_LEN, task_params->tx_io_size);
185
186 if (scsi_is_slow_sgl(sgl_task_params->num_sges,
187 sgl_task_params->small_mid_sge))
188 SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
189 ISCSI_WQE_NUM_SGES_SLOWIO);
190 else
191 SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
192 min(sgl_task_params->num_sges,
193 (u16)SCSI_NUM_SGES_SLOW_SGL_THR));
194 }
195 }
196 break;
197 default:
198 break;
199 }
200}
201
202static void init_default_iscsi_task(struct iscsi_task_params *task_params,
203 struct data_hdr *pdu_header,
204 enum iscsi_task_type task_type)
205{
206 struct iscsi_task_context *context;
207 u16 index;
208 u32 val;
209
210 context = task_params->context;
211 memset(context, 0, sizeof(*context));
212
213 for (index = 0; index <
214 ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
215 index++) {
216 val = cpu_to_le32(pdu_header->data[index]);
217 context->ystorm_st_context.pdu_hdr.data.data[index] = val;
218 }
219
220 context->mstorm_st_context.task_type = task_type;
221 context->mstorm_ag_context.task_cid =
222 cpu_to_le16(task_params->conn_icid);
223
224 SET_FIELD(context->ustorm_ag_context.flags1,
225 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
226
227 context->ustorm_st_context.task_type = task_type;
228 context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
229 context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid);
230}
231
232static
233void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
234 struct scsi_initiator_cmd_params *cmd)
235{
236 union iscsi_task_hdr *ctx_pdu_hdr = &ystc->pdu_hdr;
237 u32 val;
238
239 if (!cmd->extended_cdb_sge.sge_len)
240 return;
241
242 SET_FIELD(ctx_pdu_hdr->ext_cdb_cmd.hdr_second_dword,
243 ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE,
244 cmd->extended_cdb_sge.sge_len);
245 val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.lo);
246 ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.lo = val;
247 val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.hi);
248 ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.hi = val;
249 val = cpu_to_le32(cmd->extended_cdb_sge.sge_len);
250 ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_len = val;
251}
252
253static
254void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
255 struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
256 u32 remaining_recv_len,
257 u32 expected_data_transfer_len,
258 u8 num_sges, bool tx_dif_conn_err_en)
259{
260 u32 val;
261
262 ustorm_st_cxt->rem_rcv_len = cpu_to_le32(remaining_recv_len);
263 ustorm_ag_cxt->exp_data_acked = cpu_to_le32(expected_data_transfer_len);
264 val = cpu_to_le32(expected_data_transfer_len);
265 ustorm_st_cxt->exp_data_transfer_len = val;
266 SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
267 SET_FIELD(ustorm_ag_cxt->flags2,
268 USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
269 tx_dif_conn_err_en ? 1 : 0);
270}
271
272static
273void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
274 struct iscsi_conn_params *conn_params,
275 enum iscsi_task_type task_type,
276 u32 task_size,
277 u32 exp_data_transfer_len,
278 u8 total_ahs_length)
279{
280 u32 max_unsolicited_data = 0, val;
281
282 if (total_ahs_length &&
283 (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
284 task_type == ISCSI_TASK_TYPE_INITIATOR_READ))
285 SET_FIELD(context->ustorm_st_context.flags2,
286 USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST, 1);
287
288 switch (task_type) {
289 case ISCSI_TASK_TYPE_INITIATOR_WRITE:
290 if (!conn_params->initial_r2t)
291 max_unsolicited_data = conn_params->first_burst_length;
292 else if (conn_params->immediate_data)
293 max_unsolicited_data =
294 min(conn_params->first_burst_length,
295 conn_params->max_send_pdu_length);
296
297 context->ustorm_ag_context.exp_data_acked =
298 cpu_to_le32(total_ahs_length == 0 ?
299 min(exp_data_transfer_len,
300 max_unsolicited_data) :
301 ((u32)(total_ahs_length +
302 ISCSI_AHS_CNTL_SIZE)));
303 break;
304 case ISCSI_TASK_TYPE_TARGET_READ:
305 val = cpu_to_le32(exp_data_transfer_len);
306 context->ustorm_ag_context.exp_data_acked = val;
307 break;
308 case ISCSI_TASK_TYPE_INITIATOR_READ:
309 context->ustorm_ag_context.exp_data_acked =
310 cpu_to_le32((total_ahs_length == 0 ? 0 :
311 total_ahs_length +
312 ISCSI_AHS_CNTL_SIZE));
313 break;
314 case ISCSI_TASK_TYPE_TARGET_WRITE:
315 val = cpu_to_le32(task_size);
316 context->ustorm_ag_context.exp_cont_len = val;
317 break;
318 default:
319 break;
320 }
321}
322
323static
324void init_rtdif_task_context(struct rdif_task_context *rdif_context,
325 struct tdif_task_context *tdif_context,
326 struct scsi_dif_task_params *dif_task_params,
327 enum iscsi_task_type task_type)
328{
329 u32 val;
330
331 if (!dif_task_params->dif_on_network || !dif_task_params->dif_on_host)
332 return;
333
334 if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE ||
335 task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
336 rdif_context->app_tag_value =
337 cpu_to_le16(dif_task_params->application_tag);
338 rdif_context->partial_crc_value = cpu_to_le16(0xffff);
339 val = cpu_to_le32(dif_task_params->initial_ref_tag);
340 rdif_context->initial_ref_tag = val;
341 rdif_context->app_tag_mask =
342 cpu_to_le16(dif_task_params->application_tag_mask);
343 SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
344 dif_task_params->crc_seed ? 1 : 0);
345 SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOSTGUARDTYPE,
346 dif_task_params->host_guard_type);
347 SET_FIELD(rdif_context->flags0,
348 RDIF_TASK_CONTEXT_PROTECTIONTYPE,
349 dif_task_params->protection_type);
350 SET_FIELD(rdif_context->flags0,
351 RDIF_TASK_CONTEXT_INITIALREFTAGVALID, 1);
352 SET_FIELD(rdif_context->flags0,
353 RDIF_TASK_CONTEXT_KEEPREFTAGCONST,
354 dif_task_params->keep_ref_tag_const ? 1 : 0);
355 SET_FIELD(rdif_context->flags1,
356 RDIF_TASK_CONTEXT_VALIDATEAPPTAG,
357 (dif_task_params->validate_app_tag &&
358 dif_task_params->dif_on_network) ? 1 : 0);
359 SET_FIELD(rdif_context->flags1,
360 RDIF_TASK_CONTEXT_VALIDATEGUARD,
361 (dif_task_params->validate_guard &&
362 dif_task_params->dif_on_network) ? 1 : 0);
363 SET_FIELD(rdif_context->flags1,
364 RDIF_TASK_CONTEXT_VALIDATEREFTAG,
365 (dif_task_params->validate_ref_tag &&
366 dif_task_params->dif_on_network) ? 1 : 0);
367 SET_FIELD(rdif_context->flags1,
368 RDIF_TASK_CONTEXT_HOSTINTERFACE,
369 dif_task_params->dif_on_host ? 1 : 0);
370 SET_FIELD(rdif_context->flags1,
371 RDIF_TASK_CONTEXT_NETWORKINTERFACE,
372 dif_task_params->dif_on_network ? 1 : 0);
373 SET_FIELD(rdif_context->flags1,
374 RDIF_TASK_CONTEXT_FORWARDGUARD,
375 dif_task_params->forward_guard ? 1 : 0);
376 SET_FIELD(rdif_context->flags1,
377 RDIF_TASK_CONTEXT_FORWARDAPPTAG,
378 dif_task_params->forward_app_tag ? 1 : 0);
379 SET_FIELD(rdif_context->flags1,
380 RDIF_TASK_CONTEXT_FORWARDREFTAG,
381 dif_task_params->forward_ref_tag ? 1 : 0);
382 SET_FIELD(rdif_context->flags1,
383 RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
384 dif_task_params->forward_app_tag_with_mask ? 1 : 0);
385 SET_FIELD(rdif_context->flags1,
386 RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
387 dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
388 SET_FIELD(rdif_context->flags1,
389 RDIF_TASK_CONTEXT_INTERVALSIZE,
390 dif_task_params->dif_block_size_log - 9);
391 SET_FIELD(rdif_context->state,
392 RDIF_TASK_CONTEXT_REFTAGMASK,
393 dif_task_params->ref_tag_mask);
394 SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNOREAPPTAG,
395 dif_task_params->ignore_app_tag);
396 }
397
398 if (task_type == ISCSI_TASK_TYPE_TARGET_READ ||
399 task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
400 tdif_context->app_tag_value =
401 cpu_to_le16(dif_task_params->application_tag);
402 tdif_context->partial_crc_valueB =
403 cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
404 tdif_context->partial_crc_value_a =
405 cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
406 SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_CRC_SEED,
407 dif_task_params->crc_seed ? 1 : 0);
408
409 SET_FIELD(tdif_context->flags0,
410 TDIF_TASK_CONTEXT_SETERRORWITHEOP,
411 dif_task_params->tx_dif_conn_err_en ? 1 : 0);
412 SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDGUARD,
413 dif_task_params->forward_guard ? 1 : 0);
414 SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDAPPTAG,
415 dif_task_params->forward_app_tag ? 1 : 0);
416 SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDREFTAG,
417 dif_task_params->forward_ref_tag ? 1 : 0);
418 SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVALSIZE,
419 dif_task_params->dif_block_size_log - 9);
420 SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOSTINTERFACE,
421 dif_task_params->dif_on_host ? 1 : 0);
422 SET_FIELD(tdif_context->flags1,
423 TDIF_TASK_CONTEXT_NETWORKINTERFACE,
424 dif_task_params->dif_on_network ? 1 : 0);
425 val = cpu_to_le32(dif_task_params->initial_ref_tag);
426 tdif_context->initial_ref_tag = val;
427 tdif_context->app_tag_mask =
428 cpu_to_le16(dif_task_params->application_tag_mask);
429 SET_FIELD(tdif_context->flags0,
430 TDIF_TASK_CONTEXT_HOSTGUARDTYPE,
431 dif_task_params->host_guard_type);
432 SET_FIELD(tdif_context->flags0,
433 TDIF_TASK_CONTEXT_PROTECTIONTYPE,
434 dif_task_params->protection_type);
435 SET_FIELD(tdif_context->flags0,
436 TDIF_TASK_CONTEXT_INITIALREFTAGVALID,
437 dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
438 SET_FIELD(tdif_context->flags0,
439 TDIF_TASK_CONTEXT_KEEPREFTAGCONST,
440 dif_task_params->keep_ref_tag_const ? 1 : 0);
441 SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATEGUARD,
442 (dif_task_params->validate_guard &&
443 dif_task_params->dif_on_host) ? 1 : 0);
444 SET_FIELD(tdif_context->flags1,
445 TDIF_TASK_CONTEXT_VALIDATEAPPTAG,
446 (dif_task_params->validate_app_tag &&
447 dif_task_params->dif_on_host) ? 1 : 0);
448 SET_FIELD(tdif_context->flags1,
449 TDIF_TASK_CONTEXT_VALIDATEREFTAG,
450 (dif_task_params->validate_ref_tag &&
451 dif_task_params->dif_on_host) ? 1 : 0);
452 SET_FIELD(tdif_context->flags1,
453 TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
454 dif_task_params->forward_app_tag_with_mask ? 1 : 0);
455 SET_FIELD(tdif_context->flags1,
456 TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
457 dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
458 SET_FIELD(tdif_context->flags1,
459 TDIF_TASK_CONTEXT_REFTAGMASK,
460 dif_task_params->ref_tag_mask);
461 SET_FIELD(tdif_context->flags0,
462 TDIF_TASK_CONTEXT_IGNOREAPPTAG,
463 dif_task_params->ignore_app_tag ? 1 : 0);
464 }
465}
466
467static void set_local_completion_context(struct iscsi_task_context *context)
468{
469 SET_FIELD(context->ystorm_st_context.state.flags,
470 YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
471 SET_FIELD(context->ustorm_st_context.flags,
472 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
473}
474
475static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
476 enum iscsi_task_type task_type,
477 struct iscsi_conn_params *conn_params,
478 struct iscsi_common_hdr *pdu_header,
479 struct scsi_sgl_task_params *sgl_task_params,
480 struct scsi_initiator_cmd_params *cmd_params,
481 struct scsi_dif_task_params *dif_task_params)
482{
483 u32 exp_data_transfer_len = conn_params->max_burst_length;
484 struct iscsi_task_context *cxt;
485 bool slow_io = false;
486 u32 task_size, val;
487 u8 num_sges = 0;
488
489 task_size = calc_rw_task_size(task_params, task_type, sgl_task_params,
490 dif_task_params);
491
492 init_default_iscsi_task(task_params, (struct data_hdr *)pdu_header,
493 task_type);
494
495 cxt = task_params->context;
496
497 val = cpu_to_le32(task_size);
498 cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val;
499 init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
500 cmd_params);
501 val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
502 cxt->mstorm_st_context.sense_db.lo = val;
503
504 val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
505 cxt->mstorm_st_context.sense_db.hi = val;
506
507 if (task_params->tx_io_size) {
508 init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
509 dif_task_params);
510 init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
511 &cxt->ystorm_st_context.state.data_desc,
512 sgl_task_params);
513
514 slow_io = scsi_is_slow_sgl(sgl_task_params->num_sges,
515 sgl_task_params->small_mid_sge);
516
517 num_sges = !slow_io ? min_t(u16, sgl_task_params->num_sges,
518 (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
519 ISCSI_WQE_NUM_SGES_SLOWIO;
520
521 if (slow_io) {
522 SET_FIELD(cxt->ystorm_st_context.state.flags,
523 YSTORM_ISCSI_TASK_STATE_SLOW_IO, 1);
524 }
525 } else if (task_params->rx_io_size) {
526 init_dif_context_flags(&cxt->mstorm_st_context.dif_flags,
527 dif_task_params);
528 init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
529 &cxt->mstorm_st_context.data_desc,
530 sgl_task_params);
531 num_sges = !scsi_is_slow_sgl(sgl_task_params->num_sges,
532 sgl_task_params->small_mid_sge) ?
533 min_t(u16, sgl_task_params->num_sges,
534 (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
535 ISCSI_WQE_NUM_SGES_SLOWIO;
536 cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
537 }
538
539 if (exp_data_transfer_len > task_size ||
540 task_type != ISCSI_TASK_TYPE_TARGET_WRITE)
541 exp_data_transfer_len = task_size;
542
543 init_ustorm_task_contexts(&task_params->context->ustorm_st_context,
544 &task_params->context->ustorm_ag_context,
545 task_size, exp_data_transfer_len, num_sges,
546 dif_task_params ?
547 dif_task_params->tx_dif_conn_err_en : false);
548
549 set_rw_exp_data_acked_and_cont_len(task_params->context, conn_params,
550 task_type, task_size,
551 exp_data_transfer_len,
552 GET_FIELD(pdu_header->hdr_second_dword,
553 ISCSI_CMD_HDR_TOTAL_AHS_LEN));
554
555 if (dif_task_params)
556 init_rtdif_task_context(&task_params->context->rdif_context,
557 &task_params->context->tdif_context,
558 dif_task_params, task_type);
559
560 init_sqe(task_params, sgl_task_params, dif_task_params, pdu_header,
561 cmd_params, task_type, false);
562
563 return 0;
564}
565
566int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
567 struct iscsi_conn_params *conn_params,
568 struct scsi_initiator_cmd_params *cmd_params,
569 struct iscsi_cmd_hdr *cmd_header,
570 struct scsi_sgl_task_params *tx_sgl_params,
571 struct scsi_sgl_task_params *rx_sgl_params,
572 struct scsi_dif_task_params *dif_task_params)
573{
574 if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_WRITE))
575 return init_rw_iscsi_task(task_params,
576 ISCSI_TASK_TYPE_INITIATOR_WRITE,
577 conn_params,
578 (struct iscsi_common_hdr *)cmd_header,
579 tx_sgl_params, cmd_params,
580 dif_task_params);
581 else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ))
582 return init_rw_iscsi_task(task_params,
583 ISCSI_TASK_TYPE_INITIATOR_READ,
584 conn_params,
585 (struct iscsi_common_hdr *)cmd_header,
586 rx_sgl_params, cmd_params,
587 dif_task_params);
588 else
589 return -1;
590}
591
592int init_initiator_login_request_task(struct iscsi_task_params *task_params,
593 struct iscsi_login_req_hdr *login_header,
594 struct scsi_sgl_task_params *tx_params,
595 struct scsi_sgl_task_params *rx_params)
596{
597 struct iscsi_task_context *cxt;
598
599 cxt = task_params->context;
600
601 init_default_iscsi_task(task_params,
602 (struct data_hdr *)login_header,
603 ISCSI_TASK_TYPE_MIDPATH);
604
605 init_ustorm_task_contexts(&cxt->ustorm_st_context,
606 &cxt->ustorm_ag_context,
607 task_params->rx_io_size ?
608 rx_params->total_buffer_size : 0,
609 task_params->tx_io_size ?
610 tx_params->total_buffer_size : 0, 0,
611 0);
612
613 if (task_params->tx_io_size)
614 init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
615 &cxt->ystorm_st_context.state.data_desc,
616 tx_params);
617
618 if (task_params->rx_io_size)
619 init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
620 &cxt->mstorm_st_context.data_desc,
621 rx_params);
622
623 cxt->mstorm_st_context.rem_task_size =
624 cpu_to_le32(task_params->rx_io_size ?
625 rx_params->total_buffer_size : 0);
626
627 init_sqe(task_params, tx_params, NULL,
628 (struct iscsi_common_hdr *)login_header, NULL,
629 ISCSI_TASK_TYPE_MIDPATH, false);
630
631 return 0;
632}
633
634int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
635 struct iscsi_nop_out_hdr *nop_out_pdu_header,
636 struct scsi_sgl_task_params *tx_sgl_task_params,
637 struct scsi_sgl_task_params *rx_sgl_task_params)
638{
639 struct iscsi_task_context *cxt;
640
641 cxt = task_params->context;
642
643 init_default_iscsi_task(task_params,
644 (struct data_hdr *)nop_out_pdu_header,
645 ISCSI_TASK_TYPE_MIDPATH);
646
647 if (nop_out_pdu_header->itt == ISCSI_ITT_ALL_ONES)
648 set_local_completion_context(task_params->context);
649
650 if (task_params->tx_io_size)
651 init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
652 &cxt->ystorm_st_context.state.data_desc,
653 tx_sgl_task_params);
654
655 if (task_params->rx_io_size)
656 init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
657 &cxt->mstorm_st_context.data_desc,
658 rx_sgl_task_params);
659
660 init_ustorm_task_contexts(&cxt->ustorm_st_context,
661 &cxt->ustorm_ag_context,
662 task_params->rx_io_size ?
663 rx_sgl_task_params->total_buffer_size : 0,
664 task_params->tx_io_size ?
665 tx_sgl_task_params->total_buffer_size : 0,
666 0, 0);
667
668 cxt->mstorm_st_context.rem_task_size =
669 cpu_to_le32(task_params->rx_io_size ?
670 rx_sgl_task_params->total_buffer_size :
671 0);
672
673 init_sqe(task_params, tx_sgl_task_params, NULL,
674 (struct iscsi_common_hdr *)nop_out_pdu_header, NULL,
675 ISCSI_TASK_TYPE_MIDPATH, false);
676
677 return 0;
678}
679
680int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
681 struct iscsi_logout_req_hdr *logout_hdr,
682 struct scsi_sgl_task_params *tx_params,
683 struct scsi_sgl_task_params *rx_params)
684{
685 struct iscsi_task_context *cxt;
686
687 cxt = task_params->context;
688
689 init_default_iscsi_task(task_params,
690 (struct data_hdr *)logout_hdr,
691 ISCSI_TASK_TYPE_MIDPATH);
692
693 if (task_params->tx_io_size)
694 init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
695 &cxt->ystorm_st_context.state.data_desc,
696 tx_params);
697
698 if (task_params->rx_io_size)
699 init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
700 &cxt->mstorm_st_context.data_desc,
701 rx_params);
702
703 init_ustorm_task_contexts(&cxt->ustorm_st_context,
704 &cxt->ustorm_ag_context,
705 task_params->rx_io_size ?
706 rx_params->total_buffer_size : 0,
707 task_params->tx_io_size ?
708 tx_params->total_buffer_size : 0,
709 0, 0);
710
711 cxt->mstorm_st_context.rem_task_size =
712 cpu_to_le32(task_params->rx_io_size ?
713 rx_params->total_buffer_size : 0);
714
715 init_sqe(task_params, tx_params, NULL,
716 (struct iscsi_common_hdr *)logout_hdr, NULL,
717 ISCSI_TASK_TYPE_MIDPATH, false);
718
719 return 0;
720}
721
722int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
723 struct iscsi_tmf_request_hdr *tmf_header)
724{
725 init_default_iscsi_task(task_params, (struct data_hdr *)tmf_header,
726 ISCSI_TASK_TYPE_MIDPATH);
727
728 init_sqe(task_params, NULL, NULL,
729 (struct iscsi_common_hdr *)tmf_header, NULL,
730 ISCSI_TASK_TYPE_MIDPATH, false);
731
732 return 0;
733}
734
735int init_initiator_text_request_task(struct iscsi_task_params *task_params,
736 struct iscsi_text_request_hdr *text_header,
737 struct scsi_sgl_task_params *tx_params,
738 struct scsi_sgl_task_params *rx_params)
739{
740 struct iscsi_task_context *cxt;
741
742 cxt = task_params->context;
743
744 init_default_iscsi_task(task_params,
745 (struct data_hdr *)text_header,
746 ISCSI_TASK_TYPE_MIDPATH);
747
748 if (task_params->tx_io_size)
749 init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
750 &cxt->ystorm_st_context.state.data_desc,
751 tx_params);
752
753 if (task_params->rx_io_size)
754 init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
755 &cxt->mstorm_st_context.data_desc,
756 rx_params);
757
758 cxt->mstorm_st_context.rem_task_size =
759 cpu_to_le32(task_params->rx_io_size ?
760 rx_params->total_buffer_size : 0);
761
762 init_ustorm_task_contexts(&cxt->ustorm_st_context,
763 &cxt->ustorm_ag_context,
764 task_params->rx_io_size ?
765 rx_params->total_buffer_size : 0,
766 task_params->tx_io_size ?
767 tx_params->total_buffer_size : 0, 0, 0);
768
769 init_sqe(task_params, tx_params, NULL,
770 (struct iscsi_common_hdr *)text_header, NULL,
771 ISCSI_TASK_TYPE_MIDPATH, false);
772
773 return 0;
774}
775
776int init_cleanup_task(struct iscsi_task_params *task_params)
777{
778 init_sqe(task_params, NULL, NULL, NULL, NULL, ISCSI_TASK_TYPE_MIDPATH,
779 true);
780 return 0;
781}
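
The fast/slow SGL decision above (scsi_is_slow_sgl() plus the ISCSI_WQE_NUM_SGES field written by init_sqe() and init_rw_iscsi_task()) boils down to a small calculation. A minimal standalone sketch of that selection follows; the threshold and slow-I/O sentinel values and the simplified types are assumptions for illustration, not the real HSI definitions.

/* Illustrative only: simplified stand-ins for the qed HSI macros and types. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SCSI_NUM_SGES_SLOW_SGL_THR  8   /* assumed threshold value */
#define ISCSI_WQE_NUM_SGES_SLOWIO   0xf /* assumed sentinel for slow I/O */

/* Mirrors scsi_is_slow_sgl(): many SGEs plus a small mid-list SGE force slow I/O. */
static bool is_slow_sgl(uint16_t num_sges, bool small_mid_sge)
{
	return num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge;
}

/* Mirrors the num_sges selection written into the WQE by init_sqe(). */
static uint16_t wqe_num_sges(uint16_t num_sges, bool small_mid_sge)
{
	if (is_slow_sgl(num_sges, small_mid_sge))
		return ISCSI_WQE_NUM_SGES_SLOWIO;
	return num_sges < SCSI_NUM_SGES_SLOW_SGL_THR ?
	       num_sges : SCSI_NUM_SGES_SLOW_SGL_THR;
}

int main(void)
{
	printf("fast:   %u\n", wqe_num_sges(4, false));   /* 4: below threshold */
	printf("capped: %u\n", wqe_num_sges(12, false));  /* 8: capped at threshold */
	printf("slow:   %u\n", wqe_num_sges(12, true));   /* 15: slow-I/O sentinel */
	return 0;
}
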
diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h
new file mode 100644
index 000000000000..b6f24f91849d
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw_iscsi.h
@@ -0,0 +1,117 @@
1/*
2 * QLogic iSCSI Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#ifndef _QEDI_FW_ISCSI_H_
11#define _QEDI_FW_ISCSI_H_
12
13#include "qedi_fw_scsi.h"
14
15struct iscsi_task_params {
16 struct iscsi_task_context *context;
17 struct iscsi_wqe *sqe;
18 u32 tx_io_size;
19 u32 rx_io_size;
20 u16 conn_icid;
21 u16 itid;
22 u8 cq_rss_number;
23};
24
25struct iscsi_conn_params {
26 u32 first_burst_length;
27 u32 max_send_pdu_length;
28 u32 max_burst_length;
29 bool initial_r2t;
30 bool immediate_data;
31};
32
 33/* @brief init_initiator_rw_iscsi_task - initializes iSCSI Initiator Read/
 34 * Write task context.
35 *
36 * @param task_params - Pointer to task parameters struct
37 * @param conn_params - Connection Parameters
38 * @param cmd_params - command specific parameters
39 * @param cmd_pdu_header - PDU Header Parameters
40 * @param sgl_task_params - Pointer to SGL task params
41 * @param dif_task_params - Pointer to DIF parameters struct
42 */
43int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
44 struct iscsi_conn_params *conn_params,
45 struct scsi_initiator_cmd_params *cmd_params,
46 struct iscsi_cmd_hdr *cmd_pdu_header,
47 struct scsi_sgl_task_params *tx_sgl_params,
48 struct scsi_sgl_task_params *rx_sgl_params,
49 struct scsi_dif_task_params *dif_task_params);
50
51/* @brief init_initiator_login_request_task - initializes iSCSI Initiator Login
52 * Request task context.
53 *
54 * @param task_params - Pointer to task parameters struct
55 * @param login_req_pdu_header - PDU Header Parameters
56 * @param tx_sgl_task_params - Pointer to SGL task params
57 * @param rx_sgl_task_params - Pointer to SGL task params
58 */
59int init_initiator_login_request_task(struct iscsi_task_params *task_params,
60 struct iscsi_login_req_hdr *login_header,
61 struct scsi_sgl_task_params *tx_params,
62 struct scsi_sgl_task_params *rx_params);
63
64/* @brief init_initiator_nop_out_task - initializes iSCSI Initiator NOP Out
65 * task context.
66 *
67 * @param task_params - Pointer to task parameters struct
68 * @param nop_out_pdu_header - PDU Header Parameters
69 * @param tx_sgl_task_params - Pointer to SGL task params
70 * @param rx_sgl_task_params - Pointer to SGL task params
71 */
72int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
73 struct iscsi_nop_out_hdr *nop_out_pdu_header,
74 struct scsi_sgl_task_params *tx_sgl_params,
75 struct scsi_sgl_task_params *rx_sgl_params);
76
77/* @brief init_initiator_logout_request_task - initializes iSCSI Initiator
78 * Logout Request task context.
79 *
80 * @param task_params - Pointer to task parameters struct
81 * @param logout_pdu_header - PDU Header Parameters
82 * @param tx_sgl_task_params - Pointer to SGL task params
83 * @param rx_sgl_task_params - Pointer to SGL task params
84 */
85int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
86 struct iscsi_logout_req_hdr *logout_hdr,
87 struct scsi_sgl_task_params *tx_params,
88 struct scsi_sgl_task_params *rx_params);
89
90/* @brief init_initiator_tmf_request_task - initializes iSCSI Initiator TMF
91 * task context.
92 *
93 * @param task_params - Pointer to task parameters struct
94 * @param tmf_pdu_header - PDU Header Parameters
95 */
96int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
97 struct iscsi_tmf_request_hdr *tmf_header);
98
99/* @brief init_initiator_text_request_task - initializes iSCSI Initiator Text
100 * Request task context.
101 *
102 * @param task_params - Pointer to task parameters struct
103 * @param text_request_pdu_header - PDU Header Parameters
104 * @param tx_sgl_task_params - Pointer to Tx SGL task params
105 * @param rx_sgl_task_params - Pointer to Rx SGL task params
106 */
107int init_initiator_text_request_task(struct iscsi_task_params *task_params,
108 struct iscsi_text_request_hdr *text_header,
109 struct scsi_sgl_task_params *tx_params,
110 struct scsi_sgl_task_params *rx_params);
111
 112/* @brief init_cleanup_task - initializes Cleanup task (SQE)
113 *
114 * @param task_params - Pointer to task parameters struct
115 */
116int init_cleanup_task(struct iscsi_task_params *task_params);
117#endif
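
For the write path of init_initiator_rw_iscsi_task() declared above, the firmware's exp_data_acked value is derived from the negotiated session parameters in set_rw_exp_data_acked_and_cont_len(). A minimal standalone sketch of that calculation, using plain integers instead of the HSI context structures and omitting the AHS case:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

/*
 * Mirrors the INITIATOR_WRITE branch of set_rw_exp_data_acked_and_cont_len():
 * without InitialR2T the whole first burst may go out unsolicited; with
 * ImmediateData only, it is capped by the largest PDU that can be sent; with
 * neither, nothing is unsolicited. The result is further capped by the actual
 * transfer length. (Extra AHS handling is omitted for brevity.)
 */
static uint32_t exp_data_acked(uint32_t xfer_len, uint32_t first_burst,
			       uint32_t max_send_pdu, bool initial_r2t,
			       bool immediate_data)
{
	uint32_t max_unsolicited = 0;

	if (!initial_r2t)
		max_unsolicited = first_burst;
	else if (immediate_data)
		max_unsolicited = min_u32(first_burst, max_send_pdu);

	return min_u32(xfer_len, max_unsolicited);
}

int main(void)
{
	/* 256 KiB write, FirstBurstLength 64 KiB, 8 KiB max send PDU */
	printf("%u\n", exp_data_acked(262144, 65536, 8192, false, true)); /* 65536 */
	printf("%u\n", exp_data_acked(262144, 65536, 8192, true, true));  /* 8192  */
	printf("%u\n", exp_data_acked(262144, 65536, 8192, true, false)); /* 0     */
	return 0;
}
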
diff --git a/drivers/scsi/qedi/qedi_fw_scsi.h b/drivers/scsi/qedi/qedi_fw_scsi.h
new file mode 100644
index 000000000000..cdaf918f1019
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw_scsi.h
@@ -0,0 +1,55 @@
1/*
2 * QLogic iSCSI Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#ifndef _QEDI_FW_SCSI_H_
11#define _QEDI_FW_SCSI_H_
12
13#include <linux/types.h>
14#include <asm/byteorder.h>
15#include "qedi_hsi.h"
16#include <linux/qed/qed_if.h>
17
18struct scsi_sgl_task_params {
19 struct scsi_sge *sgl;
20 struct regpair sgl_phys_addr;
21 u32 total_buffer_size;
22 u16 num_sges;
23 bool small_mid_sge;
24};
25
26struct scsi_dif_task_params {
27 u32 initial_ref_tag;
28 bool initial_ref_tag_is_valid;
29 u16 application_tag;
30 u16 application_tag_mask;
31 u16 dif_block_size_log;
32 bool dif_on_network;
33 bool dif_on_host;
34 u8 host_guard_type;
35 u8 protection_type;
36 u8 ref_tag_mask;
37 bool crc_seed;
38 bool tx_dif_conn_err_en;
39 bool ignore_app_tag;
40 bool keep_ref_tag_const;
41 bool validate_guard;
42 bool validate_app_tag;
43 bool validate_ref_tag;
44 bool forward_guard;
45 bool forward_app_tag;
46 bool forward_ref_tag;
47 bool forward_app_tag_with_mask;
48 bool forward_ref_tag_with_mask;
49};
50
51struct scsi_initiator_cmd_params {
52 struct scsi_sge extended_cdb_sge;
53 struct regpair sense_data_buffer_phys_addr;
54};
55#endif
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 4cc474364c50..d1de172bebac 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -175,7 +175,7 @@ static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi,
175 if (cmd->io_tbl.sge_tbl) 175 if (cmd->io_tbl.sge_tbl)
176 dma_free_coherent(&qedi->pdev->dev, 176 dma_free_coherent(&qedi->pdev->dev,
177 QEDI_ISCSI_MAX_BDS_PER_CMD * 177 QEDI_ISCSI_MAX_BDS_PER_CMD *
178 sizeof(struct iscsi_sge), 178 sizeof(struct scsi_sge),
179 cmd->io_tbl.sge_tbl, 179 cmd->io_tbl.sge_tbl,
180 cmd->io_tbl.sge_tbl_dma); 180 cmd->io_tbl.sge_tbl_dma);
181 181
@@ -191,7 +191,7 @@ static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session,
191 struct qedi_cmd *cmd) 191 struct qedi_cmd *cmd)
192{ 192{
193 struct qedi_io_bdt *io = &cmd->io_tbl; 193 struct qedi_io_bdt *io = &cmd->io_tbl;
194 struct iscsi_sge *sge; 194 struct scsi_sge *sge;
195 195
196 io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev, 196 io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev,
197 QEDI_ISCSI_MAX_BDS_PER_CMD * 197 QEDI_ISCSI_MAX_BDS_PER_CMD *
@@ -708,22 +708,20 @@ static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
708 708
709static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn) 709static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn)
710{ 710{
711 struct iscsi_sge *bd_tbl; 711 struct scsi_sge *bd_tbl;
712 712
713 bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; 713 bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
714 714
715 bd_tbl->sge_addr.hi = 715 bd_tbl->sge_addr.hi =
716 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); 716 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
717 bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr; 717 bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr;
718 bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr - 718 bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr -
719 qedi_conn->gen_pdu.req_buf; 719 qedi_conn->gen_pdu.req_buf;
720 bd_tbl->reserved0 = 0; 720 bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
721 bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
722 bd_tbl->sge_addr.hi = 721 bd_tbl->sge_addr.hi =
723 (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32); 722 (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
724 bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr; 723 bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr;
725 bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN; 724 bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN;
726 bd_tbl->reserved0 = 0;
727} 725}
728 726
729static int qedi_iscsi_send_generic_request(struct iscsi_task *task) 727static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index d3c06bbddb4e..3247287cb0e7 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -102,7 +102,7 @@ struct qedi_endpoint {
102#define QEDI_SQ_WQES_MIN 16 102#define QEDI_SQ_WQES_MIN 16
103 103
104struct qedi_io_bdt { 104struct qedi_io_bdt {
105 struct iscsi_sge *sge_tbl; 105 struct scsi_sge *sge_tbl;
106 dma_addr_t sge_tbl_dma; 106 dma_addr_t sge_tbl_dma;
107 u16 sge_valid; 107 u16 sge_valid;
108}; 108};
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
index 9543a1b139d4..d61e3ac22e67 100644
--- a/drivers/scsi/qedi/qedi_version.h
+++ b/drivers/scsi/qedi/qedi_version.h
@@ -7,8 +7,8 @@
7 * this source tree. 7 * this source tree.
8 */ 8 */
9 9
10#define QEDI_MODULE_VERSION "8.10.3.0" 10#define QEDI_MODULE_VERSION "8.10.4.0"
11#define QEDI_DRIVER_MAJOR_VER 8 11#define QEDI_DRIVER_MAJOR_VER 8
12#define QEDI_DRIVER_MINOR_VER 10 12#define QEDI_DRIVER_MINOR_VER 10
13#define QEDI_DRIVER_REV_VER 3 13#define QEDI_DRIVER_REV_VER 4
14#define QEDI_DRIVER_ENG_VER 0 14#define QEDI_DRIVER_ENG_VER 0
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 909fc033173a..da8c64ca8dc9 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -35,6 +35,7 @@ struct bpf_map_ops {
35 void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, 35 void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
36 int fd); 36 int fd);
37 void (*map_fd_put_ptr)(void *ptr); 37 void (*map_fd_put_ptr)(void *ptr);
38 u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
38}; 39};
39 40
40struct bpf_map { 41struct bpf_map {
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index a13b031dc6b8..5efb4db44e1e 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -66,7 +66,10 @@ struct bpf_verifier_state_list {
66}; 66};
67 67
68struct bpf_insn_aux_data { 68struct bpf_insn_aux_data {
69 enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ 69 union {
70 enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
71 struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
72 };
70}; 73};
71 74
72#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ 75#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 55e517130311..abcda9b458ab 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -25,6 +25,9 @@
25#define PHY_ID_BCM57780 0x03625d90 25#define PHY_ID_BCM57780 0x03625d90
26 26
27#define PHY_ID_BCM7250 0xae025280 27#define PHY_ID_BCM7250 0xae025280
28#define PHY_ID_BCM7260 0xae025190
29#define PHY_ID_BCM7268 0xae025090
30#define PHY_ID_BCM7271 0xae0253b0
28#define PHY_ID_BCM7278 0xae0251a0 31#define PHY_ID_BCM7278 0xae0251a0
29#define PHY_ID_BCM7364 0xae025260 32#define PHY_ID_BCM7364 0xae025260
30#define PHY_ID_BCM7366 0x600d8490 33#define PHY_ID_BCM7366 0x600d8490
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index c62b709b1ce0..2d9f80848d4b 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -447,21 +447,6 @@ static inline void eth_addr_dec(u8 *addr)
447} 447}
448 448
449/** 449/**
450 * ether_addr_greater - Compare two Ethernet addresses
451 * @addr1: Pointer to a six-byte array containing the Ethernet address
452 * @addr2: Pointer other six-byte array containing the Ethernet address
453 *
454 * Compare two Ethernet addresses, returns true addr1 is greater than addr2
455 */
456static inline bool ether_addr_greater(const u8 *addr1, const u8 *addr2)
457{
458 u64 u1 = ether_addr_to_u64(addr1);
459 u64 u2 = ether_addr_to_u64(addr2);
460
461 return u1 > u2;
462}
463
464/**
465 * is_etherdev_addr - Tell if given Ethernet address belongs to the device. 450 * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
466 * @dev: Pointer to a device structure 451 * @dev: Pointer to a device structure
467 * @addr: Pointer to a six-byte array containing the Ethernet address 452 * @addr: Pointer to a six-byte array containing the Ethernet address
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 9ded8c6d8176..83cc9863444b 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -60,6 +60,7 @@ enum ethtool_phys_id_state {
60enum { 60enum {
61 ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */ 61 ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */
62 ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */ 62 ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */
63 ETH_RSS_HASH_CRC32_BIT, /* Configurable RSS hash function - Crc32 */
63 64
64 /* 65 /*
65 * Add your fresh new hash function bits above and remember to update 66 * Add your fresh new hash function bits above and remember to update
@@ -73,6 +74,7 @@ enum {
73 74
74#define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP) 75#define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP)
75#define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR) 76#define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR)
77#define ETH_RSS_HASH_CRC32 __ETH_RSS_HASH(CRC32)
76 78
77#define ETH_RSS_HASH_UNKNOWN 0 79#define ETH_RSS_HASH_UNKNOWN 0
78#define ETH_RSS_HASH_NO_CHANGE 0 80#define ETH_RSS_HASH_NO_CHANGE 0
diff --git a/include/linux/filter.h b/include/linux/filter.h
index fbf7b39e8103..dffa072b7b79 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -693,6 +693,11 @@ static inline bool bpf_jit_is_ebpf(void)
693# endif 693# endif
694} 694}
695 695
696static inline bool ebpf_jit_enabled(void)
697{
698 return bpf_jit_enable && bpf_jit_is_ebpf();
699}
700
696static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) 701static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
697{ 702{
698 return fp->jited && bpf_jit_is_ebpf(); 703 return fp->jited && bpf_jit_is_ebpf();
@@ -753,6 +758,11 @@ void bpf_prog_kallsyms_del(struct bpf_prog *fp);
753 758
754#else /* CONFIG_BPF_JIT */ 759#else /* CONFIG_BPF_JIT */
755 760
761static inline bool ebpf_jit_enabled(void)
762{
763 return false;
764}
765
756static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) 766static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
757{ 767{
758 return false; 768 return false;
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 62bbf3c1aa4a..36162485d663 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1504,14 +1504,6 @@ static inline void hv_signal_on_read(struct vmbus_channel *channel)
1504 return; 1504 return;
1505} 1505}
1506 1506
1507static inline void
1508init_cached_read_index(struct vmbus_channel *channel)
1509{
1510 struct hv_ring_buffer_info *rbi = &channel->inbound;
1511
1512 rbi->cached_read_index = rbi->ring_buffer->read_index;
1513}
1514
1515/* 1507/*
1516 * Mask off host interrupt callback notifications 1508 * Mask off host interrupt callback notifications
1517 */ 1509 */
@@ -1545,76 +1537,48 @@ static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
1545/* 1537/*
1546 * An API to support in-place processing of incoming VMBUS packets. 1538 * An API to support in-place processing of incoming VMBUS packets.
1547 */ 1539 */
1548#define VMBUS_PKT_TRAILER 8
1549 1540
1550static inline struct vmpacket_descriptor * 1541/* Get data payload associated with descriptor */
1551get_next_pkt_raw(struct vmbus_channel *channel) 1542static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
1552{ 1543{
1553 struct hv_ring_buffer_info *ring_info = &channel->inbound; 1544 return (void *)((unsigned long)desc + (desc->offset8 << 3));
1554 u32 priv_read_loc = ring_info->priv_read_index;
1555 void *ring_buffer = hv_get_ring_buffer(ring_info);
1556 u32 dsize = ring_info->ring_datasize;
1557 /*
1558 * delta is the difference between what is available to read and
1559 * what was already consumed in place. We commit read index after
1560 * the whole batch is processed.
1561 */
1562 u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ?
1563 priv_read_loc - ring_info->ring_buffer->read_index :
1564 (dsize - ring_info->ring_buffer->read_index) + priv_read_loc;
1565 u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);
1566
1567 if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
1568 return NULL;
1569
1570 return ring_buffer + priv_read_loc;
1571} 1545}
1572 1546
1573/* 1547/* Get data size associated with descriptor */
1574 * A helper function to step through packets "in-place" 1548static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
1575 * This API is to be called after each successful call
1576 * get_next_pkt_raw().
1577 */
1578static inline void put_pkt_raw(struct vmbus_channel *channel,
1579 struct vmpacket_descriptor *desc)
1580{ 1549{
1581 struct hv_ring_buffer_info *ring_info = &channel->inbound; 1550 return (desc->len8 << 3) - (desc->offset8 << 3);
1582 u32 packetlen = desc->len8 << 3;
1583 u32 dsize = ring_info->ring_datasize;
1584
1585 /*
1586 * Include the packet trailer.
1587 */
1588 ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
1589 ring_info->priv_read_index %= dsize;
1590} 1551}
1591 1552
1553
1554struct vmpacket_descriptor *
1555hv_pkt_iter_first(struct vmbus_channel *channel);
1556
1557struct vmpacket_descriptor *
1558__hv_pkt_iter_next(struct vmbus_channel *channel,
1559 const struct vmpacket_descriptor *pkt);
1560
1561void hv_pkt_iter_close(struct vmbus_channel *channel);
1562
1592/* 1563/*
1593 * This call commits the read index and potentially signals the host. 1564 * Get next packet descriptor from iterator
1594 * Here is the pattern for using the "in-place" consumption APIs: 1565 * If at end of list, return NULL and update host.
1595 *
1596 * init_cached_read_index();
1597 *
1598 * while (get_next_pkt_raw() {
1599 * process the packet "in-place";
1600 * put_pkt_raw();
1601 * }
1602 * if (packets processed in place)
1603 * commit_rd_index();
1604 */ 1566 */
1605static inline void commit_rd_index(struct vmbus_channel *channel) 1567static inline struct vmpacket_descriptor *
1568hv_pkt_iter_next(struct vmbus_channel *channel,
1569 const struct vmpacket_descriptor *pkt)
1606{ 1570{
1607 struct hv_ring_buffer_info *ring_info = &channel->inbound; 1571 struct vmpacket_descriptor *nxt;
1608 /* 1572
1609 * Make sure all reads are done before we update the read index since 1573 nxt = __hv_pkt_iter_next(channel, pkt);
1610 * the writer may start writing to the read area once the read index 1574 if (!nxt)
1611 * is updated. 1575 hv_pkt_iter_close(channel);
1612 */
1613 virt_rmb();
1614 ring_info->ring_buffer->read_index = ring_info->priv_read_index;
1615 1576
1616 hv_signal_on_read(channel); 1577 return nxt;
1617} 1578}
1618 1579
1580#define foreach_vmbus_pkt(pkt, channel) \
1581 for (pkt = hv_pkt_iter_first(channel); pkt; \
1582 pkt = hv_pkt_iter_next(channel, pkt))
1619 1583
1620#endif /* _HYPERV_H */ 1584#endif /* _HYPERV_H */
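Note on the hyperv.h hunk above: the open-coded get_next_pkt_raw()/put_pkt_raw()/commit_rd_index() pattern is replaced by an iterator-style API (hv_pkt_iter_first/next plus the foreach_vmbus_pkt helper). A minimal consumption sketch under the new API; the channel pointer is assumed to come from the driver's normal channel-open path:

#include <linux/hyperv.h>

/* Sketch only: drain a VMBus channel in place with the new iterator.
 * 'chan' is assumed to be a channel the driver has already opened.
 */
static void example_drain_channel(struct vmbus_channel *chan)
{
	struct vmpacket_descriptor *desc;

	foreach_vmbus_pkt(desc, chan) {
		void *data = hv_pkt_data(desc);
		u32 len = hv_pkt_datalen(desc);

		/* process 'data'/'len' in place here */
		(void)data;
		(void)len;
	}
	/* hv_pkt_iter_next() calls hv_pkt_iter_close() once it runs out
	 * of packets, which commits the read index back to the host. */
}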
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 71be5b330d21..f0d79bd054ca 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -70,6 +70,7 @@ struct ipv6_devconf {
70#endif 70#endif
71 __u32 enhanced_dad; 71 __u32 enhanced_dad;
72 __u32 addr_gen_mode; 72 __u32 addr_gen_mode;
73 __s32 disable_policy;
73 74
74 struct ctl_table_header *sysctl_header; 75 struct ctl_table_header *sysctl_header;
75}; 76};
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 97456b2539e4..b7365b587818 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -786,11 +786,11 @@ struct tc_cls_u32_offload;
786struct tc_to_netdev { 786struct tc_to_netdev {
787 unsigned int type; 787 unsigned int type;
788 union { 788 union {
789 u8 tc;
790 struct tc_cls_u32_offload *cls_u32; 789 struct tc_cls_u32_offload *cls_u32;
791 struct tc_cls_flower_offload *cls_flower; 790 struct tc_cls_flower_offload *cls_flower;
792 struct tc_cls_matchall_offload *cls_mall; 791 struct tc_cls_matchall_offload *cls_mall;
793 struct tc_cls_bpf_offload *cls_bpf; 792 struct tc_cls_bpf_offload *cls_bpf;
793 struct tc_mqprio_qopt *mqprio;
794 }; 794 };
795 bool egress_dev; 795 bool egress_dev;
796}; 796};
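Note on the tc_to_netdev change above: the bare 'u8 tc' member is dropped in favour of a struct tc_mqprio_qopt pointer. A hedged sketch of how a driver's mqprio handling might read the traffic-class count after this change; the helper name and its placement in the driver are assumptions:

#include <linux/netdevice.h>

/* Sketch only: consume the new 'mqprio' union member. */
static int example_handle_mqprio(struct net_device *dev,
				 struct tc_to_netdev *tc)
{
	if (tc->type != TC_SETUP_MQPRIO)
		return -EOPNOTSUPP;

	/* The traffic-class count now arrives via the qopt structure
	 * rather than the removed 'u8 tc' field. */
	return netdev_set_num_tc(dev, tc->mqprio->num_tc);
}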
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 52966b9bfde3..fbab6e0514f0 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -100,8 +100,8 @@
100#define MAX_NUM_LL2_TX_STATS_COUNTERS 32 100#define MAX_NUM_LL2_TX_STATS_COUNTERS 32
101 101
102#define FW_MAJOR_VERSION 8 102#define FW_MAJOR_VERSION 8
103#define FW_MINOR_VERSION 10 103#define FW_MINOR_VERSION 15
104#define FW_REVISION_VERSION 10 104#define FW_REVISION_VERSION 3
105#define FW_ENGINEERING_VERSION 0 105#define FW_ENGINEERING_VERSION 0
106 106
107/***********************/ 107/***********************/
@@ -187,6 +187,9 @@
187 187
188/* DEMS */ 188/* DEMS */
189#define DQ_DEMS_LEGACY 0 189#define DQ_DEMS_LEGACY 0
190#define DQ_DEMS_TOE_MORE_TO_SEND 3
191#define DQ_DEMS_TOE_LOCAL_ADV_WND 4
192#define DQ_DEMS_ROCE_CQ_CONS 7
190 193
191/* XCM agg val selection */ 194/* XCM agg val selection */
192#define DQ_XCM_AGG_VAL_SEL_WORD2 0 195#define DQ_XCM_AGG_VAL_SEL_WORD2 0
@@ -214,6 +217,9 @@
214#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 217#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
215#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6 218#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
216#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 219#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
220#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
221#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
222#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
217 223
218/* UCM agg val selection (HW) */ 224/* UCM agg val selection (HW) */
219#define DQ_UCM_AGG_VAL_SEL_WORD0 0 225#define DQ_UCM_AGG_VAL_SEL_WORD0 0
@@ -269,6 +275,8 @@
269#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) 275#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
270#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) 276#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
271#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) 277#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
278#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
279#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
272 280
273/* UCM agg counter flag selection (HW) */ 281/* UCM agg counter flag selection (HW) */
274#define DQ_UCM_AGG_FLG_SHIFT_CF0 0 282#define DQ_UCM_AGG_FLG_SHIFT_CF0 0
@@ -285,6 +293,9 @@
285#define DQ_UCM_ETH_PMD_RX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5) 293#define DQ_UCM_ETH_PMD_RX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
286#define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4) 294#define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
287#define DQ_UCM_ROCE_CQ_ARM_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5) 295#define DQ_UCM_ROCE_CQ_ARM_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
296#define DQ_UCM_TOE_TIMER_STOP_ALL_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF3)
297#define DQ_UCM_TOE_SLOW_PATH_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
298#define DQ_UCM_TOE_DQ_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
288 299
289/* TCM agg counter flag selection (HW) */ 300/* TCM agg counter flag selection (HW) */
290#define DQ_TCM_AGG_FLG_SHIFT_CF0 0 301#define DQ_TCM_AGG_FLG_SHIFT_CF0 0
@@ -301,6 +312,9 @@
301#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) 312#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
302#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) 313#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
303#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) 314#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
315#define DQ_TCM_TOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
316#define DQ_TCM_TOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
317#define DQ_TCM_IWARP_POST_RQ_CF_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
304 318
305/* PWM address mapping */ 319/* PWM address mapping */
306#define DQ_PWM_OFFSET_DPM_BASE 0x0 320#define DQ_PWM_OFFSET_DPM_BASE 0x0
@@ -689,6 +703,16 @@ struct iscsi_eqe_data {
689#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7 703#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
690}; 704};
691 705
706struct rdma_eqe_destroy_qp {
707 __le32 cid;
708 u8 reserved[4];
709};
710
711union rdma_eqe_data {
712 struct regpair async_handle;
713 struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
714};
715
692struct malicious_vf_eqe_data { 716struct malicious_vf_eqe_data {
693 u8 vf_id; 717 u8 vf_id;
694 u8 err_id; 718 u8 err_id;
@@ -705,9 +729,9 @@ union event_ring_data {
705 u8 bytes[8]; 729 u8 bytes[8];
706 struct vf_pf_channel_eqe_data vf_pf_channel; 730 struct vf_pf_channel_eqe_data vf_pf_channel;
707 struct iscsi_eqe_data iscsi_info; 731 struct iscsi_eqe_data iscsi_info;
732 union rdma_eqe_data rdma_data;
708 struct malicious_vf_eqe_data malicious_vf; 733 struct malicious_vf_eqe_data malicious_vf;
709 struct initial_cleanup_eqe_data vf_init_cleanup; 734 struct initial_cleanup_eqe_data vf_init_cleanup;
710 struct regpair roce_handle;
711}; 735};
712 736
713/* Event Ring Entry */ 737/* Event Ring Entry */
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 4b402fb0eaad..34d93eb5bfba 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -49,6 +49,9 @@
49#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 49#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
50#define ETH_RX_NUM_NEXT_PAGE_BDS 2 50#define ETH_RX_NUM_NEXT_PAGE_BDS 2
51 51
52#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253
53#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251
54
52#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 55#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
53#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 56#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
54#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255 57#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
index 2e417a45c5f7..947a635d04bb 100644
--- a/include/linux/qed/fcoe_common.h
+++ b/include/linux/qed/fcoe_common.h
@@ -109,13 +109,6 @@ struct fcoe_conn_terminate_ramrod_data {
109 struct regpair terminate_params_addr; 109 struct regpair terminate_params_addr;
110}; 110};
111 111
112struct fcoe_fast_sgl_ctx {
113 struct regpair sgl_start_addr;
114 __le32 sgl_byte_offset;
115 __le16 task_reuse_cnt;
116 __le16 init_offset_in_first_sge;
117};
118
119struct fcoe_slow_sgl_ctx { 112struct fcoe_slow_sgl_ctx {
120 struct regpair base_sgl_addr; 113 struct regpair base_sgl_addr;
121 __le16 curr_sge_off; 114 __le16 curr_sge_off;
@@ -124,23 +117,16 @@ struct fcoe_slow_sgl_ctx {
124 __le16 reserved; 117 __le16 reserved;
125}; 118};
126 119
127struct fcoe_sge {
128 struct regpair sge_addr;
129 __le16 size;
130 __le16 reserved0;
131 u8 reserved1[3];
132 u8 is_valid_sge;
133};
134
135union fcoe_data_desc_ctx {
136 struct fcoe_fast_sgl_ctx fast;
137 struct fcoe_slow_sgl_ctx slow;
138 struct fcoe_sge single_sge;
139};
140
141union fcoe_dix_desc_ctx { 120union fcoe_dix_desc_ctx {
142 struct fcoe_slow_sgl_ctx dix_sgl; 121 struct fcoe_slow_sgl_ctx dix_sgl;
143 struct fcoe_sge cached_dix_sge; 122 struct scsi_sge cached_dix_sge;
123};
124
125struct fcoe_fast_sgl_ctx {
126 struct regpair sgl_start_addr;
127 __le32 sgl_byte_offset;
128 __le16 task_reuse_cnt;
129 __le16 init_offset_in_first_sge;
144}; 130};
145 131
146struct fcoe_fcp_cmd_payload { 132struct fcoe_fcp_cmd_payload {
@@ -172,57 +158,6 @@ enum fcoe_mode_type {
172 MAX_FCOE_MODE_TYPE 158 MAX_FCOE_MODE_TYPE
173}; 159};
174 160
175struct fcoe_mstorm_fcoe_task_st_ctx_fp {
176 __le16 flags;
177#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_MASK 0x7FFF
178#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_SHIFT 0
179#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_MASK 0x1
180#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_SHIFT 15
181 __le16 difDataResidue;
182 __le16 parent_id;
183 __le16 single_sge_saved_offset;
184 __le32 data_2_trns_rem;
185 __le32 offset_in_io;
186 union fcoe_dix_desc_ctx dix_desc;
187 union fcoe_data_desc_ctx data_desc;
188};
189
190struct fcoe_mstorm_fcoe_task_st_ctx_non_fp {
191 __le16 flags;
192#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_MASK 0x3
193#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_SHIFT 0
194#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_MASK 0x1
195#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_SHIFT 2
196#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_MASK 0x1
197#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_SHIFT 3
198#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_MASK 0xF
199#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_SHIFT 4
200#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_MASK 0x3
201#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_SHIFT 8
202#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_MASK 0x1
203#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_SHIFT 10
204#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_MASK 0x1
205#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_SHIFT 11
206#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_MASK 0x1
207#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_SHIFT 12
208#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_MASK 0x1
209#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_SHIFT 13
210#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_MASK 0x1
211#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_SHIFT 14
212#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_MASK 0x1
213#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_SHIFT 15
214 u8 tx_rx_sgl_mode;
215#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_MASK 0x7
216#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_SHIFT 0
217#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_MASK 0x7
218#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_SHIFT 3
219#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_MASK 0x3
220#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_SHIFT 6
221 u8 rsrv2;
222 __le32 num_prm_zero_read;
223 struct regpair rsp_buf_addr;
224};
225
226struct fcoe_rx_stat { 161struct fcoe_rx_stat {
227 struct regpair fcoe_rx_byte_cnt; 162 struct regpair fcoe_rx_byte_cnt;
228 struct regpair fcoe_rx_data_pkt_cnt; 163 struct regpair fcoe_rx_data_pkt_cnt;
@@ -236,16 +171,6 @@ struct fcoe_rx_stat {
236 __le32 rsrv; 171 __le32 rsrv;
237}; 172};
238 173
239enum fcoe_sgl_mode {
240 FCOE_SLOW_SGL,
241 FCOE_SINGLE_FAST_SGE,
242 FCOE_2_FAST_SGE,
243 FCOE_3_FAST_SGE,
244 FCOE_4_FAST_SGE,
245 FCOE_MUL_FAST_SGES,
246 MAX_FCOE_SGL_MODE
247};
248
249struct fcoe_stat_ramrod_data { 174struct fcoe_stat_ramrod_data {
250 struct regpair stat_params_addr; 175 struct regpair stat_params_addr;
251}; 176};
@@ -328,22 +253,24 @@ union fcoe_tx_info_union_ctx {
328struct ystorm_fcoe_task_st_ctx { 253struct ystorm_fcoe_task_st_ctx {
329 u8 task_type; 254 u8 task_type;
330 u8 sgl_mode; 255 u8 sgl_mode;
331#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x7 256#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
332#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0 257#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
333#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x1F 258#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F
334#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 3 259#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1
335 u8 cached_dix_sge; 260 u8 cached_dix_sge;
336 u8 expect_first_xfer; 261 u8 expect_first_xfer;
337 __le32 num_pbf_zero_write; 262 __le32 num_pbf_zero_write;
338 union protection_info_union_ctx protection_info_union; 263 union protection_info_union_ctx protection_info_union;
339 __le32 data_2_trns_rem; 264 __le32 data_2_trns_rem;
265 struct scsi_sgl_params sgl_params;
266 u8 reserved1[12];
340 union fcoe_tx_info_union_ctx tx_info_union; 267 union fcoe_tx_info_union_ctx tx_info_union;
341 union fcoe_dix_desc_ctx dix_desc; 268 union fcoe_dix_desc_ctx dix_desc;
342 union fcoe_data_desc_ctx data_desc; 269 struct scsi_cached_sges data_desc;
343 __le16 ox_id; 270 __le16 ox_id;
344 __le16 rx_id; 271 __le16 rx_id;
345 __le32 task_rety_identifier; 272 __le32 task_rety_identifier;
346 __le32 reserved1[2]; 273 u8 reserved2[8];
347}; 274};
348 275
349struct ystorm_fcoe_task_ag_ctx { 276struct ystorm_fcoe_task_ag_ctx {
@@ -484,22 +411,22 @@ struct tstorm_fcoe_task_ag_ctx {
484struct fcoe_tstorm_fcoe_task_st_ctx_read_write { 411struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
485 union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union; 412 union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
486 __le16 flags; 413 __le16 flags;
487#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x7 414#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1
488#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0 415#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0
489#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1 416#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1
490#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 3 417#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1
491#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1 418#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1
492#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 4 419#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2
493#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1 420#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1
494#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 5 421#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3
495#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1 422#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1
496#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 6 423#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
497#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1 424#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1
498#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 7 425#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5
499#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3 426#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3
500#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 8 427#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6
501#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0x3F 428#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF
502#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 10 429#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8
503 __le16 seq_cnt; 430 __le16 seq_cnt;
504 u8 seq_id; 431 u8 seq_id;
505 u8 ooo_rx_seq_id; 432 u8 ooo_rx_seq_id;
@@ -582,8 +509,34 @@ struct mstorm_fcoe_task_ag_ctx {
582}; 509};
583 510
584struct mstorm_fcoe_task_st_ctx { 511struct mstorm_fcoe_task_st_ctx {
585 struct fcoe_mstorm_fcoe_task_st_ctx_non_fp non_fp; 512 struct regpair rsp_buf_addr;
586 struct fcoe_mstorm_fcoe_task_st_ctx_fp fp; 513 __le32 rsrv[2];
514 struct scsi_sgl_params sgl_params;
515 __le32 data_2_trns_rem;
516 __le32 data_buffer_offset;
517 __le16 parent_id;
518 __le16 flags;
519#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF
520#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0
521#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3
522#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4
523#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1
524#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6
525#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1
526#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7
527#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3
528#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8
529#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
530#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10
531#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1
532#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11
533#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1
534#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12
535#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
536#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13
537#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3
538#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14
539 struct scsi_cached_sges data_desc;
587}; 540};
588 541
589struct ustorm_fcoe_task_ag_ctx { 542struct ustorm_fcoe_task_ag_ctx {
@@ -646,6 +599,7 @@ struct ustorm_fcoe_task_ag_ctx {
646 599
647struct fcoe_task_context { 600struct fcoe_task_context {
648 struct ystorm_fcoe_task_st_ctx ystorm_st_context; 601 struct ystorm_fcoe_task_st_ctx ystorm_st_context;
602 struct regpair ystorm_st_padding[2];
649 struct tdif_task_context tdif_context; 603 struct tdif_task_context tdif_context;
650 struct ystorm_fcoe_task_ag_ctx ystorm_ag_context; 604 struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
651 struct tstorm_fcoe_task_ag_ctx tstorm_ag_context; 605 struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
@@ -668,20 +622,20 @@ struct fcoe_tx_stat {
668struct fcoe_wqe { 622struct fcoe_wqe {
669 __le16 task_id; 623 __le16 task_id;
670 __le16 flags; 624 __le16 flags;
671#define FCOE_WQE_REQ_TYPE_MASK 0xF 625#define FCOE_WQE_REQ_TYPE_MASK 0xF
672#define FCOE_WQE_REQ_TYPE_SHIFT 0 626#define FCOE_WQE_REQ_TYPE_SHIFT 0
673#define FCOE_WQE_SGL_MODE_MASK 0x7 627#define FCOE_WQE_SGL_MODE_MASK 0x1
674#define FCOE_WQE_SGL_MODE_SHIFT 4 628#define FCOE_WQE_SGL_MODE_SHIFT 4
675#define FCOE_WQE_CONTINUATION_MASK 0x1 629#define FCOE_WQE_CONTINUATION_MASK 0x1
676#define FCOE_WQE_CONTINUATION_SHIFT 7 630#define FCOE_WQE_CONTINUATION_SHIFT 5
677#define FCOE_WQE_INVALIDATE_PTU_MASK 0x1 631#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
678#define FCOE_WQE_INVALIDATE_PTU_SHIFT 8 632#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
679#define FCOE_WQE_SUPER_IO_MASK 0x1 633#define FCOE_WQE_RESERVED_MASK 0x1
680#define FCOE_WQE_SUPER_IO_SHIFT 9 634#define FCOE_WQE_RESERVED_SHIFT 7
681#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1 635#define FCOE_WQE_NUM_SGES_MASK 0xF
682#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 10 636#define FCOE_WQE_NUM_SGES_SHIFT 8
683#define FCOE_WQE_RESERVED0_MASK 0x1F 637#define FCOE_WQE_RESERVED1_MASK 0xF
684#define FCOE_WQE_RESERVED0_SHIFT 11 638#define FCOE_WQE_RESERVED1_SHIFT 12
685 union fcoe_additional_info_union additional_info_union; 639 union fcoe_additional_info_union additional_info_union;
686}; 640};
687 641
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 4c5747babcf6..69949f8e354b 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -39,17 +39,9 @@
39/* iSCSI HSI constants */ 39/* iSCSI HSI constants */
40#define ISCSI_DEFAULT_MTU (1500) 40#define ISCSI_DEFAULT_MTU (1500)
41 41
42/* Current iSCSI HSI version number composed of two fields (16 bit) */
43#define ISCSI_HSI_MAJOR_VERSION (0)
44#define ISCSI_HSI_MINOR_VERSION (0)
45
46/* KWQ (kernel work queue) layer codes */ 42/* KWQ (kernel work queue) layer codes */
47#define ISCSI_SLOW_PATH_LAYER_CODE (6) 43#define ISCSI_SLOW_PATH_LAYER_CODE (6)
48 44
49/* CQE completion status */
50#define ISCSI_EQE_COMPLETION_SUCCESS (0x0)
51#define ISCSI_EQE_RST_CONN_RCVD (0x1)
52
53/* iSCSI parameter defaults */ 45/* iSCSI parameter defaults */
54#define ISCSI_DEFAULT_HEADER_DIGEST (0) 46#define ISCSI_DEFAULT_HEADER_DIGEST (0)
55#define ISCSI_DEFAULT_DATA_DIGEST (0) 47#define ISCSI_DEFAULT_DATA_DIGEST (0)
@@ -68,6 +60,10 @@
68#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1) 60#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1)
69#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff) 61#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff)
70 62
63#define ISCSI_AHS_CNTL_SIZE 4
64
65#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf)
66
71/* iSCSI reserved params */ 67/* iSCSI reserved params */
72#define ISCSI_ITT_ALL_ONES (0xffffffff) 68#define ISCSI_ITT_ALL_ONES (0xffffffff)
73#define ISCSI_TTT_ALL_ONES (0xffffffff) 69#define ISCSI_TTT_ALL_ONES (0xffffffff)
@@ -173,19 +169,6 @@ struct iscsi_async_msg_hdr {
173 __le32 reserved7; 169 __le32 reserved7;
174}; 170};
175 171
176struct iscsi_sge {
177 struct regpair sge_addr;
178 __le16 sge_len;
179 __le16 reserved0;
180 __le32 reserved1;
181};
182
183struct iscsi_cached_sge_ctx {
184 struct iscsi_sge sge;
185 struct regpair reserved;
186 __le32 dsgl_curr_offset[2];
187};
188
189struct iscsi_cmd_hdr { 172struct iscsi_cmd_hdr {
190 __le16 reserved1; 173 __le16 reserved1;
191 u8 flags_attr; 174 u8 flags_attr;
@@ -229,8 +212,13 @@ struct iscsi_common_hdr {
229#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0 212#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0
230#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF 213#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF
231#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24 214#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
232 __le32 lun_reserved[4]; 215 struct regpair lun_reserved;
233 __le32 data[6]; 216 __le32 itt;
217 __le32 ttt;
218 __le32 cmdstat_sn;
219 __le32 exp_statcmd_sn;
220 __le32 max_cmd_sn;
221 __le32 data[3];
234}; 222};
235 223
236struct iscsi_conn_offload_params { 224struct iscsi_conn_offload_params {
@@ -246,8 +234,10 @@ struct iscsi_conn_offload_params {
246#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0 234#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
247#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1 235#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
248#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1 236#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
249#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x3F 237#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
250#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 2 238#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
239#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
240#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
251 u8 pbl_page_size_log; 241 u8 pbl_page_size_log;
252 u8 pbe_page_size_log; 242 u8 pbe_page_size_log;
253 u8 default_cq; 243 u8 default_cq;
@@ -278,8 +268,12 @@ struct iscsi_conn_update_ramrod_params {
278#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2 268#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2
279#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1 269#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1
280#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3 270#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
281#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0xF 271#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1
282#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 4 272#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
273#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1
274#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
275#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x3
276#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 6
283 u8 reserved0[3]; 277 u8 reserved0[3];
284 __le32 max_seq_size; 278 __le32 max_seq_size;
285 __le32 max_send_pdu_length; 279 __le32 max_send_pdu_length;
@@ -312,7 +306,7 @@ struct iscsi_ext_cdb_cmd_hdr {
312 __le32 expected_transfer_length; 306 __le32 expected_transfer_length;
313 __le32 cmd_sn; 307 __le32 cmd_sn;
314 __le32 exp_stat_sn; 308 __le32 exp_stat_sn;
315 struct iscsi_sge cdb_sge; 309 struct scsi_sge cdb_sge;
316}; 310};
317 311
318struct iscsi_login_req_hdr { 312struct iscsi_login_req_hdr {
@@ -519,8 +513,8 @@ struct iscsi_logout_response_hdr {
519 __le32 exp_cmd_sn; 513 __le32 exp_cmd_sn;
520 __le32 max_cmd_sn; 514 __le32 max_cmd_sn;
521 __le32 reserved4; 515 __le32 reserved4;
522 __le16 time2retain; 516 __le16 time_2_retain;
523 __le16 time2wait; 517 __le16 time_2_wait;
524 __le32 reserved5[1]; 518 __le32 reserved5[1];
525}; 519};
526 520
@@ -602,7 +596,7 @@ struct iscsi_tmf_response_hdr {
602#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 596#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
603 struct regpair reserved0; 597 struct regpair reserved0;
604 __le32 itt; 598 __le32 itt;
605 __le32 rtt; 599 __le32 reserved1;
606 __le32 stat_sn; 600 __le32 stat_sn;
607 __le32 exp_cmd_sn; 601 __le32 exp_cmd_sn;
608 __le32 max_cmd_sn; 602 __le32 max_cmd_sn;
@@ -641,7 +635,7 @@ struct iscsi_reject_hdr {
641#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF 635#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF
642#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24 636#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
643 struct regpair reserved0; 637 struct regpair reserved0;
644 __le32 reserved1; 638 __le32 all_ones;
645 __le32 reserved2; 639 __le32 reserved2;
646 __le32 stat_sn; 640 __le32 stat_sn;
647 __le32 exp_cmd_sn; 641 __le32 exp_cmd_sn;
@@ -688,7 +682,9 @@ struct iscsi_cqe_solicited {
688 __le16 itid; 682 __le16 itid;
689 u8 task_type; 683 u8 task_type;
690 u8 fw_dbg_field; 684 u8 fw_dbg_field;
691 __le32 reserved1[2]; 685 u8 caused_conn_err;
686 u8 reserved0[3];
687 __le32 reserved1[1];
692 union iscsi_task_hdr iscsi_hdr; 688 union iscsi_task_hdr iscsi_hdr;
693}; 689};
694 690
@@ -727,35 +723,6 @@ enum iscsi_cqe_unsolicited_type {
727 MAX_ISCSI_CQE_UNSOLICITED_TYPE 723 MAX_ISCSI_CQE_UNSOLICITED_TYPE
728}; 724};
729 725
730struct iscsi_virt_sgl_ctx {
731 struct regpair sgl_base;
732 struct regpair dsgl_base;
733 __le32 sgl_initial_offset;
734 __le32 dsgl_initial_offset;
735 __le32 dsgl_curr_offset[2];
736};
737
738struct iscsi_sgl_var_params {
739 u8 sgl_ptr;
740 u8 dsgl_ptr;
741 __le16 sge_offset;
742 __le16 dsge_offset;
743};
744
745struct iscsi_phys_sgl_ctx {
746 struct regpair sgl_base;
747 struct regpair dsgl_base;
748 u8 sgl_size;
749 u8 dsgl_size;
750 __le16 reserved;
751 struct iscsi_sgl_var_params var_params[2];
752};
753
754union iscsi_data_desc_ctx {
755 struct iscsi_virt_sgl_ctx virt_sgl;
756 struct iscsi_phys_sgl_ctx phys_sgl;
757 struct iscsi_cached_sge_ctx cached_sge;
758};
759 726
760struct iscsi_debug_modes { 727struct iscsi_debug_modes {
761 u8 flags; 728 u8 flags;
@@ -771,8 +738,10 @@ struct iscsi_debug_modes {
771#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4 738#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
772#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 739#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
773#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5 740#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
774#define ISCSI_DEBUG_MODES_RESERVED0_MASK 0x3 741#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1
775#define ISCSI_DEBUG_MODES_RESERVED0_SHIFT 6 742#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6
743#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1
744#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7
776}; 745};
777 746
778struct iscsi_dif_flags { 747struct iscsi_dif_flags {
@@ -806,7 +775,6 @@ enum iscsi_eqe_opcode {
806 ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2, 775 ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2,
807 ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR, 776 ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR,
808 ISCSI_EVENT_TYPE_TCP_CONN_ERROR, 777 ISCSI_EVENT_TYPE_TCP_CONN_ERROR,
809 ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES,
810 MAX_ISCSI_EQE_OPCODE 778 MAX_ISCSI_EQE_OPCODE
811}; 779};
812 780
@@ -856,31 +824,11 @@ enum iscsi_error_types {
856 ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX, 824 ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX,
857 ISCSI_CONN_ERROR_SENSE_DATA_LENGTH, 825 ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
858 ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR, 826 ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR,
827 ISCSI_CONN_ERROR_INVALID_ITT,
859 ISCSI_ERROR_UNKNOWN, 828 ISCSI_ERROR_UNKNOWN,
860 MAX_ISCSI_ERROR_TYPES 829 MAX_ISCSI_ERROR_TYPES
861}; 830};
862 831
863struct iscsi_mflags {
864 u8 mflags;
865#define ISCSI_MFLAGS_SLOW_IO_MASK 0x1
866#define ISCSI_MFLAGS_SLOW_IO_SHIFT 0
867#define ISCSI_MFLAGS_SINGLE_SGE_MASK 0x1
868#define ISCSI_MFLAGS_SINGLE_SGE_SHIFT 1
869#define ISCSI_MFLAGS_RESERVED_MASK 0x3F
870#define ISCSI_MFLAGS_RESERVED_SHIFT 2
871};
872
873struct iscsi_sgl {
874 struct regpair sgl_addr;
875 __le16 updated_sge_size;
876 __le16 updated_sge_offset;
877 __le32 byte_offset;
878};
879
880union iscsi_mstorm_sgl {
881 struct iscsi_sgl sgl_struct;
882 struct iscsi_sge single_sge;
883};
884 832
885enum iscsi_ramrod_cmd_id { 833enum iscsi_ramrod_cmd_id {
886 ISCSI_RAMROD_CMD_ID_UNUSED = 0, 834 ISCSI_RAMROD_CMD_ID_UNUSED = 0,
@@ -896,10 +844,10 @@ enum iscsi_ramrod_cmd_id {
896 844
897struct iscsi_reg1 { 845struct iscsi_reg1 {
898 __le32 reg1_map; 846 __le32 reg1_map;
899#define ISCSI_REG1_NUM_FAST_SGES_MASK 0x7 847#define ISCSI_REG1_NUM_SGES_MASK 0xF
900#define ISCSI_REG1_NUM_FAST_SGES_SHIFT 0 848#define ISCSI_REG1_NUM_SGES_SHIFT 0
901#define ISCSI_REG1_RESERVED1_MASK 0x1FFFFFFF 849#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF
902#define ISCSI_REG1_RESERVED1_SHIFT 3 850#define ISCSI_REG1_RESERVED1_SHIFT 4
903}; 851};
904 852
905union iscsi_seq_num { 853union iscsi_seq_num {
@@ -967,22 +915,33 @@ struct iscsi_spe_func_init {
967}; 915};
968 916
969struct ystorm_iscsi_task_state { 917struct ystorm_iscsi_task_state {
970 union iscsi_data_desc_ctx sgl_ctx_union; 918 struct scsi_cached_sges data_desc;
971 __le32 buffer_offset[2]; 919 struct scsi_sgl_params sgl_params;
972 __le16 bytes_nxt_dif;
973 __le16 rxmit_bytes_nxt_dif;
974 union iscsi_seq_num seq_num_union;
975 u8 dif_bytes_leftover;
976 u8 rxmit_dif_bytes_leftover;
977 __le16 reuse_count;
978 struct iscsi_dif_flags dif_flags;
979 u8 local_comp;
980 __le32 exp_r2t_sn; 920 __le32 exp_r2t_sn;
981 __le32 sgl_offset[2]; 921 __le32 buffer_offset;
922 union iscsi_seq_num seq_num;
923 struct iscsi_dif_flags dif_flags;
924 u8 flags;
925#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1
926#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
927#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1
928#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1
929#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F
930#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2
931};
932
933struct ystorm_iscsi_task_rxmit_opt {
934 __le32 fast_rxmit_sge_offset;
935 __le32 scan_start_buffer_offset;
936 __le32 fast_rxmit_buffer_offset;
937 u8 scan_start_sgl_index;
938 u8 fast_rxmit_sgl_index;
939 __le16 reserved;
982}; 940};
983 941
984struct ystorm_iscsi_task_st_ctx { 942struct ystorm_iscsi_task_st_ctx {
985 struct ystorm_iscsi_task_state state; 943 struct ystorm_iscsi_task_state state;
944 struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
986 union iscsi_task_hdr pdu_hdr; 945 union iscsi_task_hdr pdu_hdr;
987}; 946};
988 947
@@ -1152,25 +1111,16 @@ struct ustorm_iscsi_task_ag_ctx {
1152}; 1111};
1153 1112
1154struct mstorm_iscsi_task_st_ctx { 1113struct mstorm_iscsi_task_st_ctx {
1155 union iscsi_mstorm_sgl sgl_union; 1114 struct scsi_cached_sges data_desc;
1156 struct iscsi_dif_flags dif_flags; 1115 struct scsi_sgl_params sgl_params;
1157 struct iscsi_mflags flags; 1116 __le32 rem_task_size;
1158 u8 sgl_size; 1117 __le32 data_buffer_offset;
1159 u8 host_sge_index;
1160 __le16 dix_cur_sge_offset;
1161 __le16 dix_cur_sge_size;
1162 __le32 data_offset_rtid;
1163 u8 dif_offset;
1164 u8 dix_sgl_size;
1165 u8 dix_sge_index;
1166 u8 task_type; 1118 u8 task_type;
1119 struct iscsi_dif_flags dif_flags;
1120 u8 reserved0[2];
1167 struct regpair sense_db; 1121 struct regpair sense_db;
1168 struct regpair dix_sgl_cur_sge; 1122 __le32 expected_itt;
1169 __le32 rem_task_size; 1123 __le32 reserved1;
1170 __le16 reuse_count;
1171 __le16 dif_data_residue;
1172 u8 reserved0[4];
1173 __le32 reserved1[1];
1174}; 1124};
1175 1125
1176struct ustorm_iscsi_task_st_ctx { 1126struct ustorm_iscsi_task_st_ctx {
@@ -1184,7 +1134,7 @@ struct ustorm_iscsi_task_st_ctx {
1184#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0 1134#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0
1185#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F 1135#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F
1186#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1 1136#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1
1187 u8 reserved2; 1137 struct iscsi_dif_flags dif_flags;
1188 __le16 reserved3; 1138 __le16 reserved3;
1189 __le32 reserved4; 1139 __le32 reserved4;
1190 __le32 reserved5; 1140 __le32 reserved5;
@@ -1207,10 +1157,10 @@ struct ustorm_iscsi_task_st_ctx {
1207#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2 1157#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
1208#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1 1158#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
1209#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3 1159#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
1210#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_MASK 0x1 1160#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
1211#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_SHIFT 4 1161#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
1212#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_MASK 0x1 1162#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
1213#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_SHIFT 5 1163#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
1214#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1 1164#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
1215#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6 1165#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
1216#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1 1166#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1
@@ -1220,7 +1170,6 @@ struct ustorm_iscsi_task_st_ctx {
1220 1170
1221struct iscsi_task_context { 1171struct iscsi_task_context {
1222 struct ystorm_iscsi_task_st_ctx ystorm_st_context; 1172 struct ystorm_iscsi_task_st_ctx ystorm_st_context;
1223 struct regpair ystorm_st_padding[2];
1224 struct ystorm_iscsi_task_ag_ctx ystorm_ag_context; 1173 struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
1225 struct regpair ystorm_ag_padding[2]; 1174 struct regpair ystorm_ag_padding[2];
1226 struct tdif_task_context tdif_context; 1175 struct tdif_task_context tdif_context;
@@ -1272,32 +1221,22 @@ struct iscsi_uhqe {
1272#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24 1221#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24
1273}; 1222};
1274 1223
1275struct iscsi_wqe_field {
1276 __le32 contlen_cdbsize_field;
1277#define ISCSI_WQE_FIELD_CONT_LEN_MASK 0xFFFFFF
1278#define ISCSI_WQE_FIELD_CONT_LEN_SHIFT 0
1279#define ISCSI_WQE_FIELD_CDB_SIZE_MASK 0xFF
1280#define ISCSI_WQE_FIELD_CDB_SIZE_SHIFT 24
1281};
1282
1283union iscsi_wqe_field_union {
1284 struct iscsi_wqe_field cont_field;
1285 __le32 prev_tid;
1286};
1287 1224
1288struct iscsi_wqe { 1225struct iscsi_wqe {
1289 __le16 task_id; 1226 __le16 task_id;
1290 u8 flags; 1227 u8 flags;
1291#define ISCSI_WQE_WQE_TYPE_MASK 0x7 1228#define ISCSI_WQE_WQE_TYPE_MASK 0x7
1292#define ISCSI_WQE_WQE_TYPE_SHIFT 0 1229#define ISCSI_WQE_WQE_TYPE_SHIFT 0
1293#define ISCSI_WQE_NUM_FAST_SGES_MASK 0x7 1230#define ISCSI_WQE_NUM_SGES_MASK 0xF
1294#define ISCSI_WQE_NUM_FAST_SGES_SHIFT 3 1231#define ISCSI_WQE_NUM_SGES_SHIFT 3
1295#define ISCSI_WQE_PTU_INVALIDATE_MASK 0x1
1296#define ISCSI_WQE_PTU_INVALIDATE_SHIFT 6
1297#define ISCSI_WQE_RESPONSE_MASK 0x1 1232#define ISCSI_WQE_RESPONSE_MASK 0x1
1298#define ISCSI_WQE_RESPONSE_SHIFT 7 1233#define ISCSI_WQE_RESPONSE_SHIFT 7
1299 struct iscsi_dif_flags prot_flags; 1234 struct iscsi_dif_flags prot_flags;
1300 union iscsi_wqe_field_union cont_prevtid_union; 1235 __le32 contlen_cdbsize;
1236#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF
1237#define ISCSI_WQE_CONT_LEN_SHIFT 0
1238#define ISCSI_WQE_CDB_SIZE_MASK 0xFF
1239#define ISCSI_WQE_CDB_SIZE_SHIFT 24
1301}; 1240};
1302 1241
1303enum iscsi_wqe_type { 1242enum iscsi_wqe_type {
@@ -1318,17 +1257,15 @@ struct iscsi_xhqe {
1318 u8 total_ahs_length; 1257 u8 total_ahs_length;
1319 u8 opcode; 1258 u8 opcode;
1320 u8 flags; 1259 u8 flags;
1321#define ISCSI_XHQE_NUM_FAST_SGES_MASK 0x7 1260#define ISCSI_XHQE_FINAL_MASK 0x1
1322#define ISCSI_XHQE_NUM_FAST_SGES_SHIFT 0 1261#define ISCSI_XHQE_FINAL_SHIFT 0
1323#define ISCSI_XHQE_FINAL_MASK 0x1 1262#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
1324#define ISCSI_XHQE_FINAL_SHIFT 3 1263#define ISCSI_XHQE_STATUS_BIT_SHIFT 1
1325#define ISCSI_XHQE_SUPER_IO_MASK 0x1 1264#define ISCSI_XHQE_NUM_SGES_MASK 0xF
1326#define ISCSI_XHQE_SUPER_IO_SHIFT 4 1265#define ISCSI_XHQE_NUM_SGES_SHIFT 2
1327#define ISCSI_XHQE_STATUS_BIT_MASK 0x1 1266#define ISCSI_XHQE_RESERVED0_MASK 0x3
1328#define ISCSI_XHQE_STATUS_BIT_SHIFT 5 1267#define ISCSI_XHQE_RESERVED0_SHIFT 6
1329#define ISCSI_XHQE_RESERVED_MASK 0x3 1268 union iscsi_seq_num seq_num;
1330#define ISCSI_XHQE_RESERVED_SHIFT 6
1331 union iscsi_seq_num seq_num_union;
1332 __le16 reserved1; 1269 __le16 reserved1;
1333}; 1270};
1334 1271
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index fde56c436f71..8e0065c52857 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -300,6 +300,11 @@ struct qed_sb_info {
300 struct qed_dev *cdev; 300 struct qed_dev *cdev;
301}; 301};
302 302
303enum qed_dev_type {
304 QED_DEV_TYPE_BB,
305 QED_DEV_TYPE_AH,
306};
307
303struct qed_dev_info { 308struct qed_dev_info {
304 unsigned long pci_mem_start; 309 unsigned long pci_mem_start;
305 unsigned long pci_mem_end; 310 unsigned long pci_mem_end;
@@ -325,6 +330,8 @@ struct qed_dev_info {
325 u16 mtu; 330 u16 mtu;
326 331
327 bool wol_support; 332 bool wol_support;
333
334 enum qed_dev_type dev_type;
328}; 335};
329 336
330enum qed_sb_type { 337enum qed_sb_type {
@@ -752,7 +759,7 @@ enum qed_mf_mode {
752 QED_MF_NPAR, 759 QED_MF_NPAR,
753}; 760};
754 761
755struct qed_eth_stats { 762struct qed_eth_stats_common {
756 u64 no_buff_discards; 763 u64 no_buff_discards;
757 u64 packet_too_big_discard; 764 u64 packet_too_big_discard;
758 u64 ttl0_discard; 765 u64 ttl0_discard;
@@ -784,11 +791,6 @@ struct qed_eth_stats {
784 u64 rx_256_to_511_byte_packets; 791 u64 rx_256_to_511_byte_packets;
785 u64 rx_512_to_1023_byte_packets; 792 u64 rx_512_to_1023_byte_packets;
786 u64 rx_1024_to_1518_byte_packets; 793 u64 rx_1024_to_1518_byte_packets;
787 u64 rx_1519_to_1522_byte_packets;
788 u64 rx_1519_to_2047_byte_packets;
789 u64 rx_2048_to_4095_byte_packets;
790 u64 rx_4096_to_9216_byte_packets;
791 u64 rx_9217_to_16383_byte_packets;
792 u64 rx_crc_errors; 794 u64 rx_crc_errors;
793 u64 rx_mac_crtl_frames; 795 u64 rx_mac_crtl_frames;
794 u64 rx_pause_frames; 796 u64 rx_pause_frames;
@@ -805,14 +807,8 @@ struct qed_eth_stats {
805 u64 tx_256_to_511_byte_packets; 807 u64 tx_256_to_511_byte_packets;
806 u64 tx_512_to_1023_byte_packets; 808 u64 tx_512_to_1023_byte_packets;
807 u64 tx_1024_to_1518_byte_packets; 809 u64 tx_1024_to_1518_byte_packets;
808 u64 tx_1519_to_2047_byte_packets;
809 u64 tx_2048_to_4095_byte_packets;
810 u64 tx_4096_to_9216_byte_packets;
811 u64 tx_9217_to_16383_byte_packets;
812 u64 tx_pause_frames; 810 u64 tx_pause_frames;
813 u64 tx_pfc_frames; 811 u64 tx_pfc_frames;
814 u64 tx_lpi_entry_count;
815 u64 tx_total_collisions;
816 u64 brb_truncates; 812 u64 brb_truncates;
817 u64 brb_discards; 813 u64 brb_discards;
818 u64 rx_mac_bytes; 814 u64 rx_mac_bytes;
@@ -827,6 +823,34 @@ struct qed_eth_stats {
827 u64 tx_mac_ctrl_frames; 823 u64 tx_mac_ctrl_frames;
828}; 824};
829 825
826struct qed_eth_stats_bb {
827 u64 rx_1519_to_1522_byte_packets;
828 u64 rx_1519_to_2047_byte_packets;
829 u64 rx_2048_to_4095_byte_packets;
830 u64 rx_4096_to_9216_byte_packets;
831 u64 rx_9217_to_16383_byte_packets;
832 u64 tx_1519_to_2047_byte_packets;
833 u64 tx_2048_to_4095_byte_packets;
834 u64 tx_4096_to_9216_byte_packets;
835 u64 tx_9217_to_16383_byte_packets;
836 u64 tx_lpi_entry_count;
837 u64 tx_total_collisions;
838};
839
840struct qed_eth_stats_ah {
841 u64 rx_1519_to_max_byte_packets;
842 u64 tx_1519_to_max_byte_packets;
843};
844
845struct qed_eth_stats {
846 struct qed_eth_stats_common common;
847
848 union {
849 struct qed_eth_stats_bb bb;
850 struct qed_eth_stats_ah ah;
851 };
852};
853
830#define QED_SB_IDX 0x0002 854#define QED_SB_IDX 0x0002
831 855
832#define RX_PI 0 856#define RX_PI 0
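Note on the qed_if.h hunks above: qed_eth_stats is split into a common block plus a BB/AH union, with the device family reported through the new qed_dev_info dev_type field. A sketch of how a consumer might pick the right union member; the function name is illustrative:

#include <linux/qed/qed_if.h>

/* Sketch only: count RX frames longer than 1518 bytes on either
 * device family.  'info' and 'stats' come from the driver's own
 * query path.
 */
static u64 example_rx_oversize_frames(const struct qed_dev_info *info,
				      const struct qed_eth_stats *stats)
{
	if (info->dev_type == QED_DEV_TYPE_AH)
		return stats->ah.rx_1519_to_max_byte_packets;

	return stats->bb.rx_1519_to_2047_byte_packets +
	       stats->bb.rx_2048_to_4095_byte_packets +
	       stats->bb.rx_4096_to_9216_byte_packets +
	       stats->bb.rx_9217_to_16383_byte_packets;
}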
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index f773aa5e746f..72c770f9f666 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -52,7 +52,8 @@
52#define RDMA_MAX_PDS (64 * 1024) 52#define RDMA_MAX_PDS (64 * 1024)
53 53
54#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS 54#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
55#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB 55#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
56#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
56 57
57#define RDMA_TASK_TYPE (PROTOCOLID_ROCE) 58#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
58 59
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index bad02df213df..866f063026de 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -38,4 +38,21 @@
38 38
39#define ROCE_MAX_QPS (32 * 1024) 39#define ROCE_MAX_QPS (32 * 1024)
40 40
41enum roce_async_events_type {
42 ROCE_ASYNC_EVENT_NONE = 0,
43 ROCE_ASYNC_EVENT_COMM_EST = 1,
44 ROCE_ASYNC_EVENT_SQ_DRAINED,
45 ROCE_ASYNC_EVENT_SRQ_LIMIT,
46 ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
47 ROCE_ASYNC_EVENT_CQ_ERR,
48 ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
49 ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
50 ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
51 ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
52 ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
53 ROCE_ASYNC_EVENT_SRQ_EMPTY,
54 ROCE_ASYNC_EVENT_DESTROY_QP_DONE,
55 MAX_ROCE_ASYNC_EVENTS_TYPE
56};
57
41#endif /* __ROCE_COMMON__ */ 58#endif /* __ROCE_COMMON__ */
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
index 03f3e37ab059..08df82a096b6 100644
--- a/include/linux/qed/storage_common.h
+++ b/include/linux/qed/storage_common.h
@@ -40,6 +40,8 @@
40#define BDQ_ID_IMM_DATA (1) 40#define BDQ_ID_IMM_DATA (1)
41#define BDQ_NUM_IDS (2) 41#define BDQ_NUM_IDS (2)
42 42
43#define SCSI_NUM_SGES_SLOW_SGL_THR 8
44
43#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15) 45#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
44 46
45struct scsi_bd { 47struct scsi_bd {
@@ -52,6 +54,16 @@ struct scsi_bdq_ram_drv_data {
52 __le16 reserved0[3]; 54 __le16 reserved0[3];
53}; 55};
54 56
57struct scsi_sge {
58 struct regpair sge_addr;
59 __le32 sge_len;
60 __le32 reserved;
61};
62
63struct scsi_cached_sges {
64 struct scsi_sge sge[4];
65};
66
55struct scsi_drv_cmdq { 67struct scsi_drv_cmdq {
56 __le16 cmdq_cons; 68 __le16 cmdq_cons;
57 __le16 reserved0; 69 __le16 reserved0;
@@ -99,11 +111,19 @@ struct scsi_ram_per_bdq_resource_drv_data {
99 struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS]; 111 struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
100}; 112};
101 113
102struct scsi_sge { 114enum scsi_sgl_mode {
103 struct regpair sge_addr; 115 SCSI_TX_SLOW_SGL,
104 __le16 sge_len; 116 SCSI_FAST_SGL,
105 __le16 reserved0; 117 MAX_SCSI_SGL_MODE
106 __le32 reserved1; 118};
119
120struct scsi_sgl_params {
121 struct regpair sgl_addr;
122 __le32 sgl_total_length;
123 __le32 sge_offset;
124 __le16 sgl_num_sges;
125 u8 sgl_index;
126 u8 reserved;
107}; 127};
108 128
109struct scsi_terminate_extra_params { 129struct scsi_terminate_extra_params {
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index 46fe7856f1b2..a5e843268f0e 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -173,6 +173,7 @@ enum tcp_seg_placement_event {
173 TCP_EVENT_ADD_ISLE_RIGHT, 173 TCP_EVENT_ADD_ISLE_RIGHT,
174 TCP_EVENT_ADD_ISLE_LEFT, 174 TCP_EVENT_ADD_ISLE_LEFT,
175 TCP_EVENT_JOIN, 175 TCP_EVENT_JOIN,
176 TCP_EVENT_DELETE_ISLES,
176 TCP_EVENT_NOP, 177 TCP_EVENT_NOP,
177 MAX_TCP_SEG_PLACEMENT_EVENT 178 MAX_TCP_SEG_PLACEMENT_EVENT
178}; 179};
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 092292b6675e..e507290cd2c7 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -916,6 +916,28 @@ static inline int rhashtable_lookup_insert_fast(
916} 916}
917 917
918/** 918/**
919 * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
920 * @ht: hash table
921 * @obj: pointer to hash head inside object
922 * @params: hash table parameters
923 *
924 * Just like rhashtable_lookup_insert_fast(), but this function returns the
925 * object if it exists, NULL if it did not and the insertion was successful,
926 * and an ERR_PTR otherwise.
927 */
928static inline void *rhashtable_lookup_get_insert_fast(
929 struct rhashtable *ht, struct rhash_head *obj,
930 const struct rhashtable_params params)
931{
932 const char *key = rht_obj(ht, obj);
933
934 BUG_ON(ht->p.obj_hashfn);
935
936 return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
937 false);
938}
939
940/**
919 * rhashtable_lookup_insert_key - search and insert object to hash table 941 * rhashtable_lookup_insert_key - search and insert object to hash table
920 * with explicit key 942 * with explicit key
921 * @ht: hash table 943 * @ht: hash table
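Note on the rhashtable.h hunk above: per its kernel-doc, rhashtable_lookup_get_insert_fast() returns the already-present object, NULL when the new object was inserted, or an ERR_PTR on failure. A sketch of the resulting get-or-insert pattern; the item type and table parameters are made up for illustration:

#include <linux/rhashtable.h>
#include <linux/err.h>

/* Sketch only: hypothetical hashed item keyed by a u32. */
struct example_item {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params example_params = {
	.key_offset	= offsetof(struct example_item, key),
	.key_len	= sizeof(u32),
	.head_offset	= offsetof(struct example_item, node),
};

/* Insert 'new' unless an item with the same key already exists; return
 * whichever object ends up in the table, or an ERR_PTR on failure.
 */
static struct example_item *example_get_or_insert(struct rhashtable *ht,
						  struct example_item *new)
{
	void *old;

	old = rhashtable_lookup_get_insert_fast(ht, &new->node,
						example_params);
	if (IS_ERR(old))
		return old;		/* insertion failed */

	return old ?: new;		/* existing object, or 'new' if inserted */
}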
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index fc273e9d5f67..cd98ee232ad1 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -28,6 +28,9 @@
28 28
29#include <linux/platform_device.h> 29#include <linux/platform_device.h>
30 30
31#define MTL_MAX_RX_QUEUES 8
32#define MTL_MAX_TX_QUEUES 8
33
31#define STMMAC_RX_COE_NONE 0 34#define STMMAC_RX_COE_NONE 0
32#define STMMAC_RX_COE_TYPE1 1 35#define STMMAC_RX_COE_TYPE1 1
33#define STMMAC_RX_COE_TYPE2 2 36#define STMMAC_RX_COE_TYPE2 2
@@ -44,6 +47,18 @@
44#define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */ 47#define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */
45#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/122 */ 48#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/122 */
46 49
50/* MTL algorithms identifiers */
51#define MTL_TX_ALGORITHM_WRR 0x0
52#define MTL_TX_ALGORITHM_WFQ 0x1
53#define MTL_TX_ALGORITHM_DWRR 0x2
54#define MTL_TX_ALGORITHM_SP 0x3
55#define MTL_RX_ALGORITHM_SP 0x4
56#define MTL_RX_ALGORITHM_WSP 0x5
57
58/* RX/TX Queue Mode */
59#define MTL_QUEUE_DCB 0x0
60#define MTL_QUEUE_AVB 0x1
61
47/* The MDC clock could be set higher than the IEEE 802.3 62/* The MDC clock could be set higher than the IEEE 802.3
48 * specified frequency limit 0f 2.5 MHz, by programming a clock divider 63 * specified frequency limit 0f 2.5 MHz, by programming a clock divider
49 * of value different than the above defined values. The resultant MDIO 64 * of value different than the above defined values. The resultant MDIO
@@ -109,6 +124,26 @@ struct stmmac_axi {
109 bool axi_rb; 124 bool axi_rb;
110}; 125};
111 126
127struct stmmac_rxq_cfg {
128 u8 mode_to_use;
129 u8 chan;
130 u8 pkt_route;
131 bool use_prio;
132 u32 prio;
133};
134
135struct stmmac_txq_cfg {
136 u8 weight;
137 u8 mode_to_use;
138 /* Credit Base Shaper parameters */
139 u32 send_slope;
140 u32 idle_slope;
141 u32 high_credit;
142 u32 low_credit;
143 bool use_prio;
144 u32 prio;
145};
146
112struct plat_stmmacenet_data { 147struct plat_stmmacenet_data {
113 int bus_id; 148 int bus_id;
114 int phy_addr; 149 int phy_addr;
@@ -133,6 +168,12 @@ struct plat_stmmacenet_data {
133 int unicast_filter_entries; 168 int unicast_filter_entries;
134 int tx_fifo_size; 169 int tx_fifo_size;
135 int rx_fifo_size; 170 int rx_fifo_size;
171 u8 rx_queues_to_use;
172 u8 tx_queues_to_use;
173 u8 rx_sched_algorithm;
174 u8 tx_sched_algorithm;
175 struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
176 struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES];
136 void (*fix_mac_speed)(void *priv, unsigned int speed); 177 void (*fix_mac_speed)(void *priv, unsigned int speed);
137 int (*init)(struct platform_device *pdev, void *priv); 178 int (*init)(struct platform_device *pdev, void *priv);
138 void (*exit)(struct platform_device *pdev, void *priv); 179 void (*exit)(struct platform_device *pdev, void *priv);
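Note on the stmmac.h hunks above: the platform data grows per-queue MTL configuration. A sketch of how platform glue code might describe a two-TX-queue, one-RX-queue setup; the queue count and weights are placeholder values:

#include <linux/stmmac.h>

/* Sketch only: fill the new MTL queue fields with placeholder values. */
static void example_setup_mtl_queues(struct plat_stmmacenet_data *plat)
{
	plat->tx_queues_to_use = 2;
	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;

	plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->tx_queues_cfg[0].weight = 10;

	plat->tx_queues_cfg[1].mode_to_use = MTL_QUEUE_AVB;
	plat->tx_queues_cfg[1].weight = 1;

	plat->rx_queues_to_use = 1;
	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
	plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->rx_queues_cfg[0].chan = 0;
}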
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 6e0ce8c7b8cb..e2b56917450f 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -261,10 +261,10 @@ extern void usbnet_pause_rx(struct usbnet *);
261extern void usbnet_resume_rx(struct usbnet *); 261extern void usbnet_resume_rx(struct usbnet *);
262extern void usbnet_purge_paused_rxq(struct usbnet *); 262extern void usbnet_purge_paused_rxq(struct usbnet *);
263 263
264extern int usbnet_get_settings(struct net_device *net, 264extern int usbnet_get_link_ksettings(struct net_device *net,
265 struct ethtool_cmd *cmd); 265 struct ethtool_link_ksettings *cmd);
266extern int usbnet_set_settings(struct net_device *net, 266extern int usbnet_set_link_ksettings(struct net_device *net,
267 struct ethtool_cmd *cmd); 267 const struct ethtool_link_ksettings *cmd);
268extern u32 usbnet_get_link(struct net_device *net); 268extern u32 usbnet_get_link(struct net_device *net);
269extern u32 usbnet_get_msglevel(struct net_device *); 269extern u32 usbnet_get_msglevel(struct net_device *);
270extern void usbnet_set_msglevel(struct net_device *, u32); 270extern void usbnet_set_msglevel(struct net_device *, u32);
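Note on the usbnet.h hunk above: the ethtool helpers are renamed from the legacy get/set_settings pair to the link_ksettings variants. A minimal ethtool_ops table wired to the renamed helpers; the ops-struct name is illustrative:

#include <linux/usb/usbnet.h>
#include <linux/ethtool.h>

/* Sketch only: a usbnet minidriver's ethtool_ops after the rename. */
static const struct ethtool_ops example_usbnet_ethtool_ops = {
	.get_link		= usbnet_get_link,
	.get_msglevel		= usbnet_get_msglevel,
	.set_msglevel		= usbnet_set_msglevel,
	.get_link_ksettings	= usbnet_get_link_ksettings,
	.set_link_ksettings	= usbnet_set_link_ksettings,
};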
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 4e13e695f025..e42897fd7a96 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -233,6 +233,10 @@ struct dsa_switch {
233 u32 phys_mii_mask; 233 u32 phys_mii_mask;
234 struct mii_bus *slave_mii_bus; 234 struct mii_bus *slave_mii_bus;
235 235
236 /* Ageing Time limits in msecs */
237 unsigned int ageing_time_min;
238 unsigned int ageing_time_max;
239
236 /* Dynamically allocated ports, keep last */ 240 /* Dynamically allocated ports, keep last */
237 size_t num_ports; 241 size_t num_ports;
238 struct dsa_port ports[]; 242 struct dsa_port ports[];
@@ -248,6 +252,11 @@ static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
248 return !!((ds->dsa_port_mask) & (1 << p)); 252 return !!((ds->dsa_port_mask) & (1 << p));
249} 253}
250 254
255static inline bool dsa_is_normal_port(struct dsa_switch *ds, int p)
256{
257 return !dsa_is_cpu_port(ds, p) && !dsa_is_dsa_port(ds, p);
258}
259
251static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p) 260static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
252{ 261{
253 return ds->enabled_port_mask & (1 << p) && ds->ports[p].netdev; 262 return ds->enabled_port_mask & (1 << p) && ds->ports[p].netdev;
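Note on the dsa.h hunk above: dsa_is_normal_port() identifies ports that are neither CPU nor DSA links. A small sketch of iterating only user-facing ports with it:

#include <net/dsa.h>

/* Sketch only: walk user-facing ports, skipping CPU and DSA links. */
static void example_for_each_user_port(struct dsa_switch *ds)
{
	unsigned int port;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_normal_port(ds, port))
			continue;

		/* per-user-port setup would go here */
	}
}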
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 8dbfdf728cd8..1243b9c7694e 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -141,6 +141,7 @@ int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
141 struct fib_lookup_arg *); 141 struct fib_lookup_arg *);
142int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table, 142int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
143 u32 flags); 143 u32 flags);
144bool fib_rule_matchall(const struct fib_rule *rule);
144 145
145int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh); 146int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh);
146int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh); 147int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 368bb4024b78..6692c5758b33 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -213,6 +213,11 @@ struct fib_entry_notifier_info {
213 u32 tb_id; 213 u32 tb_id;
214}; 214};
215 215
216struct fib_rule_notifier_info {
217 struct fib_notifier_info info; /* must be first */
218 struct fib_rule *rule;
219};
220
216struct fib_nh_notifier_info { 221struct fib_nh_notifier_info {
217 struct fib_notifier_info info; /* must be first */ 222 struct fib_notifier_info info; /* must be first */
218 struct fib_nh *fib_nh; 223 struct fib_nh *fib_nh;
@@ -232,9 +237,21 @@ enum fib_event_type {
232int register_fib_notifier(struct notifier_block *nb, 237int register_fib_notifier(struct notifier_block *nb,
233 void (*cb)(struct notifier_block *nb)); 238 void (*cb)(struct notifier_block *nb));
234int unregister_fib_notifier(struct notifier_block *nb); 239int unregister_fib_notifier(struct notifier_block *nb);
240int call_fib_notifier(struct notifier_block *nb, struct net *net,
241 enum fib_event_type event_type,
242 struct fib_notifier_info *info);
235int call_fib_notifiers(struct net *net, enum fib_event_type event_type, 243int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
236 struct fib_notifier_info *info); 244 struct fib_notifier_info *info);
237 245
246void fib_notify(struct net *net, struct notifier_block *nb);
247#ifdef CONFIG_IP_MULTIPLE_TABLES
248void fib_rules_notify(struct net *net, struct notifier_block *nb);
249#else
250static inline void fib_rules_notify(struct net *net, struct notifier_block *nb)
251{
252}
253#endif
254
238struct fib_table { 255struct fib_table {
239 struct hlist_node tb_hlist; 256 struct hlist_node tb_hlist;
240 u32 tb_id; 257 u32 tb_id;
@@ -299,6 +316,11 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
299 return err; 316 return err;
300} 317}
301 318
319static inline bool fib4_rule_default(const struct fib_rule *rule)
320{
321 return true;
322}
323
302#else /* CONFIG_IP_MULTIPLE_TABLES */ 324#else /* CONFIG_IP_MULTIPLE_TABLES */
303int __net_init fib4_rules_init(struct net *net); 325int __net_init fib4_rules_init(struct net *net);
304void __net_exit fib4_rules_exit(struct net *net); 326void __net_exit fib4_rules_exit(struct net *net);
@@ -343,6 +365,8 @@ out:
343 return err; 365 return err;
344} 366}
345 367
368bool fib4_rule_default(const struct fib_rule *rule);
369
346#endif /* CONFIG_IP_MULTIPLE_TABLES */ 370#endif /* CONFIG_IP_MULTIPLE_TABLES */
347 371
348/* Exported by fib_frontend.c */ 372/* Exported by fib_frontend.c */
@@ -371,17 +395,13 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
371int fib_sync_down_addr(struct net_device *dev, __be32 local); 395int fib_sync_down_addr(struct net_device *dev, __be32 local);
372int fib_sync_up(struct net_device *dev, unsigned int nh_flags); 396int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
373 397
374extern u32 fib_multipath_secret __read_mostly; 398#ifdef CONFIG_IP_ROUTE_MULTIPATH
375 399int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
376static inline int fib_multipath_hash(__be32 saddr, __be32 daddr) 400 const struct sk_buff *skb);
377{ 401#endif
378 return jhash_2words((__force u32)saddr, (__force u32)daddr,
379 fib_multipath_secret) >> 1;
380}
381
382void fib_select_multipath(struct fib_result *res, int hash); 402void fib_select_multipath(struct fib_result *res, int hash);
383void fib_select_path(struct net *net, struct fib_result *res, 403void fib_select_path(struct net *net, struct fib_result *res,
384 struct flowi4 *fl4, int mp_hash); 404 struct flowi4 *fl4, const struct sk_buff *skb);
385 405
386/* Exported by fib_trie.c */ 406/* Exported by fib_trie.c */
387void fib_trie_init(void); 407void fib_trie_init(void);
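fib_multipath_hash() now takes the flow and the skb instead of a bare address pair, which is what makes a layer-4 hash policy possible (see the sysctl_fib_multipath_hash_policy field added to netns/ipv4.h below). A minimal stand-alone illustration of the L3 versus L4 difference, using a toy mixing function rather than the kernel's actual hash:

#include <stdint.h>
#include <stdio.h>

static uint32_t mix(uint32_t h, uint32_t v)
{
        h ^= v;
        h *= 0x9e3779b1u;       /* simple multiplicative mixing, for demo only */
        return h ^ (h >> 16);
}

static uint32_t l3_hash(uint32_t saddr, uint32_t daddr)
{
        return mix(mix(0, saddr), daddr);
}

static uint32_t l4_hash(uint32_t saddr, uint32_t daddr,
                        uint8_t proto, uint16_t sport, uint16_t dport)
{
        uint32_t h = l3_hash(saddr, daddr);

        h = mix(h, proto);
        return mix(h, ((uint32_t)sport << 16) | dport);
}

int main(void)
{
        uint32_t s = 0xc0a80001, d = 0x08080808; /* 192.168.0.1 -> 8.8.8.8 */

        /* Same address pair, two different TCP flows: an L3 policy pins both
         * to one path, an L4 policy may spread them across paths, which is
         * why the ports (and hence the skb) are needed.
         */
        printf("L3: %08x %08x\n", l3_hash(s, d), l3_hash(s, d));
        printf("L4: %08x %08x\n",
               l4_hash(s, d, 6, 40000, 443),
               l4_hash(s, d, 6, 40001, 443));
        return 0;
}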
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 7bdfa7d78363..8a4a57b887fb 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -12,6 +12,8 @@
12#include <linux/list.h> /* for struct list_head */ 12#include <linux/list.h> /* for struct list_head */
13#include <linux/spinlock.h> /* for struct rwlock_t */ 13#include <linux/spinlock.h> /* for struct rwlock_t */
14#include <linux/atomic.h> /* for struct atomic_t */ 14#include <linux/atomic.h> /* for struct atomic_t */
15#include <linux/refcount.h> /* for struct refcount_t */
16
15#include <linux/compiler.h> 17#include <linux/compiler.h>
16#include <linux/timer.h> 18#include <linux/timer.h>
17#include <linux/bug.h> 19#include <linux/bug.h>
@@ -525,7 +527,7 @@ struct ip_vs_conn {
525 struct netns_ipvs *ipvs; 527 struct netns_ipvs *ipvs;
526 528
527 /* counter and timer */ 529 /* counter and timer */
528 atomic_t refcnt; /* reference count */ 530 refcount_t refcnt; /* reference count */
529 struct timer_list timer; /* Expiration timer */ 531 struct timer_list timer; /* Expiration timer */
530 volatile unsigned long timeout; /* timeout */ 532 volatile unsigned long timeout; /* timeout */
531 533
@@ -667,7 +669,7 @@ struct ip_vs_dest {
667 atomic_t conn_flags; /* flags to copy to conn */ 669 atomic_t conn_flags; /* flags to copy to conn */
668 atomic_t weight; /* server weight */ 670 atomic_t weight; /* server weight */
669 671
670 atomic_t refcnt; /* reference counter */ 672 refcount_t refcnt; /* reference counter */
671 struct ip_vs_stats stats; /* statistics */ 673 struct ip_vs_stats stats; /* statistics */
672 unsigned long idle_start; /* start time, jiffies */ 674 unsigned long idle_start; /* start time, jiffies */
673 675
@@ -1211,14 +1213,14 @@ struct ip_vs_conn * ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
1211 */ 1213 */
1212static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp) 1214static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp)
1213{ 1215{
1214 return atomic_inc_not_zero(&cp->refcnt); 1216 return refcount_inc_not_zero(&cp->refcnt);
1215} 1217}
1216 1218
1217/* put back the conn without restarting its timer */ 1219/* put back the conn without restarting its timer */
1218static inline void __ip_vs_conn_put(struct ip_vs_conn *cp) 1220static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
1219{ 1221{
1220 smp_mb__before_atomic(); 1222 smp_mb__before_atomic();
1221 atomic_dec(&cp->refcnt); 1223 refcount_dec(&cp->refcnt);
1222} 1224}
1223void ip_vs_conn_put(struct ip_vs_conn *cp); 1225void ip_vs_conn_put(struct ip_vs_conn *cp);
1224void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport); 1226void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
@@ -1410,18 +1412,18 @@ void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
1410 1412
1411static inline void ip_vs_dest_hold(struct ip_vs_dest *dest) 1413static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
1412{ 1414{
1413 atomic_inc(&dest->refcnt); 1415 refcount_inc(&dest->refcnt);
1414} 1416}
1415 1417
1416static inline void ip_vs_dest_put(struct ip_vs_dest *dest) 1418static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
1417{ 1419{
1418 smp_mb__before_atomic(); 1420 smp_mb__before_atomic();
1419 atomic_dec(&dest->refcnt); 1421 refcount_dec(&dest->refcnt);
1420} 1422}
1421 1423
1422static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest) 1424static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
1423{ 1425{
1424 if (atomic_dec_and_test(&dest->refcnt)) 1426 if (refcount_dec_and_test(&dest->refcnt))
1425 kfree(dest); 1427 kfree(dest);
1426} 1428}
1427 1429
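The atomic_t to refcount_t conversions above matter because refcount_t refuses to increment a counter that has already dropped to zero, catching use-after-free style bugs. A minimal userspace sketch of that inc-not-zero rule, written with C11 atomics rather than the kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool inc_not_zero(atomic_uint *ref)
{
        unsigned int old = atomic_load(ref);

        do {
                if (old == 0)
                        return false;   /* object already dead, don't revive it */
        } while (!atomic_compare_exchange_weak(ref, &old, old + 1));
        return true;
}

int main(void)
{
        atomic_uint refcnt = 1;

        printf("take: %d\n", inc_not_zero(&refcnt));    /* 1: reference taken */
        atomic_fetch_sub(&refcnt, 1);
        atomic_fetch_sub(&refcnt, 1);                   /* count is now 0 */
        printf("take: %d\n", inc_not_zero(&refcnt));    /* 0: refused */
        return 0;
}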
diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h
index 179253f9dcfd..a18af6a16eb5 100644
--- a/include/net/mpls_iptunnel.h
+++ b/include/net/mpls_iptunnel.h
@@ -19,6 +19,8 @@
19struct mpls_iptunnel_encap { 19struct mpls_iptunnel_encap {
20 u32 label[MAX_NEW_LABELS]; 20 u32 label[MAX_NEW_LABELS];
21 u8 labels; 21 u8 labels;
22 u8 ttl_propagate;
23 u8 default_ttl;
22}; 24};
23 25
24static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate) 26static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 5ed33ea4718e..65cc2cb005d9 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -5,6 +5,8 @@
5#ifndef _NF_CONNTRACK_EXPECT_H 5#ifndef _NF_CONNTRACK_EXPECT_H
6#define _NF_CONNTRACK_EXPECT_H 6#define _NF_CONNTRACK_EXPECT_H
7 7
8#include <linux/refcount.h>
9
8#include <net/netfilter/nf_conntrack.h> 10#include <net/netfilter/nf_conntrack.h>
9#include <net/netfilter/nf_conntrack_zones.h> 11#include <net/netfilter/nf_conntrack_zones.h>
10 12
@@ -37,7 +39,7 @@ struct nf_conntrack_expect {
37 struct timer_list timeout; 39 struct timer_list timeout;
38 40
39 /* Usage count. */ 41 /* Usage count. */
40 atomic_t use; 42 refcount_t use;
41 43
42 /* Flags */ 44 /* Flags */
43 unsigned int flags; 45 unsigned int flags;
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index 5cc5e9e6171a..d40b89355fdd 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -4,6 +4,7 @@
4#include <net/net_namespace.h> 4#include <net/net_namespace.h>
5#include <linux/netfilter/nf_conntrack_common.h> 5#include <linux/netfilter/nf_conntrack_common.h>
6#include <linux/netfilter/nf_conntrack_tuple_common.h> 6#include <linux/netfilter/nf_conntrack_tuple_common.h>
7#include <linux/refcount.h>
7#include <net/netfilter/nf_conntrack.h> 8#include <net/netfilter/nf_conntrack.h>
8#include <net/netfilter/nf_conntrack_extend.h> 9#include <net/netfilter/nf_conntrack_extend.h>
9 10
@@ -12,7 +13,7 @@
12struct ctnl_timeout { 13struct ctnl_timeout {
13 struct list_head head; 14 struct list_head head;
14 struct rcu_head rcu_head; 15 struct rcu_head rcu_head;
15 atomic_t refcnt; 16 refcount_t refcnt;
16 char name[CTNL_TIMEOUT_NAME_MAX]; 17 char name[CTNL_TIMEOUT_NAME_MAX];
17 __u16 l3num; 18 __u16 l3num;
18 struct nf_conntrack_l4proto *l4proto; 19 struct nf_conntrack_l4proto *l4proto;
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 2aa8a9d80fbe..49436849d7d7 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -385,10 +385,11 @@ static inline struct nft_set *nft_set_container_of(const void *priv)
385 return (void *)priv - offsetof(struct nft_set, data); 385 return (void *)priv - offsetof(struct nft_set, data);
386} 386}
387 387
388struct nft_set *nf_tables_set_lookup(const struct nft_table *table, 388struct nft_set *nft_set_lookup(const struct net *net,
389 const struct nlattr *nla, u8 genmask); 389 const struct nft_table *table,
390struct nft_set *nf_tables_set_lookup_byid(const struct net *net, 390 const struct nlattr *nla_set_name,
391 const struct nlattr *nla, u8 genmask); 391 const struct nlattr *nla_set_id,
392 u8 genmask);
392 393
393static inline unsigned long nft_set_gc_interval(const struct nft_set *set) 394static inline unsigned long nft_set_gc_interval(const struct nft_set *set)
394{ 395{
@@ -1016,7 +1017,8 @@ struct nft_object_type {
1016 unsigned int maxattr; 1017 unsigned int maxattr;
1017 struct module *owner; 1018 struct module *owner;
1018 const struct nla_policy *policy; 1019 const struct nla_policy *policy;
1019 int (*init)(const struct nlattr * const tb[], 1020 int (*init)(const struct nft_ctx *ctx,
1021 const struct nlattr *const tb[],
1020 struct nft_object *obj); 1022 struct nft_object *obj);
1021 void (*destroy)(struct nft_object *obj); 1023 void (*destroy)(struct nft_object *obj);
1022 int (*dump)(struct sk_buff *skb, 1024 int (*dump)(struct sk_buff *skb,
diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
index 5ceb2205e4e3..381af9469e6a 100644
--- a/include/net/netfilter/nft_fib.h
+++ b/include/net/netfilter/nft_fib.h
@@ -32,6 +32,6 @@ void nft_fib6_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
32void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs, 32void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
33 const struct nft_pktinfo *pkt); 33 const struct nft_pktinfo *pkt);
34 34
35void nft_fib_store_result(void *reg, enum nft_fib_result r, 35void nft_fib_store_result(void *reg, const struct nft_fib *priv,
36 const struct nft_pktinfo *pkt, int index); 36 const struct nft_pktinfo *pkt, int index);
37#endif 37#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 622d2da27135..a0e89190a3e9 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -33,7 +33,6 @@ struct inet_timewait_death_row {
33 atomic_t tw_count; 33 atomic_t tw_count;
34 34
35 struct inet_hashinfo *hashinfo ____cacheline_aligned_in_smp; 35 struct inet_hashinfo *hashinfo ____cacheline_aligned_in_smp;
36 int sysctl_tw_recycle;
37 int sysctl_max_tw_buckets; 36 int sysctl_max_tw_buckets;
38}; 37};
39 38
@@ -152,6 +151,7 @@ struct netns_ipv4 {
152#endif 151#endif
153#ifdef CONFIG_IP_ROUTE_MULTIPATH 152#ifdef CONFIG_IP_ROUTE_MULTIPATH
154 int sysctl_fib_multipath_use_neigh; 153 int sysctl_fib_multipath_use_neigh;
154 int sysctl_fib_multipath_hash_policy;
155#endif 155#endif
156 156
157 unsigned int fib_seq; /* protected by rtnl_mutex */ 157 unsigned int fib_seq; /* protected by rtnl_mutex */
diff --git a/include/net/netns/mpls.h b/include/net/netns/mpls.h
index d29203651c01..6608b3693385 100644
--- a/include/net/netns/mpls.h
+++ b/include/net/netns/mpls.h
@@ -9,8 +9,11 @@ struct mpls_route;
9struct ctl_table_header; 9struct ctl_table_header;
10 10
11struct netns_mpls { 11struct netns_mpls {
12 int ip_ttl_propagate;
13 int default_ttl;
12 size_t platform_labels; 14 size_t platform_labels;
13 struct mpls_route __rcu * __rcu *platform_label; 15 struct mpls_route __rcu * __rcu *platform_label;
16
14 struct ctl_table_header *ctl; 17 struct ctl_table_header *ctl;
15}; 18};
16 19
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index f1b76b8e6d2d..bec46f63f10c 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -92,7 +92,7 @@ int unregister_qdisc(struct Qdisc_ops *qops);
92void qdisc_get_default(char *id, size_t len); 92void qdisc_get_default(char *id, size_t len);
93int qdisc_set_default(const char *id); 93int qdisc_set_default(const char *id);
94 94
95void qdisc_hash_add(struct Qdisc *q); 95void qdisc_hash_add(struct Qdisc *q, bool invisible);
96void qdisc_hash_del(struct Qdisc *q); 96void qdisc_hash_del(struct Qdisc *q);
97struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle); 97struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
98struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle); 98struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
diff --git a/include/net/route.h b/include/net/route.h
index c0874c87c173..2cc0e14c6359 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -113,13 +113,13 @@ struct in_device;
113int ip_rt_init(void); 113int ip_rt_init(void);
114void rt_cache_flush(struct net *net); 114void rt_cache_flush(struct net *net);
115void rt_flush_dev(struct net_device *dev); 115void rt_flush_dev(struct net_device *dev);
116struct rtable *__ip_route_output_key_hash(struct net *, struct flowi4 *flp, 116struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *flp,
117 int mp_hash); 117 const struct sk_buff *skb);
118 118
119static inline struct rtable *__ip_route_output_key(struct net *net, 119static inline struct rtable *__ip_route_output_key(struct net *net,
120 struct flowi4 *flp) 120 struct flowi4 *flp)
121{ 121{
122 return __ip_route_output_key_hash(net, flp, -1); 122 return __ip_route_output_key_hash(net, flp, NULL);
123} 123}
124 124
125struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp, 125struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index aeec4086afb2..65d502610314 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -66,6 +66,7 @@ struct Qdisc {
66#define TCQ_F_NOPARENT 0x40 /* root of its hierarchy : 66#define TCQ_F_NOPARENT 0x40 /* root of its hierarchy :
67 * qdisc_tree_decrease_qlen() should stop. 67 * qdisc_tree_decrease_qlen() should stop.
68 */ 68 */
69#define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */
69 u32 limit; 70 u32 limit;
70 const struct Qdisc_ops *ops; 71 const struct Qdisc_ops *ops;
71 struct qdisc_size_table __rcu *stab; 72 struct qdisc_size_table __rcu *stab;
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index b6f682ec184a..47113f2c4b0a 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -293,6 +293,22 @@ struct sctp_chunk *sctp_process_strreset_inreq(
293 struct sctp_association *asoc, 293 struct sctp_association *asoc,
294 union sctp_params param, 294 union sctp_params param,
295 struct sctp_ulpevent **evp); 295 struct sctp_ulpevent **evp);
296struct sctp_chunk *sctp_process_strreset_tsnreq(
297 struct sctp_association *asoc,
298 union sctp_params param,
299 struct sctp_ulpevent **evp);
300struct sctp_chunk *sctp_process_strreset_addstrm_out(
301 struct sctp_association *asoc,
302 union sctp_params param,
303 struct sctp_ulpevent **evp);
304struct sctp_chunk *sctp_process_strreset_addstrm_in(
305 struct sctp_association *asoc,
306 union sctp_params param,
307 struct sctp_ulpevent **evp);
308struct sctp_chunk *sctp_process_strreset_resp(
309 struct sctp_association *asoc,
310 union sctp_params param,
311 struct sctp_ulpevent **evp);
296 312
297/* Prototypes for statetable processing. */ 313/* Prototypes for statetable processing. */
298 314
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index 324b5965fc4d..1060494ac230 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -132,6 +132,14 @@ struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
132 const struct sctp_association *asoc, __u16 flags, 132 const struct sctp_association *asoc, __u16 flags,
133 __u16 stream_num, __u16 *stream_list, gfp_t gfp); 133 __u16 stream_num, __u16 *stream_list, gfp_t gfp);
134 134
135struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event(
136 const struct sctp_association *asoc, __u16 flags,
137 __u32 local_tsn, __u32 remote_tsn, gfp_t gfp);
138
139struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event(
140 const struct sctp_association *asoc, __u16 flags,
141 __u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp);
142
135void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, 143void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
136 struct msghdr *); 144 struct msghdr *);
137void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event, 145void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index 0caee631a836..fe236b3429f0 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -6,10 +6,10 @@
6u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); 6u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
7u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, 7u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
8 __be16 dport); 8 __be16 dport);
9u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, 9u32 secure_tcp_seq_and_tsoff(__be32 saddr, __be32 daddr,
10 __be16 sport, __be16 dport, u32 *tsoff);
11u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr,
10 __be16 sport, __be16 dport, u32 *tsoff); 12 __be16 sport, __be16 dport, u32 *tsoff);
11u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
12 __be16 sport, __be16 dport, u32 *tsoff);
13u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, 13u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
14 __be16 sport, __be16 dport); 14 __be16 sport, __be16 dport);
15u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, 15u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
diff --git a/include/net/sock.h b/include/net/sock.h
index 03252d53975d..08142be8938e 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1783,11 +1783,8 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
1783 1783
1784 sk_tx_queue_clear(sk); 1784 sk_tx_queue_clear(sk);
1785 sk->sk_dst_pending_confirm = 0; 1785 sk->sk_dst_pending_confirm = 0;
1786 /* 1786 old_dst = rcu_dereference_protected(sk->sk_dst_cache,
1787 * This can be called while sk is owned by the caller only, 1787 lockdep_sock_is_held(sk));
1788 * with no state that can be checked in a rcu_dereference_check() cond
1789 */
1790 old_dst = rcu_dereference_raw(sk->sk_dst_cache);
1791 rcu_assign_pointer(sk->sk_dst_cache, dst); 1788 rcu_assign_pointer(sk->sk_dst_cache, dst);
1792 dst_release(old_dst); 1789 dst_release(old_dst);
1793} 1790}
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
index 48cca321ee6c..9690c047b6cf 100644
--- a/include/net/tc_act/tc_vlan.h
+++ b/include/net/tc_act/tc_vlan.h
@@ -49,4 +49,9 @@ static inline __be16 tcf_vlan_push_proto(const struct tc_action *a)
49 return to_vlan(a)->tcfv_push_proto; 49 return to_vlan(a)->tcfv_push_proto;
50} 50}
51 51
52static inline u8 tcf_vlan_push_prio(const struct tc_action *a)
53{
54 return to_vlan(a)->tcfv_push_prio;
55}
56
52#endif /* __NET_TC_VLAN_H */ 57#endif /* __NET_TC_VLAN_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6ec4ea652f3f..e614ad4d613e 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -406,11 +406,7 @@ void tcp_clear_retrans(struct tcp_sock *tp);
406void tcp_update_metrics(struct sock *sk); 406void tcp_update_metrics(struct sock *sk);
407void tcp_init_metrics(struct sock *sk); 407void tcp_init_metrics(struct sock *sk);
408void tcp_metrics_init(void); 408void tcp_metrics_init(void);
409bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, 409bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
410 bool paws_check, bool timestamps);
411bool tcp_remember_stamp(struct sock *sk);
412bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
413void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
414void tcp_disable_fack(struct tcp_sock *tp); 410void tcp_disable_fack(struct tcp_sock *tp);
415void tcp_close(struct sock *sk, long timeout); 411void tcp_close(struct sock *sk, long timeout);
416void tcp_init_sock(struct sock *sk); 412void tcp_init_sock(struct sock *sk);
@@ -1814,9 +1810,8 @@ struct tcp_request_sock_ops {
1814 __u16 *mss); 1810 __u16 *mss);
1815#endif 1811#endif
1816 struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl, 1812 struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
1817 const struct request_sock *req, 1813 const struct request_sock *req);
1818 bool *strict); 1814 __u32 (*init_seq_tsoff)(const struct sk_buff *skb, u32 *tsoff);
1819 __u32 (*init_seq)(const struct sk_buff *skb, u32 *tsoff);
1820 int (*send_synack)(const struct sock *sk, struct dst_entry *dst, 1815 int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
1821 struct flowi *fl, struct request_sock *req, 1816 struct flowi *fl, struct request_sock *req,
1822 struct tcp_fastopen_cookie *foc, 1817 struct tcp_fastopen_cookie *foc,
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 8ef9e75e004e..d8f6a1ac9af4 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -183,6 +183,7 @@ enum {
183 DEVCONF_SEG6_REQUIRE_HMAC, 183 DEVCONF_SEG6_REQUIRE_HMAC,
184 DEVCONF_ENHANCED_DAD, 184 DEVCONF_ENHANCED_DAD,
185 DEVCONF_ADDR_GEN_MODE, 185 DEVCONF_ADDR_GEN_MODE,
186 DEVCONF_DISABLE_POLICY,
186 DEVCONF_MAX 187 DEVCONF_MAX
187}; 188};
188 189
diff --git a/include/uapi/linux/mpls_iptunnel.h b/include/uapi/linux/mpls_iptunnel.h
index d80a0498f77e..f5e45095b0bb 100644
--- a/include/uapi/linux/mpls_iptunnel.h
+++ b/include/uapi/linux/mpls_iptunnel.h
@@ -16,11 +16,13 @@
16/* MPLS tunnel attributes 16/* MPLS tunnel attributes
17 * [RTA_ENCAP] = { 17 * [RTA_ENCAP] = {
18 * [MPLS_IPTUNNEL_DST] 18 * [MPLS_IPTUNNEL_DST]
19 * [MPLS_IPTUNNEL_TTL]
19 * } 20 * }
20 */ 21 */
21enum { 22enum {
22 MPLS_IPTUNNEL_UNSPEC, 23 MPLS_IPTUNNEL_UNSPEC,
23 MPLS_IPTUNNEL_DST, 24 MPLS_IPTUNNEL_DST,
25 MPLS_IPTUNNEL_TTL,
24 __MPLS_IPTUNNEL_MAX, 26 __MPLS_IPTUNNEL_MAX,
25}; 27};
26#define MPLS_IPTUNNEL_MAX (__MPLS_IPTUNNEL_MAX - 1) 28#define MPLS_IPTUNNEL_MAX (__MPLS_IPTUNNEL_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 05215d30fe5c..8f3842690d17 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -816,6 +816,17 @@ enum nft_rt_keys {
816}; 816};
817 817
818/** 818/**
819 * enum nft_hash_types - nf_tables hash expression types
820 *
821 * @NFT_HASH_JENKINS: Jenkins Hash
822 * @NFT_HASH_SYM: Symmetric Hash
823 */
824enum nft_hash_types {
825 NFT_HASH_JENKINS,
826 NFT_HASH_SYM,
827};
828
829/**
819 * enum nft_hash_attributes - nf_tables hash expression netlink attributes 830 * enum nft_hash_attributes - nf_tables hash expression netlink attributes
820 * 831 *
821 * @NFTA_HASH_SREG: source register (NLA_U32) 832 * @NFTA_HASH_SREG: source register (NLA_U32)
@@ -824,6 +835,7 @@ enum nft_rt_keys {
824 * @NFTA_HASH_MODULUS: modulus value (NLA_U32) 835 * @NFTA_HASH_MODULUS: modulus value (NLA_U32)
825 * @NFTA_HASH_SEED: seed value (NLA_U32) 836 * @NFTA_HASH_SEED: seed value (NLA_U32)
826 * @NFTA_HASH_OFFSET: add this offset value to hash result (NLA_U32) 837 * @NFTA_HASH_OFFSET: add this offset value to hash result (NLA_U32)
838 * @NFTA_HASH_TYPE: hash operation (NLA_U32: nft_hash_types)
827 */ 839 */
828enum nft_hash_attributes { 840enum nft_hash_attributes {
829 NFTA_HASH_UNSPEC, 841 NFTA_HASH_UNSPEC,
@@ -833,6 +845,7 @@ enum nft_hash_attributes {
833 NFTA_HASH_MODULUS, 845 NFTA_HASH_MODULUS,
834 NFTA_HASH_SEED, 846 NFTA_HASH_SEED,
835 NFTA_HASH_OFFSET, 847 NFTA_HASH_OFFSET,
848 NFTA_HASH_TYPE,
836 __NFTA_HASH_MAX, 849 __NFTA_HASH_MAX,
837}; 850};
838#define NFTA_HASH_MAX (__NFTA_HASH_MAX - 1) 851#define NFTA_HASH_MAX (__NFTA_HASH_MAX - 1)
@@ -1244,12 +1257,23 @@ enum nft_fib_flags {
1244 NFTA_FIB_F_MARK = 1 << 2, /* use skb->mark */ 1257 NFTA_FIB_F_MARK = 1 << 2, /* use skb->mark */
1245 NFTA_FIB_F_IIF = 1 << 3, /* restrict to iif */ 1258 NFTA_FIB_F_IIF = 1 << 3, /* restrict to iif */
1246 NFTA_FIB_F_OIF = 1 << 4, /* restrict to oif */ 1259 NFTA_FIB_F_OIF = 1 << 4, /* restrict to oif */
1260 NFTA_FIB_F_PRESENT = 1 << 5, /* check existence only */
1261};
1262
1263enum nft_ct_helper_attributes {
1264 NFTA_CT_HELPER_UNSPEC,
1265 NFTA_CT_HELPER_NAME,
1266 NFTA_CT_HELPER_L3PROTO,
1267 NFTA_CT_HELPER_L4PROTO,
1268 __NFTA_CT_HELPER_MAX,
1247}; 1269};
1270#define NFTA_CT_HELPER_MAX (__NFTA_CT_HELPER_MAX - 1)
1248 1271
1249#define NFT_OBJECT_UNSPEC 0 1272#define NFT_OBJECT_UNSPEC 0
1250#define NFT_OBJECT_COUNTER 1 1273#define NFT_OBJECT_COUNTER 1
1251#define NFT_OBJECT_QUOTA 2 1274#define NFT_OBJECT_QUOTA 2
1252#define __NFT_OBJECT_MAX 3 1275#define NFT_OBJECT_CT_HELPER 3
1276#define __NFT_OBJECT_MAX 4
1253#define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1) 1277#define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1)
1254 1278
1255/** 1279/**
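NFT_HASH_SYM asks for a hash that is identical for both directions of a flow, so replies land in the same bucket as the original packets. A toy stand-in for that property (not the kernel's symhash implementation):

#include <stdint.h>
#include <stdio.h>

static uint32_t toy_sym_hash(uint32_t saddr, uint32_t daddr,
                             uint16_t sport, uint16_t dport)
{
        /* xor is commutative, so swapping source and destination fields
         * cannot change the result
         */
        uint32_t h = (saddr ^ daddr) ^ ((uint32_t)(sport ^ dport) << 16);

        return h * 0x9e3779b1u;
}

int main(void)
{
        uint32_t client = 0x0a000001, server = 0x5db8d822;

        printf("%08x == %08x\n",
               toy_sym_hash(client, server, 40000, 443),
               toy_sym_hash(server, client, 443, 40000));
        return 0;
}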
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index df7451d35131..099bf5528fed 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -617,6 +617,14 @@ struct tc_drr_stats {
617#define TC_QOPT_BITMASK 15 617#define TC_QOPT_BITMASK 15
618#define TC_QOPT_MAX_QUEUE 16 618#define TC_QOPT_MAX_QUEUE 16
619 619
620enum {
621 TC_MQPRIO_HW_OFFLOAD_NONE, /* no offload requested */
622 TC_MQPRIO_HW_OFFLOAD_TCS, /* offload TCs, no queue counts */
623 __TC_MQPRIO_HW_OFFLOAD_MAX
624};
625
626#define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)
627
620struct tc_mqprio_qopt { 628struct tc_mqprio_qopt {
621 __u8 num_tc; 629 __u8 num_tc;
622 __u8 prio_tc_map[TC_QOPT_BITMASK + 1]; 630 __u8 prio_tc_map[TC_QOPT_BITMASK + 1];
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 6546917d605a..3dd72aee4d32 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -319,6 +319,7 @@ enum rtattr_type_t {
319 RTA_EXPIRES, 319 RTA_EXPIRES,
320 RTA_PAD, 320 RTA_PAD,
321 RTA_UID, 321 RTA_UID,
322 RTA_TTL_PROPAGATE,
322 __RTA_MAX 323 __RTA_MAX
323}; 324};
324 325
@@ -545,6 +546,7 @@ enum {
545 TCA_STATS2, 546 TCA_STATS2,
546 TCA_STAB, 547 TCA_STAB,
547 TCA_PAD, 548 TCA_PAD,
549 TCA_DUMP_INVISIBLE,
548 __TCA_MAX 550 __TCA_MAX
549}; 551};
550 552
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index d3ae381fcf33..7212870ef5d7 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -115,6 +115,7 @@ typedef __s32 sctp_assoc_t;
115#define SCTP_PR_SUPPORTED 113 115#define SCTP_PR_SUPPORTED 113
116#define SCTP_DEFAULT_PRINFO 114 116#define SCTP_DEFAULT_PRINFO 114
117#define SCTP_PR_ASSOC_STATUS 115 117#define SCTP_PR_ASSOC_STATUS 115
118#define SCTP_RECONFIG_SUPPORTED 117
118#define SCTP_ENABLE_STREAM_RESET 118 119#define SCTP_ENABLE_STREAM_RESET 118
119#define SCTP_RESET_STREAMS 119 120#define SCTP_RESET_STREAMS 119
120#define SCTP_RESET_ASSOC 120 121#define SCTP_RESET_ASSOC 120
@@ -502,6 +503,28 @@ struct sctp_stream_reset_event {
502 __u16 strreset_stream_list[]; 503 __u16 strreset_stream_list[];
503}; 504};
504 505
506#define SCTP_ASSOC_RESET_DENIED 0x0004
507#define SCTP_ASSOC_RESET_FAILED 0x0008
508struct sctp_assoc_reset_event {
509 __u16 assocreset_type;
510 __u16 assocreset_flags;
511 __u32 assocreset_length;
512 sctp_assoc_t assocreset_assoc_id;
513 __u32 assocreset_local_tsn;
514 __u32 assocreset_remote_tsn;
515};
516
517#define SCTP_ASSOC_CHANGE_DENIED 0x0004
518#define SCTP_ASSOC_CHANGE_FAILED 0x0008
519struct sctp_stream_change_event {
520 __u16 strchange_type;
521 __u16 strchange_flags;
522 __u32 strchange_length;
523 sctp_assoc_t strchange_assoc_id;
524 __u16 strchange_instrms;
525 __u16 strchange_outstrms;
526};
527
505/* 528/*
506 * Described in Section 7.3 529 * Described in Section 7.3
507 * Ancillary Data and Notification Interest Options 530 * Ancillary Data and Notification Interest Options
@@ -518,6 +541,8 @@ struct sctp_event_subscribe {
518 __u8 sctp_authentication_event; 541 __u8 sctp_authentication_event;
519 __u8 sctp_sender_dry_event; 542 __u8 sctp_sender_dry_event;
520 __u8 sctp_stream_reset_event; 543 __u8 sctp_stream_reset_event;
544 __u8 sctp_assoc_reset_event;
545 __u8 sctp_stream_change_event;
521}; 546};
522 547
523/* 548/*
@@ -543,6 +568,8 @@ union sctp_notification {
543 struct sctp_authkey_event sn_authkey_event; 568 struct sctp_authkey_event sn_authkey_event;
544 struct sctp_sender_dry_event sn_sender_dry_event; 569 struct sctp_sender_dry_event sn_sender_dry_event;
545 struct sctp_stream_reset_event sn_strreset_event; 570 struct sctp_stream_reset_event sn_strreset_event;
571 struct sctp_assoc_reset_event sn_assocreset_event;
572 struct sctp_stream_change_event sn_strchange_event;
546}; 573};
547 574
548/* Section 5.3.1 575/* Section 5.3.1
@@ -572,6 +599,10 @@ enum sctp_sn_type {
572#define SCTP_SENDER_DRY_EVENT SCTP_SENDER_DRY_EVENT 599#define SCTP_SENDER_DRY_EVENT SCTP_SENDER_DRY_EVENT
573 SCTP_STREAM_RESET_EVENT, 600 SCTP_STREAM_RESET_EVENT,
574#define SCTP_STREAM_RESET_EVENT SCTP_STREAM_RESET_EVENT 601#define SCTP_STREAM_RESET_EVENT SCTP_STREAM_RESET_EVENT
602 SCTP_ASSOC_RESET_EVENT,
603#define SCTP_ASSOC_RESET_EVENT SCTP_ASSOC_RESET_EVENT
604 SCTP_STREAM_CHANGE_EVENT,
605#define SCTP_STREAM_CHANGE_EVENT SCTP_STREAM_CHANGE_EVENT
575}; 606};
576 607
577/* Notification error codes used to fill up the error fields in some 608/* Notification error codes used to fill up the error fields in some
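Applications opt in to the two new notifications through the existing SCTP_EVENTS socket option. A hypothetical userspace sketch, assuming the installed SCTP headers (lksctp-tools) already expose the new sctp_assoc_reset_event and sctp_stream_change_event members:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int enable_reconf_events(int fd)
{
        struct sctp_event_subscribe ev;

        memset(&ev, 0, sizeof(ev));
        ev.sctp_data_io_event = 1;
        ev.sctp_stream_reset_event = 1;
        ev.sctp_assoc_reset_event = 1;          /* new in this series */
        ev.sctp_stream_change_event = 1;        /* new in this series */

        return setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
}

Once enabled, recvmsg() delivers the corresponding sn_assocreset_event and sn_strchange_event members of union sctp_notification with MSG_NOTIFICATION set.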
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 3b2bed7ca9a4..cec0e171d20c 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -177,7 +177,6 @@ enum
177 LINUX_MIB_TIMEWAITED, /* TimeWaited */ 177 LINUX_MIB_TIMEWAITED, /* TimeWaited */
178 LINUX_MIB_TIMEWAITRECYCLED, /* TimeWaitRecycled */ 178 LINUX_MIB_TIMEWAITRECYCLED, /* TimeWaitRecycled */
179 LINUX_MIB_TIMEWAITKILLED, /* TimeWaitKilled */ 179 LINUX_MIB_TIMEWAITKILLED, /* TimeWaitKilled */
180 LINUX_MIB_PAWSPASSIVEREJECTED, /* PAWSPassiveRejected */
181 LINUX_MIB_PAWSACTIVEREJECTED, /* PAWSActiveRejected */ 180 LINUX_MIB_PAWSACTIVEREJECTED, /* PAWSActiveRejected */
182 LINUX_MIB_PAWSESTABREJECTED, /* PAWSEstabRejected */ 181 LINUX_MIB_PAWSESTABREJECTED, /* PAWSEstabRejected */
183 LINUX_MIB_DELAYEDACKS, /* DelayedACKs */ 182 LINUX_MIB_DELAYEDACKS, /* DelayedACKs */
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 6b6f41f0b211..bcf9955fac95 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -1,4 +1,5 @@
1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2 * Copyright (c) 2016,2017 Facebook
2 * 3 *
3 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
@@ -113,6 +114,33 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
113 return array->value + array->elem_size * index; 114 return array->value + array->elem_size * index;
114} 115}
115 116
117/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
118static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
119{
120 struct bpf_array *array = container_of(map, struct bpf_array, map);
121 struct bpf_insn *insn = insn_buf;
122 u32 elem_size = array->elem_size;
123 const int ret = BPF_REG_0;
124 const int map_ptr = BPF_REG_1;
125 const int index = BPF_REG_2;
126
127 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
128 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
129 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, array->map.max_entries,
130 elem_size == 1 ? 2 : 3);
131 if (elem_size == 1) {
132 /* nop */
133 } else if (is_power_of_2(elem_size)) {
134 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
135 } else {
136 *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
137 }
138 *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
139 *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
140 *insn++ = BPF_MOV64_IMM(ret, 0);
141 return insn - insn_buf;
142}
143
116/* Called from eBPF program */ 144/* Called from eBPF program */
117static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key) 145static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
118{ 146{
@@ -267,6 +295,7 @@ static const struct bpf_map_ops array_ops = {
267 .map_lookup_elem = array_map_lookup_elem, 295 .map_lookup_elem = array_map_lookup_elem,
268 .map_update_elem = array_map_update_elem, 296 .map_update_elem = array_map_update_elem,
269 .map_delete_elem = array_map_delete_elem, 297 .map_delete_elem = array_map_delete_elem,
298 .map_gen_lookup = array_map_gen_lookup,
270}; 299};
271 300
272static struct bpf_map_type_list array_type __ro_after_init = { 301static struct bpf_map_type_list array_type __ro_after_init = {
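array_map_gen_lookup() above emits BPF instructions rather than C, so as a reading aid, here is roughly what that instruction sequence computes, written against a stand-in structure (not the real struct bpf_array):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_array_map {
        uint32_t max_entries;
        uint32_t elem_size;
        uint8_t value[];                /* elements stored back to back */
};

/* What the emitted instruction sequence computes, as plain C. */
static void *inlined_lookup_equiv(struct fake_array_map *array, uint32_t *key)
{
        uint32_t index = *key;                  /* BPF_LDX_MEM(BPF_W, ...)   */

        if (index >= array->max_entries)        /* BPF_JMP_IMM(BPF_JGE, ...) */
                return NULL;                    /* BPF_MOV64_IMM(ret, 0)     */

        /* The nop/shift/multiply cases in the hunk only differ in how this
         * scaling by elem_size is encoded as BPF instructions.
         */
        return array->value + (uint64_t)index * array->elem_size;
}

int main(void)
{
        uint32_t key = 2, miss = 9;
        struct fake_array_map *map =
                calloc(1, sizeof(*map) + 4 * sizeof(uint32_t));

        if (!map)
                return 1;
        map->max_entries = 4;
        map->elem_size = sizeof(uint32_t);
        memcpy(map->value + key * map->elem_size,
               &(uint32_t){ 42 }, sizeof(uint32_t));

        printf("hit:  %u\n", *(uint32_t *)inlined_lookup_equiv(map, &key));
        printf("miss: %p\n", inlined_lookup_equiv(map, &miss));
        free(map);
        return 0;
}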
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index afe5bab376c9..000153acb6d5 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -419,7 +419,11 @@ again:
419 return NULL; 419 return NULL;
420} 420}
421 421
422/* Called from syscall or from eBPF program */ 422/* Called from syscall or from eBPF program directly, so
423 * arguments have to match bpf_map_lookup_elem() exactly.
424 * The return value is adjusted by BPF instructions
425 * in htab_map_gen_lookup().
426 */
423static void *__htab_map_lookup_elem(struct bpf_map *map, void *key) 427static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
424{ 428{
425 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 429 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
@@ -451,6 +455,30 @@ static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
451 return NULL; 455 return NULL;
452} 456}
453 457
458/* inline bpf_map_lookup_elem() call.
459 * Instead of:
460 * bpf_prog
461 * bpf_map_lookup_elem
462 * map->ops->map_lookup_elem
463 * htab_map_lookup_elem
464 * __htab_map_lookup_elem
465 * do:
466 * bpf_prog
467 * __htab_map_lookup_elem
468 */
469static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
470{
471 struct bpf_insn *insn = insn_buf;
472 const int ret = BPF_REG_0;
473
474 *insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
475 *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
476 *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
477 offsetof(struct htab_elem, key) +
478 round_up(map->key_size, 8));
479 return insn - insn_buf;
480}
481
454static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key) 482static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
455{ 483{
456 struct htab_elem *l = __htab_map_lookup_elem(map, key); 484 struct htab_elem *l = __htab_map_lookup_elem(map, key);
@@ -1062,6 +1090,7 @@ static const struct bpf_map_ops htab_ops = {
1062 .map_lookup_elem = htab_map_lookup_elem, 1090 .map_lookup_elem = htab_map_lookup_elem,
1063 .map_update_elem = htab_map_update_elem, 1091 .map_update_elem = htab_map_update_elem,
1064 .map_delete_elem = htab_map_delete_elem, 1092 .map_delete_elem = htab_map_delete_elem,
1093 .map_gen_lookup = htab_map_gen_lookup,
1065}; 1094};
1066 1095
1067static struct bpf_map_type_list htab_type __ro_after_init = { 1096static struct bpf_map_type_list htab_type __ro_after_init = {
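For the hash map, the generated code calls __htab_map_lookup_elem() and then adjusts the returned element pointer to point at the value, just as htab_map_lookup_elem() does in C. A stand-alone sketch of that adjustment, using a stand-in element layout rather than the real struct htab_elem:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ROUND_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

struct fake_htab_elem {
        uint32_t hash;          /* stand-in for the real bookkeeping fields */
        char key[];             /* key bytes, then the 8-byte-aligned value */
};

/* The pointer adjustment the generated BPF_ALU64_IMM(BPF_ADD, ...) performs:
 * skip the element header and the rounded-up key to reach the value.
 */
static void *elem_to_value(struct fake_htab_elem *elem, uint32_t key_size)
{
        if (!elem)              /* the generated BPF_JMP_IMM(BPF_JEQ, ret, 0, 1) */
                return NULL;
        return (char *)elem + offsetof(struct fake_htab_elem, key) +
               ROUND_UP(key_size, 8);
}

int main(void)
{
        uint32_t key_size = 6, value = 7;
        struct fake_htab_elem *elem =
                calloc(1, sizeof(*elem) + ROUND_UP(key_size, 8) + sizeof(value));

        if (!elem)
                return 1;
        memcpy(elem_to_value(elem, key_size), &value, sizeof(value));
        printf("value: %u\n", *(uint32_t *)elem_to_value(elem, key_size));
        free(elem);
        return 0;
}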
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 7af0dcc5d755..48c914b983bd 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -586,59 +586,6 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl)
586 list_add(&tl->list_node, &bpf_prog_types); 586 list_add(&tl->list_node, &bpf_prog_types);
587} 587}
588 588
589/* fixup insn->imm field of bpf_call instructions:
590 * if (insn->imm == BPF_FUNC_map_lookup_elem)
591 * insn->imm = bpf_map_lookup_elem - __bpf_call_base;
592 * else if (insn->imm == BPF_FUNC_map_update_elem)
593 * insn->imm = bpf_map_update_elem - __bpf_call_base;
594 * else ...
595 *
596 * this function is called after eBPF program passed verification
597 */
598static void fixup_bpf_calls(struct bpf_prog *prog)
599{
600 const struct bpf_func_proto *fn;
601 int i;
602
603 for (i = 0; i < prog->len; i++) {
604 struct bpf_insn *insn = &prog->insnsi[i];
605
606 if (insn->code == (BPF_JMP | BPF_CALL)) {
607 /* we reach here when program has bpf_call instructions
608 * and it passed bpf_check(), means that
609 * ops->get_func_proto must have been supplied, check it
610 */
611 BUG_ON(!prog->aux->ops->get_func_proto);
612
613 if (insn->imm == BPF_FUNC_get_route_realm)
614 prog->dst_needed = 1;
615 if (insn->imm == BPF_FUNC_get_prandom_u32)
616 bpf_user_rnd_init_once();
617 if (insn->imm == BPF_FUNC_xdp_adjust_head)
618 prog->xdp_adjust_head = 1;
619 if (insn->imm == BPF_FUNC_tail_call) {
620 /* mark bpf_tail_call as different opcode
621 * to avoid conditional branch in
622 * interpeter for every normal call
623 * and to prevent accidental JITing by
624 * JIT compiler that doesn't support
625 * bpf_tail_call yet
626 */
627 insn->imm = 0;
628 insn->code |= BPF_X;
629 continue;
630 }
631
632 fn = prog->aux->ops->get_func_proto(insn->imm);
633 /* all functions that have prototype and verifier allowed
634 * programs to call them, must be real in-kernel functions
635 */
636 BUG_ON(!fn->func);
637 insn->imm = fn->func - __bpf_call_base;
638 }
639 }
640}
641
642/* drop refcnt on maps used by eBPF program and free auxilary data */ 589/* drop refcnt on maps used by eBPF program and free auxilary data */
643static void free_used_maps(struct bpf_prog_aux *aux) 590static void free_used_maps(struct bpf_prog_aux *aux)
644{ 591{
@@ -892,9 +839,6 @@ static int bpf_prog_load(union bpf_attr *attr)
892 if (err < 0) 839 if (err < 0)
893 goto free_used_maps; 840 goto free_used_maps;
894 841
895 /* fixup BPF_CALL->imm field */
896 fixup_bpf_calls(prog);
897
898 /* eBPF program is ready to be JITed */ 842 /* eBPF program is ready to be JITed */
899 prog = bpf_prog_select_runtime(prog, &err); 843 prog = bpf_prog_select_runtime(prog, &err);
900 if (err < 0) 844 if (err < 0)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 796b68d00119..90bf46787603 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1273,7 +1273,7 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
1273 } 1273 }
1274} 1274}
1275 1275
1276static int check_call(struct bpf_verifier_env *env, int func_id) 1276static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
1277{ 1277{
1278 struct bpf_verifier_state *state = &env->cur_state; 1278 struct bpf_verifier_state *state = &env->cur_state;
1279 const struct bpf_func_proto *fn = NULL; 1279 const struct bpf_func_proto *fn = NULL;
@@ -1369,6 +1369,7 @@ static int check_call(struct bpf_verifier_env *env, int func_id)
1369 } 1369 }
1370 regs[BPF_REG_0].map_ptr = meta.map_ptr; 1370 regs[BPF_REG_0].map_ptr = meta.map_ptr;
1371 regs[BPF_REG_0].id = ++env->id_gen; 1371 regs[BPF_REG_0].id = ++env->id_gen;
1372 env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
1372 } else { 1373 } else {
1373 verbose("unknown return type %d of func %s#%d\n", 1374 verbose("unknown return type %d of func %s#%d\n",
1374 fn->ret_type, func_id_name(func_id), func_id); 1375 fn->ret_type, func_id_name(func_id), func_id);
@@ -2940,7 +2941,7 @@ static int do_check(struct bpf_verifier_env *env)
2940 return -EINVAL; 2941 return -EINVAL;
2941 } 2942 }
2942 2943
2943 err = check_call(env, insn->imm); 2944 err = check_call(env, insn->imm, insn_idx);
2944 if (err) 2945 if (err)
2945 return err; 2946 return err;
2946 2947
@@ -3162,6 +3163,41 @@ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
3162 insn->src_reg = 0; 3163 insn->src_reg = 0;
3163} 3164}
3164 3165
3166/* single env->prog->insni[off] instruction was replaced with the range
3167 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying
3168 * [0, off) and [off, end) to new locations, so the patched range stays zero
3169 */
3170static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
3171 u32 off, u32 cnt)
3172{
3173 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
3174
3175 if (cnt == 1)
3176 return 0;
3177 new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
3178 if (!new_data)
3179 return -ENOMEM;
3180 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
3181 memcpy(new_data + off + cnt - 1, old_data + off,
3182 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
3183 env->insn_aux_data = new_data;
3184 vfree(old_data);
3185 return 0;
3186}
3187
3188static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
3189 const struct bpf_insn *patch, u32 len)
3190{
3191 struct bpf_prog *new_prog;
3192
3193 new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
3194 if (!new_prog)
3195 return NULL;
3196 if (adjust_insn_aux_data(env, new_prog->len, off, len))
3197 return NULL;
3198 return new_prog;
3199}
3200
3165/* convert load instructions that access fields of 'struct __sk_buff' 3201/* convert load instructions that access fields of 'struct __sk_buff'
3166 * into sequence of instructions that access fields of 'struct sk_buff' 3202 * into sequence of instructions that access fields of 'struct sk_buff'
3167 */ 3203 */
@@ -3181,10 +3217,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
3181 verbose("bpf verifier is misconfigured\n"); 3217 verbose("bpf verifier is misconfigured\n");
3182 return -EINVAL; 3218 return -EINVAL;
3183 } else if (cnt) { 3219 } else if (cnt) {
3184 new_prog = bpf_patch_insn_single(env->prog, 0, 3220 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
3185 insn_buf, cnt);
3186 if (!new_prog) 3221 if (!new_prog)
3187 return -ENOMEM; 3222 return -ENOMEM;
3223
3188 env->prog = new_prog; 3224 env->prog = new_prog;
3189 delta += cnt - 1; 3225 delta += cnt - 1;
3190 } 3226 }
@@ -3209,7 +3245,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
3209 else 3245 else
3210 continue; 3246 continue;
3211 3247
3212 if (env->insn_aux_data[i].ptr_type != PTR_TO_CTX) 3248 if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
3213 continue; 3249 continue;
3214 3250
3215 cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog); 3251 cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog);
@@ -3218,8 +3254,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
3218 return -EINVAL; 3254 return -EINVAL;
3219 } 3255 }
3220 3256
3221 new_prog = bpf_patch_insn_single(env->prog, i + delta, insn_buf, 3257 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
3222 cnt);
3223 if (!new_prog) 3258 if (!new_prog)
3224 return -ENOMEM; 3259 return -ENOMEM;
3225 3260
@@ -3233,6 +3268,83 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
3233 return 0; 3268 return 0;
3234} 3269}
3235 3270
3271/* fixup insn->imm field of bpf_call instructions
3272 * and inline eligible helpers as explicit sequence of BPF instructions
3273 *
3274 * this function is called after eBPF program passed verification
3275 */
3276static int fixup_bpf_calls(struct bpf_verifier_env *env)
3277{
3278 struct bpf_prog *prog = env->prog;
3279 struct bpf_insn *insn = prog->insnsi;
3280 const struct bpf_func_proto *fn;
3281 const int insn_cnt = prog->len;
3282 struct bpf_insn insn_buf[16];
3283 struct bpf_prog *new_prog;
3284 struct bpf_map *map_ptr;
3285 int i, cnt, delta = 0;
3286
3287 for (i = 0; i < insn_cnt; i++, insn++) {
3288 if (insn->code != (BPF_JMP | BPF_CALL))
3289 continue;
3290
3291 if (insn->imm == BPF_FUNC_get_route_realm)
3292 prog->dst_needed = 1;
3293 if (insn->imm == BPF_FUNC_get_prandom_u32)
3294 bpf_user_rnd_init_once();
3295 if (insn->imm == BPF_FUNC_xdp_adjust_head)
3296 prog->xdp_adjust_head = 1;
3297 if (insn->imm == BPF_FUNC_tail_call) {
3298 /* mark bpf_tail_call as different opcode to avoid
3299 * conditional branch in the interpeter for every normal
3300 * call and to prevent accidental JITing by JIT compiler
3301 * that doesn't support bpf_tail_call yet
3302 */
3303 insn->imm = 0;
3304 insn->code |= BPF_X;
3305 continue;
3306 }
3307
3308 if (ebpf_jit_enabled() && insn->imm == BPF_FUNC_map_lookup_elem) {
3309 map_ptr = env->insn_aux_data[i + delta].map_ptr;
3310 if (!map_ptr->ops->map_gen_lookup)
3311 goto patch_call_imm;
3312
3313 cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
3314 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
3315 verbose("bpf verifier is misconfigured\n");
3316 return -EINVAL;
3317 }
3318
3319 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
3320 cnt);
3321 if (!new_prog)
3322 return -ENOMEM;
3323
3324 delta += cnt - 1;
3325
3326 /* keep walking new program and skip insns we just inserted */
3327 env->prog = prog = new_prog;
3328 insn = new_prog->insnsi + i + delta;
3329 continue;
3330 }
3331
3332patch_call_imm:
3333 fn = prog->aux->ops->get_func_proto(insn->imm);
3334 /* all functions that have prototype and verifier allowed
3335 * programs to call them, must be real in-kernel functions
3336 */
3337 if (!fn->func) {
3338 verbose("kernel subsystem misconfigured func %s#%d\n",
3339 func_id_name(insn->imm), insn->imm);
3340 return -EFAULT;
3341 }
3342 insn->imm = fn->func - __bpf_call_base;
3343 }
3344
3345 return 0;
3346}
3347
3236static void free_states(struct bpf_verifier_env *env) 3348static void free_states(struct bpf_verifier_env *env)
3237{ 3349{
3238 struct bpf_verifier_state_list *sl, *sln; 3350 struct bpf_verifier_state_list *sl, *sln;
@@ -3328,6 +3440,9 @@ skip_full_check:
3328 /* program is valid, convert *(u32*)(ctx + off) accesses */ 3440 /* program is valid, convert *(u32*)(ctx + off) accesses */
3329 ret = convert_ctx_accesses(env); 3441 ret = convert_ctx_accesses(env);
3330 3442
3443 if (ret == 0)
3444 ret = fixup_bpf_calls(env);
3445
3331 if (log_level && log_len >= log_size - 1) { 3446 if (log_level && log_len >= log_size - 1) {
3332 BUG_ON(log_len >= log_size); 3447 BUG_ON(log_len >= log_size);
3333 /* verifier log exceeded user supplied buffer */ 3448 /* verifier log exceeded user supplied buffer */
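bpf_patch_insn_data() and adjust_insn_aux_data() above keep the per-instruction aux array in step with patched programs: replacing one instruction with cnt instructions shifts aux data from the patch point onward by cnt - 1 and leaves the newly inserted slots zeroed. A toy userspace model of that copy:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        int old_len = 5, off = 2, cnt = 3;
        int new_len = old_len + cnt - 1;
        int old_aux[5] = { 10, 11, 12, 13, 14 };        /* one slot per insn */
        int *new_aux = calloc(new_len, sizeof(*new_aux));
        int i;

        if (!new_aux)
                return 1;
        /* copy [0, off) unchanged, then [off, end) shifted by cnt - 1,
         * mirroring the two memcpy() calls in adjust_insn_aux_data()
         */
        memcpy(new_aux, old_aux, sizeof(*new_aux) * off);
        memcpy(new_aux + off + cnt - 1, old_aux + off,
               sizeof(*new_aux) * (old_len - off));

        for (i = 0; i < new_len; i++)
                printf("%d ", new_aux[i]);      /* prints: 10 11 0 0 12 13 14 */
        printf("\n");
        free(new_aux);
        return 0;
}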
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index e97ab824e368..9ee5787634e5 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -562,8 +562,7 @@ static int vlan_dev_init(struct net_device *dev)
562 NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC | 562 NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC |
563 NETIF_F_ALL_FCOE; 563 NETIF_F_ALL_FCOE;
564 564
565 dev->features |= real_dev->vlan_features | NETIF_F_LLTX | 565 dev->features |= dev->hw_features | NETIF_F_LLTX;
566 NETIF_F_GSO_SOFTWARE;
567 dev->gso_max_size = real_dev->gso_max_size; 566 dev->gso_max_size = real_dev->gso_max_size;
568 dev->gso_max_segs = real_dev->gso_max_segs; 567 dev->gso_max_segs = real_dev->gso_max_segs;
569 if (dev->features & NETIF_F_VLAN_FEATURES) 568 if (dev->features & NETIF_F_VLAN_FEATURES)
diff --git a/net/atm/common.c b/net/atm/common.c
index 9613381f5db0..f06422f4108d 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -62,21 +62,16 @@ static void vcc_remove_socket(struct sock *sk)
62 write_unlock_irq(&vcc_sklist_lock); 62 write_unlock_irq(&vcc_sklist_lock);
63} 63}
64 64
65static struct sk_buff *alloc_tx(struct atm_vcc *vcc, unsigned int size) 65static bool vcc_tx_ready(struct atm_vcc *vcc, unsigned int size)
66{ 66{
67 struct sk_buff *skb;
68 struct sock *sk = sk_atm(vcc); 67 struct sock *sk = sk_atm(vcc);
69 68
70 if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) { 69 if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) {
71 pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n", 70 pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n",
72 sk_wmem_alloc_get(sk), size, sk->sk_sndbuf); 71 sk_wmem_alloc_get(sk), size, sk->sk_sndbuf);
73 return NULL; 72 return false;
74 } 73 }
75 while (!(skb = alloc_skb(size, GFP_KERNEL))) 74 return true;
76 schedule();
77 pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
78 atomic_add(skb->truesize, &sk->sk_wmem_alloc);
79 return skb;
80} 75}
81 76
82static void vcc_sock_destruct(struct sock *sk) 77static void vcc_sock_destruct(struct sock *sk)
@@ -606,7 +601,7 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
606 eff = (size+3) & ~3; /* align to word boundary */ 601 eff = (size+3) & ~3; /* align to word boundary */
607 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 602 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
608 error = 0; 603 error = 0;
609 while (!(skb = alloc_tx(vcc, eff))) { 604 while (!vcc_tx_ready(vcc, eff)) {
610 if (m->msg_flags & MSG_DONTWAIT) { 605 if (m->msg_flags & MSG_DONTWAIT) {
611 error = -EAGAIN; 606 error = -EAGAIN;
612 break; 607 break;
@@ -628,6 +623,15 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
628 finish_wait(sk_sleep(sk), &wait); 623 finish_wait(sk_sleep(sk), &wait);
629 if (error) 624 if (error)
630 goto out; 625 goto out;
626
627 skb = alloc_skb(eff, GFP_KERNEL);
628 if (!skb) {
629 error = -ENOMEM;
630 goto out;
631 }
632 pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
633 atomic_add(skb->truesize, &sk->sk_wmem_alloc);
634
631 skb->dev = NULL; /* for paths shared with net_device interfaces */ 635 skb->dev = NULL; /* for paths shared with net_device interfaces */
632 ATM_SKB(skb)->atm_options = vcc->atm_options; 636 ATM_SKB(skb)->atm_options = vcc->atm_options;
633 if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) { 637 if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index fa87fbd62bb7..d20b01b8d103 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -995,13 +995,10 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net,
995 if (!elem) 995 if (!elem)
996 return okfn(net, sk, skb); 996 return okfn(net, sk, skb);
997 997
998 /* We may already have this, but read-locks nest anyway */
999 rcu_read_lock();
1000 nf_hook_state_init(&state, hook, NFPROTO_BRIDGE, indev, outdev, 998 nf_hook_state_init(&state, hook, NFPROTO_BRIDGE, indev, outdev,
1001 sk, net, okfn); 999 sk, net, okfn);
1002 1000
1003 ret = nf_hook_slow(skb, &state, elem); 1001 ret = nf_hook_slow(skb, &state, elem);
1004 rcu_read_unlock();
1005 if (ret == 1) 1002 if (ret == 1)
1006 ret = okfn(net, sk, skb); 1003 ret = okfn(net, sk, skb);
1007 1004
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 98b9c8e8615e..707caea39743 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -62,10 +62,10 @@ print_ports(const struct sk_buff *skb, uint8_t protocol, int offset)
62 pptr = skb_header_pointer(skb, offset, 62 pptr = skb_header_pointer(skb, offset,
63 sizeof(_ports), &_ports); 63 sizeof(_ports), &_ports);
64 if (pptr == NULL) { 64 if (pptr == NULL) {
65 printk(" INCOMPLETE TCP/UDP header"); 65 pr_cont(" INCOMPLETE TCP/UDP header");
66 return; 66 return;
67 } 67 }
68 printk(" SPT=%u DPT=%u", ntohs(pptr->src), ntohs(pptr->dst)); 68 pr_cont(" SPT=%u DPT=%u", ntohs(pptr->src), ntohs(pptr->dst));
69 } 69 }
70} 70}
71 71
@@ -100,11 +100,11 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
100 100
101 ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph); 101 ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
102 if (ih == NULL) { 102 if (ih == NULL) {
103 printk(" INCOMPLETE IP header"); 103 pr_cont(" INCOMPLETE IP header");
104 goto out; 104 goto out;
105 } 105 }
106 printk(" IP SRC=%pI4 IP DST=%pI4, IP tos=0x%02X, IP proto=%d", 106 pr_cont(" IP SRC=%pI4 IP DST=%pI4, IP tos=0x%02X, IP proto=%d",
107 &ih->saddr, &ih->daddr, ih->tos, ih->protocol); 107 &ih->saddr, &ih->daddr, ih->tos, ih->protocol);
108 print_ports(skb, ih->protocol, ih->ihl*4); 108 print_ports(skb, ih->protocol, ih->ihl*4);
109 goto out; 109 goto out;
110 } 110 }
@@ -120,11 +120,11 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
120 120
121 ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph); 121 ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
122 if (ih == NULL) { 122 if (ih == NULL) {
123 printk(" INCOMPLETE IPv6 header"); 123 pr_cont(" INCOMPLETE IPv6 header");
124 goto out; 124 goto out;
125 } 125 }
126 printk(" IPv6 SRC=%pI6 IPv6 DST=%pI6, IPv6 priority=0x%01X, Next Header=%d", 126 pr_cont(" IPv6 SRC=%pI6 IPv6 DST=%pI6, IPv6 priority=0x%01X, Next Header=%d",
127 &ih->saddr, &ih->daddr, ih->priority, ih->nexthdr); 127 &ih->saddr, &ih->daddr, ih->priority, ih->nexthdr);
128 nexthdr = ih->nexthdr; 128 nexthdr = ih->nexthdr;
129 offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr, &frag_off); 129 offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr, &frag_off);
130 if (offset_ph == -1) 130 if (offset_ph == -1)
@@ -142,12 +142,12 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
142 142
143 ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph); 143 ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
144 if (ah == NULL) { 144 if (ah == NULL) {
145 printk(" INCOMPLETE ARP header"); 145 pr_cont(" INCOMPLETE ARP header");
146 goto out; 146 goto out;
147 } 147 }
148 printk(" ARP HTYPE=%d, PTYPE=0x%04x, OPCODE=%d", 148 pr_cont(" ARP HTYPE=%d, PTYPE=0x%04x, OPCODE=%d",
149 ntohs(ah->ar_hrd), ntohs(ah->ar_pro), 149 ntohs(ah->ar_hrd), ntohs(ah->ar_pro),
150 ntohs(ah->ar_op)); 150 ntohs(ah->ar_op));
151 151
152 /* If it's for Ethernet and the lengths are OK, 152 /* If it's for Ethernet and the lengths are OK,
153 * then log the ARP payload 153 * then log the ARP payload
@@ -161,17 +161,17 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
161 ap = skb_header_pointer(skb, sizeof(_arph), 161 ap = skb_header_pointer(skb, sizeof(_arph),
162 sizeof(_arpp), &_arpp); 162 sizeof(_arpp), &_arpp);
163 if (ap == NULL) { 163 if (ap == NULL) {
164 printk(" INCOMPLETE ARP payload"); 164 pr_cont(" INCOMPLETE ARP payload");
165 goto out; 165 goto out;
166 } 166 }
167 printk(" ARP MAC SRC=%pM ARP IP SRC=%pI4 ARP MAC DST=%pM ARP IP DST=%pI4", 167 pr_cont(" ARP MAC SRC=%pM ARP IP SRC=%pI4 ARP MAC DST=%pM ARP IP DST=%pI4",
168 ap->mac_src, ap->ip_src, ap->mac_dst, ap->ip_dst); 168 ap->mac_src, ap->ip_src,
169 ap->mac_dst, ap->ip_dst);
169 } 170 }
170 } 171 }
171out: 172out:
172 printk("\n"); 173 pr_cont("\n");
173 spin_unlock_bh(&ebt_log_lock); 174 spin_unlock_bh(&ebt_log_lock);
174
175} 175}
176 176
177static unsigned int 177static unsigned int
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 206dc266ecd2..346ef6b00b8f 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -375,11 +375,7 @@ static int nft_reject_bridge_init(const struct nft_ctx *ctx,
375 const struct nlattr * const tb[]) 375 const struct nlattr * const tb[])
376{ 376{
377 struct nft_reject *priv = nft_expr_priv(expr); 377 struct nft_reject *priv = nft_expr_priv(expr);
378 int icmp_code, err; 378 int icmp_code;
379
380 err = nft_reject_bridge_validate(ctx, expr, NULL);
381 if (err < 0)
382 return err;
383 379
384 if (tb[NFTA_REJECT_TYPE] == NULL) 380 if (tb[NFTA_REJECT_TYPE] == NULL)
385 return -EINVAL; 381 return -EINVAL;
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index fb55327dcfea..70ccda233bd1 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -412,9 +412,8 @@ static int __init init_net_drop_monitor(void)
412 for_each_possible_cpu(cpu) { 412 for_each_possible_cpu(cpu) {
413 data = &per_cpu(dm_cpu_data, cpu); 413 data = &per_cpu(dm_cpu_data, cpu);
414 INIT_WORK(&data->dm_alert_work, send_dm_alert); 414 INIT_WORK(&data->dm_alert_work, send_dm_alert);
415 init_timer(&data->send_timer); 415 setup_timer(&data->send_timer, sched_send_work,
416 data->send_timer.data = (unsigned long)data; 416 (unsigned long)data);
417 data->send_timer.function = sched_send_work;
418 spin_lock_init(&data->lock); 417 spin_lock_init(&data->lock);
419 reset_per_cpu_data(data); 418 reset_per_cpu_data(data);
420 } 419 }
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index aecb2c7241b6..905a88ad28e0 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -109,6 +109,7 @@ static const char
109rss_hash_func_strings[ETH_RSS_HASH_FUNCS_COUNT][ETH_GSTRING_LEN] = { 109rss_hash_func_strings[ETH_RSS_HASH_FUNCS_COUNT][ETH_GSTRING_LEN] = {
110 [ETH_RSS_HASH_TOP_BIT] = "toeplitz", 110 [ETH_RSS_HASH_TOP_BIT] = "toeplitz",
111 [ETH_RSS_HASH_XOR_BIT] = "xor", 111 [ETH_RSS_HASH_XOR_BIT] = "xor",
112 [ETH_RSS_HASH_CRC32_BIT] = "crc32",
112}; 113};
113 114
114static const char 115static const char
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index b6791d94841d..816e3ccb0ec9 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -23,6 +23,20 @@ static const struct fib_kuid_range fib_kuid_range_unset = {
23 KUIDT_INIT(~0), 23 KUIDT_INIT(~0),
24}; 24};
25 25
26bool fib_rule_matchall(const struct fib_rule *rule)
27{
28 if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id ||
29 rule->flags)
30 return false;
31 if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
32 return false;
33 if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) ||
34 !uid_eq(rule->uid_range.end, fib_kuid_range_unset.end))
35 return false;
36 return true;
37}
38EXPORT_SYMBOL_GPL(fib_rule_matchall);
39
26int fib_default_rule_add(struct fib_rules_ops *ops, 40int fib_default_rule_add(struct fib_rules_ops *ops,
27 u32 pref, u32 table, u32 flags) 41 u32 pref, u32 table, u32 flags)
28{ 42{
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index c35aae13c8d2..5f3ae922fcd1 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -113,6 +113,216 @@ __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
113} 113}
114EXPORT_SYMBOL(__skb_flow_get_ports); 114EXPORT_SYMBOL(__skb_flow_get_ports);
115 115
116enum flow_dissect_ret {
117 FLOW_DISSECT_RET_OUT_GOOD,
118 FLOW_DISSECT_RET_OUT_BAD,
119 FLOW_DISSECT_RET_OUT_PROTO_AGAIN,
120};
121
122static enum flow_dissect_ret
123__skb_flow_dissect_mpls(const struct sk_buff *skb,
124 struct flow_dissector *flow_dissector,
125 void *target_container, void *data, int nhoff, int hlen)
126{
127 struct flow_dissector_key_keyid *key_keyid;
128 struct mpls_label *hdr, _hdr[2];
129
130 if (!dissector_uses_key(flow_dissector,
131 FLOW_DISSECTOR_KEY_MPLS_ENTROPY))
132 return FLOW_DISSECT_RET_OUT_GOOD;
133
134 hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
135 hlen, &_hdr);
136 if (!hdr)
137 return FLOW_DISSECT_RET_OUT_BAD;
138
139 if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
140 MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
141 key_keyid = skb_flow_dissector_target(flow_dissector,
142 FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
143 target_container);
144 key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK);
145 }
146 return FLOW_DISSECT_RET_OUT_GOOD;
147}
148
149static enum flow_dissect_ret
150__skb_flow_dissect_arp(const struct sk_buff *skb,
151 struct flow_dissector *flow_dissector,
152 void *target_container, void *data, int nhoff, int hlen)
153{
154 struct flow_dissector_key_arp *key_arp;
155 struct {
156 unsigned char ar_sha[ETH_ALEN];
157 unsigned char ar_sip[4];
158 unsigned char ar_tha[ETH_ALEN];
159 unsigned char ar_tip[4];
160 } *arp_eth, _arp_eth;
161 const struct arphdr *arp;
162 struct arphdr *_arp;
163
164 if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
165 return FLOW_DISSECT_RET_OUT_GOOD;
166
167 arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
168 hlen, &_arp);
169 if (!arp)
170 return FLOW_DISSECT_RET_OUT_BAD;
171
172 if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
173 arp->ar_pro != htons(ETH_P_IP) ||
174 arp->ar_hln != ETH_ALEN ||
175 arp->ar_pln != 4 ||
176 (arp->ar_op != htons(ARPOP_REPLY) &&
177 arp->ar_op != htons(ARPOP_REQUEST)))
178 return FLOW_DISSECT_RET_OUT_BAD;
179
180 arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
181 sizeof(_arp_eth), data,
182 hlen, &_arp_eth);
183 if (!arp_eth)
184 return FLOW_DISSECT_RET_OUT_BAD;
185
186 key_arp = skb_flow_dissector_target(flow_dissector,
187 FLOW_DISSECTOR_KEY_ARP,
188 target_container);
189
190 memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
191 memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));
192
193 /* Only store the lower byte of the opcode;
194 * this covers ARPOP_REPLY and ARPOP_REQUEST.
195 */
196 key_arp->op = ntohs(arp->ar_op) & 0xff;
197
198 ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
199 ether_addr_copy(key_arp->tha, arp_eth->ar_tha);
200
201 return FLOW_DISSECT_RET_OUT_GOOD;
202}
203
204static enum flow_dissect_ret
205__skb_flow_dissect_gre(const struct sk_buff *skb,
206 struct flow_dissector_key_control *key_control,
207 struct flow_dissector *flow_dissector,
208 void *target_container, void *data,
209 __be16 *p_proto, int *p_nhoff, int *p_hlen,
210 unsigned int flags)
211{
212 struct flow_dissector_key_keyid *key_keyid;
213 struct gre_base_hdr *hdr, _hdr;
214 int offset = 0;
215 u16 gre_ver;
216
217 hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
218 data, *p_hlen, &_hdr);
219 if (!hdr)
220 return FLOW_DISSECT_RET_OUT_BAD;
221
222 /* Only look inside GRE without routing */
223 if (hdr->flags & GRE_ROUTING)
224 return FLOW_DISSECT_RET_OUT_GOOD;
225
226 /* Only look inside GRE for version 0 and 1 */
227 gre_ver = ntohs(hdr->flags & GRE_VERSION);
228 if (gre_ver > 1)
229 return FLOW_DISSECT_RET_OUT_GOOD;
230
231 *p_proto = hdr->protocol;
232 if (gre_ver) {
233 /* Version1 must be PPTP, and check the flags */
234 if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
235 return FLOW_DISSECT_RET_OUT_GOOD;
236 }
237
238 offset += sizeof(struct gre_base_hdr);
239
240 if (hdr->flags & GRE_CSUM)
241 offset += sizeof(((struct gre_full_hdr *) 0)->csum) +
242 sizeof(((struct gre_full_hdr *) 0)->reserved1);
243
244 if (hdr->flags & GRE_KEY) {
245 const __be32 *keyid;
246 __be32 _keyid;
247
248 keyid = __skb_header_pointer(skb, *p_nhoff + offset,
249 sizeof(_keyid),
250 data, *p_hlen, &_keyid);
251 if (!keyid)
252 return FLOW_DISSECT_RET_OUT_BAD;
253
254 if (dissector_uses_key(flow_dissector,
255 FLOW_DISSECTOR_KEY_GRE_KEYID)) {
256 key_keyid = skb_flow_dissector_target(flow_dissector,
257 FLOW_DISSECTOR_KEY_GRE_KEYID,
258 target_container);
259 if (gre_ver == 0)
260 key_keyid->keyid = *keyid;
261 else
262 key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
263 }
264 offset += sizeof(((struct gre_full_hdr *) 0)->key);
265 }
266
267 if (hdr->flags & GRE_SEQ)
268 offset += sizeof(((struct pptp_gre_header *) 0)->seq);
269
270 if (gre_ver == 0) {
271 if (*p_proto == htons(ETH_P_TEB)) {
272 const struct ethhdr *eth;
273 struct ethhdr _eth;
274
275 eth = __skb_header_pointer(skb, *p_nhoff + offset,
276 sizeof(_eth),
277 data, *p_hlen, &_eth);
278 if (!eth)
279 return FLOW_DISSECT_RET_OUT_BAD;
280 *p_proto = eth->h_proto;
281 offset += sizeof(*eth);
282
283 /* Cap headers that we access via pointers at the
284 * end of the Ethernet header as our maximum alignment
285 * at that point is only 2 bytes.
286 */
287 if (NET_IP_ALIGN)
288 *p_hlen = *p_nhoff + offset;
289 }
290 } else { /* version 1, must be PPTP */
291 u8 _ppp_hdr[PPP_HDRLEN];
292 u8 *ppp_hdr;
293
294 if (hdr->flags & GRE_ACK)
295 offset += sizeof(((struct pptp_gre_header *) 0)->ack);
296
297 ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
298 sizeof(_ppp_hdr),
299 data, *p_hlen, _ppp_hdr);
300 if (!ppp_hdr)
301 return FLOW_DISSECT_RET_OUT_BAD;
302
303 switch (PPP_PROTOCOL(ppp_hdr)) {
304 case PPP_IP:
305 *p_proto = htons(ETH_P_IP);
306 break;
307 case PPP_IPV6:
308 *p_proto = htons(ETH_P_IPV6);
309 break;
310 default:
311 /* Could probably catch some more like MPLS */
312 break;
313 }
314
315 offset += PPP_HDRLEN;
316 }
317
318 *p_nhoff += offset;
319 key_control->flags |= FLOW_DIS_ENCAPSULATION;
320 if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
321 return FLOW_DISSECT_RET_OUT_GOOD;
322
323 return FLOW_DISSECT_RET_OUT_PROTO_AGAIN;
324}
325
116/** 326/**
117 * __skb_flow_dissect - extract the flow_keys struct and return it 327 * __skb_flow_dissect - extract the flow_keys struct and return it
118 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified 328 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
@@ -138,12 +348,10 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
138 struct flow_dissector_key_control *key_control; 348 struct flow_dissector_key_control *key_control;
139 struct flow_dissector_key_basic *key_basic; 349 struct flow_dissector_key_basic *key_basic;
140 struct flow_dissector_key_addrs *key_addrs; 350 struct flow_dissector_key_addrs *key_addrs;
141 struct flow_dissector_key_arp *key_arp;
142 struct flow_dissector_key_ports *key_ports; 351 struct flow_dissector_key_ports *key_ports;
143 struct flow_dissector_key_icmp *key_icmp; 352 struct flow_dissector_key_icmp *key_icmp;
144 struct flow_dissector_key_tags *key_tags; 353 struct flow_dissector_key_tags *key_tags;
145 struct flow_dissector_key_vlan *key_vlan; 354 struct flow_dissector_key_vlan *key_vlan;
146 struct flow_dissector_key_keyid *key_keyid;
147 bool skip_vlan = false; 355 bool skip_vlan = false;
148 u8 ip_proto = 0; 356 u8 ip_proto = 0;
149 bool ret; 357 bool ret;
@@ -181,7 +389,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
181 memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs)); 389 memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
182 } 390 }
183 391
184again: 392proto_again:
185 switch (proto) { 393 switch (proto) {
186 case htons(ETH_P_IP): { 394 case htons(ETH_P_IP): {
187 const struct iphdr *iph; 395 const struct iphdr *iph;
@@ -284,7 +492,7 @@ ipv6:
284 proto = vlan->h_vlan_encapsulated_proto; 492 proto = vlan->h_vlan_encapsulated_proto;
285 nhoff += sizeof(*vlan); 493 nhoff += sizeof(*vlan);
286 if (skip_vlan) 494 if (skip_vlan)
287 goto again; 495 goto proto_again;
288 } 496 }
289 497
290 skip_vlan = true; 498 skip_vlan = true;
@@ -307,7 +515,7 @@ ipv6:
307 } 515 }
308 } 516 }
309 517
310 goto again; 518 goto proto_again;
311 } 519 }
312 case htons(ETH_P_PPP_SES): { 520 case htons(ETH_P_PPP_SES): {
313 struct { 521 struct {
@@ -349,31 +557,17 @@ ipv6:
349 } 557 }
350 558
351 case htons(ETH_P_MPLS_UC): 559 case htons(ETH_P_MPLS_UC):
352 case htons(ETH_P_MPLS_MC): { 560 case htons(ETH_P_MPLS_MC):
353 struct mpls_label *hdr, _hdr[2];
354mpls: 561mpls:
355 hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, 562 switch (__skb_flow_dissect_mpls(skb, flow_dissector,
356 hlen, &_hdr); 563 target_container, data,
357 if (!hdr) 564 nhoff, hlen)) {
358 goto out_bad; 565 case FLOW_DISSECT_RET_OUT_GOOD:
359
360 if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
361 MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
362 if (dissector_uses_key(flow_dissector,
363 FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
364 key_keyid = skb_flow_dissector_target(flow_dissector,
365 FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
366 target_container);
367 key_keyid->keyid = hdr[1].entry &
368 htonl(MPLS_LS_LABEL_MASK);
369 }
370
371 goto out_good; 566 goto out_good;
567 case FLOW_DISSECT_RET_OUT_BAD:
568 default:
569 goto out_bad;
372 } 570 }
373
374 goto out_good;
375 }
376
377 case htons(ETH_P_FCOE): 571 case htons(ETH_P_FCOE):
378 if ((hlen - nhoff) < FCOE_HEADER_LEN) 572 if ((hlen - nhoff) < FCOE_HEADER_LEN)
379 goto out_bad; 573 goto out_bad;
@@ -382,177 +576,33 @@ mpls:
382 goto out_good; 576 goto out_good;
383 577
384 case htons(ETH_P_ARP): 578 case htons(ETH_P_ARP):
385 case htons(ETH_P_RARP): { 579 case htons(ETH_P_RARP):
386 struct { 580 switch (__skb_flow_dissect_arp(skb, flow_dissector,
387 unsigned char ar_sha[ETH_ALEN]; 581 target_container, data,
388 unsigned char ar_sip[4]; 582 nhoff, hlen)) {
389 unsigned char ar_tha[ETH_ALEN]; 583 case FLOW_DISSECT_RET_OUT_GOOD:
390 unsigned char ar_tip[4]; 584 goto out_good;
391 } *arp_eth, _arp_eth; 585 case FLOW_DISSECT_RET_OUT_BAD:
392 const struct arphdr *arp; 586 default:
393 struct arphdr *_arp;
394
395 arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
396 hlen, &_arp);
397 if (!arp)
398 goto out_bad;
399
400 if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
401 arp->ar_pro != htons(ETH_P_IP) ||
402 arp->ar_hln != ETH_ALEN ||
403 arp->ar_pln != 4 ||
404 (arp->ar_op != htons(ARPOP_REPLY) &&
405 arp->ar_op != htons(ARPOP_REQUEST)))
406 goto out_bad;
407
408 arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
409 sizeof(_arp_eth), data,
410 hlen,
411 &_arp_eth);
412 if (!arp_eth)
413 goto out_bad; 587 goto out_bad;
414
415 if (dissector_uses_key(flow_dissector,
416 FLOW_DISSECTOR_KEY_ARP)) {
417
418 key_arp = skb_flow_dissector_target(flow_dissector,
419 FLOW_DISSECTOR_KEY_ARP,
420 target_container);
421
422 memcpy(&key_arp->sip, arp_eth->ar_sip,
423 sizeof(key_arp->sip));
424 memcpy(&key_arp->tip, arp_eth->ar_tip,
425 sizeof(key_arp->tip));
426
427 /* Only store the lower byte of the opcode;
428 * this covers ARPOP_REPLY and ARPOP_REQUEST.
429 */
430 key_arp->op = ntohs(arp->ar_op) & 0xff;
431
432 ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
433 ether_addr_copy(key_arp->tha, arp_eth->ar_tha);
434 } 588 }
435
436 goto out_good;
437 }
438
439 default: 589 default:
440 goto out_bad; 590 goto out_bad;
441 } 591 }
442 592
443ip_proto_again: 593ip_proto_again:
444 switch (ip_proto) { 594 switch (ip_proto) {
445 case IPPROTO_GRE: { 595 case IPPROTO_GRE:
446 struct gre_base_hdr *hdr, _hdr; 596 switch (__skb_flow_dissect_gre(skb, key_control, flow_dissector,
447 u16 gre_ver; 597 target_container, data,
448 int offset = 0; 598 &proto, &nhoff, &hlen, flags)) {
449 599 case FLOW_DISSECT_RET_OUT_GOOD:
450 hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr); 600 goto out_good;
451 if (!hdr) 601 case FLOW_DISSECT_RET_OUT_BAD:
452 goto out_bad; 602 goto out_bad;
453 603 case FLOW_DISSECT_RET_OUT_PROTO_AGAIN:
454 /* Only look inside GRE without routing */ 604 goto proto_again;
455 if (hdr->flags & GRE_ROUTING)
456 break;
457
458 /* Only look inside GRE for version 0 and 1 */
459 gre_ver = ntohs(hdr->flags & GRE_VERSION);
460 if (gre_ver > 1)
461 break;
462
463 proto = hdr->protocol;
464 if (gre_ver) {
465 /* Version1 must be PPTP, and check the flags */
466 if (!(proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
467 break;
468 }
469
470 offset += sizeof(struct gre_base_hdr);
471
472 if (hdr->flags & GRE_CSUM)
473 offset += sizeof(((struct gre_full_hdr *)0)->csum) +
474 sizeof(((struct gre_full_hdr *)0)->reserved1);
475
476 if (hdr->flags & GRE_KEY) {
477 const __be32 *keyid;
478 __be32 _keyid;
479
480 keyid = __skb_header_pointer(skb, nhoff + offset, sizeof(_keyid),
481 data, hlen, &_keyid);
482 if (!keyid)
483 goto out_bad;
484
485 if (dissector_uses_key(flow_dissector,
486 FLOW_DISSECTOR_KEY_GRE_KEYID)) {
487 key_keyid = skb_flow_dissector_target(flow_dissector,
488 FLOW_DISSECTOR_KEY_GRE_KEYID,
489 target_container);
490 if (gre_ver == 0)
491 key_keyid->keyid = *keyid;
492 else
493 key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
494 }
495 offset += sizeof(((struct gre_full_hdr *)0)->key);
496 } 605 }
497
498 if (hdr->flags & GRE_SEQ)
499 offset += sizeof(((struct pptp_gre_header *)0)->seq);
500
501 if (gre_ver == 0) {
502 if (proto == htons(ETH_P_TEB)) {
503 const struct ethhdr *eth;
504 struct ethhdr _eth;
505
506 eth = __skb_header_pointer(skb, nhoff + offset,
507 sizeof(_eth),
508 data, hlen, &_eth);
509 if (!eth)
510 goto out_bad;
511 proto = eth->h_proto;
512 offset += sizeof(*eth);
513
514 /* Cap headers that we access via pointers at the
515 * end of the Ethernet header as our maximum alignment
516 * at that point is only 2 bytes.
517 */
518 if (NET_IP_ALIGN)
519 hlen = (nhoff + offset);
520 }
521 } else { /* version 1, must be PPTP */
522 u8 _ppp_hdr[PPP_HDRLEN];
523 u8 *ppp_hdr;
524
525 if (hdr->flags & GRE_ACK)
526 offset += sizeof(((struct pptp_gre_header *)0)->ack);
527
528 ppp_hdr = __skb_header_pointer(skb, nhoff + offset,
529 sizeof(_ppp_hdr),
530 data, hlen, _ppp_hdr);
531 if (!ppp_hdr)
532 goto out_bad;
533
534 switch (PPP_PROTOCOL(ppp_hdr)) {
535 case PPP_IP:
536 proto = htons(ETH_P_IP);
537 break;
538 case PPP_IPV6:
539 proto = htons(ETH_P_IPV6);
540 break;
541 default:
542 /* Could probably catch some more like MPLS */
543 break;
544 }
545
546 offset += PPP_HDRLEN;
547 }
548
549 nhoff += offset;
550 key_control->flags |= FLOW_DIS_ENCAPSULATION;
551 if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
552 goto out_good;
553
554 goto again;
555 }
556 case NEXTHDR_HOP: 606 case NEXTHDR_HOP:
557 case NEXTHDR_ROUTING: 607 case NEXTHDR_ROUTING:
558 case NEXTHDR_DEST: { 608 case NEXTHDR_DEST: {
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index 6df9f8fabf0c..b5888190223c 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -162,7 +162,6 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
162 struct rtnexthop *rtnh = (struct rtnexthop *)attr; 162 struct rtnexthop *rtnh = (struct rtnexthop *)attr;
163 struct nlattr *nla_entype; 163 struct nlattr *nla_entype;
164 struct nlattr *attrs; 164 struct nlattr *attrs;
165 struct nlattr *nla;
166 u16 encap_type; 165 u16 encap_type;
167 int attrlen; 166 int attrlen;
168 167
@@ -170,7 +169,6 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
170 attrlen = rtnh_attrlen(rtnh); 169 attrlen = rtnh_attrlen(rtnh);
171 if (attrlen > 0) { 170 if (attrlen > 0) {
172 attrs = rtnh_attrs(rtnh); 171 attrs = rtnh_attrs(rtnh);
173 nla = nla_find(attrs, attrlen, RTA_ENCAP);
174 nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE); 172 nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
175 173
176 if (nla_entype) { 174 if (nla_entype) {
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 758f140b6bed..fb87e78a2cc7 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -45,8 +45,8 @@ static u32 seq_scale(u32 seq)
45#endif 45#endif
46 46
47#if IS_ENABLED(CONFIG_IPV6) 47#if IS_ENABLED(CONFIG_IPV6)
48u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, 48u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr,
49 __be16 sport, __be16 dport, u32 *tsoff) 49 __be16 sport, __be16 dport, u32 *tsoff)
50{ 50{
51 const struct { 51 const struct {
52 struct in6_addr saddr; 52 struct in6_addr saddr;
@@ -66,7 +66,7 @@ u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
66 *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0; 66 *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0;
67 return seq_scale(hash); 67 return seq_scale(hash);
68} 68}
69EXPORT_SYMBOL(secure_tcpv6_sequence_number); 69EXPORT_SYMBOL(secure_tcpv6_seq_and_tsoff);
70 70
71u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, 71u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
72 __be16 dport) 72 __be16 dport)
@@ -89,14 +89,13 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
89 89
90#ifdef CONFIG_INET 90#ifdef CONFIG_INET
91 91
92/* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d), 92/* secure_tcp_seq_and_tsoff(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d),
93 * but fortunately, `sport' cannot be 0 in any circumstances. If this changes, 93 * but fortunately, `sport' cannot be 0 in any circumstances. If this changes,
94 * it would be easy enough to have the former function use siphash_4u32, passing 94 * it would be easy enough to have the former function use siphash_4u32, passing
95 * the arguments as separate u32. 95 * the arguments as separate u32.
96 */ 96 */
97 97u32 secure_tcp_seq_and_tsoff(__be32 saddr, __be32 daddr,
98u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, 98 __be16 sport, __be16 dport, u32 *tsoff)
99 __be16 sport, __be16 dport, u32 *tsoff)
100{ 99{
101 u64 hash; 100 u64 hash;
102 net_secret_init(); 101 net_secret_init();
diff --git a/net/core/sock.c b/net/core/sock.c
index a96d5f7a5734..a83731c36761 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -247,12 +247,66 @@ static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
247static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = { 247static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
248 _sock_locks("k-clock-") 248 _sock_locks("k-clock-")
249}; 249};
250static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
251 "rlock-AF_UNSPEC", "rlock-AF_UNIX" , "rlock-AF_INET" ,
252 "rlock-AF_AX25" , "rlock-AF_IPX" , "rlock-AF_APPLETALK",
253 "rlock-AF_NETROM", "rlock-AF_BRIDGE" , "rlock-AF_ATMPVC" ,
254 "rlock-AF_X25" , "rlock-AF_INET6" , "rlock-AF_ROSE" ,
255 "rlock-AF_DECnet", "rlock-AF_NETBEUI" , "rlock-AF_SECURITY" ,
256 "rlock-AF_KEY" , "rlock-AF_NETLINK" , "rlock-AF_PACKET" ,
257 "rlock-AF_ASH" , "rlock-AF_ECONET" , "rlock-AF_ATMSVC" ,
258 "rlock-AF_RDS" , "rlock-AF_SNA" , "rlock-AF_IRDA" ,
259 "rlock-AF_PPPOX" , "rlock-AF_WANPIPE" , "rlock-AF_LLC" ,
260 "rlock-27" , "rlock-28" , "rlock-AF_CAN" ,
261 "rlock-AF_TIPC" , "rlock-AF_BLUETOOTH", "rlock-AF_IUCV" ,
262 "rlock-AF_RXRPC" , "rlock-AF_ISDN" , "rlock-AF_PHONET" ,
263 "rlock-AF_IEEE802154", "rlock-AF_CAIF" , "rlock-AF_ALG" ,
264 "rlock-AF_NFC" , "rlock-AF_VSOCK" , "rlock-AF_KCM" ,
265 "rlock-AF_QIPCRTR", "rlock-AF_SMC" , "rlock-AF_MAX"
266};
267static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
268 "wlock-AF_UNSPEC", "wlock-AF_UNIX" , "wlock-AF_INET" ,
269 "wlock-AF_AX25" , "wlock-AF_IPX" , "wlock-AF_APPLETALK",
270 "wlock-AF_NETROM", "wlock-AF_BRIDGE" , "wlock-AF_ATMPVC" ,
271 "wlock-AF_X25" , "wlock-AF_INET6" , "wlock-AF_ROSE" ,
272 "wlock-AF_DECnet", "wlock-AF_NETBEUI" , "wlock-AF_SECURITY" ,
273 "wlock-AF_KEY" , "wlock-AF_NETLINK" , "wlock-AF_PACKET" ,
274 "wlock-AF_ASH" , "wlock-AF_ECONET" , "wlock-AF_ATMSVC" ,
275 "wlock-AF_RDS" , "wlock-AF_SNA" , "wlock-AF_IRDA" ,
276 "wlock-AF_PPPOX" , "wlock-AF_WANPIPE" , "wlock-AF_LLC" ,
277 "wlock-27" , "wlock-28" , "wlock-AF_CAN" ,
278 "wlock-AF_TIPC" , "wlock-AF_BLUETOOTH", "wlock-AF_IUCV" ,
279 "wlock-AF_RXRPC" , "wlock-AF_ISDN" , "wlock-AF_PHONET" ,
280 "wlock-AF_IEEE802154", "wlock-AF_CAIF" , "wlock-AF_ALG" ,
281 "wlock-AF_NFC" , "wlock-AF_VSOCK" , "wlock-AF_KCM" ,
282 "wlock-AF_QIPCRTR", "wlock-AF_SMC" , "wlock-AF_MAX"
283};
284static const char *const af_family_elock_key_strings[AF_MAX+1] = {
285 "elock-AF_UNSPEC", "elock-AF_UNIX" , "elock-AF_INET" ,
286 "elock-AF_AX25" , "elock-AF_IPX" , "elock-AF_APPLETALK",
287 "elock-AF_NETROM", "elock-AF_BRIDGE" , "elock-AF_ATMPVC" ,
288 "elock-AF_X25" , "elock-AF_INET6" , "elock-AF_ROSE" ,
289 "elock-AF_DECnet", "elock-AF_NETBEUI" , "elock-AF_SECURITY" ,
290 "elock-AF_KEY" , "elock-AF_NETLINK" , "elock-AF_PACKET" ,
291 "elock-AF_ASH" , "elock-AF_ECONET" , "elock-AF_ATMSVC" ,
292 "elock-AF_RDS" , "elock-AF_SNA" , "elock-AF_IRDA" ,
293 "elock-AF_PPPOX" , "elock-AF_WANPIPE" , "elock-AF_LLC" ,
294 "elock-27" , "elock-28" , "elock-AF_CAN" ,
295 "elock-AF_TIPC" , "elock-AF_BLUETOOTH", "elock-AF_IUCV" ,
296 "elock-AF_RXRPC" , "elock-AF_ISDN" , "elock-AF_PHONET" ,
297 "elock-AF_IEEE802154", "elock-AF_CAIF" , "elock-AF_ALG" ,
298 "elock-AF_NFC" , "elock-AF_VSOCK" , "elock-AF_KCM" ,
299 "elock-AF_QIPCRTR", "elock-AF_SMC" , "elock-AF_MAX"
300};
250 301
251/* 302/*
252 * sk_callback_lock locking rules are per-address-family, 303 * sk_callback_lock and sk queues locking rules are per-address-family,
253 * so split the lock classes by using a per-AF key: 304 * so split the lock classes by using a per-AF key:
254 */ 305 */
255static struct lock_class_key af_callback_keys[AF_MAX]; 306static struct lock_class_key af_callback_keys[AF_MAX];
307static struct lock_class_key af_rlock_keys[AF_MAX];
308static struct lock_class_key af_wlock_keys[AF_MAX];
309static struct lock_class_key af_elock_keys[AF_MAX];
256static struct lock_class_key af_kern_callback_keys[AF_MAX]; 310static struct lock_class_key af_kern_callback_keys[AF_MAX];
257 311
258/* Take into consideration the size of the struct sk_buff overhead in the 312/* Take into consideration the size of the struct sk_buff overhead in the
@@ -1478,6 +1532,27 @@ void sk_free(struct sock *sk)
1478} 1532}
1479EXPORT_SYMBOL(sk_free); 1533EXPORT_SYMBOL(sk_free);
1480 1534
1535static void sk_init_common(struct sock *sk)
1536{
1537 skb_queue_head_init(&sk->sk_receive_queue);
1538 skb_queue_head_init(&sk->sk_write_queue);
1539 skb_queue_head_init(&sk->sk_error_queue);
1540
1541 rwlock_init(&sk->sk_callback_lock);
1542 lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
1543 af_rlock_keys + sk->sk_family,
1544 af_family_rlock_key_strings[sk->sk_family]);
1545 lockdep_set_class_and_name(&sk->sk_write_queue.lock,
1546 af_wlock_keys + sk->sk_family,
1547 af_family_wlock_key_strings[sk->sk_family]);
1548 lockdep_set_class_and_name(&sk->sk_error_queue.lock,
1549 af_elock_keys + sk->sk_family,
1550 af_family_elock_key_strings[sk->sk_family]);
1551 lockdep_set_class_and_name(&sk->sk_callback_lock,
1552 af_callback_keys + sk->sk_family,
1553 af_family_clock_key_strings[sk->sk_family]);
1554}
1555
1481/** 1556/**
1482 * sk_clone_lock - clone a socket, and lock its clone 1557 * sk_clone_lock - clone a socket, and lock its clone
1483 * @sk: the socket to clone 1558 * @sk: the socket to clone
@@ -1511,13 +1586,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1511 */ 1586 */
1512 atomic_set(&newsk->sk_wmem_alloc, 1); 1587 atomic_set(&newsk->sk_wmem_alloc, 1);
1513 atomic_set(&newsk->sk_omem_alloc, 0); 1588 atomic_set(&newsk->sk_omem_alloc, 0);
1514 skb_queue_head_init(&newsk->sk_receive_queue); 1589 sk_init_common(newsk);
1515 skb_queue_head_init(&newsk->sk_write_queue);
1516
1517 rwlock_init(&newsk->sk_callback_lock);
1518 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1519 af_callback_keys + newsk->sk_family,
1520 af_family_clock_key_strings[newsk->sk_family]);
1521 1590
1522 newsk->sk_dst_cache = NULL; 1591 newsk->sk_dst_cache = NULL;
1523 newsk->sk_dst_pending_confirm = 0; 1592 newsk->sk_dst_pending_confirm = 0;
@@ -1528,7 +1597,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1528 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; 1597 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1529 1598
1530 sock_reset_flag(newsk, SOCK_DONE); 1599 sock_reset_flag(newsk, SOCK_DONE);
1531 skb_queue_head_init(&newsk->sk_error_queue);
1532 1600
1533 filter = rcu_dereference_protected(newsk->sk_filter, 1); 1601 filter = rcu_dereference_protected(newsk->sk_filter, 1);
1534 if (filter != NULL) 1602 if (filter != NULL)
@@ -2455,10 +2523,7 @@ EXPORT_SYMBOL(sk_stop_timer);
2455 2523
2456void sock_init_data(struct socket *sock, struct sock *sk) 2524void sock_init_data(struct socket *sock, struct sock *sk)
2457{ 2525{
2458 skb_queue_head_init(&sk->sk_receive_queue); 2526 sk_init_common(sk);
2459 skb_queue_head_init(&sk->sk_write_queue);
2460 skb_queue_head_init(&sk->sk_error_queue);
2461
2462 sk->sk_send_head = NULL; 2527 sk->sk_send_head = NULL;
2463 2528
2464 init_timer(&sk->sk_timer); 2529 init_timer(&sk->sk_timer);
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 7de5b40a5d0d..9afa2a5030b2 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -132,6 +132,7 @@ Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
132#include <net/neighbour.h> 132#include <net/neighbour.h>
133#include <net/dst.h> 133#include <net/dst.h>
134#include <net/fib_rules.h> 134#include <net/fib_rules.h>
135#include <net/tcp.h>
135#include <net/dn.h> 136#include <net/dn.h>
136#include <net/dn_nsp.h> 137#include <net/dn_nsp.h>
137#include <net/dn_dev.h> 138#include <net/dn_dev.h>
@@ -1469,18 +1470,18 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
1469 case DSO_NODELAY: 1470 case DSO_NODELAY:
1470 if (optlen != sizeof(int)) 1471 if (optlen != sizeof(int))
1471 return -EINVAL; 1472 return -EINVAL;
1472 if (scp->nonagle == 2) 1473 if (scp->nonagle == TCP_NAGLE_CORK)
1473 return -EINVAL; 1474 return -EINVAL;
1474 scp->nonagle = (u.val == 0) ? 0 : 1; 1475 scp->nonagle = (u.val == 0) ? 0 : TCP_NAGLE_OFF;
1475 /* if (scp->nonagle == 1) { Push pending frames } */ 1476 /* if (scp->nonagle == 1) { Push pending frames } */
1476 break; 1477 break;
1477 1478
1478 case DSO_CORK: 1479 case DSO_CORK:
1479 if (optlen != sizeof(int)) 1480 if (optlen != sizeof(int))
1480 return -EINVAL; 1481 return -EINVAL;
1481 if (scp->nonagle == 1) 1482 if (scp->nonagle == TCP_NAGLE_OFF)
1482 return -EINVAL; 1483 return -EINVAL;
1483 scp->nonagle = (u.val == 0) ? 0 : 2; 1484 scp->nonagle = (u.val == 0) ? 0 : TCP_NAGLE_CORK;
1484 /* if (scp->nonagle == 0) { Push pending frames } */ 1485 /* if (scp->nonagle == 0) { Push pending frames } */
1485 break; 1486 break;
1486 1487
@@ -1608,14 +1609,14 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
1608 case DSO_NODELAY: 1609 case DSO_NODELAY:
1609 if (r_len > sizeof(int)) 1610 if (r_len > sizeof(int))
1610 r_len = sizeof(int); 1611 r_len = sizeof(int);
1611 val = (scp->nonagle == 1); 1612 val = (scp->nonagle == TCP_NAGLE_OFF);
1612 r_data = &val; 1613 r_data = &val;
1613 break; 1614 break;
1614 1615
1615 case DSO_CORK: 1616 case DSO_CORK:
1616 if (r_len > sizeof(int)) 1617 if (r_len > sizeof(int))
1617 r_len = sizeof(int); 1618 r_len = sizeof(int);
1618 val = (scp->nonagle == 2); 1619 val = (scp->nonagle == TCP_NAGLE_CORK);
1619 r_data = &val; 1620 r_data = &val;
1620 break; 1621 break;
1621 1622
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index c34872e1febc..78128acfbf63 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -419,8 +419,8 @@ static int dsa_slave_vlan_filtering(struct net_device *dev,
419 return 0; 419 return 0;
420} 420}
421 421
422static int dsa_fastest_ageing_time(struct dsa_switch *ds, 422static unsigned int dsa_fastest_ageing_time(struct dsa_switch *ds,
423 unsigned int ageing_time) 423 unsigned int ageing_time)
424{ 424{
425 int i; 425 int i;
426 426
@@ -443,9 +443,13 @@ static int dsa_slave_ageing_time(struct net_device *dev,
443 unsigned long ageing_jiffies = clock_t_to_jiffies(attr->u.ageing_time); 443 unsigned long ageing_jiffies = clock_t_to_jiffies(attr->u.ageing_time);
444 unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies); 444 unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
445 445
446 /* bridge skips -EOPNOTSUPP, so skip the prepare phase */ 446 if (switchdev_trans_ph_prepare(trans)) {
447 if (switchdev_trans_ph_prepare(trans)) 447 if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
448 return -ERANGE;
449 if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
450 return -ERANGE;
448 return 0; 451 return 0;
452 }
449 453
450 /* Keep the fastest ageing time in case of multiple bridges */ 454 /* Keep the fastest ageing time in case of multiple bridges */
451 p->dp->ageing_time = ageing_time; 455 p->dp->ageing_time = ageing_time;
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index c6d4238ff94a..f83de23a30e7 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -11,7 +11,7 @@ obj-y := route.o inetpeer.o protocol.o \
11 tcp_rate.o tcp_recovery.o \ 11 tcp_rate.o tcp_recovery.o \
12 tcp_offload.o datagram.o raw.o udp.o udplite.o \ 12 tcp_offload.o datagram.o raw.o udp.o udplite.o \
13 udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \ 13 udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \
14 fib_frontend.o fib_semantics.o fib_trie.o \ 14 fib_frontend.o fib_semantics.o fib_trie.o fib_notifier.o \
15 inet_fragment.o ping.o ip_tunnel_core.o gre_offload.o 15 inet_fragment.o ping.o ip_tunnel_core.o gre_offload.o
16 16
17obj-$(CONFIG_NET_IP_TUNNEL) += ip_tunnel.o 17obj-$(CONFIG_NET_IP_TUNNEL) += ip_tunnel.o
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index cebedd545e5e..927f1d4b8c80 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1192,6 +1192,18 @@ out:
1192 return done; 1192 return done;
1193} 1193}
1194 1194
1195static __be32 in_dev_select_addr(const struct in_device *in_dev,
1196 int scope)
1197{
1198 for_primary_ifa(in_dev) {
1199 if (ifa->ifa_scope != RT_SCOPE_LINK &&
1200 ifa->ifa_scope <= scope)
1201 return ifa->ifa_local;
1202 } endfor_ifa(in_dev);
1203
1204 return 0;
1205}
1206
1195__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope) 1207__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
1196{ 1208{
1197 __be32 addr = 0; 1209 __be32 addr = 0;
@@ -1228,13 +1240,9 @@ no_in_dev:
1228 if (master_idx && 1240 if (master_idx &&
1229 (dev = dev_get_by_index_rcu(net, master_idx)) && 1241 (dev = dev_get_by_index_rcu(net, master_idx)) &&
1230 (in_dev = __in_dev_get_rcu(dev))) { 1242 (in_dev = __in_dev_get_rcu(dev))) {
1231 for_primary_ifa(in_dev) { 1243 addr = in_dev_select_addr(in_dev, scope);
1232 if (ifa->ifa_scope != RT_SCOPE_LINK && 1244 if (addr)
1233 ifa->ifa_scope <= scope) { 1245 goto out_unlock;
1234 addr = ifa->ifa_local;
1235 goto out_unlock;
1236 }
1237 } endfor_ifa(in_dev);
1238 } 1246 }
1239 1247
1240 /* Not loopback addresses on loopback should be preferred 1248 /* Not loopback addresses on loopback should be preferred
@@ -1249,13 +1257,9 @@ no_in_dev:
1249 if (!in_dev) 1257 if (!in_dev)
1250 continue; 1258 continue;
1251 1259
1252 for_primary_ifa(in_dev) { 1260 addr = in_dev_select_addr(in_dev, scope);
1253 if (ifa->ifa_scope != RT_SCOPE_LINK && 1261 if (addr)
1254 ifa->ifa_scope <= scope) { 1262 goto out_unlock;
1255 addr = ifa->ifa_local;
1256 goto out_unlock;
1257 }
1258 } endfor_ifa(in_dev);
1259 } 1263 }
1260out_unlock: 1264out_unlock:
1261 rcu_read_unlock(); 1265 rcu_read_unlock();
diff --git a/net/ipv4/fib_notifier.c b/net/ipv4/fib_notifier.c
new file mode 100644
index 000000000000..e0714d975947
--- /dev/null
+++ b/net/ipv4/fib_notifier.c
@@ -0,0 +1,86 @@
1#include <linux/rtnetlink.h>
2#include <linux/notifier.h>
3#include <linux/rcupdate.h>
4#include <linux/kernel.h>
5#include <net/net_namespace.h>
6#include <net/netns/ipv4.h>
7#include <net/ip_fib.h>
8
9static ATOMIC_NOTIFIER_HEAD(fib_chain);
10
11int call_fib_notifier(struct notifier_block *nb, struct net *net,
12 enum fib_event_type event_type,
13 struct fib_notifier_info *info)
14{
15 info->net = net;
16 return nb->notifier_call(nb, event_type, info);
17}
18
19int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
20 struct fib_notifier_info *info)
21{
22 net->ipv4.fib_seq++;
23 info->net = net;
24 return atomic_notifier_call_chain(&fib_chain, event_type, info);
25}
26
27static unsigned int fib_seq_sum(void)
28{
29 unsigned int fib_seq = 0;
30 struct net *net;
31
32 rtnl_lock();
33 for_each_net(net)
34 fib_seq += net->ipv4.fib_seq;
35 rtnl_unlock();
36
37 return fib_seq;
38}
39
40static bool fib_dump_is_consistent(struct notifier_block *nb,
41 void (*cb)(struct notifier_block *nb),
42 unsigned int fib_seq)
43{
44 atomic_notifier_chain_register(&fib_chain, nb);
45 if (fib_seq == fib_seq_sum())
46 return true;
47 atomic_notifier_chain_unregister(&fib_chain, nb);
48 if (cb)
49 cb(nb);
50 return false;
51}
52
53#define FIB_DUMP_MAX_RETRIES 5
54int register_fib_notifier(struct notifier_block *nb,
55 void (*cb)(struct notifier_block *nb))
56{
57 int retries = 0;
58
59 do {
60 unsigned int fib_seq = fib_seq_sum();
61 struct net *net;
62
63 /* Mutex semantics guarantee that every change done to
64 * FIB tries before we read the change sequence counter
65 * is now visible to us.
66 */
67 rcu_read_lock();
68 for_each_net_rcu(net) {
69 fib_rules_notify(net, nb);
70 fib_notify(net, nb);
71 }
72 rcu_read_unlock();
73
74 if (fib_dump_is_consistent(nb, cb, fib_seq))
75 return 0;
76 } while (++retries < FIB_DUMP_MAX_RETRIES);
77
78 return -EBUSY;
79}
80EXPORT_SYMBOL(register_fib_notifier);
81
82int unregister_fib_notifier(struct notifier_block *nb)
83{
84 return atomic_notifier_chain_unregister(&fib_chain, nb);
85}
86EXPORT_SYMBOL(unregister_fib_notifier);
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 2e50062f642d..778ecf977eb2 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -47,6 +47,27 @@ struct fib4_rule {
47#endif 47#endif
48}; 48};
49 49
50static bool fib4_rule_matchall(const struct fib_rule *rule)
51{
52 struct fib4_rule *r = container_of(rule, struct fib4_rule, common);
53
54 if (r->dst_len || r->src_len || r->tos)
55 return false;
56 return fib_rule_matchall(rule);
57}
58
59bool fib4_rule_default(const struct fib_rule *rule)
60{
61 if (!fib4_rule_matchall(rule) || rule->action != FR_ACT_TO_TBL ||
62 rule->l3mdev)
63 return false;
64 if (rule->table != RT_TABLE_LOCAL && rule->table != RT_TABLE_MAIN &&
65 rule->table != RT_TABLE_DEFAULT)
66 return false;
67 return true;
68}
69EXPORT_SYMBOL_GPL(fib4_rule_default);
70
50int __fib_lookup(struct net *net, struct flowi4 *flp, 71int __fib_lookup(struct net *net, struct flowi4 *flp,
51 struct fib_result *res, unsigned int flags) 72 struct fib_result *res, unsigned int flags)
52{ 73{
@@ -164,12 +185,36 @@ static struct fib_table *fib_empty_table(struct net *net)
164 return NULL; 185 return NULL;
165} 186}
166 187
188static int call_fib_rule_notifier(struct notifier_block *nb, struct net *net,
189 enum fib_event_type event_type,
190 struct fib_rule *rule)
191{
192 struct fib_rule_notifier_info info = {
193 .rule = rule,
194 };
195
196 return call_fib_notifier(nb, net, event_type, &info.info);
197}
198
167static int call_fib_rule_notifiers(struct net *net, 199static int call_fib_rule_notifiers(struct net *net,
168 enum fib_event_type event_type) 200 enum fib_event_type event_type,
201 struct fib_rule *rule)
202{
203 struct fib_rule_notifier_info info = {
204 .rule = rule,
205 };
206
207 return call_fib_notifiers(net, event_type, &info.info);
208}
209
210/* Called with rcu_read_lock() */
211void fib_rules_notify(struct net *net, struct notifier_block *nb)
169{ 212{
170 struct fib_notifier_info info; 213 struct fib_rules_ops *ops = net->ipv4.rules_ops;
214 struct fib_rule *rule;
171 215
172 return call_fib_notifiers(net, event_type, &info); 216 list_for_each_entry_rcu(rule, &ops->rules_list, list)
217 call_fib_rule_notifier(nb, net, FIB_EVENT_RULE_ADD, rule);
173} 218}
174 219
175static const struct nla_policy fib4_rule_policy[FRA_MAX+1] = { 220static const struct nla_policy fib4_rule_policy[FRA_MAX+1] = {
@@ -228,7 +273,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
228 rule4->tos = frh->tos; 273 rule4->tos = frh->tos;
229 274
230 net->ipv4.fib_has_custom_rules = true; 275 net->ipv4.fib_has_custom_rules = true;
231 call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD); 276 call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD, rule);
232 277
233 err = 0; 278 err = 0;
234errout: 279errout:
@@ -250,7 +295,7 @@ static int fib4_rule_delete(struct fib_rule *rule)
250 net->ipv4.fib_num_tclassid_users--; 295 net->ipv4.fib_num_tclassid_users--;
251#endif 296#endif
252 net->ipv4.fib_has_custom_rules = true; 297 net->ipv4.fib_has_custom_rules = true;
253 call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL); 298 call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule);
254errout: 299errout:
255 return err; 300 return err;
256} 301}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 317026a39cfa..da449ddb8cc1 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -57,7 +57,6 @@ static unsigned int fib_info_cnt;
57static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE]; 57static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];
58 58
59#ifdef CONFIG_IP_ROUTE_MULTIPATH 59#ifdef CONFIG_IP_ROUTE_MULTIPATH
60u32 fib_multipath_secret __read_mostly;
61 60
62#define for_nexthops(fi) { \ 61#define for_nexthops(fi) { \
63 int nhsel; const struct fib_nh *nh; \ 62 int nhsel; const struct fib_nh *nh; \
@@ -576,9 +575,6 @@ static void fib_rebalance(struct fib_info *fi)
576 575
577 atomic_set(&nexthop_nh->nh_upper_bound, upper_bound); 576 atomic_set(&nexthop_nh->nh_upper_bound, upper_bound);
578 } endfor_nexthops(fi); 577 } endfor_nexthops(fi);
579
580 net_get_random_once(&fib_multipath_secret,
581 sizeof(fib_multipath_secret));
582} 578}
583 579
584static inline void fib_add_weight(struct fib_info *fi, 580static inline void fib_add_weight(struct fib_info *fi,
@@ -1641,7 +1637,7 @@ void fib_select_multipath(struct fib_result *res, int hash)
1641#endif 1637#endif
1642 1638
1643void fib_select_path(struct net *net, struct fib_result *res, 1639void fib_select_path(struct net *net, struct fib_result *res,
1644 struct flowi4 *fl4, int mp_hash) 1640 struct flowi4 *fl4, const struct sk_buff *skb)
1645{ 1641{
1646 bool oif_check; 1642 bool oif_check;
1647 1643
@@ -1650,10 +1646,9 @@ void fib_select_path(struct net *net, struct fib_result *res,
1650 1646
1651#ifdef CONFIG_IP_ROUTE_MULTIPATH 1647#ifdef CONFIG_IP_ROUTE_MULTIPATH
1652 if (res->fi->fib_nhs > 1 && oif_check) { 1648 if (res->fi->fib_nhs > 1 && oif_check) {
1653 if (mp_hash < 0) 1649 int h = fib_multipath_hash(res->fi, fl4, skb);
1654 mp_hash = get_hash_from_flowi4(fl4) >> 1;
1655 1650
1656 fib_select_multipath(res, mp_hash); 1651 fib_select_multipath(res, h);
1657 } 1652 }
1658 else 1653 else
1659#endif 1654#endif
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 2f0d8233950f..1201409ba1dc 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -84,43 +84,6 @@
84#include <trace/events/fib.h> 84#include <trace/events/fib.h>
85#include "fib_lookup.h" 85#include "fib_lookup.h"
86 86
87static unsigned int fib_seq_sum(void)
88{
89 unsigned int fib_seq = 0;
90 struct net *net;
91
92 rtnl_lock();
93 for_each_net(net)
94 fib_seq += net->ipv4.fib_seq;
95 rtnl_unlock();
96
97 return fib_seq;
98}
99
100static ATOMIC_NOTIFIER_HEAD(fib_chain);
101
102static int call_fib_notifier(struct notifier_block *nb, struct net *net,
103 enum fib_event_type event_type,
104 struct fib_notifier_info *info)
105{
106 info->net = net;
107 return nb->notifier_call(nb, event_type, info);
108}
109
110static void fib_rules_notify(struct net *net, struct notifier_block *nb,
111 enum fib_event_type event_type)
112{
113#ifdef CONFIG_IP_MULTIPLE_TABLES
114 struct fib_notifier_info info;
115
116 if (net->ipv4.fib_has_custom_rules)
117 call_fib_notifier(nb, net, event_type, &info);
118#endif
119}
120
121static void fib_notify(struct net *net, struct notifier_block *nb,
122 enum fib_event_type event_type);
123
124static int call_fib_entry_notifier(struct notifier_block *nb, struct net *net, 87static int call_fib_entry_notifier(struct notifier_block *nb, struct net *net,
125 enum fib_event_type event_type, u32 dst, 88 enum fib_event_type event_type, u32 dst,
126 int dst_len, struct fib_info *fi, 89 int dst_len, struct fib_info *fi,
@@ -137,62 +100,6 @@ static int call_fib_entry_notifier(struct notifier_block *nb, struct net *net,
137 return call_fib_notifier(nb, net, event_type, &info.info); 100 return call_fib_notifier(nb, net, event_type, &info.info);
138} 101}
139 102
140static bool fib_dump_is_consistent(struct notifier_block *nb,
141 void (*cb)(struct notifier_block *nb),
142 unsigned int fib_seq)
143{
144 atomic_notifier_chain_register(&fib_chain, nb);
145 if (fib_seq == fib_seq_sum())
146 return true;
147 atomic_notifier_chain_unregister(&fib_chain, nb);
148 if (cb)
149 cb(nb);
150 return false;
151}
152
153#define FIB_DUMP_MAX_RETRIES 5
154int register_fib_notifier(struct notifier_block *nb,
155 void (*cb)(struct notifier_block *nb))
156{
157 int retries = 0;
158
159 do {
160 unsigned int fib_seq = fib_seq_sum();
161 struct net *net;
162
163 /* Mutex semantics guarantee that every change done to
164 * FIB tries before we read the change sequence counter
165 * is now visible to us.
166 */
167 rcu_read_lock();
168 for_each_net_rcu(net) {
169 fib_rules_notify(net, nb, FIB_EVENT_RULE_ADD);
170 fib_notify(net, nb, FIB_EVENT_ENTRY_ADD);
171 }
172 rcu_read_unlock();
173
174 if (fib_dump_is_consistent(nb, cb, fib_seq))
175 return 0;
176 } while (++retries < FIB_DUMP_MAX_RETRIES);
177
178 return -EBUSY;
179}
180EXPORT_SYMBOL(register_fib_notifier);
181
182int unregister_fib_notifier(struct notifier_block *nb)
183{
184 return atomic_notifier_chain_unregister(&fib_chain, nb);
185}
186EXPORT_SYMBOL(unregister_fib_notifier);
187
188int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
189 struct fib_notifier_info *info)
190{
191 net->ipv4.fib_seq++;
192 info->net = net;
193 return atomic_notifier_call_chain(&fib_chain, event_type, info);
194}
195
196static int call_fib_entry_notifiers(struct net *net, 103static int call_fib_entry_notifiers(struct net *net,
197 enum fib_event_type event_type, u32 dst, 104 enum fib_event_type event_type, u32 dst,
198 int dst_len, struct fib_info *fi, 105 int dst_len, struct fib_info *fi,
@@ -1995,8 +1902,7 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
1995} 1902}
1996 1903
1997static void fib_leaf_notify(struct net *net, struct key_vector *l, 1904static void fib_leaf_notify(struct net *net, struct key_vector *l,
1998 struct fib_table *tb, struct notifier_block *nb, 1905 struct fib_table *tb, struct notifier_block *nb)
1999 enum fib_event_type event_type)
2000{ 1906{
2001 struct fib_alias *fa; 1907 struct fib_alias *fa;
2002 1908
@@ -2012,22 +1918,21 @@ static void fib_leaf_notify(struct net *net, struct key_vector *l,
2012 if (tb->tb_id != fa->tb_id) 1918 if (tb->tb_id != fa->tb_id)
2013 continue; 1919 continue;
2014 1920
2015 call_fib_entry_notifier(nb, net, event_type, l->key, 1921 call_fib_entry_notifier(nb, net, FIB_EVENT_ENTRY_ADD, l->key,
2016 KEYLENGTH - fa->fa_slen, fi, fa->fa_tos, 1922 KEYLENGTH - fa->fa_slen, fi, fa->fa_tos,
2017 fa->fa_type, fa->tb_id); 1923 fa->fa_type, fa->tb_id);
2018 } 1924 }
2019} 1925}
2020 1926
2021static void fib_table_notify(struct net *net, struct fib_table *tb, 1927static void fib_table_notify(struct net *net, struct fib_table *tb,
2022 struct notifier_block *nb, 1928 struct notifier_block *nb)
2023 enum fib_event_type event_type)
2024{ 1929{
2025 struct trie *t = (struct trie *)tb->tb_data; 1930 struct trie *t = (struct trie *)tb->tb_data;
2026 struct key_vector *l, *tp = t->kv; 1931 struct key_vector *l, *tp = t->kv;
2027 t_key key = 0; 1932 t_key key = 0;
2028 1933
2029 while ((l = leaf_walk_rcu(&tp, key)) != NULL) { 1934 while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
2030 fib_leaf_notify(net, l, tb, nb, event_type); 1935 fib_leaf_notify(net, l, tb, nb);
2031 1936
2032 key = l->key + 1; 1937 key = l->key + 1;
2033 /* stop in case of wrap around */ 1938 /* stop in case of wrap around */
@@ -2036,8 +1941,7 @@ static void fib_table_notify(struct net *net, struct fib_table *tb,
2036 } 1941 }
2037} 1942}
2038 1943
2039static void fib_notify(struct net *net, struct notifier_block *nb, 1944void fib_notify(struct net *net, struct notifier_block *nb)
2040 enum fib_event_type event_type)
2041{ 1945{
2042 unsigned int h; 1946 unsigned int h;
2043 1947
@@ -2046,7 +1950,7 @@ static void fib_notify(struct net *net, struct notifier_block *nb,
2046 struct fib_table *tb; 1950 struct fib_table *tb;
2047 1951
2048 hlist_for_each_entry_rcu(tb, head, tb_hlist) 1952 hlist_for_each_entry_rcu(tb, head, tb_hlist)
2049 fib_table_notify(net, tb, nb, event_type); 1953 fib_table_notify(net, tb, nb);
2050 } 1954 }
2051} 1955}
2052 1956
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index fc310db2708b..43318b5f5647 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -464,22 +464,6 @@ out_bh_enable:
464 local_bh_enable(); 464 local_bh_enable();
465} 465}
466 466
467#ifdef CONFIG_IP_ROUTE_MULTIPATH
468
469/* Source and destination is swapped. See ip_multipath_icmp_hash */
470static int icmp_multipath_hash_skb(const struct sk_buff *skb)
471{
472 const struct iphdr *iph = ip_hdr(skb);
473
474 return fib_multipath_hash(iph->daddr, iph->saddr);
475}
476
477#else
478
479#define icmp_multipath_hash_skb(skb) (-1)
480
481#endif
482
483static struct rtable *icmp_route_lookup(struct net *net, 467static struct rtable *icmp_route_lookup(struct net *net,
484 struct flowi4 *fl4, 468 struct flowi4 *fl4,
485 struct sk_buff *skb_in, 469 struct sk_buff *skb_in,
@@ -505,8 +489,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
505 fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev); 489 fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev);
506 490
507 security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); 491 security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
508 rt = __ip_route_output_key_hash(net, fl4, 492 rt = __ip_route_output_key_hash(net, fl4, skb_in);
509 icmp_multipath_hash_skb(skb_in));
510 if (IS_ERR(rt)) 493 if (IS_ERR(rt))
511 return rt; 494 return rt;
512 495
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 6241a81fd7f5..f17dab1dee6e 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -562,8 +562,6 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
562 XT_ERROR_TARGET) == 0) 562 XT_ERROR_TARGET) == 0)
563 ++newinfo->stacksize; 563 ++newinfo->stacksize;
564 } 564 }
565 if (ret != 0)
566 goto out_free;
567 565
568 ret = -EINVAL; 566 ret = -EINVAL;
569 if (i != repl->num_entries) 567 if (i != repl->num_entries)
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 52f26459efc3..fcbdc0c49b0e 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -22,6 +22,7 @@
22#include <linux/icmp.h> 22#include <linux/icmp.h>
23#include <linux/if_arp.h> 23#include <linux/if_arp.h>
24#include <linux/seq_file.h> 24#include <linux/seq_file.h>
25#include <linux/refcount.h>
25#include <linux/netfilter_arp.h> 26#include <linux/netfilter_arp.h>
26#include <linux/netfilter/x_tables.h> 27#include <linux/netfilter/x_tables.h>
27#include <linux/netfilter_ipv4/ip_tables.h> 28#include <linux/netfilter_ipv4/ip_tables.h>
@@ -40,8 +41,8 @@ MODULE_DESCRIPTION("Xtables: CLUSTERIP target");
40 41
41struct clusterip_config { 42struct clusterip_config {
42 struct list_head list; /* list of all configs */ 43 struct list_head list; /* list of all configs */
43 atomic_t refcount; /* reference count */ 44 refcount_t refcount; /* reference count */
44 atomic_t entries; /* number of entries/rules 45 refcount_t entries; /* number of entries/rules
45 * referencing us */ 46 * referencing us */
46 47
47 __be32 clusterip; /* the IP address */ 48 __be32 clusterip; /* the IP address */
@@ -77,7 +78,7 @@ struct clusterip_net {
77static inline void 78static inline void
78clusterip_config_get(struct clusterip_config *c) 79clusterip_config_get(struct clusterip_config *c)
79{ 80{
80 atomic_inc(&c->refcount); 81 refcount_inc(&c->refcount);
81} 82}
82 83
83 84
@@ -89,7 +90,7 @@ static void clusterip_config_rcu_free(struct rcu_head *head)
89static inline void 90static inline void
90clusterip_config_put(struct clusterip_config *c) 91clusterip_config_put(struct clusterip_config *c)
91{ 92{
92 if (atomic_dec_and_test(&c->refcount)) 93 if (refcount_dec_and_test(&c->refcount))
93 call_rcu_bh(&c->rcu, clusterip_config_rcu_free); 94 call_rcu_bh(&c->rcu, clusterip_config_rcu_free);
94} 95}
95 96
@@ -103,7 +104,7 @@ clusterip_config_entry_put(struct clusterip_config *c)
103 struct clusterip_net *cn = net_generic(net, clusterip_net_id); 104 struct clusterip_net *cn = net_generic(net, clusterip_net_id);
104 105
105 local_bh_disable(); 106 local_bh_disable();
106 if (atomic_dec_and_lock(&c->entries, &cn->lock)) { 107 if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
107 list_del_rcu(&c->list); 108 list_del_rcu(&c->list);
108 spin_unlock(&cn->lock); 109 spin_unlock(&cn->lock);
109 local_bh_enable(); 110 local_bh_enable();
@@ -149,10 +150,10 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
149 c = NULL; 150 c = NULL;
150 else 151 else
151#endif 152#endif
152 if (unlikely(!atomic_inc_not_zero(&c->refcount))) 153 if (unlikely(!refcount_inc_not_zero(&c->refcount)))
153 c = NULL; 154 c = NULL;
154 else if (entry) 155 else if (entry)
155 atomic_inc(&c->entries); 156 refcount_inc(&c->entries);
156 } 157 }
157 rcu_read_unlock_bh(); 158 rcu_read_unlock_bh();
158 159
@@ -188,8 +189,8 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
188 clusterip_config_init_nodelist(c, i); 189 clusterip_config_init_nodelist(c, i);
189 c->hash_mode = i->hash_mode; 190 c->hash_mode = i->hash_mode;
190 c->hash_initval = i->hash_initval; 191 c->hash_initval = i->hash_initval;
191 atomic_set(&c->refcount, 1); 192 refcount_set(&c->refcount, 1);
192 atomic_set(&c->entries, 1); 193 refcount_set(&c->entries, 1);
193 194
194 spin_lock_bh(&cn->lock); 195 spin_lock_bh(&cn->lock);
195 if (__clusterip_config_find(net, ip)) { 196 if (__clusterip_config_find(net, ip)) {
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index c9b52c361da2..ef49989c93b1 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -998,18 +998,6 @@ err_id_free:
998 * 998 *
999 *****************************************************************************/ 999 *****************************************************************************/
1000 1000
1001static void hex_dump(const unsigned char *buf, size_t len)
1002{
1003 size_t i;
1004
1005 for (i = 0; i < len; i++) {
1006 if (i && !(i % 16))
1007 printk("\n");
1008 printk("%02x ", *(buf + i));
1009 }
1010 printk("\n");
1011}
1012
1013/* 1001/*
1014 * Parse and mangle SNMP message according to mapping. 1002 * Parse and mangle SNMP message according to mapping.
1015 * (And this is the fucking 'basic' method). 1003 * (And this is the fucking 'basic' method).
@@ -1026,7 +1014,8 @@ static int snmp_parse_mangle(unsigned char *msg,
1026 struct snmp_object *obj; 1014 struct snmp_object *obj;
1027 1015
1028 if (debug > 1) 1016 if (debug > 1)
1029 hex_dump(msg, len); 1017 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 1,
1018 msg, len, 0);
1030 1019
1031 asn1_open(&ctx, msg, len); 1020 asn1_open(&ctx, msg, len);
1032 1021
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 146d86105183..7cd8d0d918f8 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -104,7 +104,6 @@ EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put);
104void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook) 104void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
105{ 105{
106 struct sk_buff *nskb; 106 struct sk_buff *nskb;
107 const struct iphdr *oiph;
108 struct iphdr *niph; 107 struct iphdr *niph;
109 const struct tcphdr *oth; 108 const struct tcphdr *oth;
110 struct tcphdr _oth; 109 struct tcphdr _oth;
@@ -116,8 +115,6 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
116 if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) 115 if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
117 return; 116 return;
118 117
119 oiph = ip_hdr(oldskb);
120
121 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + 118 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
122 LL_MAX_HEADER, GFP_ATOMIC); 119 LL_MAX_HEADER, GFP_ATOMIC);
123 if (!nskb) 120 if (!nskb)
diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c
index 2981291910dd..f4e4462cb5bb 100644
--- a/net/ipv4/netfilter/nft_fib_ipv4.c
+++ b/net/ipv4/netfilter/nft_fib_ipv4.c
@@ -90,7 +90,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
90 90
91 if (nft_hook(pkt) == NF_INET_PRE_ROUTING && 91 if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
92 nft_fib_is_loopback(pkt->skb, nft_in(pkt))) { 92 nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
93 nft_fib_store_result(dest, priv->result, pkt, 93 nft_fib_store_result(dest, priv, pkt,
94 nft_in(pkt)->ifindex); 94 nft_in(pkt)->ifindex);
95 return; 95 return;
96 } 96 }
@@ -99,7 +99,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
99 if (ipv4_is_zeronet(iph->saddr)) { 99 if (ipv4_is_zeronet(iph->saddr)) {
100 if (ipv4_is_lbcast(iph->daddr) || 100 if (ipv4_is_lbcast(iph->daddr) ||
101 ipv4_is_local_multicast(iph->daddr)) { 101 ipv4_is_local_multicast(iph->daddr)) {
102 nft_fib_store_result(dest, priv->result, pkt, 102 nft_fib_store_result(dest, priv, pkt,
103 get_ifindex(pkt->skb->dev)); 103 get_ifindex(pkt->skb->dev));
104 return; 104 return;
105 } 105 }
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 69cf49e8356d..4ccbf464d1ac 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -199,7 +199,6 @@ static const struct snmp_mib snmp4_net_list[] = {
199 SNMP_MIB_ITEM("TW", LINUX_MIB_TIMEWAITED), 199 SNMP_MIB_ITEM("TW", LINUX_MIB_TIMEWAITED),
200 SNMP_MIB_ITEM("TWRecycled", LINUX_MIB_TIMEWAITRECYCLED), 200 SNMP_MIB_ITEM("TWRecycled", LINUX_MIB_TIMEWAITRECYCLED),
201 SNMP_MIB_ITEM("TWKilled", LINUX_MIB_TIMEWAITKILLED), 201 SNMP_MIB_ITEM("TWKilled", LINUX_MIB_TIMEWAITKILLED),
202 SNMP_MIB_ITEM("PAWSPassive", LINUX_MIB_PAWSPASSIVEREJECTED),
203 SNMP_MIB_ITEM("PAWSActive", LINUX_MIB_PAWSACTIVEREJECTED), 202 SNMP_MIB_ITEM("PAWSActive", LINUX_MIB_PAWSACTIVEREJECTED),
204 SNMP_MIB_ITEM("PAWSEstab", LINUX_MIB_PAWSESTABREJECTED), 203 SNMP_MIB_ITEM("PAWSEstab", LINUX_MIB_PAWSESTABREJECTED),
205 SNMP_MIB_ITEM("DelayedACKs", LINUX_MIB_DELAYEDACKS), 204 SNMP_MIB_ITEM("DelayedACKs", LINUX_MIB_DELAYEDACKS),
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 8471dd116771..5dda1ef81c7e 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1734,45 +1734,97 @@ out:
1734} 1734}
1735 1735
1736#ifdef CONFIG_IP_ROUTE_MULTIPATH 1736#ifdef CONFIG_IP_ROUTE_MULTIPATH
1737
1738/* To make ICMP packets follow the right flow, the multipath hash is 1737/* To make ICMP packets follow the right flow, the multipath hash is
1739 * calculated from the inner IP addresses in reverse order. 1738 * calculated from the inner IP addresses.
1740 */ 1739 */
1741static int ip_multipath_icmp_hash(struct sk_buff *skb) 1740static void ip_multipath_l3_keys(const struct sk_buff *skb,
1741 struct flow_keys *hash_keys)
1742{ 1742{
1743 const struct iphdr *outer_iph = ip_hdr(skb); 1743 const struct iphdr *outer_iph = ip_hdr(skb);
1744 struct icmphdr _icmph; 1744 const struct iphdr *inner_iph;
1745 const struct icmphdr *icmph; 1745 const struct icmphdr *icmph;
1746 struct iphdr _inner_iph; 1746 struct iphdr _inner_iph;
1747 const struct iphdr *inner_iph; 1747 struct icmphdr _icmph;
1748
1749 hash_keys->addrs.v4addrs.src = outer_iph->saddr;
1750 hash_keys->addrs.v4addrs.dst = outer_iph->daddr;
1751 if (likely(outer_iph->protocol != IPPROTO_ICMP))
1752 return;
1748 1753
1749 if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0)) 1754 if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
1750 goto standard_hash; 1755 return;
1751 1756
1752 icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph), 1757 icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
1753 &_icmph); 1758 &_icmph);
1754 if (!icmph) 1759 if (!icmph)
1755 goto standard_hash; 1760 return;
1756 1761
1757 if (icmph->type != ICMP_DEST_UNREACH && 1762 if (icmph->type != ICMP_DEST_UNREACH &&
1758 icmph->type != ICMP_REDIRECT && 1763 icmph->type != ICMP_REDIRECT &&
1759 icmph->type != ICMP_TIME_EXCEEDED && 1764 icmph->type != ICMP_TIME_EXCEEDED &&
1760 icmph->type != ICMP_PARAMETERPROB) { 1765 icmph->type != ICMP_PARAMETERPROB)
1761 goto standard_hash; 1766 return;
1762 }
1763 1767
1764 inner_iph = skb_header_pointer(skb, 1768 inner_iph = skb_header_pointer(skb,
1765 outer_iph->ihl * 4 + sizeof(_icmph), 1769 outer_iph->ihl * 4 + sizeof(_icmph),
1766 sizeof(_inner_iph), &_inner_iph); 1770 sizeof(_inner_iph), &_inner_iph);
1767 if (!inner_iph) 1771 if (!inner_iph)
1768 goto standard_hash; 1772 return;
1773 hash_keys->addrs.v4addrs.src = inner_iph->saddr;
1774 hash_keys->addrs.v4addrs.dst = inner_iph->daddr;
1775}
1769 1776
1770 return fib_multipath_hash(inner_iph->daddr, inner_iph->saddr); 1777/* if skb is set it will be used and fl4 can be NULL */
1778int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
1779 const struct sk_buff *skb)
1780{
1781 struct net *net = fi->fib_net;
1782 struct flow_keys hash_keys;
1783 u32 mhash;
1771 1784
1772standard_hash: 1785 switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
1773 return fib_multipath_hash(outer_iph->saddr, outer_iph->daddr); 1786 case 0:
1774} 1787 memset(&hash_keys, 0, sizeof(hash_keys));
1788 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1789 if (skb) {
1790 ip_multipath_l3_keys(skb, &hash_keys);
1791 } else {
1792 hash_keys.addrs.v4addrs.src = fl4->saddr;
1793 hash_keys.addrs.v4addrs.dst = fl4->daddr;
1794 }
1795 break;
1796 case 1:
1797 /* skb is currently provided only when forwarding */
1798 if (skb) {
1799 unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
1800 struct flow_keys keys;
1801
1802 /* short-circuit if we already have L4 hash present */
1803 if (skb->l4_hash)
1804 return skb_get_hash_raw(skb) >> 1;
1805 memset(&hash_keys, 0, sizeof(hash_keys));
1806 skb_flow_dissect_flow_keys(skb, &keys, flag);
1807 hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
1808 hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
1809 hash_keys.ports.src = keys.ports.src;
1810 hash_keys.ports.dst = keys.ports.dst;
1811 hash_keys.basic.ip_proto = keys.basic.ip_proto;
1812 } else {
1813 memset(&hash_keys, 0, sizeof(hash_keys));
1814 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1815 hash_keys.addrs.v4addrs.src = fl4->saddr;
1816 hash_keys.addrs.v4addrs.dst = fl4->daddr;
1817 hash_keys.ports.src = fl4->fl4_sport;
1818 hash_keys.ports.dst = fl4->fl4_dport;
1819 hash_keys.basic.ip_proto = fl4->flowi4_proto;
1820 }
1821 break;
1822 }
1823 mhash = flow_hash_from_keys(&hash_keys);
1775 1824
1825 return mhash >> 1;
1826}
1827EXPORT_SYMBOL_GPL(fib_multipath_hash);
1776#endif /* CONFIG_IP_ROUTE_MULTIPATH */ 1828#endif /* CONFIG_IP_ROUTE_MULTIPATH */
1777 1829
1778static int ip_mkroute_input(struct sk_buff *skb, 1830static int ip_mkroute_input(struct sk_buff *skb,
@@ -1782,12 +1834,8 @@ static int ip_mkroute_input(struct sk_buff *skb,
1782{ 1834{
1783#ifdef CONFIG_IP_ROUTE_MULTIPATH 1835#ifdef CONFIG_IP_ROUTE_MULTIPATH
1784 if (res->fi && res->fi->fib_nhs > 1) { 1836 if (res->fi && res->fi->fib_nhs > 1) {
1785 int h; 1837 int h = fib_multipath_hash(res->fi, NULL, skb);
1786 1838
1787 if (unlikely(ip_hdr(skb)->protocol == IPPROTO_ICMP))
1788 h = ip_multipath_icmp_hash(skb);
1789 else
1790 h = fib_multipath_hash(saddr, daddr);
1791 fib_select_multipath(res, h); 1839 fib_select_multipath(res, h);
1792 } 1840 }
1793#endif 1841#endif
@@ -2203,7 +2251,7 @@ add:
2203 */ 2251 */
2204 2252
2205struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4, 2253struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
2206 int mp_hash) 2254 const struct sk_buff *skb)
2207{ 2255{
2208 struct net_device *dev_out = NULL; 2256 struct net_device *dev_out = NULL;
2209 __u8 tos = RT_FL_TOS(fl4); 2257 __u8 tos = RT_FL_TOS(fl4);
@@ -2365,7 +2413,7 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
2365 goto make_route; 2413 goto make_route;
2366 } 2414 }
2367 2415
2368 fib_select_path(net, &res, fl4, mp_hash); 2416 fib_select_path(net, &res, fl4, skb);
2369 2417
2370 dev_out = FIB_RES_DEV(res); 2418 dev_out = FIB_RES_DEV(res);
2371 fl4->flowi4_oif = dev_out->ifindex; 2419 fl4->flowi4_oif = dev_out->ifindex;
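[Editor's note] The reworked fib_multipath_hash() above picks its hash inputs from net->ipv4.sysctl_fib_multipath_hash_policy: policy 0 hashes only the layer-3 addresses (falling back to the inner IP header of ICMP errors via ip_multipath_l3_keys() so errors follow the flow they refer to), while policy 1 also feeds the layer-4 ports and protocol into the hash. The userspace sketch below only illustrates that key-selection step; the flow_key struct, the fnv1a() stand-in for flow_hash_from_keys(), and the sample addresses are invented for the example and are not kernel code.

/* Illustrative only: mimics the L3 vs. L3+L4 key selection done by
 * fib_multipath_hash(); FNV-1a is an arbitrary stand-in for the kernel's
 * flow_hash_from_keys().
 */
#include <stdint.h>
#include <stdio.h>

struct flow_key {
	uint32_t saddr, daddr;	/* IPv4 addresses, host order for the demo */
	uint16_t sport, dport;
	uint8_t  proto;
};

static uint32_t fnv1a(const void *data, size_t len, uint32_t h)
{
	const uint8_t *p = data;

	while (len--) {
		h ^= *p++;
		h *= 16777619u;
	}
	return h;
}

/* policy 0: layer-3 only; policy 1: layer-3 + layer-4 */
static uint32_t multipath_hash(int policy, const struct flow_key *k)
{
	uint32_t h = 2166136261u;

	h = fnv1a(&k->saddr, sizeof(k->saddr), h);
	h = fnv1a(&k->daddr, sizeof(k->daddr), h);
	if (policy == 1) {
		h = fnv1a(&k->sport, sizeof(k->sport), h);
		h = fnv1a(&k->dport, sizeof(k->dport), h);
		h = fnv1a(&k->proto, sizeof(k->proto), h);
	}
	return h >> 1;	/* the kernel likewise discards one bit of the hash */
}

int main(void)
{
	struct flow_key k = { 0x0a000001, 0x0a000002, 12345, 443, 6 };

	printf("L3 hash:    %08x\n", multipath_hash(0, &k));
	printf("L3+L4 hash: %08x\n", multipath_hash(1, &k));
	return 0;
}
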
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index d6880a6149ee..711c3e2e17b1 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -981,13 +981,6 @@ static struct ctl_table ipv4_net_table[] = {
981 .proc_handler = proc_dointvec 981 .proc_handler = proc_dointvec
982 }, 982 },
983 { 983 {
984 .procname = "tcp_tw_recycle",
985 .data = &init_net.ipv4.tcp_death_row.sysctl_tw_recycle,
986 .maxlen = sizeof(int),
987 .mode = 0644,
988 .proc_handler = proc_dointvec
989 },
990 {
991 .procname = "tcp_max_syn_backlog", 984 .procname = "tcp_max_syn_backlog",
992 .data = &init_net.ipv4.sysctl_max_syn_backlog, 985 .data = &init_net.ipv4.sysctl_max_syn_backlog,
993 .maxlen = sizeof(int), 986 .maxlen = sizeof(int),
@@ -1004,6 +997,15 @@ static struct ctl_table ipv4_net_table[] = {
1004 .extra1 = &zero, 997 .extra1 = &zero,
1005 .extra2 = &one, 998 .extra2 = &one,
1006 }, 999 },
1000 {
1001 .procname = "fib_multipath_hash_policy",
1002 .data = &init_net.ipv4.sysctl_fib_multipath_hash_policy,
1003 .maxlen = sizeof(int),
1004 .mode = 0644,
1005 .proc_handler = proc_dointvec_minmax,
1006 .extra1 = &zero,
1007 .extra2 = &one,
1008 },
1007#endif 1009#endif
1008 { 1010 {
1009 .procname = "ip_unprivileged_port_start", 1011 .procname = "ip_unprivileged_port_start",
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 39c393cc0fd3..bb09c7095988 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6324,36 +6324,14 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
6324 goto drop_and_free; 6324 goto drop_and_free;
6325 6325
6326 if (isn && tmp_opt.tstamp_ok) 6326 if (isn && tmp_opt.tstamp_ok)
6327 af_ops->init_seq(skb, &tcp_rsk(req)->ts_off); 6327 af_ops->init_seq_tsoff(skb, &tcp_rsk(req)->ts_off);
6328 6328
6329 if (!want_cookie && !isn) { 6329 if (!want_cookie && !isn) {
6330 /* VJ's idea. We save last timestamp seen
6331 * from the destination in peer table, when entering
6332 * state TIME-WAIT, and check against it before
6333 * accepting new connection request.
6334 *
6335 * If "isn" is not zero, this request hit alive
6336 * timewait bucket, so that all the necessary checks
6337 * are made in the function processing timewait state.
6338 */
6339 if (net->ipv4.tcp_death_row.sysctl_tw_recycle) {
6340 bool strict;
6341
6342 dst = af_ops->route_req(sk, &fl, req, &strict);
6343
6344 if (dst && strict &&
6345 !tcp_peer_is_proven(req, dst, true,
6346 tmp_opt.saw_tstamp)) {
6347 NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
6348 goto drop_and_release;
6349 }
6350 }
6351 /* Kill the following clause, if you dislike this way. */ 6330 /* Kill the following clause, if you dislike this way. */
6352 else if (!net->ipv4.sysctl_tcp_syncookies && 6331 if (!net->ipv4.sysctl_tcp_syncookies &&
6353 (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < 6332 (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
6354 (net->ipv4.sysctl_max_syn_backlog >> 2)) && 6333 (net->ipv4.sysctl_max_syn_backlog >> 2)) &&
6355 !tcp_peer_is_proven(req, dst, false, 6334 !tcp_peer_is_proven(req, dst)) {
6356 tmp_opt.saw_tstamp)) {
6357 /* Without syncookies last quarter of 6335 /* Without syncookies last quarter of
6358 * backlog is filled with destinations, 6336 * backlog is filled with destinations,
6359 * proven to be alive. 6337 * proven to be alive.
@@ -6366,10 +6344,10 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
6366 goto drop_and_release; 6344 goto drop_and_release;
6367 } 6345 }
6368 6346
6369 isn = af_ops->init_seq(skb, &tcp_rsk(req)->ts_off); 6347 isn = af_ops->init_seq_tsoff(skb, &tcp_rsk(req)->ts_off);
6370 } 6348 }
6371 if (!dst) { 6349 if (!dst) {
6372 dst = af_ops->route_req(sk, &fl, req, NULL); 6350 dst = af_ops->route_req(sk, &fl, req);
6373 if (!dst) 6351 if (!dst)
6374 goto drop_and_free; 6352 goto drop_and_free;
6375 } 6353 }
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 575e19dcc017..7482b5d11861 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -94,12 +94,12 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
94struct inet_hashinfo tcp_hashinfo; 94struct inet_hashinfo tcp_hashinfo;
95EXPORT_SYMBOL(tcp_hashinfo); 95EXPORT_SYMBOL(tcp_hashinfo);
96 96
97static u32 tcp_v4_init_sequence(const struct sk_buff *skb, u32 *tsoff) 97static u32 tcp_v4_init_seq_and_tsoff(const struct sk_buff *skb, u32 *tsoff)
98{ 98{
99 return secure_tcp_sequence_number(ip_hdr(skb)->daddr, 99 return secure_tcp_seq_and_tsoff(ip_hdr(skb)->daddr,
100 ip_hdr(skb)->saddr, 100 ip_hdr(skb)->saddr,
101 tcp_hdr(skb)->dest, 101 tcp_hdr(skb)->dest,
102 tcp_hdr(skb)->source, tsoff); 102 tcp_hdr(skb)->source, tsoff);
103} 103}
104 104
105int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) 105int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
@@ -198,10 +198,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
198 tp->write_seq = 0; 198 tp->write_seq = 0;
199 } 199 }
200 200
201 if (tcp_death_row->sysctl_tw_recycle &&
202 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
203 tcp_fetch_timewait_stamp(sk, &rt->dst);
204
205 inet->inet_dport = usin->sin_port; 201 inet->inet_dport = usin->sin_port;
206 sk_daddr_set(sk, daddr); 202 sk_daddr_set(sk, daddr);
207 203
@@ -236,11 +232,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
236 rt = NULL; 232 rt = NULL;
237 233
238 if (likely(!tp->repair)) { 234 if (likely(!tp->repair)) {
239 seq = secure_tcp_sequence_number(inet->inet_saddr, 235 seq = secure_tcp_seq_and_tsoff(inet->inet_saddr,
240 inet->inet_daddr, 236 inet->inet_daddr,
241 inet->inet_sport, 237 inet->inet_sport,
242 usin->sin_port, 238 usin->sin_port,
243 &tp->tsoffset); 239 &tp->tsoffset);
244 if (!tp->write_seq) 240 if (!tp->write_seq)
245 tp->write_seq = seq; 241 tp->write_seq = seq;
246 } 242 }
@@ -1217,19 +1213,9 @@ static void tcp_v4_init_req(struct request_sock *req,
1217 1213
1218static struct dst_entry *tcp_v4_route_req(const struct sock *sk, 1214static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1219 struct flowi *fl, 1215 struct flowi *fl,
1220 const struct request_sock *req, 1216 const struct request_sock *req)
1221 bool *strict)
1222{ 1217{
1223 struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req); 1218 return inet_csk_route_req(sk, &fl->u.ip4, req);
1224
1225 if (strict) {
1226 if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1227 *strict = true;
1228 else
1229 *strict = false;
1230 }
1231
1232 return dst;
1233} 1219}
1234 1220
1235struct request_sock_ops tcp_request_sock_ops __read_mostly = { 1221struct request_sock_ops tcp_request_sock_ops __read_mostly = {
@@ -1253,7 +1239,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1253 .cookie_init_seq = cookie_v4_init_sequence, 1239 .cookie_init_seq = cookie_v4_init_sequence,
1254#endif 1240#endif
1255 .route_req = tcp_v4_route_req, 1241 .route_req = tcp_v4_route_req,
1256 .init_seq = tcp_v4_init_sequence, 1242 .init_seq_tsoff = tcp_v4_init_seq_and_tsoff,
1257 .send_synack = tcp_v4_send_synack, 1243 .send_synack = tcp_v4_send_synack,
1258}; 1244};
1259 1245
@@ -2466,7 +2452,6 @@ static int __net_init tcp_sk_init(struct net *net)
2466 net->ipv4.sysctl_tcp_tw_reuse = 0; 2452 net->ipv4.sysctl_tcp_tw_reuse = 0;
2467 2453
2468 cnt = tcp_hashinfo.ehash_mask + 1; 2454 cnt = tcp_hashinfo.ehash_mask + 1;
2469 net->ipv4.tcp_death_row.sysctl_tw_recycle = 0;
2470 net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2; 2455 net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
2471 net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo; 2456 net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
2472 2457
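[Editor's note] With tw_recycle gone, the only remaining consumer of per-destination timestamp state is the initial-sequence-number path, so the init_seq hook and the timestamp-offset generation are folded into a single init_seq_tsoff hook backed by secure_tcp_seq_and_tsoff(). The sketch below only illustrates the general idea of deriving both values from one keyed hash of the 4-tuple; the key handling, the FNV-style hash and the clock source are simplified stand-ins, not the kernel's implementation.

/* Illustration only: derive an ISN and a timestamp offset from one keyed
 * hash of the 4-tuple, roughly the shape of secure_tcp_seq_and_tsoff().
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t keyed_hash(const uint32_t *words, int n, uint64_t key)
{
	uint64_t h = key ^ 14695981039346656037ull;

	while (n--) {
		h ^= *words++;
		h *= 1099511628211ull;
	}
	return h;
}

static uint32_t init_seq_tsoff(uint32_t saddr, uint32_t daddr,
			       uint16_t sport, uint16_t dport,
			       uint64_t key, uint32_t *tsoff)
{
	uint32_t words[3] = { saddr, daddr, ((uint32_t)sport << 16) | dport };
	uint64_t h = keyed_hash(words, 3, key);

	*tsoff = (uint32_t)(h >> 32);		   /* per-path timestamp offset */
	return (uint32_t)h + (uint32_t)time(NULL); /* ISN also advances with time */
}

int main(void)
{
	uint32_t tsoff;
	uint32_t isn = init_seq_tsoff(0x0a000001, 0x0a000002, 40000, 80,
				      0x0123456789abcdefull, &tsoff);

	printf("isn=%08x tsoff=%08x\n", isn, tsoff);
	return 0;
}
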
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 0f46e5fe31ad..9d0d4f39e42b 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -45,8 +45,6 @@ struct tcp_metrics_block {
45 struct inetpeer_addr tcpm_saddr; 45 struct inetpeer_addr tcpm_saddr;
46 struct inetpeer_addr tcpm_daddr; 46 struct inetpeer_addr tcpm_daddr;
47 unsigned long tcpm_stamp; 47 unsigned long tcpm_stamp;
48 u32 tcpm_ts;
49 u32 tcpm_ts_stamp;
50 u32 tcpm_lock; 48 u32 tcpm_lock;
51 u32 tcpm_vals[TCP_METRIC_MAX_KERNEL + 1]; 49 u32 tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
52 struct tcp_fastopen_metrics tcpm_fastopen; 50 struct tcp_fastopen_metrics tcpm_fastopen;
@@ -123,8 +121,6 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm,
123 tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH); 121 tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
124 tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND); 122 tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
125 tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING); 123 tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
126 tm->tcpm_ts = 0;
127 tm->tcpm_ts_stamp = 0;
128 if (fastopen_clear) { 124 if (fastopen_clear) {
129 tm->tcpm_fastopen.mss = 0; 125 tm->tcpm_fastopen.mss = 0;
130 tm->tcpm_fastopen.syn_loss = 0; 126 tm->tcpm_fastopen.syn_loss = 0;
@@ -273,48 +269,6 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
273 return tm; 269 return tm;
274} 270}
275 271
276static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
277{
278 struct tcp_metrics_block *tm;
279 struct inetpeer_addr saddr, daddr;
280 unsigned int hash;
281 struct net *net;
282
283 if (tw->tw_family == AF_INET) {
284 inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
285 inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
286 hash = ipv4_addr_hash(tw->tw_daddr);
287 }
288#if IS_ENABLED(CONFIG_IPV6)
289 else if (tw->tw_family == AF_INET6) {
290 if (ipv6_addr_v4mapped(&tw->tw_v6_daddr)) {
291 inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
292 inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
293 hash = ipv4_addr_hash(tw->tw_daddr);
294 } else {
295 inetpeer_set_addr_v6(&saddr, &tw->tw_v6_rcv_saddr);
296 inetpeer_set_addr_v6(&daddr, &tw->tw_v6_daddr);
297 hash = ipv6_addr_hash(&tw->tw_v6_daddr);
298 }
299 }
300#endif
301 else
302 return NULL;
303
304 net = twsk_net(tw);
305 hash ^= net_hash_mix(net);
306 hash = hash_32(hash, tcp_metrics_hash_log);
307
308 for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
309 tm = rcu_dereference(tm->tcpm_next)) {
310 if (addr_same(&tm->tcpm_saddr, &saddr) &&
311 addr_same(&tm->tcpm_daddr, &daddr) &&
312 net_eq(tm_net(tm), net))
313 break;
314 }
315 return tm;
316}
317
318static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk, 272static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
319 struct dst_entry *dst, 273 struct dst_entry *dst,
320 bool create) 274 bool create)
@@ -573,8 +527,7 @@ reset:
573 tp->snd_cwnd_stamp = tcp_time_stamp; 527 tp->snd_cwnd_stamp = tcp_time_stamp;
574} 528}
575 529
576bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, 530bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
577 bool paws_check, bool timestamps)
578{ 531{
579 struct tcp_metrics_block *tm; 532 struct tcp_metrics_block *tm;
580 bool ret; 533 bool ret;
@@ -584,94 +537,10 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
584 537
585 rcu_read_lock(); 538 rcu_read_lock();
586 tm = __tcp_get_metrics_req(req, dst); 539 tm = __tcp_get_metrics_req(req, dst);
587 if (paws_check) { 540 if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
588 if (tm &&
589 (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
590 ((s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW ||
591 !timestamps))
592 ret = false;
593 else
594 ret = true;
595 } else {
596 if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
597 ret = true;
598 else
599 ret = false;
600 }
601 rcu_read_unlock();
602
603 return ret;
604}
605
606void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
607{
608 struct tcp_metrics_block *tm;
609
610 rcu_read_lock();
611 tm = tcp_get_metrics(sk, dst, true);
612 if (tm) {
613 struct tcp_sock *tp = tcp_sk(sk);
614
615 if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
616 tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
617 tp->rx_opt.ts_recent = tm->tcpm_ts;
618 }
619 }
620 rcu_read_unlock();
621}
622EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);
623
624/* VJ's idea. Save last timestamp seen from this destination and hold
625 * it at least for normal timewait interval to use for duplicate
626 * segment detection in subsequent connections, before they enter
627 * synchronized state.
628 */
629bool tcp_remember_stamp(struct sock *sk)
630{
631 struct dst_entry *dst = __sk_dst_get(sk);
632 bool ret = false;
633
634 if (dst) {
635 struct tcp_metrics_block *tm;
636
637 rcu_read_lock();
638 tm = tcp_get_metrics(sk, dst, true);
639 if (tm) {
640 struct tcp_sock *tp = tcp_sk(sk);
641
642 if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
643 ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
644 tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
645 tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
646 tm->tcpm_ts = tp->rx_opt.ts_recent;
647 }
648 ret = true;
649 }
650 rcu_read_unlock();
651 }
652 return ret;
653}
654
655bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
656{
657 struct tcp_metrics_block *tm;
658 bool ret = false;
659
660 rcu_read_lock();
661 tm = __tcp_get_metrics_tw(tw);
662 if (tm) {
663 const struct tcp_timewait_sock *tcptw;
664 struct sock *sk = (struct sock *) tw;
665
666 tcptw = tcp_twsk(sk);
667 if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
668 ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
669 tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
670 tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
671 tm->tcpm_ts = tcptw->tw_ts_recent;
672 }
673 ret = true; 541 ret = true;
674 } 542 else
543 ret = false;
675 rcu_read_unlock(); 544 rcu_read_unlock();
676 545
677 return ret; 546 return ret;
@@ -791,14 +660,6 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
791 jiffies - tm->tcpm_stamp, 660 jiffies - tm->tcpm_stamp,
792 TCP_METRICS_ATTR_PAD) < 0) 661 TCP_METRICS_ATTR_PAD) < 0)
793 goto nla_put_failure; 662 goto nla_put_failure;
794 if (tm->tcpm_ts_stamp) {
795 if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
796 (s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
797 goto nla_put_failure;
798 if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
799 tm->tcpm_ts) < 0)
800 goto nla_put_failure;
801 }
802 663
803 { 664 {
804 int n = 0; 665 int n = 0;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 7e16243cdb58..692f974e5abe 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -94,7 +94,6 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
94 struct tcp_options_received tmp_opt; 94 struct tcp_options_received tmp_opt;
95 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); 95 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
96 bool paws_reject = false; 96 bool paws_reject = false;
97 struct inet_timewait_death_row *tcp_death_row = &sock_net((struct sock*)tw)->ipv4.tcp_death_row;
98 97
99 tmp_opt.saw_tstamp = 0; 98 tmp_opt.saw_tstamp = 0;
100 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { 99 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
@@ -149,12 +148,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
149 tcptw->tw_ts_recent = tmp_opt.rcv_tsval; 148 tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
150 } 149 }
151 150
152 if (tcp_death_row->sysctl_tw_recycle && 151 inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
153 tcptw->tw_ts_recent_stamp &&
154 tcp_tw_remember_stamp(tw))
155 inet_twsk_reschedule(tw, tw->tw_timeout);
156 else
157 inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
158 return TCP_TW_ACK; 152 return TCP_TW_ACK;
159 } 153 }
160 154
@@ -259,12 +253,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
259 const struct inet_connection_sock *icsk = inet_csk(sk); 253 const struct inet_connection_sock *icsk = inet_csk(sk);
260 const struct tcp_sock *tp = tcp_sk(sk); 254 const struct tcp_sock *tp = tcp_sk(sk);
261 struct inet_timewait_sock *tw; 255 struct inet_timewait_sock *tw;
262 bool recycle_ok = false;
263 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row; 256 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
264 257
265 if (tcp_death_row->sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
266 recycle_ok = tcp_remember_stamp(sk);
267
268 tw = inet_twsk_alloc(sk, tcp_death_row, state); 258 tw = inet_twsk_alloc(sk, tcp_death_row, state);
269 259
270 if (tw) { 260 if (tw) {
@@ -317,13 +307,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
317 if (timeo < rto) 307 if (timeo < rto)
318 timeo = rto; 308 timeo = rto;
319 309
320 if (recycle_ok) { 310 tw->tw_timeout = TCP_TIMEWAIT_LEN;
321 tw->tw_timeout = rto; 311 if (state == TCP_TIME_WAIT)
322 } else { 312 timeo = TCP_TIMEWAIT_LEN;
323 tw->tw_timeout = TCP_TIMEWAIT_LEN;
324 if (state == TCP_TIME_WAIT)
325 timeo = TCP_TIMEWAIT_LEN;
326 }
327 313
328 inet_twsk_schedule(tw, timeo); 314 inet_twsk_schedule(tw, timeo);
329 /* Linkage updates. */ 315 /* Linkage updates. */
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index fed66dc0e0f5..9775453b8d17 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -265,8 +265,8 @@ static size_t tcp_westwood_info(struct sock *sk, u32 ext, int *attr,
265 if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) { 265 if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
266 info->vegas.tcpv_enabled = 1; 266 info->vegas.tcpv_enabled = 1;
267 info->vegas.tcpv_rttcnt = 0; 267 info->vegas.tcpv_rttcnt = 0;
268 info->vegas.tcpv_rtt = jiffies_to_usecs(ca->rtt), 268 info->vegas.tcpv_rtt = jiffies_to_usecs(ca->rtt);
269 info->vegas.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min), 269 info->vegas.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min);
270 270
271 *attr = INET_DIAG_VEGASINFO; 271 *attr = INET_DIAG_VEGASINFO;
272 return sizeof(struct tcpvegas_info); 272 return sizeof(struct tcpvegas_info);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 363172527e43..8c69768a5c46 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -245,6 +245,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
245#endif 245#endif
246 .enhanced_dad = 1, 246 .enhanced_dad = 1,
247 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64, 247 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
248 .disable_policy = 0,
248}; 249};
249 250
250static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { 251static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -297,6 +298,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
297#endif 298#endif
298 .enhanced_dad = 1, 299 .enhanced_dad = 1,
299 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64, 300 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
301 .disable_policy = 0,
300}; 302};
301 303
302/* Check if a valid qdisc is available */ 304/* Check if a valid qdisc is available */
@@ -944,6 +946,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
944 const struct in6_addr *peer_addr, int pfxlen, 946 const struct in6_addr *peer_addr, int pfxlen,
945 int scope, u32 flags, u32 valid_lft, u32 prefered_lft) 947 int scope, u32 flags, u32 valid_lft, u32 prefered_lft)
946{ 948{
949 struct net *net = dev_net(idev->dev);
947 struct inet6_ifaddr *ifa = NULL; 950 struct inet6_ifaddr *ifa = NULL;
948 struct rt6_info *rt; 951 struct rt6_info *rt;
949 unsigned int hash; 952 unsigned int hash;
@@ -990,6 +993,10 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
990 goto out; 993 goto out;
991 } 994 }
992 995
996 if (net->ipv6.devconf_all->disable_policy ||
997 idev->cnf.disable_policy)
998 rt->dst.flags |= DST_NOPOLICY;
999
993 neigh_parms_data_state_setall(idev->nd_parms); 1000 neigh_parms_data_state_setall(idev->nd_parms);
994 1001
995 ifa->addr = *addr; 1002 ifa->addr = *addr;
@@ -5003,6 +5010,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
5003#endif 5010#endif
5004 array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad; 5011 array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
5005 array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode; 5012 array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
5013 array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
5006} 5014}
5007 5015
5008static inline size_t inet6_ifla6_size(void) 5016static inline size_t inet6_ifla6_size(void)
@@ -5827,6 +5835,105 @@ int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
5827 return ret; 5835 return ret;
5828} 5836}
5829 5837
5838static
5839void addrconf_set_nopolicy(struct rt6_info *rt, int action)
5840{
5841 if (rt) {
5842 if (action)
5843 rt->dst.flags |= DST_NOPOLICY;
5844 else
5845 rt->dst.flags &= ~DST_NOPOLICY;
5846 }
5847}
5848
5849static
5850void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
5851{
5852 struct inet6_ifaddr *ifa;
5853
5854 read_lock_bh(&idev->lock);
5855 list_for_each_entry(ifa, &idev->addr_list, if_list) {
5856 spin_lock(&ifa->lock);
5857 if (ifa->rt) {
5858 struct rt6_info *rt = ifa->rt;
5859 struct fib6_table *table = rt->rt6i_table;
5860 int cpu;
5861
5862 read_lock(&table->tb6_lock);
5863 addrconf_set_nopolicy(ifa->rt, val);
5864 if (rt->rt6i_pcpu) {
5865 for_each_possible_cpu(cpu) {
5866 struct rt6_info **rtp;
5867
5868 rtp = per_cpu_ptr(rt->rt6i_pcpu, cpu);
5869 addrconf_set_nopolicy(*rtp, val);
5870 }
5871 }
5872 read_unlock(&table->tb6_lock);
5873 }
5874 spin_unlock(&ifa->lock);
5875 }
5876 read_unlock_bh(&idev->lock);
5877}
5878
5879static
5880int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
5881{
5882 struct inet6_dev *idev;
5883 struct net *net;
5884
5885 if (!rtnl_trylock())
5886 return restart_syscall();
5887
5888 *valp = val;
5889
5890 net = (struct net *)ctl->extra2;
5891 if (valp == &net->ipv6.devconf_dflt->disable_policy) {
5892 rtnl_unlock();
5893 return 0;
5894 }
5895
5896 if (valp == &net->ipv6.devconf_all->disable_policy) {
5897 struct net_device *dev;
5898
5899 for_each_netdev(net, dev) {
5900 idev = __in6_dev_get(dev);
5901 if (idev)
5902 addrconf_disable_policy_idev(idev, val);
5903 }
5904 } else {
5905 idev = (struct inet6_dev *)ctl->extra1;
5906 addrconf_disable_policy_idev(idev, val);
5907 }
5908
5909 rtnl_unlock();
5910 return 0;
5911}
5912
5913static
5914int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write,
5915 void __user *buffer, size_t *lenp,
5916 loff_t *ppos)
5917{
5918 int *valp = ctl->data;
5919 int val = *valp;
5920 loff_t pos = *ppos;
5921 struct ctl_table lctl;
5922 int ret;
5923
5924 lctl = *ctl;
5925 lctl.data = &val;
5926 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5927
5928 if (write && (*valp != val))
5929 ret = addrconf_disable_policy(ctl, valp, val);
5930
5931 if (ret)
5932 *ppos = pos;
5933
5934 return ret;
5935}
5936
5830static int minus_one = -1; 5937static int minus_one = -1;
5831static const int one = 1; 5938static const int one = 1;
5832static const int two_five_five = 255; 5939static const int two_five_five = 255;
@@ -6185,6 +6292,13 @@ static const struct ctl_table addrconf_sysctl[] = {
6185 .proc_handler = addrconf_sysctl_addr_gen_mode, 6292 .proc_handler = addrconf_sysctl_addr_gen_mode,
6186 }, 6293 },
6187 { 6294 {
6295 .procname = "disable_policy",
6296 .data = &ipv6_devconf.disable_policy,
6297 .maxlen = sizeof(int),
6298 .mode = 0644,
6299 .proc_handler = addrconf_sysctl_disable_policy,
6300 },
6301 {
6188 /* sentinel */ 6302 /* sentinel */
6189 } 6303 }
6190}; 6304};
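[Editor's note] The disable_policy knob added above marks the routes of addresses on an interface (or on all interfaces) with DST_NOPOLICY so IPsec policy lookups are skipped for traffic terminating there; the handler walks the existing ifaddr routes, including the per-CPU clones, to retrofit the flag when the value changes. The sketch below simply toggles the setting through procfs; the path layout follows the usual /proc/sys/net/ipv6/conf/<dev>/ convention and assumes a kernel with this patch.

/* Sketch: enable disable_policy for one device (or "all"/"default"). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "all";
	char path[128];
	int fd;

	snprintf(path, sizeof(path),
		 "/proc/sys/net/ipv6/conf/%s/disable_policy", dev);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		return 1;
	}
	if (write(fd, "1\n", 2) != 2)
		perror("write");
	close(fd);
	return 0;
}
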
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index 765facf03d45..e8d88d82636b 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -159,7 +159,7 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
159 159
160 if (nft_hook(pkt) == NF_INET_PRE_ROUTING && 160 if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
161 nft_fib_is_loopback(pkt->skb, nft_in(pkt))) { 161 nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
162 nft_fib_store_result(dest, priv->result, pkt, 162 nft_fib_store_result(dest, priv, pkt,
163 nft_in(pkt)->ifindex); 163 nft_in(pkt)->ifindex);
164 return; 164 return;
165 } 165 }
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 49fa2e8c3fa9..0f08d718a002 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -101,12 +101,12 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
101 } 101 }
102} 102}
103 103
104static u32 tcp_v6_init_sequence(const struct sk_buff *skb, u32 *tsoff) 104static u32 tcp_v6_init_seq_and_tsoff(const struct sk_buff *skb, u32 *tsoff)
105{ 105{
106 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, 106 return secure_tcpv6_seq_and_tsoff(ipv6_hdr(skb)->daddr.s6_addr32,
107 ipv6_hdr(skb)->saddr.s6_addr32, 107 ipv6_hdr(skb)->saddr.s6_addr32,
108 tcp_hdr(skb)->dest, 108 tcp_hdr(skb)->dest,
109 tcp_hdr(skb)->source, tsoff); 109 tcp_hdr(skb)->source, tsoff);
110} 110}
111 111
112static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, 112static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
@@ -265,11 +265,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
265 sk->sk_gso_type = SKB_GSO_TCPV6; 265 sk->sk_gso_type = SKB_GSO_TCPV6;
266 ip6_dst_store(sk, dst, NULL, NULL); 266 ip6_dst_store(sk, dst, NULL, NULL);
267 267
268 if (tcp_death_row->sysctl_tw_recycle &&
269 !tp->rx_opt.ts_recent_stamp &&
270 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
271 tcp_fetch_timewait_stamp(sk, dst);
272
273 icsk->icsk_ext_hdr_len = 0; 268 icsk->icsk_ext_hdr_len = 0;
274 if (opt) 269 if (opt)
275 icsk->icsk_ext_hdr_len = opt->opt_flen + 270 icsk->icsk_ext_hdr_len = opt->opt_flen +
@@ -287,11 +282,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
287 sk_set_txhash(sk); 282 sk_set_txhash(sk);
288 283
289 if (likely(!tp->repair)) { 284 if (likely(!tp->repair)) {
290 seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, 285 seq = secure_tcpv6_seq_and_tsoff(np->saddr.s6_addr32,
291 sk->sk_v6_daddr.s6_addr32, 286 sk->sk_v6_daddr.s6_addr32,
292 inet->inet_sport, 287 inet->inet_sport,
293 inet->inet_dport, 288 inet->inet_dport,
294 &tp->tsoffset); 289 &tp->tsoffset);
295 if (!tp->write_seq) 290 if (!tp->write_seq)
296 tp->write_seq = seq; 291 tp->write_seq = seq;
297 } 292 }
@@ -727,11 +722,8 @@ static void tcp_v6_init_req(struct request_sock *req,
727 722
728static struct dst_entry *tcp_v6_route_req(const struct sock *sk, 723static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
729 struct flowi *fl, 724 struct flowi *fl,
730 const struct request_sock *req, 725 const struct request_sock *req)
731 bool *strict)
732{ 726{
733 if (strict)
734 *strict = true;
735 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP); 727 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
736} 728}
737 729
@@ -757,7 +749,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
757 .cookie_init_seq = cookie_v6_init_sequence, 749 .cookie_init_seq = cookie_v6_init_sequence,
758#endif 750#endif
759 .route_req = tcp_v6_route_req, 751 .route_req = tcp_v6_route_req,
760 .init_seq = tcp_v6_init_sequence, 752 .init_seq_tsoff = tcp_v6_init_seq_and_tsoff,
761 .send_synack = tcp_v6_send_synack, 753 .send_synack = tcp_v6_send_synack,
762}; 754};
763 755
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4e4c401e3bc6..08a188ffe070 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -864,6 +864,64 @@ discard:
864 return 0; 864 return 0;
865} 865}
866 866
867static struct sock *__udp6_lib_demux_lookup(struct net *net,
868 __be16 loc_port, const struct in6_addr *loc_addr,
869 __be16 rmt_port, const struct in6_addr *rmt_addr,
870 int dif)
871{
872 struct sock *sk;
873
874 rcu_read_lock();
875 sk = __udp6_lib_lookup(net, rmt_addr, rmt_port, loc_addr, loc_port,
876 dif, &udp_table, NULL);
877 if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
878 sk = NULL;
879 rcu_read_unlock();
880
881 return sk;
882}
883
884static void udp_v6_early_demux(struct sk_buff *skb)
885{
886 struct net *net = dev_net(skb->dev);
887 const struct udphdr *uh;
888 struct sock *sk;
889 struct dst_entry *dst;
890 int dif = skb->dev->ifindex;
891
892 if (!pskb_may_pull(skb, skb_transport_offset(skb) +
893 sizeof(struct udphdr)))
894 return;
895
896 uh = udp_hdr(skb);
897
898 if (skb->pkt_type == PACKET_HOST)
899 sk = __udp6_lib_demux_lookup(net, uh->dest,
900 &ipv6_hdr(skb)->daddr,
901 uh->source, &ipv6_hdr(skb)->saddr,
902 dif);
903 else
904 return;
905
906 if (!sk)
907 return;
908
909 skb->sk = sk;
910 skb->destructor = sock_efree;
911 dst = READ_ONCE(sk->sk_rx_dst);
912
913 if (dst)
914 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
915 if (dst) {
916 if (dst->flags & DST_NOCACHE) {
917 if (likely(atomic_inc_not_zero(&dst->__refcnt)))
918 skb_dst_set(skb, dst);
919 } else {
920 skb_dst_set_noref(skb, dst);
921 }
922 }
923}
924
867static __inline__ int udpv6_rcv(struct sk_buff *skb) 925static __inline__ int udpv6_rcv(struct sk_buff *skb)
868{ 926{
869 return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP); 927 return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
@@ -1379,6 +1437,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
1379#endif 1437#endif
1380 1438
1381static const struct inet6_protocol udpv6_protocol = { 1439static const struct inet6_protocol udpv6_protocol = {
1440 .early_demux = udp_v6_early_demux,
1382 .handler = udpv6_rcv, 1441 .handler = udpv6_rcv,
1383 .err_handler = udpv6_err, 1442 .err_handler = udpv6_err,
1384 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 1443 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
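[Editor's note] udp_v6_early_demux() mirrors the IPv4 early-demux path: for unicast packets it resolves the receiving socket from the 4-tuple before routing, attaches it to the skb, and reuses the socket's cached rx dst only while dst_check() still validates its cookie, so connected UDP sockets avoid a second socket lookup and a route lookup per packet. As a rough userspace analogy of that "reuse while the cookie matches, otherwise fall back to the slow path" pattern, the sketch below caches a lookup result with a generation cookie; all names, types and values are invented for the illustration.

/* Userspace analogy of the early-demux fast path: a cached entry is reused
 * only while its validity cookie matches the current generation.
 */
#include <stdint.h>
#include <stdio.h>

struct cached_route {
	uint32_t cookie;	/* generation the entry was created under */
	int	 ifindex;	/* the "result" we want to reuse */
};

static uint32_t current_generation = 1;

static int slow_route_lookup(void)
{
	return 42;		/* pretend this is the expensive part */
}

static int route_packet(struct cached_route *cache)
{
	if (cache->cookie == current_generation)
		return cache->ifindex;		/* fast path: reuse */

	cache->ifindex = slow_route_lookup();	/* refresh on mismatch */
	cache->cookie = current_generation;
	return cache->ifindex;
}

int main(void)
{
	struct cached_route c = { 0, 0 };

	printf("first packet  -> ifindex %d\n", route_packet(&c)); /* slow   */
	printf("second packet -> ifindex %d\n", route_packet(&c)); /* cached */
	current_generation++;		/* e.g. the routing state changed */
	printf("after change  -> ifindex %d\n", route_packet(&c)); /* slow   */
	return 0;
}
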
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 33211f9a2656..f7a08e5f9763 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -32,7 +32,9 @@
32#define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1) 32#define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1)
33 33
34static int zero = 0; 34static int zero = 0;
35static int one = 1;
35static int label_limit = (1 << 20) - 1; 36static int label_limit = (1 << 20) - 1;
37static int ttl_max = 255;
36 38
37static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt, 39static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
38 struct nlmsghdr *nlh, struct net *net, u32 portid, 40 struct nlmsghdr *nlh, struct net *net, u32 portid,
@@ -220,8 +222,8 @@ out:
220 return &rt->rt_nh[nh_index]; 222 return &rt->rt_nh[nh_index];
221} 223}
222 224
223static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb, 225static bool mpls_egress(struct net *net, struct mpls_route *rt,
224 struct mpls_entry_decoded dec) 226 struct sk_buff *skb, struct mpls_entry_decoded dec)
225{ 227{
226 enum mpls_payload_type payload_type; 228 enum mpls_payload_type payload_type;
227 bool success = false; 229 bool success = false;
@@ -246,22 +248,46 @@ static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
246 switch (payload_type) { 248 switch (payload_type) {
247 case MPT_IPV4: { 249 case MPT_IPV4: {
248 struct iphdr *hdr4 = ip_hdr(skb); 250 struct iphdr *hdr4 = ip_hdr(skb);
251 u8 new_ttl;
249 skb->protocol = htons(ETH_P_IP); 252 skb->protocol = htons(ETH_P_IP);
253
254 /* If propagating TTL, take the decremented TTL from
255 * the incoming MPLS header, otherwise decrement the
256 * TTL, but only if not 0 to avoid underflow.
257 */
258 if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
259 (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
260 net->mpls.ip_ttl_propagate))
261 new_ttl = dec.ttl;
262 else
263 new_ttl = hdr4->ttl ? hdr4->ttl - 1 : 0;
264
250 csum_replace2(&hdr4->check, 265 csum_replace2(&hdr4->check,
251 htons(hdr4->ttl << 8), 266 htons(hdr4->ttl << 8),
252 htons(dec.ttl << 8)); 267 htons(new_ttl << 8));
253 hdr4->ttl = dec.ttl; 268 hdr4->ttl = new_ttl;
254 success = true; 269 success = true;
255 break; 270 break;
256 } 271 }
257 case MPT_IPV6: { 272 case MPT_IPV6: {
258 struct ipv6hdr *hdr6 = ipv6_hdr(skb); 273 struct ipv6hdr *hdr6 = ipv6_hdr(skb);
259 skb->protocol = htons(ETH_P_IPV6); 274 skb->protocol = htons(ETH_P_IPV6);
260 hdr6->hop_limit = dec.ttl; 275
276 /* If propagating TTL, take the decremented TTL from
277 * the incoming MPLS header, otherwise decrement the
278 * hop limit, but only if not 0 to avoid underflow.
279 */
280 if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
281 (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
282 net->mpls.ip_ttl_propagate))
283 hdr6->hop_limit = dec.ttl;
284 else if (hdr6->hop_limit)
285 hdr6->hop_limit = hdr6->hop_limit - 1;
261 success = true; 286 success = true;
262 break; 287 break;
263 } 288 }
264 case MPT_UNSPEC: 289 case MPT_UNSPEC:
290 /* Should have decided which protocol it is by now */
265 break; 291 break;
266 } 292 }
267 293
@@ -361,7 +387,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
361 387
362 if (unlikely(!new_header_size && dec.bos)) { 388 if (unlikely(!new_header_size && dec.bos)) {
363 /* Penultimate hop popping */ 389 /* Penultimate hop popping */
364 if (!mpls_egress(rt, skb, dec)) 390 if (!mpls_egress(dev_net(out_dev), rt, skb, dec))
365 goto err; 391 goto err;
366 } else { 392 } else {
367 bool bos; 393 bool bos;
@@ -412,6 +438,7 @@ static struct packet_type mpls_packet_type __read_mostly = {
412static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = { 438static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
413 [RTA_DST] = { .type = NLA_U32 }, 439 [RTA_DST] = { .type = NLA_U32 },
414 [RTA_OIF] = { .type = NLA_U32 }, 440 [RTA_OIF] = { .type = NLA_U32 },
441 [RTA_TTL_PROPAGATE] = { .type = NLA_U8 },
415}; 442};
416 443
417struct mpls_route_config { 444struct mpls_route_config {
@@ -421,6 +448,7 @@ struct mpls_route_config {
421 u8 rc_via_alen; 448 u8 rc_via_alen;
422 u8 rc_via[MAX_VIA_ALEN]; 449 u8 rc_via[MAX_VIA_ALEN];
423 u32 rc_label; 450 u32 rc_label;
451 u8 rc_ttl_propagate;
424 u8 rc_output_labels; 452 u8 rc_output_labels;
425 u32 rc_output_label[MAX_NEW_LABELS]; 453 u32 rc_output_label[MAX_NEW_LABELS];
426 u32 rc_nlflags; 454 u32 rc_nlflags;
@@ -856,6 +884,7 @@ static int mpls_route_add(struct mpls_route_config *cfg)
856 884
857 rt->rt_protocol = cfg->rc_protocol; 885 rt->rt_protocol = cfg->rc_protocol;
858 rt->rt_payload_type = cfg->rc_payload_type; 886 rt->rt_payload_type = cfg->rc_payload_type;
887 rt->rt_ttl_propagate = cfg->rc_ttl_propagate;
859 888
860 if (cfg->rc_mp) 889 if (cfg->rc_mp)
861 err = mpls_nh_build_multi(cfg, rt); 890 err = mpls_nh_build_multi(cfg, rt);
@@ -1577,6 +1606,7 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
1577 cfg->rc_label = LABEL_NOT_SPECIFIED; 1606 cfg->rc_label = LABEL_NOT_SPECIFIED;
1578 cfg->rc_protocol = rtm->rtm_protocol; 1607 cfg->rc_protocol = rtm->rtm_protocol;
1579 cfg->rc_via_table = MPLS_NEIGH_TABLE_UNSPEC; 1608 cfg->rc_via_table = MPLS_NEIGH_TABLE_UNSPEC;
1609 cfg->rc_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
1580 cfg->rc_nlflags = nlh->nlmsg_flags; 1610 cfg->rc_nlflags = nlh->nlmsg_flags;
1581 cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid; 1611 cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid;
1582 cfg->rc_nlinfo.nlh = nlh; 1612 cfg->rc_nlinfo.nlh = nlh;
@@ -1623,6 +1653,17 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
1623 cfg->rc_mp_len = nla_len(nla); 1653 cfg->rc_mp_len = nla_len(nla);
1624 break; 1654 break;
1625 } 1655 }
1656 case RTA_TTL_PROPAGATE:
1657 {
1658 u8 ttl_propagate = nla_get_u8(nla);
1659
1660 if (ttl_propagate > 1)
1661 goto errout;
1662 cfg->rc_ttl_propagate = ttl_propagate ?
1663 MPLS_TTL_PROP_ENABLED :
1664 MPLS_TTL_PROP_DISABLED;
1665 break;
1666 }
1626 default: 1667 default:
1627 /* Unsupported attribute */ 1668 /* Unsupported attribute */
1628 goto errout; 1669 goto errout;
@@ -1683,6 +1724,15 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
1683 1724
1684 if (nla_put_labels(skb, RTA_DST, 1, &label)) 1725 if (nla_put_labels(skb, RTA_DST, 1, &label))
1685 goto nla_put_failure; 1726 goto nla_put_failure;
1727
1728 if (rt->rt_ttl_propagate != MPLS_TTL_PROP_DEFAULT) {
1729 bool ttl_propagate =
1730 rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED;
1731
1732 if (nla_put_u8(skb, RTA_TTL_PROPAGATE,
1733 ttl_propagate))
1734 goto nla_put_failure;
1735 }
1686 if (rt->rt_nhn == 1) { 1736 if (rt->rt_nhn == 1) {
1687 const struct mpls_nh *nh = rt->rt_nh; 1737 const struct mpls_nh *nh = rt->rt_nh;
1688 1738
@@ -1793,7 +1843,8 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
1793{ 1843{
1794 size_t payload = 1844 size_t payload =
1795 NLMSG_ALIGN(sizeof(struct rtmsg)) 1845 NLMSG_ALIGN(sizeof(struct rtmsg))
1796 + nla_total_size(4); /* RTA_DST */ 1846 + nla_total_size(4) /* RTA_DST */
1847 + nla_total_size(1); /* RTA_TTL_PROPAGATE */
1797 1848
1798 if (rt->rt_nhn == 1) { 1849 if (rt->rt_nhn == 1) {
1799 struct mpls_nh *nh = rt->rt_nh; 1850 struct mpls_nh *nh = rt->rt_nh;
@@ -1877,6 +1928,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
1877 RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo); 1928 RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
1878 rt0->rt_protocol = RTPROT_KERNEL; 1929 rt0->rt_protocol = RTPROT_KERNEL;
1879 rt0->rt_payload_type = MPT_IPV4; 1930 rt0->rt_payload_type = MPT_IPV4;
1931 rt0->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
1880 rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE; 1932 rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
1881 rt0->rt_nh->nh_via_alen = lo->addr_len; 1933 rt0->rt_nh->nh_via_alen = lo->addr_len;
1882 memcpy(__mpls_nh_via(rt0, rt0->rt_nh), lo->dev_addr, 1934 memcpy(__mpls_nh_via(rt0, rt0->rt_nh), lo->dev_addr,
@@ -1890,6 +1942,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
1890 RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo); 1942 RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
1891 rt2->rt_protocol = RTPROT_KERNEL; 1943 rt2->rt_protocol = RTPROT_KERNEL;
1892 rt2->rt_payload_type = MPT_IPV6; 1944 rt2->rt_payload_type = MPT_IPV6;
 1945 rt2->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
1893 rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE; 1946 rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
1894 rt2->rt_nh->nh_via_alen = lo->addr_len; 1947 rt2->rt_nh->nh_via_alen = lo->addr_len;
1895 memcpy(__mpls_nh_via(rt2, rt2->rt_nh), lo->dev_addr, 1948 memcpy(__mpls_nh_via(rt2, rt2->rt_nh), lo->dev_addr,
@@ -1971,6 +2024,9 @@ static int mpls_platform_labels(struct ctl_table *table, int write,
1971 return ret; 2024 return ret;
1972} 2025}
1973 2026
2027#define MPLS_NS_SYSCTL_OFFSET(field) \
2028 (&((struct net *)0)->field)
2029
1974static const struct ctl_table mpls_table[] = { 2030static const struct ctl_table mpls_table[] = {
1975 { 2031 {
1976 .procname = "platform_labels", 2032 .procname = "platform_labels",
@@ -1979,21 +2035,47 @@ static const struct ctl_table mpls_table[] = {
1979 .mode = 0644, 2035 .mode = 0644,
1980 .proc_handler = mpls_platform_labels, 2036 .proc_handler = mpls_platform_labels,
1981 }, 2037 },
2038 {
2039 .procname = "ip_ttl_propagate",
2040 .data = MPLS_NS_SYSCTL_OFFSET(mpls.ip_ttl_propagate),
2041 .maxlen = sizeof(int),
2042 .mode = 0644,
2043 .proc_handler = proc_dointvec_minmax,
2044 .extra1 = &zero,
2045 .extra2 = &one,
2046 },
2047 {
2048 .procname = "default_ttl",
2049 .data = MPLS_NS_SYSCTL_OFFSET(mpls.default_ttl),
2050 .maxlen = sizeof(int),
2051 .mode = 0644,
2052 .proc_handler = proc_dointvec_minmax,
2053 .extra1 = &one,
2054 .extra2 = &ttl_max,
2055 },
1982 { } 2056 { }
1983}; 2057};
1984 2058
1985static int mpls_net_init(struct net *net) 2059static int mpls_net_init(struct net *net)
1986{ 2060{
1987 struct ctl_table *table; 2061 struct ctl_table *table;
2062 int i;
1988 2063
1989 net->mpls.platform_labels = 0; 2064 net->mpls.platform_labels = 0;
1990 net->mpls.platform_label = NULL; 2065 net->mpls.platform_label = NULL;
2066 net->mpls.ip_ttl_propagate = 1;
2067 net->mpls.default_ttl = 255;
1991 2068
1992 table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL); 2069 table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL);
1993 if (table == NULL) 2070 if (table == NULL)
1994 return -ENOMEM; 2071 return -ENOMEM;
1995 2072
1996 table[0].data = net; 2073 /* Table data contains only offsets relative to the base of
 2074 * struct net at this point, so make them absolute.
2075 */
2076 for (i = 0; i < ARRAY_SIZE(mpls_table) - 1; i++)
2077 table[i].data = (char *)net + (uintptr_t)table[i].data;
2078
1997 net->mpls.ctl = register_net_sysctl(net, "net/mpls", table); 2079 net->mpls.ctl = register_net_sysctl(net, "net/mpls", table);
1998 if (net->mpls.ctl == NULL) { 2080 if (net->mpls.ctl == NULL) {
1999 kfree(table); 2081 kfree(table);
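[Editor's note] On penultimate-hop pop, mpls_egress() now chooses between two TTL sources as its comments describe: with propagation on (per-route setting, or the new net.mpls.ip_ttl_propagate default), the already-decremented TTL from the MPLS entry replaces the IP TTL/hop limit; with propagation off, the IP header keeps its own TTL, decremented by one unless it is already 0. A standalone restatement of that rule, with invented names, is:

/* Pop-direction TTL selection as described in mpls_egress(); names invented. */
#include <stdio.h>

static unsigned int egress_ttl(int propagate, unsigned int mpls_ttl,
			       unsigned int ip_ttl)
{
	if (propagate)
		return mpls_ttl;		/* decremented MPLS TTL wins */
	return ip_ttl ? ip_ttl - 1 : 0;		/* else decrement, avoid underflow */
}

int main(void)
{
	printf("%u\n", egress_ttl(1, 9, 64));	/* 9  */
	printf("%u\n", egress_ttl(0, 9, 64));	/* 63 */
	printf("%u\n", egress_ttl(0, 9, 0));	/* 0  */
	return 0;
}
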
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index 76360d8b9579..62928d8fabd1 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -90,6 +90,12 @@ struct mpls_nh { /* next hop label forwarding entry */
90 u8 nh_via_table; 90 u8 nh_via_table;
91}; 91};
92 92
93enum mpls_ttl_propagation {
94 MPLS_TTL_PROP_DEFAULT,
95 MPLS_TTL_PROP_ENABLED,
96 MPLS_TTL_PROP_DISABLED,
97};
98
93/* The route, nexthops and vias are stored together in the same memory 99/* The route, nexthops and vias are stored together in the same memory
94 * block: 100 * block:
95 * 101 *
@@ -116,6 +122,7 @@ struct mpls_route { /* next hop label forwarding entry */
116 u8 rt_protocol; 122 u8 rt_protocol;
117 u8 rt_payload_type; 123 u8 rt_payload_type;
118 u8 rt_max_alen; 124 u8 rt_max_alen;
125 u8 rt_ttl_propagate;
119 unsigned int rt_nhn; 126 unsigned int rt_nhn;
120 unsigned int rt_nhn_alive; 127 unsigned int rt_nhn_alive;
121 struct mpls_nh rt_nh[0]; 128 struct mpls_nh rt_nh[0];
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index e4e4424f9eb1..22f71fce0bfb 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -29,6 +29,7 @@
29 29
30static const struct nla_policy mpls_iptunnel_policy[MPLS_IPTUNNEL_MAX + 1] = { 30static const struct nla_policy mpls_iptunnel_policy[MPLS_IPTUNNEL_MAX + 1] = {
31 [MPLS_IPTUNNEL_DST] = { .type = NLA_U32 }, 31 [MPLS_IPTUNNEL_DST] = { .type = NLA_U32 },
32 [MPLS_IPTUNNEL_TTL] = { .type = NLA_U8 },
32}; 33};
33 34
34static unsigned int mpls_encap_size(struct mpls_iptunnel_encap *en) 35static unsigned int mpls_encap_size(struct mpls_iptunnel_encap *en)
@@ -49,6 +50,7 @@ static int mpls_xmit(struct sk_buff *skb)
49 struct rtable *rt = NULL; 50 struct rtable *rt = NULL;
50 struct rt6_info *rt6 = NULL; 51 struct rt6_info *rt6 = NULL;
51 struct mpls_dev *out_mdev; 52 struct mpls_dev *out_mdev;
53 struct net *net;
52 int err = 0; 54 int err = 0;
53 bool bos; 55 bool bos;
54 int i; 56 int i;
@@ -56,17 +58,7 @@ static int mpls_xmit(struct sk_buff *skb)
56 58
57 /* Find the output device */ 59 /* Find the output device */
58 out_dev = dst->dev; 60 out_dev = dst->dev;
59 61 net = dev_net(out_dev);
60 /* Obtain the ttl */
61 if (dst->ops->family == AF_INET) {
62 ttl = ip_hdr(skb)->ttl;
63 rt = (struct rtable *)dst;
64 } else if (dst->ops->family == AF_INET6) {
65 ttl = ipv6_hdr(skb)->hop_limit;
66 rt6 = (struct rt6_info *)dst;
67 } else {
68 goto drop;
69 }
70 62
71 skb_orphan(skb); 63 skb_orphan(skb);
72 64
@@ -78,6 +70,38 @@ static int mpls_xmit(struct sk_buff *skb)
78 70
79 tun_encap_info = mpls_lwtunnel_encap(dst->lwtstate); 71 tun_encap_info = mpls_lwtunnel_encap(dst->lwtstate);
80 72
73 /* Obtain the ttl using the following set of rules.
74 *
75 * LWT ttl propagation setting:
76 * - disabled => use default TTL value from LWT
77 * - enabled => use TTL value from IPv4/IPv6 header
78 * - default =>
79 * Global ttl propagation setting:
80 * - disabled => use default TTL value from global setting
81 * - enabled => use TTL value from IPv4/IPv6 header
82 */
83 if (dst->ops->family == AF_INET) {
84 if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DISABLED)
85 ttl = tun_encap_info->default_ttl;
86 else if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
87 !net->mpls.ip_ttl_propagate)
88 ttl = net->mpls.default_ttl;
89 else
90 ttl = ip_hdr(skb)->ttl;
91 rt = (struct rtable *)dst;
92 } else if (dst->ops->family == AF_INET6) {
93 if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DISABLED)
94 ttl = tun_encap_info->default_ttl;
95 else if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
96 !net->mpls.ip_ttl_propagate)
97 ttl = net->mpls.default_ttl;
98 else
99 ttl = ipv6_hdr(skb)->hop_limit;
100 rt6 = (struct rt6_info *)dst;
101 } else {
102 goto drop;
103 }
104
81 /* Verify the destination can hold the packet */ 105 /* Verify the destination can hold the packet */
82 new_header_size = mpls_encap_size(tun_encap_info); 106 new_header_size = mpls_encap_size(tun_encap_info);
83 mtu = mpls_dev_mtu(out_dev); 107 mtu = mpls_dev_mtu(out_dev);
@@ -160,6 +184,17 @@ static int mpls_build_state(struct nlattr *nla,
160 &tun_encap_info->labels, tun_encap_info->label); 184 &tun_encap_info->labels, tun_encap_info->label);
161 if (ret) 185 if (ret)
162 goto errout; 186 goto errout;
187
188 tun_encap_info->ttl_propagate = MPLS_TTL_PROP_DEFAULT;
189
190 if (tb[MPLS_IPTUNNEL_TTL]) {
191 tun_encap_info->default_ttl = nla_get_u8(tb[MPLS_IPTUNNEL_TTL]);
192 /* TTL 0 implies propagate from IP header */
193 tun_encap_info->ttl_propagate = tun_encap_info->default_ttl ?
194 MPLS_TTL_PROP_DISABLED :
195 MPLS_TTL_PROP_ENABLED;
196 }
197
163 newts->type = LWTUNNEL_ENCAP_MPLS; 198 newts->type = LWTUNNEL_ENCAP_MPLS;
164 newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT; 199 newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT;
165 newts->headroom = mpls_encap_size(tun_encap_info); 200 newts->headroom = mpls_encap_size(tun_encap_info);
@@ -186,6 +221,10 @@ static int mpls_fill_encap_info(struct sk_buff *skb,
186 tun_encap_info->label)) 221 tun_encap_info->label))
187 goto nla_put_failure; 222 goto nla_put_failure;
188 223
224 if (tun_encap_info->ttl_propagate != MPLS_TTL_PROP_DEFAULT &&
225 nla_put_u8(skb, MPLS_IPTUNNEL_TTL, tun_encap_info->default_ttl))
226 goto nla_put_failure;
227
189 return 0; 228 return 0;
190 229
191nla_put_failure: 230nla_put_failure:
@@ -195,10 +234,16 @@ nla_put_failure:
195static int mpls_encap_nlsize(struct lwtunnel_state *lwtstate) 234static int mpls_encap_nlsize(struct lwtunnel_state *lwtstate)
196{ 235{
197 struct mpls_iptunnel_encap *tun_encap_info; 236 struct mpls_iptunnel_encap *tun_encap_info;
237 int nlsize;
198 238
199 tun_encap_info = mpls_lwtunnel_encap(lwtstate); 239 tun_encap_info = mpls_lwtunnel_encap(lwtstate);
200 240
201 return nla_total_size(tun_encap_info->labels * 4); 241 nlsize = nla_total_size(tun_encap_info->labels * 4);
242
243 if (tun_encap_info->ttl_propagate != MPLS_TTL_PROP_DEFAULT)
244 nlsize += nla_total_size(1);
245
246 return nlsize;
202} 247}
203 248
204static int mpls_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b) 249static int mpls_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
@@ -207,7 +252,9 @@ static int mpls_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
207 struct mpls_iptunnel_encap *b_hdr = mpls_lwtunnel_encap(b); 252 struct mpls_iptunnel_encap *b_hdr = mpls_lwtunnel_encap(b);
208 int l; 253 int l;
209 254
210 if (a_hdr->labels != b_hdr->labels) 255 if (a_hdr->labels != b_hdr->labels ||
256 a_hdr->ttl_propagate != b_hdr->ttl_propagate ||
257 a_hdr->default_ttl != b_hdr->default_ttl)
211 return 1; 258 return 1;
212 259
213 for (l = 0; l < MAX_NEW_LABELS; l++) 260 for (l = 0; l < MAX_NEW_LABELS; l++)
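[Editor's note] For label imposition, mpls_xmit() applies the precedence documented in its comment block: an explicit per-LWT setting wins, otherwise the global net.mpls.ip_ttl_propagate sysctl decides, and whenever propagation is off a fixed default TTL (from the encap or from net.mpls.default_ttl) is used instead of the IP header's value. The MPLS_IPTUNNEL_TTL attribute encodes this with "0 means propagate". A compact restatement follows; the enum and helper names are illustrative, not the kernel's.

/* Imposition-direction TTL selection as documented in mpls_xmit(). */
#include <stdio.h>

enum ttl_prop { TTL_PROP_DEFAULT, TTL_PROP_ENABLED, TTL_PROP_DISABLED };

static enum ttl_prop encap_setting_from_attr(unsigned int attr_ttl)
{
	/* 0 => propagate from the IP header, non-zero => fixed default TTL */
	return attr_ttl ? TTL_PROP_DISABLED : TTL_PROP_ENABLED;
}

static unsigned int imposition_ttl(enum ttl_prop lwt, int global_propagate,
				   unsigned int lwt_default_ttl,
				   unsigned int sysctl_default_ttl,
				   unsigned int ip_hdr_ttl)
{
	if (lwt == TTL_PROP_DISABLED)
		return lwt_default_ttl;
	if (lwt == TTL_PROP_DEFAULT && !global_propagate)
		return sysctl_default_ttl;
	return ip_hdr_ttl;	/* propagation enabled: take TTL from IP header */
}

int main(void)
{
	printf("%d\n", encap_setting_from_attr(0));			  /* ENABLED */
	printf("%u\n", imposition_ttl(TTL_PROP_DEFAULT, 1, 0, 255, 64));   /* 64  */
	printf("%u\n", imposition_ttl(TTL_PROP_DEFAULT, 0, 0, 255, 64));   /* 255 */
	printf("%u\n", imposition_ttl(TTL_PROP_DISABLED, 1, 10, 255, 64)); /* 10  */
	return 0;
}
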
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index e6a2753dff9e..3d2ac71a83ec 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -181,7 +181,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
181 181
182 if (!(cp->flags & IP_VS_CONN_F_HASHED)) { 182 if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
183 cp->flags |= IP_VS_CONN_F_HASHED; 183 cp->flags |= IP_VS_CONN_F_HASHED;
184 atomic_inc(&cp->refcnt); 184 refcount_inc(&cp->refcnt);
185 hlist_add_head_rcu(&cp->c_list, &ip_vs_conn_tab[hash]); 185 hlist_add_head_rcu(&cp->c_list, &ip_vs_conn_tab[hash]);
186 ret = 1; 186 ret = 1;
187 } else { 187 } else {
@@ -215,7 +215,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
215 if (cp->flags & IP_VS_CONN_F_HASHED) { 215 if (cp->flags & IP_VS_CONN_F_HASHED) {
216 hlist_del_rcu(&cp->c_list); 216 hlist_del_rcu(&cp->c_list);
217 cp->flags &= ~IP_VS_CONN_F_HASHED; 217 cp->flags &= ~IP_VS_CONN_F_HASHED;
218 atomic_dec(&cp->refcnt); 218 refcount_dec(&cp->refcnt);
219 ret = 1; 219 ret = 1;
220 } else 220 } else
221 ret = 0; 221 ret = 0;
@@ -242,13 +242,13 @@ static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp)
242 if (cp->flags & IP_VS_CONN_F_HASHED) { 242 if (cp->flags & IP_VS_CONN_F_HASHED) {
243 ret = false; 243 ret = false;
244 /* Decrease refcnt and unlink conn only if we are last user */ 244 /* Decrease refcnt and unlink conn only if we are last user */
245 if (atomic_cmpxchg(&cp->refcnt, 1, 0) == 1) { 245 if (refcount_dec_if_one(&cp->refcnt)) {
246 hlist_del_rcu(&cp->c_list); 246 hlist_del_rcu(&cp->c_list);
247 cp->flags &= ~IP_VS_CONN_F_HASHED; 247 cp->flags &= ~IP_VS_CONN_F_HASHED;
248 ret = true; 248 ret = true;
249 } 249 }
250 } else 250 } else
251 ret = atomic_read(&cp->refcnt) ? false : true; 251 ret = refcount_read(&cp->refcnt) ? false : true;
252 252
253 spin_unlock(&cp->lock); 253 spin_unlock(&cp->lock);
254 ct_write_unlock_bh(hash); 254 ct_write_unlock_bh(hash);
@@ -475,7 +475,7 @@ static void __ip_vs_conn_put_timer(struct ip_vs_conn *cp)
475void ip_vs_conn_put(struct ip_vs_conn *cp) 475void ip_vs_conn_put(struct ip_vs_conn *cp)
476{ 476{
477 if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) && 477 if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) &&
478 (atomic_read(&cp->refcnt) == 1) && 478 (refcount_read(&cp->refcnt) == 1) &&
479 !timer_pending(&cp->timer)) 479 !timer_pending(&cp->timer))
480 /* expire connection immediately */ 480 /* expire connection immediately */
481 __ip_vs_conn_put_notimer(cp); 481 __ip_vs_conn_put_notimer(cp);
@@ -617,8 +617,8 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
617 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), 617 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
618 IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport), 618 IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
619 ip_vs_fwd_tag(cp), cp->state, 619 ip_vs_fwd_tag(cp), cp->state,
620 cp->flags, atomic_read(&cp->refcnt), 620 cp->flags, refcount_read(&cp->refcnt),
621 atomic_read(&dest->refcnt)); 621 refcount_read(&dest->refcnt));
622 622
623 /* Update the connection counters */ 623 /* Update the connection counters */
624 if (!(flags & IP_VS_CONN_F_TEMPLATE)) { 624 if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
@@ -714,8 +714,8 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
714 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), 714 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
715 IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport), 715 IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
716 ip_vs_fwd_tag(cp), cp->state, 716 ip_vs_fwd_tag(cp), cp->state,
717 cp->flags, atomic_read(&cp->refcnt), 717 cp->flags, refcount_read(&cp->refcnt),
718 atomic_read(&dest->refcnt)); 718 refcount_read(&dest->refcnt));
719 719
720 /* Update the connection counters */ 720 /* Update the connection counters */
721 if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) { 721 if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
@@ -863,10 +863,10 @@ static void ip_vs_conn_expire(unsigned long data)
863 863
864 expire_later: 864 expire_later:
865 IP_VS_DBG(7, "delayed: conn->refcnt=%d conn->n_control=%d\n", 865 IP_VS_DBG(7, "delayed: conn->refcnt=%d conn->n_control=%d\n",
866 atomic_read(&cp->refcnt), 866 refcount_read(&cp->refcnt),
867 atomic_read(&cp->n_control)); 867 atomic_read(&cp->n_control));
868 868
869 atomic_inc(&cp->refcnt); 869 refcount_inc(&cp->refcnt);
870 cp->timeout = 60*HZ; 870 cp->timeout = 60*HZ;
871 871
872 if (ipvs->sync_state & IP_VS_STATE_MASTER) 872 if (ipvs->sync_state & IP_VS_STATE_MASTER)
@@ -941,7 +941,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
941 * it in the table, so that other thread run ip_vs_random_dropentry 941 * it in the table, so that other thread run ip_vs_random_dropentry
942 * but cannot drop this entry. 942 * but cannot drop this entry.
943 */ 943 */
944 atomic_set(&cp->refcnt, 1); 944 refcount_set(&cp->refcnt, 1);
945 945
946 cp->control = NULL; 946 cp->control = NULL;
947 atomic_set(&cp->n_control, 0); 947 atomic_set(&cp->n_control, 0);
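The ip_vs_conn.c hunks are a mechanical conversion of cp->refcnt from atomic_t to refcount_t, which traps underflow and overflow instead of silently wrapping. The mapping used throughout the file (sketch of the pattern, kernel refcount API from <linux/refcount.h>):

	refcount_set(&cp->refcnt, 1);      /* was atomic_set(&cp->refcnt, 1) */
	refcount_inc(&cp->refcnt);         /* was atomic_inc(&cp->refcnt) */
	refcount_dec(&cp->refcnt);         /* was atomic_dec(&cp->refcnt) */
	refcount_read(&cp->refcnt);        /* was atomic_read(&cp->refcnt) */
	refcount_dec_if_one(&cp->refcnt);  /* was atomic_cmpxchg(&cp->refcnt, 1, 0) == 1 */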
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index db40050f8785..b4a746d0e39b 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -542,7 +542,7 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
542 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), 542 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
543 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), 543 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
544 IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport), 544 IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
545 cp->flags, atomic_read(&cp->refcnt)); 545 cp->flags, refcount_read(&cp->refcnt));
546 546
547 ip_vs_conn_stats(cp, svc); 547 ip_vs_conn_stats(cp, svc);
548 return cp; 548 return cp;
@@ -1193,7 +1193,7 @@ struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
1193 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), 1193 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
1194 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), 1194 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
1195 IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport), 1195 IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
1196 cp->flags, atomic_read(&cp->refcnt)); 1196 cp->flags, refcount_read(&cp->refcnt));
1197 LeaveFunction(12); 1197 LeaveFunction(12);
1198 return cp; 1198 return cp;
1199} 1199}
@@ -2231,8 +2231,6 @@ static int __net_init __ip_vs_init(struct net *net)
2231 if (ip_vs_sync_net_init(ipvs) < 0) 2231 if (ip_vs_sync_net_init(ipvs) < 0)
2232 goto sync_fail; 2232 goto sync_fail;
2233 2233
2234 printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
2235 sizeof(struct netns_ipvs), ipvs->gen);
2236 return 0; 2234 return 0;
2237/* 2235/*
2238 * Error handling 2236 * Error handling
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 5aeb0dde6ccc..541aa7694775 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -699,7 +699,7 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, int dest_af,
699 dest->vfwmark, 699 dest->vfwmark,
700 IP_VS_DBG_ADDR(dest->af, &dest->addr), 700 IP_VS_DBG_ADDR(dest->af, &dest->addr),
701 ntohs(dest->port), 701 ntohs(dest->port),
702 atomic_read(&dest->refcnt)); 702 refcount_read(&dest->refcnt));
703 if (dest->af == dest_af && 703 if (dest->af == dest_af &&
704 ip_vs_addr_equal(dest_af, &dest->addr, daddr) && 704 ip_vs_addr_equal(dest_af, &dest->addr, daddr) &&
705 dest->port == dport && 705 dest->port == dport &&
@@ -934,7 +934,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
934 atomic_set(&dest->activeconns, 0); 934 atomic_set(&dest->activeconns, 0);
935 atomic_set(&dest->inactconns, 0); 935 atomic_set(&dest->inactconns, 0);
936 atomic_set(&dest->persistconns, 0); 936 atomic_set(&dest->persistconns, 0);
937 atomic_set(&dest->refcnt, 1); 937 refcount_set(&dest->refcnt, 1);
938 938
939 INIT_HLIST_NODE(&dest->d_list); 939 INIT_HLIST_NODE(&dest->d_list);
940 spin_lock_init(&dest->dst_lock); 940 spin_lock_init(&dest->dst_lock);
@@ -998,7 +998,7 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
998 IP_VS_DBG_BUF(3, "Get destination %s:%u from trash, " 998 IP_VS_DBG_BUF(3, "Get destination %s:%u from trash, "
999 "dest->refcnt=%d, service %u/%s:%u\n", 999 "dest->refcnt=%d, service %u/%s:%u\n",
1000 IP_VS_DBG_ADDR(udest->af, &daddr), ntohs(dport), 1000 IP_VS_DBG_ADDR(udest->af, &daddr), ntohs(dport),
1001 atomic_read(&dest->refcnt), 1001 refcount_read(&dest->refcnt),
1002 dest->vfwmark, 1002 dest->vfwmark,
1003 IP_VS_DBG_ADDR(svc->af, &dest->vaddr), 1003 IP_VS_DBG_ADDR(svc->af, &dest->vaddr),
1004 ntohs(dest->vport)); 1004 ntohs(dest->vport));
@@ -1074,7 +1074,7 @@ static void __ip_vs_del_dest(struct netns_ipvs *ipvs, struct ip_vs_dest *dest,
1074 spin_lock_bh(&ipvs->dest_trash_lock); 1074 spin_lock_bh(&ipvs->dest_trash_lock);
1075 IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n", 1075 IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n",
1076 IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), 1076 IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
1077 atomic_read(&dest->refcnt)); 1077 refcount_read(&dest->refcnt));
1078 if (list_empty(&ipvs->dest_trash) && !cleanup) 1078 if (list_empty(&ipvs->dest_trash) && !cleanup)
1079 mod_timer(&ipvs->dest_trash_timer, 1079 mod_timer(&ipvs->dest_trash_timer,
1080 jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1)); 1080 jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1));
@@ -1157,7 +1157,7 @@ static void ip_vs_dest_trash_expire(unsigned long data)
1157 1157
1158 spin_lock(&ipvs->dest_trash_lock); 1158 spin_lock(&ipvs->dest_trash_lock);
1159 list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) { 1159 list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) {
1160 if (atomic_read(&dest->refcnt) > 1) 1160 if (refcount_read(&dest->refcnt) > 1)
1161 continue; 1161 continue;
1162 if (dest->idle_start) { 1162 if (dest->idle_start) {
1163 if (time_before(now, dest->idle_start + 1163 if (time_before(now, dest->idle_start +
@@ -1545,7 +1545,7 @@ ip_vs_forget_dev(struct ip_vs_dest *dest, struct net_device *dev)
1545 dev->name, 1545 dev->name,
1546 IP_VS_DBG_ADDR(dest->af, &dest->addr), 1546 IP_VS_DBG_ADDR(dest->af, &dest->addr),
1547 ntohs(dest->port), 1547 ntohs(dest->port),
1548 atomic_read(&dest->refcnt)); 1548 refcount_read(&dest->refcnt));
1549 __ip_vs_dst_cache_reset(dest); 1549 __ip_vs_dst_cache_reset(dest);
1550 } 1550 }
1551 spin_unlock_bh(&dest->dst_lock); 1551 spin_unlock_bh(&dest->dst_lock);
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 5824927cf8e0..b6aa4a970c6e 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -448,7 +448,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
448 IP_VS_DBG_ADDR(least->af, &least->addr), 448 IP_VS_DBG_ADDR(least->af, &least->addr),
449 ntohs(least->port), 449 ntohs(least->port),
450 atomic_read(&least->activeconns), 450 atomic_read(&least->activeconns),
451 atomic_read(&least->refcnt), 451 refcount_read(&least->refcnt),
452 atomic_read(&least->weight), loh); 452 atomic_read(&least->weight), loh);
453 453
454 return least; 454 return least;
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 703f11877bee..c13ff575f9f7 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -204,7 +204,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
204 IP_VS_DBG_ADDR(least->af, &least->addr), 204 IP_VS_DBG_ADDR(least->af, &least->addr),
205 ntohs(least->port), 205 ntohs(least->port),
206 atomic_read(&least->activeconns), 206 atomic_read(&least->activeconns),
207 atomic_read(&least->refcnt), 207 refcount_read(&least->refcnt),
208 atomic_read(&least->weight), loh); 208 atomic_read(&least->weight), loh);
209 return least; 209 return least;
210} 210}
@@ -249,7 +249,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
249 __func__, 249 __func__,
250 IP_VS_DBG_ADDR(most->af, &most->addr), ntohs(most->port), 250 IP_VS_DBG_ADDR(most->af, &most->addr), ntohs(most->port),
251 atomic_read(&most->activeconns), 251 atomic_read(&most->activeconns),
252 atomic_read(&most->refcnt), 252 refcount_read(&most->refcnt),
253 atomic_read(&most->weight), moh); 253 atomic_read(&most->weight), moh);
254 return most; 254 return most;
255} 255}
@@ -612,7 +612,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
612 IP_VS_DBG_ADDR(least->af, &least->addr), 612 IP_VS_DBG_ADDR(least->af, &least->addr),
613 ntohs(least->port), 613 ntohs(least->port),
614 atomic_read(&least->activeconns), 614 atomic_read(&least->activeconns),
615 atomic_read(&least->refcnt), 615 refcount_read(&least->refcnt),
616 atomic_read(&least->weight), loh); 616 atomic_read(&least->weight), loh);
617 617
618 return least; 618 return least;
diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c
index a8b63401e773..7d9d4ac596ca 100644
--- a/net/netfilter/ipvs/ip_vs_nq.c
+++ b/net/netfilter/ipvs/ip_vs_nq.c
@@ -110,7 +110,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
110 IP_VS_DBG_ADDR(least->af, &least->addr), 110 IP_VS_DBG_ADDR(least->af, &least->addr),
111 ntohs(least->port), 111 ntohs(least->port),
112 atomic_read(&least->activeconns), 112 atomic_read(&least->activeconns),
113 atomic_read(&least->refcnt), 113 refcount_read(&least->refcnt),
114 atomic_read(&least->weight), loh); 114 atomic_read(&least->weight), loh);
115 115
116 return least; 116 return least;
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index d952d67f904d..56f8e4b204ff 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -447,7 +447,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
447 ntohs(cp->cport), 447 ntohs(cp->cport),
448 sctp_state_name(cp->state), 448 sctp_state_name(cp->state),
449 sctp_state_name(next_state), 449 sctp_state_name(next_state),
450 atomic_read(&cp->refcnt)); 450 refcount_read(&cp->refcnt));
451 if (dest) { 451 if (dest) {
452 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && 452 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
453 (next_state != IP_VS_SCTP_S_ESTABLISHED)) { 453 (next_state != IP_VS_SCTP_S_ESTABLISHED)) {
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index 5117bcb7d2f0..12dc8d5bc37d 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -557,7 +557,7 @@ set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
557 ntohs(cp->cport), 557 ntohs(cp->cport),
558 tcp_state_name(cp->state), 558 tcp_state_name(cp->state),
559 tcp_state_name(new_state), 559 tcp_state_name(new_state),
560 atomic_read(&cp->refcnt)); 560 refcount_read(&cp->refcnt));
561 561
562 if (dest) { 562 if (dest) {
563 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && 563 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
diff --git a/net/netfilter/ipvs/ip_vs_rr.c b/net/netfilter/ipvs/ip_vs_rr.c
index 58bacfc461ee..ee0530d14c5f 100644
--- a/net/netfilter/ipvs/ip_vs_rr.c
+++ b/net/netfilter/ipvs/ip_vs_rr.c
@@ -97,7 +97,7 @@ stop:
97 "activeconns %d refcnt %d weight %d\n", 97 "activeconns %d refcnt %d weight %d\n",
98 IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), 98 IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
99 atomic_read(&dest->activeconns), 99 atomic_read(&dest->activeconns),
100 atomic_read(&dest->refcnt), atomic_read(&dest->weight)); 100 refcount_read(&dest->refcnt), atomic_read(&dest->weight));
101 101
102 return dest; 102 return dest;
103} 103}
diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c
index f8e2d00f528b..ab23cf203437 100644
--- a/net/netfilter/ipvs/ip_vs_sed.c
+++ b/net/netfilter/ipvs/ip_vs_sed.c
@@ -111,7 +111,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
111 IP_VS_DBG_ADDR(least->af, &least->addr), 111 IP_VS_DBG_ADDR(least->af, &least->addr),
112 ntohs(least->port), 112 ntohs(least->port),
113 atomic_read(&least->activeconns), 113 atomic_read(&least->activeconns),
114 atomic_read(&least->refcnt), 114 refcount_read(&least->refcnt),
115 atomic_read(&least->weight), loh); 115 atomic_read(&least->weight), loh);
116 116
117 return least; 117 return least;
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c
index 6b366fd90554..6add39e0ec20 100644
--- a/net/netfilter/ipvs/ip_vs_wlc.c
+++ b/net/netfilter/ipvs/ip_vs_wlc.c
@@ -83,7 +83,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
83 IP_VS_DBG_ADDR(least->af, &least->addr), 83 IP_VS_DBG_ADDR(least->af, &least->addr),
84 ntohs(least->port), 84 ntohs(least->port),
85 atomic_read(&least->activeconns), 85 atomic_read(&least->activeconns),
86 atomic_read(&least->refcnt), 86 refcount_read(&least->refcnt),
87 atomic_read(&least->weight), loh); 87 atomic_read(&least->weight), loh);
88 88
89 return least; 89 return least;
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 17e6d4406ca7..62258dd457ac 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -218,7 +218,7 @@ found:
218 "activeconns %d refcnt %d weight %d\n", 218 "activeconns %d refcnt %d weight %d\n",
219 IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), 219 IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
220 atomic_read(&dest->activeconns), 220 atomic_read(&dest->activeconns),
221 atomic_read(&dest->refcnt), 221 refcount_read(&dest->refcnt),
222 atomic_read(&dest->weight)); 222 atomic_read(&dest->weight));
223 mark->cl = dest; 223 mark->cl = dest;
224 224
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 071b97fcbefb..b0f2e8e65084 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1129,7 +1129,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_free);
1129 1129
1130/* Allocate a new conntrack: we return -ENOMEM if classification 1130/* Allocate a new conntrack: we return -ENOMEM if classification
1131 failed due to stress. Otherwise it really is unclassifiable. */ 1131 failed due to stress. Otherwise it really is unclassifiable. */
1132static struct nf_conntrack_tuple_hash * 1132static noinline struct nf_conntrack_tuple_hash *
1133init_conntrack(struct net *net, struct nf_conn *tmpl, 1133init_conntrack(struct net *net, struct nf_conn *tmpl,
1134 const struct nf_conntrack_tuple *tuple, 1134 const struct nf_conntrack_tuple *tuple,
1135 struct nf_conntrack_l3proto *l3proto, 1135 struct nf_conntrack_l3proto *l3proto,
@@ -1237,21 +1237,20 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
1237 return &ct->tuplehash[IP_CT_DIR_ORIGINAL]; 1237 return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
1238} 1238}
1239 1239
1240/* On success, returns conntrack ptr, sets skb->_nfct | ctinfo */ 1240/* On success, returns 0, sets skb->_nfct | ctinfo */
1241static inline struct nf_conn * 1241static int
1242resolve_normal_ct(struct net *net, struct nf_conn *tmpl, 1242resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
1243 struct sk_buff *skb, 1243 struct sk_buff *skb,
1244 unsigned int dataoff, 1244 unsigned int dataoff,
1245 u_int16_t l3num, 1245 u_int16_t l3num,
1246 u_int8_t protonum, 1246 u_int8_t protonum,
1247 struct nf_conntrack_l3proto *l3proto, 1247 struct nf_conntrack_l3proto *l3proto,
1248 struct nf_conntrack_l4proto *l4proto, 1248 struct nf_conntrack_l4proto *l4proto)
1249 int *set_reply,
1250 enum ip_conntrack_info *ctinfo)
1251{ 1249{
1252 const struct nf_conntrack_zone *zone; 1250 const struct nf_conntrack_zone *zone;
1253 struct nf_conntrack_tuple tuple; 1251 struct nf_conntrack_tuple tuple;
1254 struct nf_conntrack_tuple_hash *h; 1252 struct nf_conntrack_tuple_hash *h;
1253 enum ip_conntrack_info ctinfo;
1255 struct nf_conntrack_zone tmp; 1254 struct nf_conntrack_zone tmp;
1256 struct nf_conn *ct; 1255 struct nf_conn *ct;
1257 u32 hash; 1256 u32 hash;
@@ -1260,7 +1259,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
1260 dataoff, l3num, protonum, net, &tuple, l3proto, 1259 dataoff, l3num, protonum, net, &tuple, l3proto,
1261 l4proto)) { 1260 l4proto)) {
1262 pr_debug("Can't get tuple\n"); 1261 pr_debug("Can't get tuple\n");
1263 return NULL; 1262 return 0;
1264 } 1263 }
1265 1264
1266 /* look for tuple match */ 1265 /* look for tuple match */
@@ -1271,33 +1270,30 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
1271 h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto, 1270 h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
1272 skb, dataoff, hash); 1271 skb, dataoff, hash);
1273 if (!h) 1272 if (!h)
1274 return NULL; 1273 return 0;
1275 if (IS_ERR(h)) 1274 if (IS_ERR(h))
1276 return (void *)h; 1275 return PTR_ERR(h);
1277 } 1276 }
1278 ct = nf_ct_tuplehash_to_ctrack(h); 1277 ct = nf_ct_tuplehash_to_ctrack(h);
1279 1278
1280 /* It exists; we have (non-exclusive) reference. */ 1279 /* It exists; we have (non-exclusive) reference. */
1281 if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) { 1280 if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
1282 *ctinfo = IP_CT_ESTABLISHED_REPLY; 1281 ctinfo = IP_CT_ESTABLISHED_REPLY;
1283 /* Please set reply bit if this packet OK */
1284 *set_reply = 1;
1285 } else { 1282 } else {
1286 /* Once we've had two way comms, always ESTABLISHED. */ 1283 /* Once we've had two way comms, always ESTABLISHED. */
1287 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { 1284 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
1288 pr_debug("normal packet for %p\n", ct); 1285 pr_debug("normal packet for %p\n", ct);
1289 *ctinfo = IP_CT_ESTABLISHED; 1286 ctinfo = IP_CT_ESTABLISHED;
1290 } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) { 1287 } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
1291 pr_debug("related packet for %p\n", ct); 1288 pr_debug("related packet for %p\n", ct);
1292 *ctinfo = IP_CT_RELATED; 1289 ctinfo = IP_CT_RELATED;
1293 } else { 1290 } else {
1294 pr_debug("new packet for %p\n", ct); 1291 pr_debug("new packet for %p\n", ct);
1295 *ctinfo = IP_CT_NEW; 1292 ctinfo = IP_CT_NEW;
1296 } 1293 }
1297 *set_reply = 0;
1298 } 1294 }
1299 nf_ct_set(skb, ct, *ctinfo); 1295 nf_ct_set(skb, ct, ctinfo);
1300 return ct; 1296 return 0;
1301} 1297}
1302 1298
1303unsigned int 1299unsigned int
@@ -1311,7 +1307,6 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
1311 unsigned int *timeouts; 1307 unsigned int *timeouts;
1312 unsigned int dataoff; 1308 unsigned int dataoff;
1313 u_int8_t protonum; 1309 u_int8_t protonum;
1314 int set_reply = 0;
1315 int ret; 1310 int ret;
1316 1311
1317 tmpl = nf_ct_get(skb, &ctinfo); 1312 tmpl = nf_ct_get(skb, &ctinfo);
@@ -1354,23 +1349,22 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
1354 goto out; 1349 goto out;
1355 } 1350 }
1356repeat: 1351repeat:
1357 ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, 1352 ret = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
1358 l3proto, l4proto, &set_reply, &ctinfo); 1353 l3proto, l4proto);
1359 if (!ct) { 1354 if (ret < 0) {
1360 /* Not valid part of a connection */
1361 NF_CT_STAT_INC_ATOMIC(net, invalid);
1362 ret = NF_ACCEPT;
1363 goto out;
1364 }
1365
1366 if (IS_ERR(ct)) {
1367 /* Too stressed to deal. */ 1355 /* Too stressed to deal. */
1368 NF_CT_STAT_INC_ATOMIC(net, drop); 1356 NF_CT_STAT_INC_ATOMIC(net, drop);
1369 ret = NF_DROP; 1357 ret = NF_DROP;
1370 goto out; 1358 goto out;
1371 } 1359 }
1372 1360
1373 NF_CT_ASSERT(skb_nfct(skb)); 1361 ct = nf_ct_get(skb, &ctinfo);
1362 if (!ct) {
1363 /* Not valid part of a connection */
1364 NF_CT_STAT_INC_ATOMIC(net, invalid);
1365 ret = NF_ACCEPT;
1366 goto out;
1367 }
1374 1368
1375 /* Decide what timeout policy we want to apply to this flow. */ 1369 /* Decide what timeout policy we want to apply to this flow. */
1376 timeouts = nf_ct_timeout_lookup(net, ct, l4proto); 1370 timeouts = nf_ct_timeout_lookup(net, ct, l4proto);
@@ -1395,7 +1389,8 @@ repeat:
1395 goto out; 1389 goto out;
1396 } 1390 }
1397 1391
1398 if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) 1392 if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
1393 !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
1399 nf_conntrack_event_cache(IPCT_REPLY, ct); 1394 nf_conntrack_event_cache(IPCT_REPLY, ct);
1400out: 1395out:
1401 if (tmpl) 1396 if (tmpl)
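resolve_normal_ct() now returns an int rather than a conntrack pointer: 0 on success or when the packet is not part of a valid connection, a negative errno when allocation fails, with the conntrack and ctinfo attached to the skb via nf_ct_set(). The caller re-reads both from the skb, and the old set_reply flag is replaced by checking ctinfo directly. Condensed caller side (sketch based on the nf_conntrack_in() hunk; the real code jumps to the out label instead of returning):

	ret = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
				l3proto, l4proto);
	if (ret < 0) {			/* too stressed to allocate a conntrack */
		NF_CT_STAT_INC_ATOMIC(net, drop);
		return NF_DROP;
	}

	ct = nf_ct_get(skb, &ctinfo);	/* set by resolve_normal_ct() */
	if (!ct) {			/* not a valid part of a connection */
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		return NF_ACCEPT;
	}

	/* reply direction is now derived from ctinfo, not a set_reply flag */
	if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
	    !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_REPLY, ct);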
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 4b2e1fb28bb4..cb29e598605f 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -133,7 +133,7 @@ nf_ct_expect_find_get(struct net *net,
133 133
134 rcu_read_lock(); 134 rcu_read_lock();
135 i = __nf_ct_expect_find(net, zone, tuple); 135 i = __nf_ct_expect_find(net, zone, tuple);
136 if (i && !atomic_inc_not_zero(&i->use)) 136 if (i && !refcount_inc_not_zero(&i->use))
137 i = NULL; 137 i = NULL;
138 rcu_read_unlock(); 138 rcu_read_unlock();
139 139
@@ -186,7 +186,7 @@ nf_ct_find_expectation(struct net *net,
186 return NULL; 186 return NULL;
187 187
188 if (exp->flags & NF_CT_EXPECT_PERMANENT) { 188 if (exp->flags & NF_CT_EXPECT_PERMANENT) {
189 atomic_inc(&exp->use); 189 refcount_inc(&exp->use);
190 return exp; 190 return exp;
191 } else if (del_timer(&exp->timeout)) { 191 } else if (del_timer(&exp->timeout)) {
192 nf_ct_unlink_expect(exp); 192 nf_ct_unlink_expect(exp);
@@ -275,7 +275,7 @@ struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
275 return NULL; 275 return NULL;
276 276
277 new->master = me; 277 new->master = me;
278 atomic_set(&new->use, 1); 278 refcount_set(&new->use, 1);
279 return new; 279 return new;
280} 280}
281EXPORT_SYMBOL_GPL(nf_ct_expect_alloc); 281EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
@@ -348,7 +348,7 @@ static void nf_ct_expect_free_rcu(struct rcu_head *head)
348 348
349void nf_ct_expect_put(struct nf_conntrack_expect *exp) 349void nf_ct_expect_put(struct nf_conntrack_expect *exp)
350{ 350{
351 if (atomic_dec_and_test(&exp->use)) 351 if (refcount_dec_and_test(&exp->use))
352 call_rcu(&exp->rcu, nf_ct_expect_free_rcu); 352 call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
353} 353}
354EXPORT_SYMBOL_GPL(nf_ct_expect_put); 354EXPORT_SYMBOL_GPL(nf_ct_expect_put);
@@ -361,7 +361,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
361 unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple); 361 unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple);
362 362
363 /* two references : one for hash insert, one for the timer */ 363 /* two references : one for hash insert, one for the timer */
364 atomic_add(2, &exp->use); 364 refcount_add(2, &exp->use);
365 365
366 hlist_add_head(&exp->lnode, &master_help->expectations); 366 hlist_add_head(&exp->lnode, &master_help->expectations);
367 master_help->expecting[exp->class]++; 367 master_help->expecting[exp->class]++;
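The expectation use count also moves to refcount_t; the RCU lookup path keeps the usual take-a-reference-only-if-still-live idiom, which refcount_inc_not_zero() expresses directly. Sketch of that idiom as used in nf_ct_expect_find_get():

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
	if (i && !refcount_inc_not_zero(&i->use))
		i = NULL;		/* already being freed, treat as a miss */
	rcu_read_unlock();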
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6806b5e73567..d49cc1e03c5b 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2693,7 +2693,7 @@ restart:
2693 cb->nlh->nlmsg_seq, 2693 cb->nlh->nlmsg_seq,
2694 IPCTNL_MSG_EXP_NEW, 2694 IPCTNL_MSG_EXP_NEW,
2695 exp) < 0) { 2695 exp) < 0) {
2696 if (!atomic_inc_not_zero(&exp->use)) 2696 if (!refcount_inc_not_zero(&exp->use))
2697 continue; 2697 continue;
2698 cb->args[1] = (unsigned long)exp; 2698 cb->args[1] = (unsigned long)exp;
2699 goto out; 2699 goto out;
@@ -2739,7 +2739,7 @@ restart:
2739 cb->nlh->nlmsg_seq, 2739 cb->nlh->nlmsg_seq,
2740 IPCTNL_MSG_EXP_NEW, 2740 IPCTNL_MSG_EXP_NEW,
2741 exp) < 0) { 2741 exp) < 0) {
2742 if (!atomic_inc_not_zero(&exp->use)) 2742 if (!refcount_inc_not_zero(&exp->use))
2743 continue; 2743 continue;
2744 cb->args[1] = (unsigned long)exp; 2744 cb->args[1] = (unsigned long)exp;
2745 goto out; 2745 goto out;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 5e0ccfd5bb37..12cc5218de96 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1772,8 +1772,19 @@ static int nf_tables_newexpr(const struct nft_ctx *ctx,
1772 goto err1; 1772 goto err1;
1773 } 1773 }
1774 1774
1775 if (ops->validate) {
1776 const struct nft_data *data = NULL;
1777
1778 err = ops->validate(ctx, expr, &data);
1779 if (err < 0)
1780 goto err2;
1781 }
1782
1775 return 0; 1783 return 0;
1776 1784
1785err2:
1786 if (ops->destroy)
1787 ops->destroy(ctx, expr);
1777err1: 1788err1:
1778 expr->ops = NULL; 1789 expr->ops = NULL;
1779 return err; 1790 return err;
@@ -2523,8 +2534,8 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net,
2523 return 0; 2534 return 0;
2524} 2535}
2525 2536
2526struct nft_set *nf_tables_set_lookup(const struct nft_table *table, 2537static struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
2527 const struct nlattr *nla, u8 genmask) 2538 const struct nlattr *nla, u8 genmask)
2528{ 2539{
2529 struct nft_set *set; 2540 struct nft_set *set;
2530 2541
@@ -2538,11 +2549,10 @@ struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
2538 } 2549 }
2539 return ERR_PTR(-ENOENT); 2550 return ERR_PTR(-ENOENT);
2540} 2551}
2541EXPORT_SYMBOL_GPL(nf_tables_set_lookup);
2542 2552
2543struct nft_set *nf_tables_set_lookup_byid(const struct net *net, 2553static struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
2544 const struct nlattr *nla, 2554 const struct nlattr *nla,
2545 u8 genmask) 2555 u8 genmask)
2546{ 2556{
2547 struct nft_trans *trans; 2557 struct nft_trans *trans;
2548 u32 id = ntohl(nla_get_be32(nla)); 2558 u32 id = ntohl(nla_get_be32(nla));
@@ -2557,7 +2567,25 @@ struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
2557 } 2567 }
2558 return ERR_PTR(-ENOENT); 2568 return ERR_PTR(-ENOENT);
2559} 2569}
2560EXPORT_SYMBOL_GPL(nf_tables_set_lookup_byid); 2570
2571struct nft_set *nft_set_lookup(const struct net *net,
2572 const struct nft_table *table,
2573 const struct nlattr *nla_set_name,
2574 const struct nlattr *nla_set_id,
2575 u8 genmask)
2576{
2577 struct nft_set *set;
2578
2579 set = nf_tables_set_lookup(table, nla_set_name, genmask);
2580 if (IS_ERR(set)) {
2581 if (!nla_set_id)
2582 return set;
2583
2584 set = nf_tables_set_lookup_byid(net, nla_set_id, genmask);
2585 }
2586 return set;
2587}
2588EXPORT_SYMBOL_GPL(nft_set_lookup);
2561 2589
2562static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set, 2590static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
2563 const char *name) 2591 const char *name)
@@ -4067,7 +4095,8 @@ static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = {
4067 [NFTA_OBJ_DATA] = { .type = NLA_NESTED }, 4095 [NFTA_OBJ_DATA] = { .type = NLA_NESTED },
4068}; 4096};
4069 4097
4070static struct nft_object *nft_obj_init(const struct nft_object_type *type, 4098static struct nft_object *nft_obj_init(const struct nft_ctx *ctx,
4099 const struct nft_object_type *type,
4071 const struct nlattr *attr) 4100 const struct nlattr *attr)
4072{ 4101{
4073 struct nlattr *tb[type->maxattr + 1]; 4102 struct nlattr *tb[type->maxattr + 1];
@@ -4087,7 +4116,7 @@ static struct nft_object *nft_obj_init(const struct nft_object_type *type,
4087 if (obj == NULL) 4116 if (obj == NULL)
4088 goto err1; 4117 goto err1;
4089 4118
4090 err = type->init((const struct nlattr * const *)tb, obj); 4119 err = type->init(ctx, (const struct nlattr * const *)tb, obj);
4091 if (err < 0) 4120 if (err < 0)
4092 goto err2; 4121 goto err2;
4093 4122
@@ -4195,7 +4224,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
4195 if (IS_ERR(type)) 4224 if (IS_ERR(type))
4196 return PTR_ERR(type); 4225 return PTR_ERR(type);
4197 4226
4198 obj = nft_obj_init(type, nla[NFTA_OBJ_DATA]); 4227 obj = nft_obj_init(&ctx, type, nla[NFTA_OBJ_DATA]);
4199 if (IS_ERR(obj)) { 4228 if (IS_ERR(obj)) {
4200 err = PTR_ERR(obj); 4229 err = PTR_ERR(obj);
4201 goto err1; 4230 goto err1;
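nf_tables_set_lookup() and nf_tables_set_lookup_byid() are no longer exported; external callers go through the new nft_set_lookup() wrapper, which looks the set up by name and, if that fails and a set id attribute was supplied, retries by transaction id. A caller that used to open-code the two-step lookup now reduces to (sketch; attribute names as in the nft_dynset.c hunk further down):

	set = nft_set_lookup(ctx->net, ctx->table, tb[NFTA_DYNSET_SET_NAME],
			     tb[NFTA_DYNSET_SET_ID], genmask);
	if (IS_ERR(set))
		return PTR_ERR(set);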
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index d44d89b56127..c86da174a5fc 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -11,6 +11,7 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/skbuff.h> 12#include <linux/skbuff.h>
13#include <linux/atomic.h> 13#include <linux/atomic.h>
14#include <linux/refcount.h>
14#include <linux/netlink.h> 15#include <linux/netlink.h>
15#include <linux/rculist.h> 16#include <linux/rculist.h>
16#include <linux/slab.h> 17#include <linux/slab.h>
@@ -32,7 +33,7 @@ struct nf_acct {
32 atomic64_t bytes; 33 atomic64_t bytes;
33 unsigned long flags; 34 unsigned long flags;
34 struct list_head head; 35 struct list_head head;
35 atomic_t refcnt; 36 refcount_t refcnt;
36 char name[NFACCT_NAME_MAX]; 37 char name[NFACCT_NAME_MAX];
37 struct rcu_head rcu_head; 38 struct rcu_head rcu_head;
38 char data[0]; 39 char data[0];
@@ -123,7 +124,7 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl,
123 atomic64_set(&nfacct->pkts, 124 atomic64_set(&nfacct->pkts,
124 be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS]))); 125 be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS])));
125 } 126 }
126 atomic_set(&nfacct->refcnt, 1); 127 refcount_set(&nfacct->refcnt, 1);
127 list_add_tail_rcu(&nfacct->head, &net->nfnl_acct_list); 128 list_add_tail_rcu(&nfacct->head, &net->nfnl_acct_list);
128 return 0; 129 return 0;
129} 130}
@@ -166,7 +167,7 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
166 NFACCT_PAD) || 167 NFACCT_PAD) ||
167 nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes), 168 nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes),
168 NFACCT_PAD) || 169 NFACCT_PAD) ||
169 nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt)))) 170 nla_put_be32(skb, NFACCT_USE, htonl(refcount_read(&acct->refcnt))))
170 goto nla_put_failure; 171 goto nla_put_failure;
171 if (acct->flags & NFACCT_F_QUOTA) { 172 if (acct->flags & NFACCT_F_QUOTA) {
172 u64 *quota = (u64 *)acct->data; 173 u64 *quota = (u64 *)acct->data;
@@ -329,7 +330,7 @@ static int nfnl_acct_try_del(struct nf_acct *cur)
329 /* We want to avoid races with nfnl_acct_put. So only when the current 330 /* We want to avoid races with nfnl_acct_put. So only when the current
330 * refcnt is 1, we decrease it to 0. 331 * refcnt is 1, we decrease it to 0.
331 */ 332 */
332 if (atomic_cmpxchg(&cur->refcnt, 1, 0) == 1) { 333 if (refcount_dec_if_one(&cur->refcnt)) {
333 /* We are protected by nfnl mutex. */ 334 /* We are protected by nfnl mutex. */
334 list_del_rcu(&cur->head); 335 list_del_rcu(&cur->head);
335 kfree_rcu(cur, rcu_head); 336 kfree_rcu(cur, rcu_head);
@@ -413,7 +414,7 @@ struct nf_acct *nfnl_acct_find_get(struct net *net, const char *acct_name)
413 if (!try_module_get(THIS_MODULE)) 414 if (!try_module_get(THIS_MODULE))
414 goto err; 415 goto err;
415 416
416 if (!atomic_inc_not_zero(&cur->refcnt)) { 417 if (!refcount_inc_not_zero(&cur->refcnt)) {
417 module_put(THIS_MODULE); 418 module_put(THIS_MODULE);
418 goto err; 419 goto err;
419 } 420 }
@@ -429,7 +430,7 @@ EXPORT_SYMBOL_GPL(nfnl_acct_find_get);
429 430
430void nfnl_acct_put(struct nf_acct *acct) 431void nfnl_acct_put(struct nf_acct *acct)
431{ 432{
432 if (atomic_dec_and_test(&acct->refcnt)) 433 if (refcount_dec_and_test(&acct->refcnt))
433 kfree_rcu(acct, rcu_head); 434 kfree_rcu(acct, rcu_head);
434 435
435 module_put(THIS_MODULE); 436 module_put(THIS_MODULE);
@@ -502,7 +503,7 @@ static void __net_exit nfnl_acct_net_exit(struct net *net)
502 list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head) { 503 list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head) {
503 list_del_rcu(&cur->head); 504 list_del_rcu(&cur->head);
504 505
505 if (atomic_dec_and_test(&cur->refcnt)) 506 if (refcount_dec_and_test(&cur->refcnt))
506 kfree_rcu(cur, rcu_head); 507 kfree_rcu(cur, rcu_head);
507 } 508 }
508} 509}
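nfnl_acct_try_del() keeps its delete-only-when-unused semantics under the new API: refcount_dec_if_one() drops the count only when it is exactly one, matching the old atomic_cmpxchg(&refcnt, 1, 0) and closing the same race with nfnl_acct_put(). Sketch of the pattern:

	/* delete only if we hold the last reference */
	if (refcount_dec_if_one(&cur->refcnt)) {
		list_del_rcu(&cur->head);	/* protected by the nfnl mutex */
		kfree_rcu(cur, rcu_head);
	} else {
		ret = -EBUSY;			/* still referenced elsewhere */
	}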
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 139e0867e56e..baa75f3ab7e7 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -138,7 +138,7 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl,
138 strcpy(timeout->name, nla_data(cda[CTA_TIMEOUT_NAME])); 138 strcpy(timeout->name, nla_data(cda[CTA_TIMEOUT_NAME]));
139 timeout->l3num = l3num; 139 timeout->l3num = l3num;
140 timeout->l4proto = l4proto; 140 timeout->l4proto = l4proto;
141 atomic_set(&timeout->refcnt, 1); 141 refcount_set(&timeout->refcnt, 1);
142 list_add_tail_rcu(&timeout->head, &net->nfct_timeout_list); 142 list_add_tail_rcu(&timeout->head, &net->nfct_timeout_list);
143 143
144 return 0; 144 return 0;
@@ -172,7 +172,7 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
172 nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num)) || 172 nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num)) ||
173 nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) || 173 nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) ||
174 nla_put_be32(skb, CTA_TIMEOUT_USE, 174 nla_put_be32(skb, CTA_TIMEOUT_USE,
175 htonl(atomic_read(&timeout->refcnt)))) 175 htonl(refcount_read(&timeout->refcnt))))
176 goto nla_put_failure; 176 goto nla_put_failure;
177 177
178 if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) { 178 if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
@@ -339,7 +339,7 @@ static int ctnl_timeout_try_del(struct net *net, struct ctnl_timeout *timeout)
339 /* We want to avoid races with ctnl_timeout_put. So only when the 339 /* We want to avoid races with ctnl_timeout_put. So only when the
340 * current refcnt is 1, we decrease it to 0. 340 * current refcnt is 1, we decrease it to 0.
341 */ 341 */
342 if (atomic_cmpxchg(&timeout->refcnt, 1, 0) == 1) { 342 if (refcount_dec_if_one(&timeout->refcnt)) {
343 /* We are protected by nfnl mutex. */ 343 /* We are protected by nfnl mutex. */
344 list_del_rcu(&timeout->head); 344 list_del_rcu(&timeout->head);
345 nf_ct_l4proto_put(timeout->l4proto); 345 nf_ct_l4proto_put(timeout->l4proto);
@@ -536,7 +536,7 @@ ctnl_timeout_find_get(struct net *net, const char *name)
536 if (!try_module_get(THIS_MODULE)) 536 if (!try_module_get(THIS_MODULE))
537 goto err; 537 goto err;
538 538
539 if (!atomic_inc_not_zero(&timeout->refcnt)) { 539 if (!refcount_inc_not_zero(&timeout->refcnt)) {
540 module_put(THIS_MODULE); 540 module_put(THIS_MODULE);
541 goto err; 541 goto err;
542 } 542 }
@@ -550,7 +550,7 @@ err:
550 550
551static void ctnl_timeout_put(struct ctnl_timeout *timeout) 551static void ctnl_timeout_put(struct ctnl_timeout *timeout)
552{ 552{
553 if (atomic_dec_and_test(&timeout->refcnt)) 553 if (refcount_dec_and_test(&timeout->refcnt))
554 kfree_rcu(timeout, rcu_head); 554 kfree_rcu(timeout, rcu_head);
555 555
556 module_put(THIS_MODULE); 556 module_put(THIS_MODULE);
@@ -601,7 +601,7 @@ static void __net_exit cttimeout_net_exit(struct net *net)
601 list_del_rcu(&cur->head); 601 list_del_rcu(&cur->head);
602 nf_ct_l4proto_put(cur->l4proto); 602 nf_ct_l4proto_put(cur->l4proto);
603 603
604 if (atomic_dec_and_test(&cur->refcnt)) 604 if (refcount_dec_and_test(&cur->refcnt))
605 kfree_rcu(cur, rcu_head); 605 kfree_rcu(cur, rcu_head);
606 } 606 }
607} 607}
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 08247bf7d7b8..ecd857b75ffe 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -40,6 +40,8 @@
40#include <net/netfilter/nfnetlink_log.h> 40#include <net/netfilter/nfnetlink_log.h>
41 41
42#include <linux/atomic.h> 42#include <linux/atomic.h>
43#include <linux/refcount.h>
44
43 45
44#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 46#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
45#include "../bridge/br_private.h" 47#include "../bridge/br_private.h"
@@ -57,7 +59,7 @@
57struct nfulnl_instance { 59struct nfulnl_instance {
58 struct hlist_node hlist; /* global list of instances */ 60 struct hlist_node hlist; /* global list of instances */
59 spinlock_t lock; 61 spinlock_t lock;
60 atomic_t use; /* use count */ 62 refcount_t use; /* use count */
61 63
62 unsigned int qlen; /* number of nlmsgs in skb */ 64 unsigned int qlen; /* number of nlmsgs in skb */
63 struct sk_buff *skb; /* pre-allocatd skb */ 65 struct sk_buff *skb; /* pre-allocatd skb */
@@ -115,7 +117,7 @@ __instance_lookup(struct nfnl_log_net *log, u_int16_t group_num)
115static inline void 117static inline void
116instance_get(struct nfulnl_instance *inst) 118instance_get(struct nfulnl_instance *inst)
117{ 119{
118 atomic_inc(&inst->use); 120 refcount_inc(&inst->use);
119} 121}
120 122
121static struct nfulnl_instance * 123static struct nfulnl_instance *
@@ -125,7 +127,7 @@ instance_lookup_get(struct nfnl_log_net *log, u_int16_t group_num)
125 127
126 rcu_read_lock_bh(); 128 rcu_read_lock_bh();
127 inst = __instance_lookup(log, group_num); 129 inst = __instance_lookup(log, group_num);
128 if (inst && !atomic_inc_not_zero(&inst->use)) 130 if (inst && !refcount_inc_not_zero(&inst->use))
129 inst = NULL; 131 inst = NULL;
130 rcu_read_unlock_bh(); 132 rcu_read_unlock_bh();
131 133
@@ -145,7 +147,7 @@ static void nfulnl_instance_free_rcu(struct rcu_head *head)
145static void 147static void
146instance_put(struct nfulnl_instance *inst) 148instance_put(struct nfulnl_instance *inst)
147{ 149{
148 if (inst && atomic_dec_and_test(&inst->use)) 150 if (inst && refcount_dec_and_test(&inst->use))
149 call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu); 151 call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
150} 152}
151 153
@@ -180,7 +182,7 @@ instance_create(struct net *net, u_int16_t group_num,
180 INIT_HLIST_NODE(&inst->hlist); 182 INIT_HLIST_NODE(&inst->hlist);
181 spin_lock_init(&inst->lock); 183 spin_lock_init(&inst->lock);
182 /* needs to be two, since we _put() after creation */ 184 /* needs to be two, since we _put() after creation */
183 atomic_set(&inst->use, 2); 185 refcount_set(&inst->use, 2);
184 186
185 setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst); 187 setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);
186 188
@@ -1031,7 +1033,7 @@ static int seq_show(struct seq_file *s, void *v)
1031 inst->group_num, 1033 inst->group_num,
1032 inst->peer_portid, inst->qlen, 1034 inst->peer_portid, inst->qlen,
1033 inst->copy_mode, inst->copy_range, 1035 inst->copy_mode, inst->copy_range,
1034 inst->flushtimeout, atomic_read(&inst->use)); 1036 inst->flushtimeout, refcount_read(&inst->use));
1035 1037
1036 return 0; 1038 return 0;
1037} 1039}
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index c21e7eb8dce0..fab6bf3f955e 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -230,10 +230,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
230 union nft_entry e = {}; 230 union nft_entry e = {};
231 int ret; 231 int ret;
232 232
233 ret = nft_compat_chain_validate_dependency(target->table, ctx->chain);
234 if (ret < 0)
235 goto err;
236
237 target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info); 233 target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info);
238 234
239 if (ctx->nla[NFTA_RULE_COMPAT]) { 235 if (ctx->nla[NFTA_RULE_COMPAT]) {
@@ -419,10 +415,6 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
419 union nft_entry e = {}; 415 union nft_entry e = {};
420 int ret; 416 int ret;
421 417
422 ret = nft_compat_chain_validate_dependency(match->table, ctx->chain);
423 if (ret < 0)
424 goto err;
425
426 match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info); 418 match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info);
427 419
428 if (ctx->nla[NFTA_RULE_COMPAT]) { 420 if (ctx->nla[NFTA_RULE_COMPAT]) {
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index 7f8422213341..67a710ebde09 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -82,7 +82,8 @@ static int nft_counter_do_init(const struct nlattr * const tb[],
82 return 0; 82 return 0;
83} 83}
84 84
85static int nft_counter_obj_init(const struct nlattr * const tb[], 85static int nft_counter_obj_init(const struct nft_ctx *ctx,
86 const struct nlattr * const tb[],
86 struct nft_object *obj) 87 struct nft_object *obj)
87{ 88{
88 struct nft_counter_percpu_priv *priv = nft_obj_data(obj); 89 struct nft_counter_percpu_priv *priv = nft_obj_data(obj);
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index bf548a7a71ec..4144ae845bdd 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -32,6 +32,12 @@ struct nft_ct {
32 }; 32 };
33}; 33};
34 34
35struct nft_ct_helper_obj {
36 struct nf_conntrack_helper *helper4;
37 struct nf_conntrack_helper *helper6;
38 u8 l4proto;
39};
40
35#ifdef CONFIG_NF_CONNTRACK_ZONES 41#ifdef CONFIG_NF_CONNTRACK_ZONES
36static DEFINE_PER_CPU(struct nf_conn *, nft_ct_pcpu_template); 42static DEFINE_PER_CPU(struct nf_conn *, nft_ct_pcpu_template);
37static unsigned int nft_ct_pcpu_template_refcnt __read_mostly; 43static unsigned int nft_ct_pcpu_template_refcnt __read_mostly;
@@ -730,6 +736,162 @@ static struct nft_expr_type nft_notrack_type __read_mostly = {
730 .owner = THIS_MODULE, 736 .owner = THIS_MODULE,
731}; 737};
732 738
739static int nft_ct_helper_obj_init(const struct nft_ctx *ctx,
740 const struct nlattr * const tb[],
741 struct nft_object *obj)
742{
743 struct nft_ct_helper_obj *priv = nft_obj_data(obj);
744 struct nf_conntrack_helper *help4, *help6;
745 char name[NF_CT_HELPER_NAME_LEN];
746 int family = ctx->afi->family;
747
748 if (!tb[NFTA_CT_HELPER_NAME] || !tb[NFTA_CT_HELPER_L4PROTO])
749 return -EINVAL;
750
751 priv->l4proto = nla_get_u8(tb[NFTA_CT_HELPER_L4PROTO]);
752 if (!priv->l4proto)
753 return -ENOENT;
754
755 nla_strlcpy(name, tb[NFTA_CT_HELPER_NAME], sizeof(name));
756
757 if (tb[NFTA_CT_HELPER_L3PROTO])
758 family = ntohs(nla_get_be16(tb[NFTA_CT_HELPER_L3PROTO]));
759
760 help4 = NULL;
761 help6 = NULL;
762
763 switch (family) {
764 case NFPROTO_IPV4:
765 if (ctx->afi->family == NFPROTO_IPV6)
766 return -EINVAL;
767
768 help4 = nf_conntrack_helper_try_module_get(name, family,
769 priv->l4proto);
770 break;
771 case NFPROTO_IPV6:
772 if (ctx->afi->family == NFPROTO_IPV4)
773 return -EINVAL;
774
775 help6 = nf_conntrack_helper_try_module_get(name, family,
776 priv->l4proto);
777 break;
778 case NFPROTO_NETDEV: /* fallthrough */
779 case NFPROTO_BRIDGE: /* same */
780 case NFPROTO_INET:
781 help4 = nf_conntrack_helper_try_module_get(name, NFPROTO_IPV4,
782 priv->l4proto);
783 help6 = nf_conntrack_helper_try_module_get(name, NFPROTO_IPV6,
784 priv->l4proto);
785 break;
786 default:
787 return -EAFNOSUPPORT;
788 }
789
790 /* && is intentional; only error if INET found neither ipv4 or ipv6 */
791 if (!help4 && !help6)
792 return -ENOENT;
793
794 priv->helper4 = help4;
795 priv->helper6 = help6;
796
797 return 0;
798}
799
800static void nft_ct_helper_obj_destroy(struct nft_object *obj)
801{
802 struct nft_ct_helper_obj *priv = nft_obj_data(obj);
803
804 if (priv->helper4)
805 module_put(priv->helper4->me);
806 if (priv->helper6)
807 module_put(priv->helper6->me);
808}
809
810static void nft_ct_helper_obj_eval(struct nft_object *obj,
811 struct nft_regs *regs,
812 const struct nft_pktinfo *pkt)
813{
814 const struct nft_ct_helper_obj *priv = nft_obj_data(obj);
815 struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb);
816 struct nf_conntrack_helper *to_assign = NULL;
817 struct nf_conn_help *help;
818
819 if (!ct ||
820 nf_ct_is_confirmed(ct) ||
821 nf_ct_is_template(ct) ||
822 priv->l4proto != nf_ct_protonum(ct))
823 return;
824
825 switch (nf_ct_l3num(ct)) {
826 case NFPROTO_IPV4:
827 to_assign = priv->helper4;
828 break;
829 case NFPROTO_IPV6:
830 to_assign = priv->helper6;
831 break;
832 default:
833 WARN_ON_ONCE(1);
834 return;
835 }
836
837 if (!to_assign)
838 return;
839
840 if (test_bit(IPS_HELPER_BIT, &ct->status))
841 return;
842
843 help = nf_ct_helper_ext_add(ct, to_assign, GFP_ATOMIC);
844 if (help) {
845 rcu_assign_pointer(help->helper, to_assign);
846 set_bit(IPS_HELPER_BIT, &ct->status);
847 }
848}
849
850static int nft_ct_helper_obj_dump(struct sk_buff *skb,
851 struct nft_object *obj, bool reset)
852{
853 const struct nft_ct_helper_obj *priv = nft_obj_data(obj);
854 const struct nf_conntrack_helper *helper = priv->helper4;
855 u16 family;
856
857 if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name))
858 return -1;
859
860 if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto))
861 return -1;
862
863 if (priv->helper4 && priv->helper6)
864 family = NFPROTO_INET;
865 else if (priv->helper6)
866 family = NFPROTO_IPV6;
867 else
868 family = NFPROTO_IPV4;
869
870 if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family)))
871 return -1;
872
873 return 0;
874}
875
876static const struct nla_policy nft_ct_helper_policy[NFTA_CT_HELPER_MAX + 1] = {
877 [NFTA_CT_HELPER_NAME] = { .type = NLA_STRING,
878 .len = NF_CT_HELPER_NAME_LEN - 1 },
879 [NFTA_CT_HELPER_L3PROTO] = { .type = NLA_U16 },
880 [NFTA_CT_HELPER_L4PROTO] = { .type = NLA_U8 },
881};
882
883static struct nft_object_type nft_ct_helper_obj __read_mostly = {
884 .type = NFT_OBJECT_CT_HELPER,
885 .size = sizeof(struct nft_ct_helper_obj),
886 .maxattr = NFTA_CT_HELPER_MAX,
887 .policy = nft_ct_helper_policy,
888 .eval = nft_ct_helper_obj_eval,
889 .init = nft_ct_helper_obj_init,
890 .destroy = nft_ct_helper_obj_destroy,
891 .dump = nft_ct_helper_obj_dump,
892 .owner = THIS_MODULE,
893};
894
733static int __init nft_ct_module_init(void) 895static int __init nft_ct_module_init(void)
734{ 896{
735 int err; 897 int err;
@@ -744,7 +906,14 @@ static int __init nft_ct_module_init(void)
744 if (err < 0) 906 if (err < 0)
745 goto err1; 907 goto err1;
746 908
909 err = nft_register_obj(&nft_ct_helper_obj);
910 if (err < 0)
911 goto err2;
912
747 return 0; 913 return 0;
914
915err2:
916 nft_unregister_expr(&nft_notrack_type);
748err1: 917err1:
749 nft_unregister_expr(&nft_ct_type); 918 nft_unregister_expr(&nft_ct_type);
750 return err; 919 return err;
@@ -752,6 +921,7 @@ err1:
752 921
753static void __exit nft_ct_module_exit(void) 922static void __exit nft_ct_module_exit(void)
754{ 923{
924 nft_unregister_obj(&nft_ct_helper_obj);
755 nft_unregister_expr(&nft_notrack_type); 925 nft_unregister_expr(&nft_notrack_type);
756 nft_unregister_expr(&nft_ct_type); 926 nft_unregister_expr(&nft_ct_type);
757} 927}
@@ -763,3 +933,4 @@ MODULE_LICENSE("GPL");
763MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); 933MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
764MODULE_ALIAS_NFT_EXPR("ct"); 934MODULE_ALIAS_NFT_EXPR("ct");
765MODULE_ALIAS_NFT_EXPR("notrack"); 935MODULE_ALIAS_NFT_EXPR("notrack");
936MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_HELPER);
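The new "ct helper" object can hold one helper per L3 family so that a single object also works for inet (dual-stack) chains; at init time the requested family decides which helper modules to grab. Condensed from nft_ct_helper_obj_init() (sketch):

	switch (family) {
	case NFPROTO_IPV4:
		help4 = nf_conntrack_helper_try_module_get(name, family,
							   priv->l4proto);
		break;
	case NFPROTO_IPV6:
		help6 = nf_conntrack_helper_try_module_get(name, family,
							   priv->l4proto);
		break;
	case NFPROTO_INET:	/* and netdev/bridge: try both families */
		help4 = nf_conntrack_helper_try_module_get(name, NFPROTO_IPV4,
							   priv->l4proto);
		help6 = nf_conntrack_helper_try_module_get(name, NFPROTO_IPV6,
							   priv->l4proto);
		break;
	}

	/* for inet it is enough if either family resolved a helper */
	if (!help4 && !help6)
		return -ENOENT;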
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 049ad2d9ee66..3948da380259 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -133,16 +133,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
133 priv->invert = true; 133 priv->invert = true;
134 } 134 }
135 135
136 set = nf_tables_set_lookup(ctx->table, tb[NFTA_DYNSET_SET_NAME], 136 set = nft_set_lookup(ctx->net, ctx->table, tb[NFTA_DYNSET_SET_NAME],
137 genmask); 137 tb[NFTA_DYNSET_SET_ID], genmask);
138 if (IS_ERR(set)) { 138 if (IS_ERR(set))
139 if (tb[NFTA_DYNSET_SET_ID]) 139 return PTR_ERR(set);
140 set = nf_tables_set_lookup_byid(ctx->net,
141 tb[NFTA_DYNSET_SET_ID],
142 genmask);
143 if (IS_ERR(set))
144 return PTR_ERR(set);
145 }
146 140
147 if (set->ops->update == NULL) 141 if (set->ops->update == NULL)
148 return -EOPNOTSUPP; 142 return -EOPNOTSUPP;
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index c308920b194c..d212a85d2f33 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -98,14 +98,21 @@ static void nft_exthdr_tcp_eval(const struct nft_expr *expr,
98 goto err; 98 goto err;
99 99
100 offset = i + priv->offset; 100 offset = i + priv->offset;
101 dest[priv->len / NFT_REG32_SIZE] = 0; 101 if (priv->flags & NFT_EXTHDR_F_PRESENT) {
102 memcpy(dest, opt + offset, priv->len); 102 *dest = 1;
103 } else {
104 dest[priv->len / NFT_REG32_SIZE] = 0;
105 memcpy(dest, opt + offset, priv->len);
106 }
103 107
104 return; 108 return;
105 } 109 }
106 110
107err: 111err:
108 regs->verdict.code = NFT_BREAK; 112 if (priv->flags & NFT_EXTHDR_F_PRESENT)
113 *dest = 0;
114 else
115 regs->verdict.code = NFT_BREAK;
109} 116}
110 117
111static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = { 118static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
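With the new NFT_EXTHDR_F_PRESENT flag the TCP option match stores a boolean "option present" result instead of copying the option bytes, and a miss writes 0 to the register rather than breaking the rule. Condensed from the hunk (sketch; "found" stands in for the existing option-scan loop):

	if (found) {
		if (priv->flags & NFT_EXTHDR_F_PRESENT) {
			*dest = 1;			/* presence check only */
		} else {
			dest[priv->len / NFT_REG32_SIZE] = 0;	/* zero-pad last reg */
			memcpy(dest, opt + offset, priv->len);
		}
		return;
	}

	if (priv->flags & NFT_EXTHDR_F_PRESENT)
		*dest = 0;				/* absent, report false */
	else
		regs->verdict.code = NFT_BREAK;		/* option required but missing */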
diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c
index 29a4906adc27..21df8cccea65 100644
--- a/net/netfilter/nft_fib.c
+++ b/net/netfilter/nft_fib.c
@@ -24,7 +24,8 @@ const struct nla_policy nft_fib_policy[NFTA_FIB_MAX + 1] = {
24EXPORT_SYMBOL(nft_fib_policy); 24EXPORT_SYMBOL(nft_fib_policy);
25 25
26#define NFTA_FIB_F_ALL (NFTA_FIB_F_SADDR | NFTA_FIB_F_DADDR | \ 26#define NFTA_FIB_F_ALL (NFTA_FIB_F_SADDR | NFTA_FIB_F_DADDR | \
27 NFTA_FIB_F_MARK | NFTA_FIB_F_IIF | NFTA_FIB_F_OIF) 27 NFTA_FIB_F_MARK | NFTA_FIB_F_IIF | NFTA_FIB_F_OIF | \
28 NFTA_FIB_F_PRESENT)
28 29
29int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, 30int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
30 const struct nft_data **data) 31 const struct nft_data **data)
@@ -112,7 +113,7 @@ int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
112 if (err < 0) 113 if (err < 0)
113 return err; 114 return err;
114 115
115 return nft_fib_validate(ctx, expr, NULL); 116 return 0;
116} 117}
117EXPORT_SYMBOL_GPL(nft_fib_init); 118EXPORT_SYMBOL_GPL(nft_fib_init);
118 119
@@ -133,19 +134,22 @@ int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr)
133} 134}
134EXPORT_SYMBOL_GPL(nft_fib_dump); 135EXPORT_SYMBOL_GPL(nft_fib_dump);
135 136
136void nft_fib_store_result(void *reg, enum nft_fib_result r, 137void nft_fib_store_result(void *reg, const struct nft_fib *priv,
137 const struct nft_pktinfo *pkt, int index) 138 const struct nft_pktinfo *pkt, int index)
138{ 139{
139 struct net_device *dev; 140 struct net_device *dev;
140 u32 *dreg = reg; 141 u32 *dreg = reg;
141 142
142 switch (r) { 143 switch (priv->result) {
143 case NFT_FIB_RESULT_OIF: 144 case NFT_FIB_RESULT_OIF:
144 *dreg = index; 145 *dreg = (priv->flags & NFTA_FIB_F_PRESENT) ? !!index : index;
145 break; 146 break;
146 case NFT_FIB_RESULT_OIFNAME: 147 case NFT_FIB_RESULT_OIFNAME:
147 dev = dev_get_by_index_rcu(nft_net(pkt), index); 148 dev = dev_get_by_index_rcu(nft_net(pkt), index);
148 strncpy(reg, dev ? dev->name : "", IFNAMSIZ); 149 if (priv->flags & NFTA_FIB_F_PRESENT)
150 *dreg = !!dev;
151 else
152 strncpy(reg, dev ? dev->name : "", IFNAMSIZ);
149 break; 153 break;
150 default: 154 default:
151 WARN_ON_ONCE(1); 155 WARN_ON_ONCE(1);
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index eb2721af898d..a6a4633725bb 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -17,7 +17,7 @@
17#include <net/netfilter/nf_tables_core.h> 17#include <net/netfilter/nf_tables_core.h>
18#include <linux/jhash.h> 18#include <linux/jhash.h>
19 19
20struct nft_hash { 20struct nft_jhash {
21 enum nft_registers sreg:8; 21 enum nft_registers sreg:8;
22 enum nft_registers dreg:8; 22 enum nft_registers dreg:8;
23 u8 len; 23 u8 len;
@@ -26,11 +26,11 @@ struct nft_hash {
26 u32 offset; 26 u32 offset;
27}; 27};
28 28
29static void nft_hash_eval(const struct nft_expr *expr, 29static void nft_jhash_eval(const struct nft_expr *expr,
30 struct nft_regs *regs, 30 struct nft_regs *regs,
31 const struct nft_pktinfo *pkt) 31 const struct nft_pktinfo *pkt)
32{ 32{
33 struct nft_hash *priv = nft_expr_priv(expr); 33 struct nft_jhash *priv = nft_expr_priv(expr);
34 const void *data = &regs->data[priv->sreg]; 34 const void *data = &regs->data[priv->sreg];
35 u32 h; 35 u32 h;
36 36
@@ -38,6 +38,25 @@ static void nft_hash_eval(const struct nft_expr *expr,
38 regs->data[priv->dreg] = h + priv->offset; 38 regs->data[priv->dreg] = h + priv->offset;
39} 39}
40 40
41struct nft_symhash {
42 enum nft_registers dreg:8;
43 u32 modulus;
44 u32 offset;
45};
46
47static void nft_symhash_eval(const struct nft_expr *expr,
48 struct nft_regs *regs,
49 const struct nft_pktinfo *pkt)
50{
51 struct nft_symhash *priv = nft_expr_priv(expr);
52 struct sk_buff *skb = pkt->skb;
53 u32 h;
54
55 h = reciprocal_scale(__skb_get_hash_symmetric(skb), priv->modulus);
56
57 regs->data[priv->dreg] = h + priv->offset;
58}
59
41static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = { 60static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = {
42 [NFTA_HASH_SREG] = { .type = NLA_U32 }, 61 [NFTA_HASH_SREG] = { .type = NLA_U32 },
43 [NFTA_HASH_DREG] = { .type = NLA_U32 }, 62 [NFTA_HASH_DREG] = { .type = NLA_U32 },
@@ -45,13 +64,14 @@ static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = {
45 [NFTA_HASH_MODULUS] = { .type = NLA_U32 }, 64 [NFTA_HASH_MODULUS] = { .type = NLA_U32 },
46 [NFTA_HASH_SEED] = { .type = NLA_U32 }, 65 [NFTA_HASH_SEED] = { .type = NLA_U32 },
47 [NFTA_HASH_OFFSET] = { .type = NLA_U32 }, 66 [NFTA_HASH_OFFSET] = { .type = NLA_U32 },
67 [NFTA_HASH_TYPE] = { .type = NLA_U32 },
48}; 68};
49 69
50static int nft_hash_init(const struct nft_ctx *ctx, 70static int nft_jhash_init(const struct nft_ctx *ctx,
51 const struct nft_expr *expr, 71 const struct nft_expr *expr,
52 const struct nlattr * const tb[]) 72 const struct nlattr * const tb[])
53{ 73{
54 struct nft_hash *priv = nft_expr_priv(expr); 74 struct nft_jhash *priv = nft_expr_priv(expr);
55 u32 len; 75 u32 len;
56 int err; 76 int err;
57 77
@@ -92,10 +112,36 @@ static int nft_hash_init(const struct nft_ctx *ctx,
92 NFT_DATA_VALUE, sizeof(u32)); 112 NFT_DATA_VALUE, sizeof(u32));
93} 113}
94 114
95static int nft_hash_dump(struct sk_buff *skb, 115static int nft_symhash_init(const struct nft_ctx *ctx,
96 const struct nft_expr *expr) 116 const struct nft_expr *expr,
117 const struct nlattr * const tb[])
97{ 118{
98 const struct nft_hash *priv = nft_expr_priv(expr); 119 struct nft_symhash *priv = nft_expr_priv(expr);
120
121 if (!tb[NFTA_HASH_DREG] ||
122 !tb[NFTA_HASH_MODULUS])
123 return -EINVAL;
124
125 if (tb[NFTA_HASH_OFFSET])
126 priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET]));
127
128 priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
129
130 priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
131 if (priv->modulus <= 1)
132 return -ERANGE;
133
134 if (priv->offset + priv->modulus - 1 < priv->offset)
135 return -EOVERFLOW;
136
137 return nft_validate_register_store(ctx, priv->dreg, NULL,
138 NFT_DATA_VALUE, sizeof(u32));
139}
140
141static int nft_jhash_dump(struct sk_buff *skb,
142 const struct nft_expr *expr)
143{
144 const struct nft_jhash *priv = nft_expr_priv(expr);
99 145
100 if (nft_dump_register(skb, NFTA_HASH_SREG, priv->sreg)) 146 if (nft_dump_register(skb, NFTA_HASH_SREG, priv->sreg))
101 goto nla_put_failure; 147 goto nla_put_failure;
@@ -110,6 +156,28 @@ static int nft_hash_dump(struct sk_buff *skb,
110 if (priv->offset != 0) 156 if (priv->offset != 0)
111 if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset))) 157 if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset)))
112 goto nla_put_failure; 158 goto nla_put_failure;
159 if (nla_put_be32(skb, NFTA_HASH_TYPE, htonl(NFT_HASH_JENKINS)))
160 goto nla_put_failure;
161 return 0;
162
163nla_put_failure:
164 return -1;
165}
166
167static int nft_symhash_dump(struct sk_buff *skb,
168 const struct nft_expr *expr)
169{
170 const struct nft_symhash *priv = nft_expr_priv(expr);
171
172 if (nft_dump_register(skb, NFTA_HASH_DREG, priv->dreg))
173 goto nla_put_failure;
174 if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus)))
175 goto nla_put_failure;
176 if (priv->offset != 0)
177 if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset)))
178 goto nla_put_failure;
179 if (nla_put_be32(skb, NFTA_HASH_TYPE, htonl(NFT_HASH_SYM)))
180 goto nla_put_failure;
113 return 0; 181 return 0;
114 182
115nla_put_failure: 183nla_put_failure:
@@ -117,17 +185,46 @@ nla_put_failure:
117} 185}
118 186
119static struct nft_expr_type nft_hash_type; 187static struct nft_expr_type nft_hash_type;
120static const struct nft_expr_ops nft_hash_ops = { 188static const struct nft_expr_ops nft_jhash_ops = {
121 .type = &nft_hash_type, 189 .type = &nft_hash_type,
122 .size = NFT_EXPR_SIZE(sizeof(struct nft_hash)), 190 .size = NFT_EXPR_SIZE(sizeof(struct nft_jhash)),
123 .eval = nft_hash_eval, 191 .eval = nft_jhash_eval,
124 .init = nft_hash_init, 192 .init = nft_jhash_init,
125 .dump = nft_hash_dump, 193 .dump = nft_jhash_dump,
126}; 194};
127 195
196static const struct nft_expr_ops nft_symhash_ops = {
197 .type = &nft_hash_type,
198 .size = NFT_EXPR_SIZE(sizeof(struct nft_symhash)),
199 .eval = nft_symhash_eval,
200 .init = nft_symhash_init,
201 .dump = nft_symhash_dump,
202};
203
204static const struct nft_expr_ops *
205nft_hash_select_ops(const struct nft_ctx *ctx,
206 const struct nlattr * const tb[])
207{
208 u32 type;
209
210 if (!tb[NFTA_HASH_TYPE])
211 return &nft_jhash_ops;
212
213 type = ntohl(nla_get_be32(tb[NFTA_HASH_TYPE]));
214 switch (type) {
215 case NFT_HASH_SYM:
216 return &nft_symhash_ops;
217 case NFT_HASH_JENKINS:
218 return &nft_jhash_ops;
219 default:
220 break;
221 }
222 return ERR_PTR(-EOPNOTSUPP);
223}
224
128static struct nft_expr_type nft_hash_type __read_mostly = { 225static struct nft_expr_type nft_hash_type __read_mostly = {
129 .name = "hash", 226 .name = "hash",
130 .ops = &nft_hash_ops, 227 .select_ops = &nft_hash_select_ops,
131 .policy = nft_hash_policy, 228 .policy = nft_hash_policy,
132 .maxattr = NFTA_HASH_MAX, 229 .maxattr = NFTA_HASH_MAX,
133 .owner = THIS_MODULE, 230 .owner = THIS_MODULE,
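
The nft_hash hunks above split the expression into two backends: the existing Jenkins-hash ops (renamed nft_jhash_*) and the new nft_symhash_* ops, selected per rule through NFTA_HASH_TYPE in nft_hash_select_ops(). The value symhash stores in its destination register is the symmetric flow hash folded into [0, modulus) plus the configured offset. Below is a minimal userspace model of that computation: reciprocal_scale() is reproduced from the kernel, while the symmetric hash itself (__skb_get_hash_symmetric()) only exists in the kernel, so a placeholder value stands in for it.

#include <stdint.h>
#include <stdio.h>

/* Same folding helper the kernel uses: maps a 32-bit value into [0, ep_ro). */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	uint32_t sym_hash = 0x9e3779b9;	/* stand-in for __skb_get_hash_symmetric() */
	uint32_t modulus = 16, offset = 100;

	/* Same computation as nft_symhash_eval(): h + offset, with h in [0, modulus) */
	printf("dreg = %u\n", (unsigned)(reciprocal_scale(sym_hash, modulus) + offset));
	return 0;
}

Because the symmetric hash is identical for both directions of a flow, both directions land on the same register value, which is the point of offering it next to the Jenkins hash.
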
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index c6baf412236d..18dd57a52651 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -17,9 +17,8 @@
17#include <linux/netfilter/nf_tables.h> 17#include <linux/netfilter/nf_tables.h>
18#include <net/netfilter/nf_tables.h> 18#include <net/netfilter/nf_tables.h>
19 19
20static DEFINE_SPINLOCK(limit_lock);
21
22struct nft_limit { 20struct nft_limit {
21 spinlock_t lock;
23 u64 last; 22 u64 last;
24 u64 tokens; 23 u64 tokens;
25 u64 tokens_max; 24 u64 tokens_max;
@@ -34,7 +33,7 @@ static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
34 u64 now, tokens; 33 u64 now, tokens;
35 s64 delta; 34 s64 delta;
36 35
37 spin_lock_bh(&limit_lock); 36 spin_lock_bh(&limit->lock);
38 now = ktime_get_ns(); 37 now = ktime_get_ns();
39 tokens = limit->tokens + now - limit->last; 38 tokens = limit->tokens + now - limit->last;
40 if (tokens > limit->tokens_max) 39 if (tokens > limit->tokens_max)
@@ -44,11 +43,11 @@ static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
44 delta = tokens - cost; 43 delta = tokens - cost;
45 if (delta >= 0) { 44 if (delta >= 0) {
46 limit->tokens = delta; 45 limit->tokens = delta;
47 spin_unlock_bh(&limit_lock); 46 spin_unlock_bh(&limit->lock);
48 return limit->invert; 47 return limit->invert;
49 } 48 }
50 limit->tokens = tokens; 49 limit->tokens = tokens;
51 spin_unlock_bh(&limit_lock); 50 spin_unlock_bh(&limit->lock);
52 return !limit->invert; 51 return !limit->invert;
53} 52}
54 53
@@ -86,6 +85,7 @@ static int nft_limit_init(struct nft_limit *limit,
86 limit->invert = true; 85 limit->invert = true;
87 } 86 }
88 limit->last = ktime_get_ns(); 87 limit->last = ktime_get_ns();
88 spin_lock_init(&limit->lock);
89 89
90 return 0; 90 return 0;
91} 91}
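
The nft_limit hunk above (and the xt_limit hunk further down) drops the file-global DEFINE_SPINLOCK that serialized every limit instance and embeds a spinlock in each instance's private data, initialized once in the init path. A minimal sketch of the pattern, with hypothetical names (struct foo, foo_init, foo_update):

#include <linux/spinlock.h>
#include <linux/types.h>

struct foo {
	spinlock_t lock;	/* was: static DEFINE_SPINLOCK(global_lock) */
	u64 tokens;
};

static int foo_init(struct foo *priv)
{
	spin_lock_init(&priv->lock);	/* per-instance, done once at init time */
	priv->tokens = 0;
	return 0;
}

static void foo_update(struct foo *priv, u64 cost)
{
	spin_lock_bh(&priv->lock);	/* only contends with users of this instance */
	priv->tokens += cost;
	spin_unlock_bh(&priv->lock);
}

Unrelated limit instances no longer contend on one lock; only packets hitting the same rule still serialize.
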
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index e21aea7e5ec8..475570e89ede 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -71,16 +71,10 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
71 tb[NFTA_LOOKUP_SREG] == NULL) 71 tb[NFTA_LOOKUP_SREG] == NULL)
72 return -EINVAL; 72 return -EINVAL;
73 73
74 set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET], genmask); 74 set = nft_set_lookup(ctx->net, ctx->table, tb[NFTA_LOOKUP_SET],
75 if (IS_ERR(set)) { 75 tb[NFTA_LOOKUP_SET_ID], genmask);
76 if (tb[NFTA_LOOKUP_SET_ID]) { 76 if (IS_ERR(set))
77 set = nf_tables_set_lookup_byid(ctx->net, 77 return PTR_ERR(set);
78 tb[NFTA_LOOKUP_SET_ID],
79 genmask);
80 }
81 if (IS_ERR(set))
82 return PTR_ERR(set);
83 }
84 78
85 if (set->flags & NFT_SET_EVAL) 79 if (set->flags & NFT_SET_EVAL)
86 return -EOPNOTSUPP; 80 return -EOPNOTSUPP;
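
nft_lookup (and nft_objref below) now calls a single nft_set_lookup() helper instead of open-coding the by-name lookup with a by-id fallback. The helper's body is not part of this section; the sketch below only illustrates the behaviour the callers now rely on, assuming it mirrors the open-coded sequence it replaces:

#include <net/netfilter/nf_tables.h>

/* Hypothetical restatement of the consolidated helper: look the set up by
 * name first, then fall back to the transaction set id if one was given.
 */
static struct nft_set *nft_set_lookup_sketch(const struct net *net,
					     const struct nft_table *table,
					     const struct nlattr *nla_set_name,
					     const struct nlattr *nla_set_id,
					     u8 genmask)
{
	struct nft_set *set;

	set = nf_tables_set_lookup(table, nla_set_name, genmask);
	if (IS_ERR(set) && nla_set_id)
		set = nf_tables_set_lookup_byid(net, nla_set_id, genmask);

	return set;	/* callers now only need a single IS_ERR() check */
}
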
diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
index 11ce016cd479..6ac03d4266c9 100644
--- a/net/netfilter/nft_masq.c
+++ b/net/netfilter/nft_masq.c
@@ -46,10 +46,6 @@ int nft_masq_init(const struct nft_ctx *ctx,
46 struct nft_masq *priv = nft_expr_priv(expr); 46 struct nft_masq *priv = nft_expr_priv(expr);
47 int err; 47 int err;
48 48
49 err = nft_masq_validate(ctx, expr, NULL);
50 if (err)
51 return err;
52
53 if (tb[NFTA_MASQ_FLAGS]) { 49 if (tb[NFTA_MASQ_FLAGS]) {
54 priv->flags = ntohl(nla_get_be32(tb[NFTA_MASQ_FLAGS])); 50 priv->flags = ntohl(nla_get_be32(tb[NFTA_MASQ_FLAGS]));
55 if (priv->flags & ~NF_NAT_RANGE_MASK) 51 if (priv->flags & ~NF_NAT_RANGE_MASK)
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index e1f5ca9b423b..d14417aaf5d4 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -370,10 +370,6 @@ int nft_meta_set_init(const struct nft_ctx *ctx,
370 return -EOPNOTSUPP; 370 return -EOPNOTSUPP;
371 } 371 }
372 372
373 err = nft_meta_set_validate(ctx, expr, NULL);
374 if (err < 0)
375 return err;
376
377 priv->sreg = nft_parse_register(tb[NFTA_META_SREG]); 373 priv->sreg = nft_parse_register(tb[NFTA_META_SREG]);
378 err = nft_validate_register_load(priv->sreg, len); 374 err = nft_validate_register_load(priv->sreg, len);
379 if (err < 0) 375 if (err < 0)
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index 19a7bf3236f9..26a74dfb3b7a 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -138,10 +138,6 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
138 return -EINVAL; 138 return -EINVAL;
139 } 139 }
140 140
141 err = nft_nat_validate(ctx, expr, NULL);
142 if (err < 0)
143 return err;
144
145 if (tb[NFTA_NAT_FAMILY] == NULL) 141 if (tb[NFTA_NAT_FAMILY] == NULL)
146 return -EINVAL; 142 return -EINVAL;
147 143
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index 1ae8c49ca4a1..1dd428fbaaa3 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -116,16 +116,10 @@ static int nft_objref_map_init(const struct nft_ctx *ctx,
116 struct nft_set *set; 116 struct nft_set *set;
117 int err; 117 int err;
118 118
119 set = nf_tables_set_lookup(ctx->table, tb[NFTA_OBJREF_SET_NAME], genmask); 119 set = nft_set_lookup(ctx->net, ctx->table, tb[NFTA_OBJREF_SET_NAME],
120 if (IS_ERR(set)) { 120 tb[NFTA_OBJREF_SET_ID], genmask);
121 if (tb[NFTA_OBJREF_SET_ID]) { 121 if (IS_ERR(set))
122 set = nf_tables_set_lookup_byid(ctx->net, 122 return PTR_ERR(set);
123 tb[NFTA_OBJREF_SET_ID],
124 genmask);
125 }
126 if (IS_ERR(set))
127 return PTR_ERR(set);
128 }
129 123
130 if (!(set->flags & NFT_SET_OBJECT)) 124 if (!(set->flags & NFT_SET_OBJECT))
131 return -EINVAL; 125 return -EINVAL;
diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c
index 2d6fe3559912..25e33159be57 100644
--- a/net/netfilter/nft_quota.c
+++ b/net/netfilter/nft_quota.c
@@ -99,7 +99,8 @@ static int nft_quota_do_init(const struct nlattr * const tb[],
99 return 0; 99 return 0;
100} 100}
101 101
102static int nft_quota_obj_init(const struct nlattr * const tb[], 102static int nft_quota_obj_init(const struct nft_ctx *ctx,
103 const struct nlattr * const tb[],
103 struct nft_object *obj) 104 struct nft_object *obj)
104{ 105{
105 struct nft_quota *priv = nft_obj_data(obj); 106 struct nft_quota *priv = nft_obj_data(obj);
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
index 40dcd05146d5..1e66538bf0ff 100644
--- a/net/netfilter/nft_redir.c
+++ b/net/netfilter/nft_redir.c
@@ -47,10 +47,6 @@ int nft_redir_init(const struct nft_ctx *ctx,
47 unsigned int plen; 47 unsigned int plen;
48 int err; 48 int err;
49 49
50 err = nft_redir_validate(ctx, expr, NULL);
51 if (err < 0)
52 return err;
53
54 plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all); 50 plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all);
55 if (tb[NFTA_REDIR_REG_PROTO_MIN]) { 51 if (tb[NFTA_REDIR_REG_PROTO_MIN]) {
56 priv->sreg_proto_min = 52 priv->sreg_proto_min =
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index c64de3f7379d..29f5bd2377b0 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -42,11 +42,6 @@ int nft_reject_init(const struct nft_ctx *ctx,
42 const struct nlattr * const tb[]) 42 const struct nlattr * const tb[])
43{ 43{
44 struct nft_reject *priv = nft_expr_priv(expr); 44 struct nft_reject *priv = nft_expr_priv(expr);
45 int err;
46
47 err = nft_reject_validate(ctx, expr, NULL);
48 if (err < 0)
49 return err;
50 45
51 if (tb[NFTA_REJECT_TYPE] == NULL) 46 if (tb[NFTA_REJECT_TYPE] == NULL)
52 return -EINVAL; 47 return -EINVAL;
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
index 9e90a02cb104..5a7fb5ff867d 100644
--- a/net/netfilter/nft_reject_inet.c
+++ b/net/netfilter/nft_reject_inet.c
@@ -66,11 +66,7 @@ static int nft_reject_inet_init(const struct nft_ctx *ctx,
66 const struct nlattr * const tb[]) 66 const struct nlattr * const tb[])
67{ 67{
68 struct nft_reject *priv = nft_expr_priv(expr); 68 struct nft_reject *priv = nft_expr_priv(expr);
69 int icmp_code, err; 69 int icmp_code;
70
71 err = nft_reject_validate(ctx, expr, NULL);
72 if (err < 0)
73 return err;
74 70
75 if (tb[NFTA_REJECT_TYPE] == NULL) 71 if (tb[NFTA_REJECT_TYPE] == NULL)
76 return -EINVAL; 72 return -EINVAL;
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 78dfbf9588b3..e97e2fb53f0a 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -18,9 +18,8 @@
18#include <linux/netfilter/nf_tables.h> 18#include <linux/netfilter/nf_tables.h>
19#include <net/netfilter/nf_tables.h> 19#include <net/netfilter/nf_tables.h>
20 20
21static DEFINE_SPINLOCK(nft_rbtree_lock);
22
23struct nft_rbtree { 21struct nft_rbtree {
22 rwlock_t lock;
24 struct rb_root root; 23 struct rb_root root;
25}; 24};
26 25
@@ -44,14 +43,14 @@ static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
44static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set, 43static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
45 const u32 *key, const struct nft_set_ext **ext) 44 const u32 *key, const struct nft_set_ext **ext)
46{ 45{
47 const struct nft_rbtree *priv = nft_set_priv(set); 46 struct nft_rbtree *priv = nft_set_priv(set);
48 const struct nft_rbtree_elem *rbe, *interval = NULL; 47 const struct nft_rbtree_elem *rbe, *interval = NULL;
49 u8 genmask = nft_genmask_cur(net); 48 u8 genmask = nft_genmask_cur(net);
50 const struct rb_node *parent; 49 const struct rb_node *parent;
51 const void *this; 50 const void *this;
52 int d; 51 int d;
53 52
54 spin_lock_bh(&nft_rbtree_lock); 53 read_lock_bh(&priv->lock);
55 parent = priv->root.rb_node; 54 parent = priv->root.rb_node;
56 while (parent != NULL) { 55 while (parent != NULL) {
57 rbe = rb_entry(parent, struct nft_rbtree_elem, node); 56 rbe = rb_entry(parent, struct nft_rbtree_elem, node);
@@ -75,7 +74,7 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
75 } 74 }
76 if (nft_rbtree_interval_end(rbe)) 75 if (nft_rbtree_interval_end(rbe))
77 goto out; 76 goto out;
78 spin_unlock_bh(&nft_rbtree_lock); 77 read_unlock_bh(&priv->lock);
79 78
80 *ext = &rbe->ext; 79 *ext = &rbe->ext;
81 return true; 80 return true;
@@ -85,12 +84,12 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
85 if (set->flags & NFT_SET_INTERVAL && interval != NULL && 84 if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
86 nft_set_elem_active(&interval->ext, genmask) && 85 nft_set_elem_active(&interval->ext, genmask) &&
87 !nft_rbtree_interval_end(interval)) { 86 !nft_rbtree_interval_end(interval)) {
88 spin_unlock_bh(&nft_rbtree_lock); 87 read_unlock_bh(&priv->lock);
89 *ext = &interval->ext; 88 *ext = &interval->ext;
90 return true; 89 return true;
91 } 90 }
92out: 91out:
93 spin_unlock_bh(&nft_rbtree_lock); 92 read_unlock_bh(&priv->lock);
94 return false; 93 return false;
95} 94}
96 95
@@ -140,12 +139,13 @@ static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
140 const struct nft_set_elem *elem, 139 const struct nft_set_elem *elem,
141 struct nft_set_ext **ext) 140 struct nft_set_ext **ext)
142{ 141{
142 struct nft_rbtree *priv = nft_set_priv(set);
143 struct nft_rbtree_elem *rbe = elem->priv; 143 struct nft_rbtree_elem *rbe = elem->priv;
144 int err; 144 int err;
145 145
146 spin_lock_bh(&nft_rbtree_lock); 146 write_lock_bh(&priv->lock);
147 err = __nft_rbtree_insert(net, set, rbe, ext); 147 err = __nft_rbtree_insert(net, set, rbe, ext);
148 spin_unlock_bh(&nft_rbtree_lock); 148 write_unlock_bh(&priv->lock);
149 149
150 return err; 150 return err;
151} 151}
@@ -157,9 +157,9 @@ static void nft_rbtree_remove(const struct net *net,
157 struct nft_rbtree *priv = nft_set_priv(set); 157 struct nft_rbtree *priv = nft_set_priv(set);
158 struct nft_rbtree_elem *rbe = elem->priv; 158 struct nft_rbtree_elem *rbe = elem->priv;
159 159
160 spin_lock_bh(&nft_rbtree_lock); 160 write_lock_bh(&priv->lock);
161 rb_erase(&rbe->node, &priv->root); 161 rb_erase(&rbe->node, &priv->root);
162 spin_unlock_bh(&nft_rbtree_lock); 162 write_unlock_bh(&priv->lock);
163} 163}
164 164
165static void nft_rbtree_activate(const struct net *net, 165static void nft_rbtree_activate(const struct net *net,
@@ -224,12 +224,12 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
224 struct nft_set *set, 224 struct nft_set *set,
225 struct nft_set_iter *iter) 225 struct nft_set_iter *iter)
226{ 226{
227 const struct nft_rbtree *priv = nft_set_priv(set); 227 struct nft_rbtree *priv = nft_set_priv(set);
228 struct nft_rbtree_elem *rbe; 228 struct nft_rbtree_elem *rbe;
229 struct nft_set_elem elem; 229 struct nft_set_elem elem;
230 struct rb_node *node; 230 struct rb_node *node;
231 231
232 spin_lock_bh(&nft_rbtree_lock); 232 read_lock_bh(&priv->lock);
233 for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) { 233 for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
234 rbe = rb_entry(node, struct nft_rbtree_elem, node); 234 rbe = rb_entry(node, struct nft_rbtree_elem, node);
235 235
@@ -242,13 +242,13 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
242 242
243 iter->err = iter->fn(ctx, set, iter, &elem); 243 iter->err = iter->fn(ctx, set, iter, &elem);
244 if (iter->err < 0) { 244 if (iter->err < 0) {
245 spin_unlock_bh(&nft_rbtree_lock); 245 read_unlock_bh(&priv->lock);
246 return; 246 return;
247 } 247 }
248cont: 248cont:
249 iter->count++; 249 iter->count++;
250 } 250 }
251 spin_unlock_bh(&nft_rbtree_lock); 251 read_unlock_bh(&priv->lock);
252} 252}
253 253
254static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[]) 254static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[])
@@ -262,6 +262,7 @@ static int nft_rbtree_init(const struct nft_set *set,
262{ 262{
263 struct nft_rbtree *priv = nft_set_priv(set); 263 struct nft_rbtree *priv = nft_set_priv(set);
264 264
265 rwlock_init(&priv->lock);
265 priv->root = RB_ROOT; 266 priv->root = RB_ROOT;
266 return 0; 267 return 0;
267} 268}
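
nft_set_rbtree drops the global nft_rbtree_lock in favour of a per-set rwlock: lookup and walk take the read side so they can run concurrently on the same set, while insert and remove take the write side. The shape of the scheme, with illustrative names:

#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct bar_set {
	rwlock_t lock;		/* was: static DEFINE_SPINLOCK(nft_rbtree_lock) */
	struct rb_root root;
};

static void bar_set_init(struct bar_set *s)
{
	rwlock_init(&s->lock);
	s->root = RB_ROOT;
}

static bool bar_set_lookup(struct bar_set *s)
{
	bool found = false;

	read_lock_bh(&s->lock);		/* many lookups may hold this at once */
	/* ... walk s->root looking for the key ... */
	read_unlock_bh(&s->lock);
	return found;
}

static void bar_set_remove(struct bar_set *s, struct rb_node *node)
{
	write_lock_bh(&s->lock);	/* exclusive against readers and writers */
	rb_erase(node, &s->root);
	write_unlock_bh(&s->lock);
}
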
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index dab962df1787..d27b5f1ea619 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -18,6 +18,7 @@
18#include <linux/netfilter/xt_limit.h> 18#include <linux/netfilter/xt_limit.h>
19 19
20struct xt_limit_priv { 20struct xt_limit_priv {
21 spinlock_t lock;
21 unsigned long prev; 22 unsigned long prev;
22 uint32_t credit; 23 uint32_t credit;
23}; 24};
@@ -32,8 +33,6 @@ MODULE_ALIAS("ip6t_limit");
32 * see net/sched/sch_tbf.c in the linux source tree 33 * see net/sched/sch_tbf.c in the linux source tree
33 */ 34 */
34 35
35static DEFINE_SPINLOCK(limit_lock);
36
37/* Rusty: This is my (non-mathematically-inclined) understanding of 36/* Rusty: This is my (non-mathematically-inclined) understanding of
38 this algorithm. The `average rate' in jiffies becomes your initial 37 this algorithm. The `average rate' in jiffies becomes your initial
39 amount of credit `credit' and the most credit you can ever have 38 amount of credit `credit' and the most credit you can ever have
@@ -72,7 +71,7 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
72 struct xt_limit_priv *priv = r->master; 71 struct xt_limit_priv *priv = r->master;
73 unsigned long now = jiffies; 72 unsigned long now = jiffies;
74 73
75 spin_lock_bh(&limit_lock); 74 spin_lock_bh(&priv->lock);
76 priv->credit += (now - xchg(&priv->prev, now)) * CREDITS_PER_JIFFY; 75 priv->credit += (now - xchg(&priv->prev, now)) * CREDITS_PER_JIFFY;
77 if (priv->credit > r->credit_cap) 76 if (priv->credit > r->credit_cap)
78 priv->credit = r->credit_cap; 77 priv->credit = r->credit_cap;
@@ -80,11 +79,11 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
80 if (priv->credit >= r->cost) { 79 if (priv->credit >= r->cost) {
81 /* We're not limited. */ 80 /* We're not limited. */
82 priv->credit -= r->cost; 81 priv->credit -= r->cost;
83 spin_unlock_bh(&limit_lock); 82 spin_unlock_bh(&priv->lock);
84 return true; 83 return true;
85 } 84 }
86 85
87 spin_unlock_bh(&limit_lock); 86 spin_unlock_bh(&priv->lock);
88 return false; 87 return false;
89} 88}
90 89
@@ -126,6 +125,8 @@ static int limit_mt_check(const struct xt_mtchk_param *par)
126 r->credit_cap = priv->credit; /* Credits full. */ 125 r->credit_cap = priv->credit; /* Credits full. */
127 r->cost = user2credits(r->avg); 126 r->cost = user2credits(r->avg);
128 } 127 }
128 spin_lock_init(&priv->lock);
129
129 return 0; 130 return 0;
130} 131}
131 132
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 1c38d2c7caa8..80fb6f63e768 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -702,9 +702,8 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
702 event->param.conn.initiator_depth); 702 event->param.conn.initiator_depth);
703 703
704 /* rdma_accept() calls rdma_reject() internally if it fails */ 704 /* rdma_accept() calls rdma_reject() internally if it fails */
705 err = rdma_accept(cm_id, &conn_param); 705 if (rdma_accept(cm_id, &conn_param))
706 if (err) 706 rds_ib_conn_error(conn, "rdma_accept failed\n");
707 rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err);
708 707
709out: 708out:
710 if (conn) 709 if (conn)
diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c
index 4fe8f4fec4ee..86ef907067bb 100644
--- a/net/rds/ib_fmr.c
+++ b/net/rds/ib_fmr.c
@@ -78,17 +78,15 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
78 return ibmr; 78 return ibmr;
79 79
80out_no_cigar: 80out_no_cigar:
81 if (ibmr) { 81 kfree(ibmr);
82 if (fmr->fmr)
83 ib_dealloc_fmr(fmr->fmr);
84 kfree(ibmr);
85 }
86 atomic_dec(&pool->item_count); 82 atomic_dec(&pool->item_count);
83
87 return ERR_PTR(err); 84 return ERR_PTR(err);
88} 85}
89 86
90int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr, 87static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
91 struct scatterlist *sg, unsigned int nents) 88 struct rds_ib_mr *ibmr, struct scatterlist *sg,
89 unsigned int nents)
92{ 90{
93 struct ib_device *dev = rds_ibdev->dev; 91 struct ib_device *dev = rds_ibdev->dev;
94 struct rds_ib_fmr *fmr = &ibmr->u.fmr; 92 struct rds_ib_fmr *fmr = &ibmr->u.fmr;
@@ -114,29 +112,39 @@ int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
114 u64 dma_addr = ib_sg_dma_address(dev, &scat[i]); 112 u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
115 113
116 if (dma_addr & ~PAGE_MASK) { 114 if (dma_addr & ~PAGE_MASK) {
117 if (i > 0) 115 if (i > 0) {
116 ib_dma_unmap_sg(dev, sg, nents,
117 DMA_BIDIRECTIONAL);
118 return -EINVAL; 118 return -EINVAL;
119 else 119 } else {
120 ++page_cnt; 120 ++page_cnt;
121 }
121 } 122 }
122 if ((dma_addr + dma_len) & ~PAGE_MASK) { 123 if ((dma_addr + dma_len) & ~PAGE_MASK) {
123 if (i < sg_dma_len - 1) 124 if (i < sg_dma_len - 1) {
125 ib_dma_unmap_sg(dev, sg, nents,
126 DMA_BIDIRECTIONAL);
124 return -EINVAL; 127 return -EINVAL;
125 else 128 } else {
126 ++page_cnt; 129 ++page_cnt;
130 }
127 } 131 }
128 132
129 len += dma_len; 133 len += dma_len;
130 } 134 }
131 135
132 page_cnt += len >> PAGE_SHIFT; 136 page_cnt += len >> PAGE_SHIFT;
133 if (page_cnt > ibmr->pool->fmr_attr.max_pages) 137 if (page_cnt > ibmr->pool->fmr_attr.max_pages) {
138 ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
134 return -EINVAL; 139 return -EINVAL;
140 }
135 141
136 dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC, 142 dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
137 rdsibdev_to_node(rds_ibdev)); 143 rdsibdev_to_node(rds_ibdev));
138 if (!dma_pages) 144 if (!dma_pages) {
145 ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
139 return -ENOMEM; 146 return -ENOMEM;
147 }
140 148
141 page_cnt = 0; 149 page_cnt = 0;
142 for (i = 0; i < sg_dma_len; ++i) { 150 for (i = 0; i < sg_dma_len; ++i) {
@@ -149,8 +157,10 @@ int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
149 } 157 }
150 158
151 ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr); 159 ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
152 if (ret) 160 if (ret) {
161 ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
153 goto out; 162 goto out;
163 }
154 164
155 /* Success - we successfully remapped the MR, so we can 165 /* Success - we successfully remapped the MR, so we can
156 * safely tear down the old mapping. 166 * safely tear down the old mapping.
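
The rds_ib_map_fmr() hunks above add ib_dma_unmap_sg() to every failure path taken after the scatterlist has been DMA-mapped, so an error no longer leaks the mapping; the ib_mr.h hunk below just drops the prototype now that the function is static. A generic sketch of the invariant being enforced (names hypothetical), written here with a single unwind label instead of repeating the unmap at each return:

#include <rdma/ib_verbs.h>

static int map_and_program(struct ib_device *dev, struct scatterlist *sg,
			   unsigned int nents, unsigned int max_pages)
{
	unsigned int page_cnt = 0;
	int mapped, ret;

	mapped = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	if (unlikely(!mapped))
		return -ENOMEM;

	/* ... accumulate page_cnt while validating the mapped layout ... */
	if (page_cnt > max_pages) {
		ret = -EINVAL;
		goto unmap;	/* this path still owns the DMA mapping */
	}

	/* ... allocate the page array, program the FMR, etc. ... */
	return 0;

unmap:
	ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);	/* pairs with the map above */
	return ret;
}
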
diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h
index 5d6e98a79a5e..0ea4ab017a8c 100644
--- a/net/rds/ib_mr.h
+++ b/net/rds/ib_mr.h
@@ -125,8 +125,6 @@ void rds_ib_mr_exit(void);
125void __rds_ib_teardown_mr(struct rds_ib_mr *); 125void __rds_ib_teardown_mr(struct rds_ib_mr *);
126void rds_ib_teardown_mr(struct rds_ib_mr *); 126void rds_ib_teardown_mr(struct rds_ib_mr *);
127struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *, int); 127struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *, int);
128int rds_ib_map_fmr(struct rds_ib_device *, struct rds_ib_mr *,
129 struct scatterlist *, unsigned int);
130struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *); 128struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *);
131int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *, int, struct rds_ib_mr **); 129int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *, int, struct rds_ib_mr **);
132struct rds_ib_mr *rds_ib_reg_fmr(struct rds_ib_device *, struct scatterlist *, 130struct rds_ib_mr *rds_ib_reg_fmr(struct rds_ib_device *, struct scatterlist *,
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 71e7ff22f7c9..c75ea5c9102c 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -603,8 +603,8 @@ nla_put_failure:
603 return -1; 603 return -1;
604} 604}
605 605
606int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife, 606static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
607 u16 metaid, u16 mlen, void *mdata) 607 u16 metaid, u16 mlen, void *mdata)
608{ 608{
609 struct tcf_meta_info *e; 609 struct tcf_meta_info *e;
610 610
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index bcf49cd22786..62567bfe52c7 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -274,7 +274,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
274 return NULL; 274 return NULL;
275} 275}
276 276
277void qdisc_hash_add(struct Qdisc *q) 277void qdisc_hash_add(struct Qdisc *q, bool invisible)
278{ 278{
279 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { 279 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
280 struct Qdisc *root = qdisc_dev(q)->qdisc; 280 struct Qdisc *root = qdisc_dev(q)->qdisc;
@@ -282,6 +282,8 @@ void qdisc_hash_add(struct Qdisc *q)
282 WARN_ON_ONCE(root == &noop_qdisc); 282 WARN_ON_ONCE(root == &noop_qdisc);
283 ASSERT_RTNL(); 283 ASSERT_RTNL();
284 hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle); 284 hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
285 if (invisible)
286 q->flags |= TCQ_F_INVISIBLE;
285 } 287 }
286} 288}
287EXPORT_SYMBOL(qdisc_hash_add); 289EXPORT_SYMBOL(qdisc_hash_add);
@@ -1003,7 +1005,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
1003 goto err_out4; 1005 goto err_out4;
1004 } 1006 }
1005 1007
1006 qdisc_hash_add(sch); 1008 qdisc_hash_add(sch, false);
1007 1009
1008 return sch; 1010 return sch;
1009 } 1011 }
@@ -1401,9 +1403,14 @@ nla_put_failure:
1401 return -1; 1403 return -1;
1402} 1404}
1403 1405
1404static bool tc_qdisc_dump_ignore(struct Qdisc *q) 1406static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
1405{ 1407{
1406 return (q->flags & TCQ_F_BUILTIN) ? true : false; 1408 if (q->flags & TCQ_F_BUILTIN)
1409 return true;
1410 if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
1411 return true;
1412
1413 return false;
1407} 1414}
1408 1415
1409static int qdisc_notify(struct net *net, struct sk_buff *oskb, 1416static int qdisc_notify(struct net *net, struct sk_buff *oskb,
@@ -1417,12 +1424,12 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb,
1417 if (!skb) 1424 if (!skb)
1418 return -ENOBUFS; 1425 return -ENOBUFS;
1419 1426
1420 if (old && !tc_qdisc_dump_ignore(old)) { 1427 if (old && !tc_qdisc_dump_ignore(old, false)) {
1421 if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq, 1428 if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
1422 0, RTM_DELQDISC) < 0) 1429 0, RTM_DELQDISC) < 0)
1423 goto err_out; 1430 goto err_out;
1424 } 1431 }
1425 if (new && !tc_qdisc_dump_ignore(new)) { 1432 if (new && !tc_qdisc_dump_ignore(new, false)) {
1426 if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq, 1433 if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
1427 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) 1434 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
1428 goto err_out; 1435 goto err_out;
@@ -1439,7 +1446,8 @@ err_out:
1439 1446
1440static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, 1447static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1441 struct netlink_callback *cb, 1448 struct netlink_callback *cb,
1442 int *q_idx_p, int s_q_idx, bool recur) 1449 int *q_idx_p, int s_q_idx, bool recur,
1450 bool dump_invisible)
1443{ 1451{
1444 int ret = 0, q_idx = *q_idx_p; 1452 int ret = 0, q_idx = *q_idx_p;
1445 struct Qdisc *q; 1453 struct Qdisc *q;
@@ -1452,7 +1460,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1452 if (q_idx < s_q_idx) { 1460 if (q_idx < s_q_idx) {
1453 q_idx++; 1461 q_idx++;
1454 } else { 1462 } else {
1455 if (!tc_qdisc_dump_ignore(q) && 1463 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1456 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid, 1464 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1457 cb->nlh->nlmsg_seq, NLM_F_MULTI, 1465 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1458 RTM_NEWQDISC) <= 0) 1466 RTM_NEWQDISC) <= 0)
@@ -1474,7 +1482,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1474 q_idx++; 1482 q_idx++;
1475 continue; 1483 continue;
1476 } 1484 }
1477 if (!tc_qdisc_dump_ignore(q) && 1485 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1478 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid, 1486 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1479 cb->nlh->nlmsg_seq, NLM_F_MULTI, 1487 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1480 RTM_NEWQDISC) <= 0) 1488 RTM_NEWQDISC) <= 0)
@@ -1496,12 +1504,21 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1496 int idx, q_idx; 1504 int idx, q_idx;
1497 int s_idx, s_q_idx; 1505 int s_idx, s_q_idx;
1498 struct net_device *dev; 1506 struct net_device *dev;
1507 const struct nlmsghdr *nlh = cb->nlh;
1508 struct tcmsg *tcm = nlmsg_data(nlh);
1509 struct nlattr *tca[TCA_MAX + 1];
1510 int err;
1499 1511
1500 s_idx = cb->args[0]; 1512 s_idx = cb->args[0];
1501 s_q_idx = q_idx = cb->args[1]; 1513 s_q_idx = q_idx = cb->args[1];
1502 1514
1503 idx = 0; 1515 idx = 0;
1504 ASSERT_RTNL(); 1516 ASSERT_RTNL();
1517
1518 err = nlmsg_parse(nlh, sizeof(*tcm), tca, TCA_MAX, NULL);
1519 if (err < 0)
1520 return err;
1521
1505 for_each_netdev(net, dev) { 1522 for_each_netdev(net, dev) {
1506 struct netdev_queue *dev_queue; 1523 struct netdev_queue *dev_queue;
1507 1524
@@ -1512,13 +1529,14 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1512 q_idx = 0; 1529 q_idx = 0;
1513 1530
1514 if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx, 1531 if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
1515 true) < 0) 1532 true, tca[TCA_DUMP_INVISIBLE]) < 0)
1516 goto done; 1533 goto done;
1517 1534
1518 dev_queue = dev_ingress_queue(dev); 1535 dev_queue = dev_ingress_queue(dev);
1519 if (dev_queue && 1536 if (dev_queue &&
1520 tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, 1537 tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
1521 &q_idx, s_q_idx, false) < 0) 1538 &q_idx, s_q_idx, false,
1539 tca[TCA_DUMP_INVISIBLE]) < 0)
1522 goto done; 1540 goto done;
1523 1541
1524cont: 1542cont:
@@ -1762,7 +1780,7 @@ static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
1762{ 1780{
1763 struct qdisc_dump_args arg; 1781 struct qdisc_dump_args arg;
1764 1782
1765 if (tc_qdisc_dump_ignore(q) || 1783 if (tc_qdisc_dump_ignore(q, false) ||
1766 *t_p < s_t || !q->ops->cl_ops || 1784 *t_p < s_t || !q->ops->cl_ops ||
1767 (tcm->tcm_parent && 1785 (tcm->tcm_parent &&
1768 TC_H_MAJ(tcm->tcm_parent) != q->handle)) { 1786 TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
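
With the sch_api.c changes above, qdisc_hash_add() takes an extra "invisible" flag, and the default children hashed with it (see the qdisc_hash_add(..., true) calls in the scheduler hunks below) are skipped by qdisc dumps unless the RTM_GETQDISC dump request carries a TCA_DUMP_INVISIBLE attribute, which tc_dump_qdisc() now parses out of the request. A hedged userspace sketch of such a dump request using libmnl; it assumes userspace headers new enough to define TCA_DUMP_INVISIBLE, and only the presence of the (zero-length) attribute matters:

#include <libmnl/libmnl.h>
#include <linux/rtnetlink.h>
#include <sys/socket.h>
#include <time.h>

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct tcmsg *tcm;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = RTM_GETQDISC;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	nlh->nlmsg_seq = (unsigned int)time(NULL);

	tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
	tcm->tcm_family = AF_UNSPEC;

	/* flag attribute: presence alone asks for invisible qdiscs too */
	mnl_attr_put(nlh, TCA_DUMP_INVISIBLE, 0, NULL);

	nl = mnl_socket_open(NETLINK_ROUTE);
	if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0)
		return 1;
	if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0)
		return 1;

	/* ... mnl_socket_recvfrom()/mnl_cb_run() loop to read the dump ... */
	mnl_socket_close(nl);
	return 0;
}

Existing dumpers that do not add the attribute keep their current output, which is why qdisc_notify() and the class walk pass dump_invisible = false.
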
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index d6ca18dc04c3..cf93e5ff3d63 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1161,6 +1161,8 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
1161 sch->handle); 1161 sch->handle);
1162 if (!q->link.q) 1162 if (!q->link.q)
1163 q->link.q = &noop_qdisc; 1163 q->link.q = &noop_qdisc;
1164 else
1165 qdisc_hash_add(q->link.q, true);
1164 1166
1165 q->link.priority = TC_CBQ_MAXPRIO - 1; 1167 q->link.priority = TC_CBQ_MAXPRIO - 1;
1166 q->link.priority2 = TC_CBQ_MAXPRIO - 1; 1168 q->link.priority2 = TC_CBQ_MAXPRIO - 1;
@@ -1600,6 +1602,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
1600 cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); 1602 cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
1601 if (!cl->q) 1603 if (!cl->q)
1602 cl->q = &noop_qdisc; 1604 cl->q = &noop_qdisc;
1605 else
1606 qdisc_hash_add(cl->q, true);
1607
1603 cl->common.classid = classid; 1608 cl->common.classid = classid;
1604 cl->tparent = parent; 1609 cl->tparent = parent;
1605 cl->qdisc = sch; 1610 cl->qdisc = sch;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index bb4cbdf75004..9fe67e257dfa 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -117,6 +117,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
117 &pfifo_qdisc_ops, classid); 117 &pfifo_qdisc_ops, classid);
118 if (cl->qdisc == NULL) 118 if (cl->qdisc == NULL)
119 cl->qdisc = &noop_qdisc; 119 cl->qdisc = &noop_qdisc;
120 else
121 qdisc_hash_add(cl->qdisc, true);
120 122
121 if (tca[TCA_RATE]) { 123 if (tca[TCA_RATE]) {
122 err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est, 124 err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 802ac7c2e5e8..1b98cb2160ff 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -368,6 +368,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
368 p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle); 368 p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle);
369 if (p->q == NULL) 369 if (p->q == NULL)
370 p->q = &noop_qdisc; 370 p->q = &noop_qdisc;
371 else
372 qdisc_hash_add(p->q, true);
371 373
372 pr_debug("%s: qdisc %p\n", __func__, p->q); 374 pr_debug("%s: qdisc %p\n", __func__, p->q);
373 375
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 9f3a884d1590..097bbe9857a5 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -288,7 +288,6 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
288 struct fq_codel_flow *flow; 288 struct fq_codel_flow *flow;
289 struct list_head *head; 289 struct list_head *head;
290 u32 prev_drop_count, prev_ecn_mark; 290 u32 prev_drop_count, prev_ecn_mark;
291 unsigned int prev_backlog;
292 291
293begin: 292begin:
294 head = &q->new_flows; 293 head = &q->new_flows;
@@ -307,7 +306,6 @@ begin:
307 306
308 prev_drop_count = q->cstats.drop_count; 307 prev_drop_count = q->cstats.drop_count;
309 prev_ecn_mark = q->cstats.ecn_mark; 308 prev_ecn_mark = q->cstats.ecn_mark;
310 prev_backlog = sch->qstats.backlog;
311 309
312 skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams, 310 skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
313 &flow->cvars, &q->cstats, qdisc_pkt_len, 311 &flow->cvars, &q->cstats, qdisc_pkt_len,
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index b052b27a984e..3e64d23e098c 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -795,7 +795,7 @@ static void attach_default_qdiscs(struct net_device *dev)
795 } 795 }
796#ifdef CONFIG_NET_SCHED 796#ifdef CONFIG_NET_SCHED
797 if (dev->qdisc) 797 if (dev->qdisc)
798 qdisc_hash_add(dev->qdisc); 798 qdisc_hash_add(dev->qdisc, false);
799#endif 799#endif
800} 800}
801 801
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 3ffaa6fb0990..0198c6cdda49 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1066,6 +1066,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1066 &pfifo_qdisc_ops, classid); 1066 &pfifo_qdisc_ops, classid);
1067 if (cl->qdisc == NULL) 1067 if (cl->qdisc == NULL)
1068 cl->qdisc = &noop_qdisc; 1068 cl->qdisc = &noop_qdisc;
1069 else
1070 qdisc_hash_add(cl->qdisc, true);
1069 INIT_LIST_HEAD(&cl->children); 1071 INIT_LIST_HEAD(&cl->children);
1070 cl->vt_tree = RB_ROOT; 1072 cl->vt_tree = RB_ROOT;
1071 cl->cf_tree = RB_ROOT; 1073 cl->cf_tree = RB_ROOT;
@@ -1425,6 +1427,8 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
1425 sch->handle); 1427 sch->handle);
1426 if (q->root.qdisc == NULL) 1428 if (q->root.qdisc == NULL)
1427 q->root.qdisc = &noop_qdisc; 1429 q->root.qdisc = &noop_qdisc;
1430 else
1431 qdisc_hash_add(q->root.qdisc, true);
1428 INIT_LIST_HEAD(&q->root.children); 1432 INIT_LIST_HEAD(&q->root.children);
1429 q->root.vt_tree = RB_ROOT; 1433 q->root.vt_tree = RB_ROOT;
1430 q->root.cf_tree = RB_ROOT; 1434 q->root.cf_tree = RB_ROOT;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 4cd5fb134bc9..95867033542e 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1460,6 +1460,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1460 qdisc_class_hash_insert(&q->clhash, &cl->common); 1460 qdisc_class_hash_insert(&q->clhash, &cl->common);
1461 if (parent) 1461 if (parent)
1462 parent->children++; 1462 parent->children++;
1463 if (cl->un.leaf.q != &noop_qdisc)
1464 qdisc_hash_add(cl->un.leaf.q, true);
1463 } else { 1465 } else {
1464 if (tca[TCA_RATE]) { 1466 if (tca[TCA_RATE]) {
1465 err = gen_replace_estimator(&cl->bstats, NULL, 1467 err = gen_replace_estimator(&cl->bstats, NULL,
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 20b7f1646f69..cadfdd4f1e52 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -84,7 +84,7 @@ static void mq_attach(struct Qdisc *sch)
84 qdisc_destroy(old); 84 qdisc_destroy(old);
85#ifdef CONFIG_NET_SCHED 85#ifdef CONFIG_NET_SCHED
86 if (ntx < dev->real_num_tx_queues) 86 if (ntx < dev->real_num_tx_queues)
87 qdisc_hash_add(qdisc); 87 qdisc_hash_add(qdisc, false);
88#endif 88#endif
89 89
90 } 90 }
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 922683418e53..0a4cf27ea54b 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -21,14 +21,13 @@
21 21
22struct mqprio_sched { 22struct mqprio_sched {
23 struct Qdisc **qdiscs; 23 struct Qdisc **qdiscs;
24 int hw_owned; 24 int hw_offload;
25}; 25};
26 26
27static void mqprio_destroy(struct Qdisc *sch) 27static void mqprio_destroy(struct Qdisc *sch)
28{ 28{
29 struct net_device *dev = qdisc_dev(sch); 29 struct net_device *dev = qdisc_dev(sch);
30 struct mqprio_sched *priv = qdisc_priv(sch); 30 struct mqprio_sched *priv = qdisc_priv(sch);
31 struct tc_to_netdev tc = {.type = TC_SETUP_MQPRIO};
32 unsigned int ntx; 31 unsigned int ntx;
33 32
34 if (priv->qdiscs) { 33 if (priv->qdiscs) {
@@ -39,10 +38,15 @@ static void mqprio_destroy(struct Qdisc *sch)
39 kfree(priv->qdiscs); 38 kfree(priv->qdiscs);
40 } 39 }
41 40
42 if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc) 41 if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
42 struct tc_mqprio_qopt offload = { 0 };
43 struct tc_to_netdev tc = { .type = TC_SETUP_MQPRIO,
44 { .mqprio = &offload } };
45
43 dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc); 46 dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
44 else 47 } else {
45 netdev_set_num_tc(dev, 0); 48 netdev_set_num_tc(dev, 0);
49 }
46} 50}
47 51
48static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt) 52static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
@@ -59,15 +63,20 @@ static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
59 return -EINVAL; 63 return -EINVAL;
60 } 64 }
61 65
62 /* net_device does not support requested operation */ 66 /* Limit qopt->hw to maximum supported offload value. Drivers have
63 if (qopt->hw && !dev->netdev_ops->ndo_setup_tc) 67 * the option of overriding this later if they don't support the a
64 return -EINVAL; 68 * given offload type.
69 */
70 if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
71 qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;
65 72
66 /* if hw owned qcount and qoffset are taken from LLD so 73 /* If hardware offload is requested we will leave it to the device
67 * no reason to verify them here 74 * to either populate the queue counts itself or to validate the
75 * provided queue counts. If ndo_setup_tc is not present then
76 * hardware doesn't support offload and we should return an error.
68 */ 77 */
69 if (qopt->hw) 78 if (qopt->hw)
70 return 0; 79 return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL;
71 80
72 for (i = 0; i < qopt->num_tc; i++) { 81 for (i = 0; i < qopt->num_tc; i++) {
73 unsigned int last = qopt->offset[i] + qopt->count[i]; 82 unsigned int last = qopt->offset[i] + qopt->count[i];
@@ -139,13 +148,15 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
139 * supplied and verified mapping 148 * supplied and verified mapping
140 */ 149 */
141 if (qopt->hw) { 150 if (qopt->hw) {
142 struct tc_to_netdev tc = {.type = TC_SETUP_MQPRIO, 151 struct tc_mqprio_qopt offload = *qopt;
143 { .tc = qopt->num_tc }}; 152 struct tc_to_netdev tc = { .type = TC_SETUP_MQPRIO,
153 { .mqprio = &offload } };
144 154
145 priv->hw_owned = 1;
146 err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc); 155 err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
147 if (err) 156 if (err)
148 return err; 157 return err;
158
159 priv->hw_offload = offload.hw;
149 } else { 160 } else {
150 netdev_set_num_tc(dev, qopt->num_tc); 161 netdev_set_num_tc(dev, qopt->num_tc);
151 for (i = 0; i < qopt->num_tc; i++) 162 for (i = 0; i < qopt->num_tc; i++)
@@ -175,7 +186,7 @@ static void mqprio_attach(struct Qdisc *sch)
175 if (old) 186 if (old)
176 qdisc_destroy(old); 187 qdisc_destroy(old);
177 if (ntx < dev->real_num_tx_queues) 188 if (ntx < dev->real_num_tx_queues)
178 qdisc_hash_add(qdisc); 189 qdisc_hash_add(qdisc, false);
179 } 190 }
180 kfree(priv->qdiscs); 191 kfree(priv->qdiscs);
181 priv->qdiscs = NULL; 192 priv->qdiscs = NULL;
@@ -243,7 +254,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
243 254
244 opt.num_tc = netdev_get_num_tc(dev); 255 opt.num_tc = netdev_get_num_tc(dev);
245 memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map)); 256 memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
246 opt.hw = priv->hw_owned; 257 opt.hw = priv->hw_offload;
247 258
248 for (i = 0; i < netdev_get_num_tc(dev); i++) { 259 for (i = 0; i < netdev_get_num_tc(dev); i++) {
249 opt.count[i] = dev->tc_to_txq[i].count; 260 opt.count[i] = dev->tc_to_txq[i].count;
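
The mqprio hunks above stop treating the hardware flag as a plain "hw owned" boolean: the requested qopt->hw is clamped to TC_MQPRIO_HW_OFFLOAD_MAX, the whole struct tc_mqprio_qopt is handed to the driver through tc->mqprio, and whatever offload mode the driver leaves in offload.hw is recorded in priv->hw_offload and reported back on dump. A hedged sketch of the driver side of that negotiation; the driver name and its internals are hypothetical, the callback signature is taken from how ndo_setup_tc is invoked in this hunk, and TC_MQPRIO_HW_OFFLOAD_TCS is assumed to be the "TC mapping only" mode from the same enum as TC_MQPRIO_HW_OFFLOAD_MAX:

#include <linux/netdevice.h>
#include <linux/pkt_sched.h>

static int foo_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			struct tc_to_netdev *tc)
{
	struct tc_mqprio_qopt *mqprio;

	if (tc->type != TC_SETUP_MQPRIO)
		return -EOPNOTSUPP;

	mqprio = tc->mqprio;

	/* Report back the mode actually programmed; mqprio_init() stores
	 * this in priv->hw_offload and mqprio_dump() returns it as opt.hw.
	 */
	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	/* ... program dev's traffic classes from mqprio->num_tc,
	 * mqprio->count[] and mqprio->offset[] ...
	 */
	return 0;
}
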
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index e7839a0d0eaa..43a3a10b3c81 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -217,6 +217,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
217 sch_tree_lock(sch); 217 sch_tree_lock(sch);
218 old = q->queues[i]; 218 old = q->queues[i];
219 q->queues[i] = child; 219 q->queues[i] = child;
220 if (child != &noop_qdisc)
221 qdisc_hash_add(child, true);
220 222
221 if (old != &noop_qdisc) { 223 if (old != &noop_qdisc) {
222 qdisc_tree_reduce_backlog(old, 224 qdisc_tree_reduce_backlog(old,
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index c8bb62a1e744..94b4928ad413 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -462,7 +462,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
462 /* If a delay is expected, orphan the skb. (orphaning usually takes 462 /* If a delay is expected, orphan the skb. (orphaning usually takes
463 * place at TX completion time, so _before_ the link transit delay) 463 * place at TX completion time, so _before_ the link transit delay)
464 */ 464 */
465 if (q->latency || q->jitter) 465 if (q->latency || q->jitter || q->rate)
466 skb_orphan_partial(skb); 466 skb_orphan_partial(skb);
467 467
468 /* 468 /*
@@ -530,21 +530,31 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
530 now = psched_get_time(); 530 now = psched_get_time();
531 531
532 if (q->rate) { 532 if (q->rate) {
533 struct sk_buff *last; 533 struct netem_skb_cb *last = NULL;
534
535 if (sch->q.tail)
536 last = netem_skb_cb(sch->q.tail);
537 if (q->t_root.rb_node) {
538 struct sk_buff *t_skb;
539 struct netem_skb_cb *t_last;
540
541 t_skb = netem_rb_to_skb(rb_last(&q->t_root));
542 t_last = netem_skb_cb(t_skb);
543 if (!last ||
544 t_last->time_to_send > last->time_to_send) {
545 last = t_last;
546 }
547 }
534 548
535 if (sch->q.qlen)
536 last = sch->q.tail;
537 else
538 last = netem_rb_to_skb(rb_last(&q->t_root));
539 if (last) { 549 if (last) {
540 /* 550 /*
541 * Last packet in queue is reference point (now), 551 * Last packet in queue is reference point (now),
542 * calculate this time bonus and subtract 552 * calculate this time bonus and subtract
543 * from delay. 553 * from delay.
544 */ 554 */
545 delay -= netem_skb_cb(last)->time_to_send - now; 555 delay -= last->time_to_send - now;
546 delay = max_t(psched_tdiff_t, 0, delay); 556 delay = max_t(psched_tdiff_t, 0, delay);
547 now = netem_skb_cb(last)->time_to_send; 557 now = last->time_to_send;
548 } 558 }
549 559
550 delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q); 560 delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index d4d7db267b6e..92c2e6d448d7 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -192,8 +192,11 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
192 qdisc_destroy(child); 192 qdisc_destroy(child);
193 } 193 }
194 194
195 for (i = oldbands; i < q->bands; i++) 195 for (i = oldbands; i < q->bands; i++) {
196 q->queues[i] = queues[i]; 196 q->queues[i] = queues[i];
197 if (q->queues[i] != &noop_qdisc)
198 qdisc_hash_add(q->queues[i], true);
199 }
197 200
198 sch_tree_unlock(sch); 201 sch_tree_unlock(sch);
199 return 0; 202 return 0;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index f9e712ce2d15..6c85f3e9239b 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -494,6 +494,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
494 goto destroy_class; 494 goto destroy_class;
495 } 495 }
496 496
497 if (cl->qdisc != &noop_qdisc)
498 qdisc_hash_add(cl->qdisc, true);
497 sch_tree_lock(sch); 499 sch_tree_lock(sch);
498 qdisc_class_hash_insert(&q->clhash, &cl->common); 500 qdisc_class_hash_insert(&q->clhash, &cl->common);
499 sch_tree_unlock(sch); 501 sch_tree_unlock(sch);
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 249b2a18acbd..799ea6dd69b2 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -191,6 +191,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
191 return PTR_ERR(child); 191 return PTR_ERR(child);
192 } 192 }
193 193
194 if (child != &noop_qdisc)
195 qdisc_hash_add(child, true);
194 sch_tree_lock(sch); 196 sch_tree_lock(sch);
195 q->flags = ctl->flags; 197 q->flags = ctl->flags;
196 q->limit = ctl->limit; 198 q->limit = ctl->limit;
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index fe6963d21519..ae862f172c94 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -513,6 +513,8 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
513 if (IS_ERR(child)) 513 if (IS_ERR(child))
514 return PTR_ERR(child); 514 return PTR_ERR(child);
515 515
516 if (child != &noop_qdisc)
517 qdisc_hash_add(child, true);
516 sch_tree_lock(sch); 518 sch_tree_lock(sch);
517 519
518 qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen, 520 qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 303355c449ab..9850126129a3 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -396,6 +396,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
396 q->qdisc->qstats.backlog); 396 q->qdisc->qstats.backlog);
397 qdisc_destroy(q->qdisc); 397 qdisc_destroy(q->qdisc);
398 q->qdisc = child; 398 q->qdisc = child;
399 if (child != &noop_qdisc)
400 qdisc_hash_add(child, true);
399 } 401 }
400 q->limit = qopt->limit; 402 q->limit = qopt->limit;
401 if (tb[TCA_TBF_PBURST]) 403 if (tb[TCA_TBF_PBURST])
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index e03bb1aab4d0..ab1374fa5ab0 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3872,9 +3872,18 @@ sctp_disposition_t sctp_sf_do_reconf(struct net *net,
3872 else if (param.p->type == SCTP_PARAM_RESET_IN_REQUEST) 3872 else if (param.p->type == SCTP_PARAM_RESET_IN_REQUEST)
3873 reply = sctp_process_strreset_inreq( 3873 reply = sctp_process_strreset_inreq(
3874 (struct sctp_association *)asoc, param, &ev); 3874 (struct sctp_association *)asoc, param, &ev);
3875 /* More handles for other types will be added here, by now it 3875 else if (param.p->type == SCTP_PARAM_RESET_TSN_REQUEST)
3876 * just ignores other types. 3876 reply = sctp_process_strreset_tsnreq(
3877 */ 3877 (struct sctp_association *)asoc, param, &ev);
3878 else if (param.p->type == SCTP_PARAM_RESET_ADD_OUT_STREAMS)
3879 reply = sctp_process_strreset_addstrm_out(
3880 (struct sctp_association *)asoc, param, &ev);
3881 else if (param.p->type == SCTP_PARAM_RESET_ADD_IN_STREAMS)
3882 reply = sctp_process_strreset_addstrm_in(
3883 (struct sctp_association *)asoc, param, &ev);
3884 else if (param.p->type == SCTP_PARAM_RESET_RESPONSE)
3885 reply = sctp_process_strreset_resp(
3886 (struct sctp_association *)asoc, param, &ev);
3878 3887
3879 if (ev) 3888 if (ev)
3880 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, 3889 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 0f378ea2ae38..72cc3ecf6516 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3758,6 +3758,39 @@ out:
3758 return retval; 3758 return retval;
3759} 3759}
3760 3760
3761static int sctp_setsockopt_reconfig_supported(struct sock *sk,
3762 char __user *optval,
3763 unsigned int optlen)
3764{
3765 struct sctp_assoc_value params;
3766 struct sctp_association *asoc;
3767 int retval = -EINVAL;
3768
3769 if (optlen != sizeof(params))
3770 goto out;
3771
3772 if (copy_from_user(&params, optval, optlen)) {
3773 retval = -EFAULT;
3774 goto out;
3775 }
3776
3777 asoc = sctp_id2assoc(sk, params.assoc_id);
3778 if (asoc) {
3779 asoc->reconf_enable = !!params.assoc_value;
3780 } else if (!params.assoc_id) {
3781 struct sctp_sock *sp = sctp_sk(sk);
3782
3783 sp->ep->reconf_enable = !!params.assoc_value;
3784 } else {
3785 goto out;
3786 }
3787
3788 retval = 0;
3789
3790out:
3791 return retval;
3792}
3793
3761static int sctp_setsockopt_enable_strreset(struct sock *sk, 3794static int sctp_setsockopt_enable_strreset(struct sock *sk,
3762 char __user *optval, 3795 char __user *optval,
3763 unsigned int optlen) 3796 unsigned int optlen)
@@ -4038,6 +4071,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
4038 case SCTP_DEFAULT_PRINFO: 4071 case SCTP_DEFAULT_PRINFO:
4039 retval = sctp_setsockopt_default_prinfo(sk, optval, optlen); 4072 retval = sctp_setsockopt_default_prinfo(sk, optval, optlen);
4040 break; 4073 break;
4074 case SCTP_RECONFIG_SUPPORTED:
4075 retval = sctp_setsockopt_reconfig_supported(sk, optval, optlen);
4076 break;
4041 case SCTP_ENABLE_STREAM_RESET: 4077 case SCTP_ENABLE_STREAM_RESET:
4042 retval = sctp_setsockopt_enable_strreset(sk, optval, optlen); 4078 retval = sctp_setsockopt_enable_strreset(sk, optval, optlen);
4043 break; 4079 break;
@@ -6540,6 +6576,47 @@ out:
6540 return retval; 6576 return retval;
6541} 6577}
6542 6578
6579static int sctp_getsockopt_reconfig_supported(struct sock *sk, int len,
6580 char __user *optval,
6581 int __user *optlen)
6582{
6583 struct sctp_assoc_value params;
6584 struct sctp_association *asoc;
6585 int retval = -EFAULT;
6586
6587 if (len < sizeof(params)) {
6588 retval = -EINVAL;
6589 goto out;
6590 }
6591
6592 len = sizeof(params);
6593 if (copy_from_user(&params, optval, len))
6594 goto out;
6595
6596 asoc = sctp_id2assoc(sk, params.assoc_id);
6597 if (asoc) {
6598 params.assoc_value = asoc->reconf_enable;
6599 } else if (!params.assoc_id) {
6600 struct sctp_sock *sp = sctp_sk(sk);
6601
6602 params.assoc_value = sp->ep->reconf_enable;
6603 } else {
6604 retval = -EINVAL;
6605 goto out;
6606 }
6607
6608 if (put_user(len, optlen))
6609 goto out;
6610
6611 if (copy_to_user(optval, &params, len))
6612 goto out;
6613
6614 retval = 0;
6615
6616out:
6617 return retval;
6618}
6619
6543static int sctp_getsockopt_enable_strreset(struct sock *sk, int len, 6620static int sctp_getsockopt_enable_strreset(struct sock *sk, int len,
6544 char __user *optval, 6621 char __user *optval,
6545 int __user *optlen) 6622 int __user *optlen)
@@ -6748,6 +6825,10 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
6748 retval = sctp_getsockopt_pr_assocstatus(sk, len, optval, 6825 retval = sctp_getsockopt_pr_assocstatus(sk, len, optval,
6749 optlen); 6826 optlen);
6750 break; 6827 break;
6828 case SCTP_RECONFIG_SUPPORTED:
6829 retval = sctp_getsockopt_reconfig_supported(sk, len, optval,
6830 optlen);
6831 break;
6751 case SCTP_ENABLE_STREAM_RESET: 6832 case SCTP_ENABLE_STREAM_RESET:
6752 retval = sctp_getsockopt_enable_strreset(sk, len, optval, 6833 retval = sctp_getsockopt_enable_strreset(sk, len, optval,
6753 optlen); 6834 optlen);
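
The socket.c hunks above wire up a new SCTP_RECONFIG_SUPPORTED socket option that reads or writes reconf_enable through a struct sctp_assoc_value, either on one association (assoc_id != 0) or on the endpoint default (assoc_id == 0). A hedged userspace sketch of using it on a one-to-one style SCTP socket; it assumes headers new enough to define SCTP_RECONFIG_SUPPORTED:

#include <netinet/in.h>
#include <netinet/sctp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	struct sctp_assoc_value av;
	socklen_t len = sizeof(av);
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

	if (fd < 0)
		return 1;

	memset(&av, 0, sizeof(av));
	av.assoc_id = 0;	/* 0: endpoint default, as in the kernel branches above */
	av.assoc_value = 1;	/* enable RE-CONFIG (stream reset) support */
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_RECONFIG_SUPPORTED, &av, sizeof(av)) < 0)
		perror("setsockopt SCTP_RECONFIG_SUPPORTED");

	if (getsockopt(fd, IPPROTO_SCTP, SCTP_RECONFIG_SUPPORTED, &av, &len) == 0)
		printf("reconf_enable = %u\n", av.assoc_value);
	return 0;
}
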
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 1c6cc04fa3a4..961d0a1e99d1 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -267,18 +267,6 @@ int sctp_send_add_streams(struct sctp_association *asoc,
267 stream->out = streamout; 267 stream->out = streamout;
268 } 268 }
269 269
270 if (in) {
271 struct sctp_stream_in *streamin;
272
273 streamin = krealloc(stream->in, incnt * sizeof(*streamin),
274 GFP_KERNEL);
275 if (!streamin)
276 goto out;
277
278 memset(streamin + stream->incnt, 0, in * sizeof(*streamin));
279 stream->in = streamin;
280 }
281
282 chunk = sctp_make_strreset_addstrm(asoc, out, in); 270 chunk = sctp_make_strreset_addstrm(asoc, out, in);
283 if (!chunk) 271 if (!chunk)
284 goto out; 272 goto out;
@@ -303,13 +291,14 @@ out:
303} 291}
304 292
305static sctp_paramhdr_t *sctp_chunk_lookup_strreset_param( 293static sctp_paramhdr_t *sctp_chunk_lookup_strreset_param(
306 struct sctp_association *asoc, __u32 resp_seq) 294 struct sctp_association *asoc, __u32 resp_seq,
295 __be16 type)
307{ 296{
308 struct sctp_chunk *chunk = asoc->strreset_chunk; 297 struct sctp_chunk *chunk = asoc->strreset_chunk;
309 struct sctp_reconf_chunk *hdr; 298 struct sctp_reconf_chunk *hdr;
310 union sctp_params param; 299 union sctp_params param;
311 300
312 if (ntohl(resp_seq) != asoc->strreset_outseq || !chunk) 301 if (!chunk)
313 return NULL; 302 return NULL;
314 303
315 hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr; 304 hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr;
@@ -320,7 +309,8 @@ static sctp_paramhdr_t *sctp_chunk_lookup_strreset_param(
320 */ 309 */
321 struct sctp_strreset_tsnreq *req = param.v; 310 struct sctp_strreset_tsnreq *req = param.v;
322 311
323 if (req->request_seq == resp_seq) 312 if ((!resp_seq || req->request_seq == resp_seq) &&
313 (!type || type == req->param_hdr.type))
324 return param.v; 314 return param.v;
325 } 315 }
326 316
@@ -361,13 +351,9 @@ struct sctp_chunk *sctp_process_strreset_outreq(
361 goto out; 351 goto out;
362 352
363 if (asoc->strreset_chunk) { 353 if (asoc->strreset_chunk) {
364 sctp_paramhdr_t *param_hdr; 354 if (!sctp_chunk_lookup_strreset_param(
365 struct sctp_transport *t; 355 asoc, outreq->response_seq,
366 356 SCTP_PARAM_RESET_IN_REQUEST)) {
367 param_hdr = sctp_chunk_lookup_strreset_param(
368 asoc, outreq->response_seq);
369 if (!param_hdr || param_hdr->type !=
370 SCTP_PARAM_RESET_IN_REQUEST) {
371 /* same process with outstanding isn't 0 */ 357 /* same process with outstanding isn't 0 */
372 result = SCTP_STRRESET_ERR_IN_PROGRESS; 358 result = SCTP_STRRESET_ERR_IN_PROGRESS;
373 goto out; 359 goto out;
@@ -377,6 +363,8 @@ struct sctp_chunk *sctp_process_strreset_outreq(
377 asoc->strreset_outseq++; 363 asoc->strreset_outseq++;
378 364
379 if (!asoc->strreset_outstanding) { 365 if (!asoc->strreset_outstanding) {
366 struct sctp_transport *t;
367
380 t = asoc->strreset_chunk->transport; 368 t = asoc->strreset_chunk->transport;
381 if (del_timer(&t->reconf_timer)) 369 if (del_timer(&t->reconf_timer))
382 sctp_transport_put(t); 370 sctp_transport_put(t);
@@ -477,3 +465,367 @@ out:
477 465
478 return chunk; 466 return chunk;
479} 467}
468
469struct sctp_chunk *sctp_process_strreset_tsnreq(
470 struct sctp_association *asoc,
471 union sctp_params param,
472 struct sctp_ulpevent **evp)
473{
474 __u32 init_tsn = 0, next_tsn = 0, max_tsn_seen;
475 struct sctp_strreset_tsnreq *tsnreq = param.v;
476 struct sctp_stream *stream = asoc->stream;
477 __u32 result = SCTP_STRRESET_DENIED;
478 __u32 request_seq;
479 __u16 i;
480
481 request_seq = ntohl(tsnreq->request_seq);
482 if (request_seq > asoc->strreset_inseq) {
483 result = SCTP_STRRESET_ERR_BAD_SEQNO;
484 goto out;
485 } else if (request_seq == asoc->strreset_inseq) {
486 asoc->strreset_inseq++;
487 }
488
489 if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
490 goto out;
491
492 if (asoc->strreset_outstanding) {
493 result = SCTP_STRRESET_ERR_IN_PROGRESS;
494 goto out;
495 }
496
497 /* G3: The same processing as though a SACK chunk with no gap report
498 * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
499 * received MUST be performed.
500 */
501 max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
502 sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen);
503 sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
504
505 /* G1: Compute an appropriate value for the Receiver's Next TSN -- the
506 * TSN that the peer should use to send the next DATA chunk. The
507 * value SHOULD be the smallest TSN not acknowledged by the
508 * receiver of the request plus 2^31.
509 */
510 init_tsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + (1 << 31);
511 sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
512 init_tsn, GFP_ATOMIC);
513
514 /* G4: The same processing as though a FWD-TSN chunk (as defined in
515 * [RFC3758]) with all streams affected and a new cumulative TSN
516 * ACK of the Receiver's Next TSN minus 1 were received MUST be
517 * performed.
518 */
519 sctp_outq_free(&asoc->outqueue);
520
521 /* G2: Compute an appropriate value for the local endpoint's next TSN,
522 * i.e., the next TSN assigned by the receiver of the SSN/TSN reset
523 * chunk. The value SHOULD be the highest TSN sent by the receiver
524 * of the request plus 1.
525 */
526 next_tsn = asoc->next_tsn;
527 asoc->ctsn_ack_point = next_tsn - 1;
528 asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
529
530 /* G5: The next expected and outgoing SSNs MUST be reset to 0 for all
531 * incoming and outgoing streams.
532 */
533 for (i = 0; i < stream->outcnt; i++)
534 stream->out[i].ssn = 0;
535 for (i = 0; i < stream->incnt; i++)
536 stream->in[i].ssn = 0;
537
538 result = SCTP_STRRESET_PERFORMED;
539
540 *evp = sctp_ulpevent_make_assoc_reset_event(asoc, 0, init_tsn,
541 next_tsn, GFP_ATOMIC);
542
543out:
544 return sctp_make_strreset_tsnresp(asoc, result, request_seq,
545 next_tsn, init_tsn);
546}
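
The G1 step above leans on plain 32-bit unsigned wrap-around: adding 2^31 to the cumulative TSN ack point places the new Receiver's Next TSN half-way around the TSN space, well clear of anything recently in flight. For illustration only (not part of the patch), a minimal user-space sketch of the same arithmetic with a made-up cumulative TSN:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ctsn = 0xf0000000u;            /* illustrative cumulative TSN ack point */
	uint32_t init_tsn = ctsn + (1u << 31);  /* the G1 computation above */

	/* unsigned wrap modulo 2^32: 0xf0000000 + 0x80000000 == 0x70000000 */
	printf("receiver's next TSN: 0x%08" PRIx32 "\n", init_tsn);
	return 0;
}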
547
548struct sctp_chunk *sctp_process_strreset_addstrm_out(
549 struct sctp_association *asoc,
550 union sctp_params param,
551 struct sctp_ulpevent **evp)
552{
553 struct sctp_strreset_addstrm *addstrm = param.v;
554 struct sctp_stream *stream = asoc->stream;
555 __u32 result = SCTP_STRRESET_DENIED;
556 struct sctp_stream_in *streamin;
557 __u32 request_seq, incnt;
558 __u16 in;
559
560 request_seq = ntohl(addstrm->request_seq);
561 if (request_seq > asoc->strreset_inseq) {
562 result = SCTP_STRRESET_ERR_BAD_SEQNO;
563 goto out;
564 } else if (request_seq == asoc->strreset_inseq) {
565 asoc->strreset_inseq++;
566 }
567
568 if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
569 goto out;
570
571 if (asoc->strreset_chunk) {
572 if (!sctp_chunk_lookup_strreset_param(
573 asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) {
574 /* same process with outstanding isn't 0 */
575 result = SCTP_STRRESET_ERR_IN_PROGRESS;
576 goto out;
577 }
578
579 asoc->strreset_outstanding--;
580 asoc->strreset_outseq++;
581
582 if (!asoc->strreset_outstanding) {
583 struct sctp_transport *t;
584
585 t = asoc->strreset_chunk->transport;
586 if (del_timer(&t->reconf_timer))
587 sctp_transport_put(t);
588
589 sctp_chunk_put(asoc->strreset_chunk);
590 asoc->strreset_chunk = NULL;
591 }
592 }
593
594 in = ntohs(addstrm->number_of_streams);
595 incnt = stream->incnt + in;
596 if (!in || incnt > SCTP_MAX_STREAM)
597 goto out;
598
599 streamin = krealloc(stream->in, incnt * sizeof(*streamin),
600 GFP_ATOMIC);
601 if (!streamin)
602 goto out;
603
604 memset(streamin + stream->incnt, 0, in * sizeof(*streamin));
605 stream->in = streamin;
606 stream->incnt = incnt;
607
608 result = SCTP_STRRESET_PERFORMED;
609
610 *evp = sctp_ulpevent_make_stream_change_event(asoc,
611 0, ntohs(addstrm->number_of_streams), 0, GFP_ATOMIC);
612
613out:
614 return sctp_make_strreset_resp(asoc, result, request_seq);
615}
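
sctp_process_strreset_addstrm_out() grows the incoming-stream array with krealloc() and then zeroes only the freshly appended tail, so the state of the existing streams survives the resize and a failed allocation leaves the old array intact. A stand-alone sketch of that grow-and-zero pattern, assuming a hypothetical struct stream_in in ordinary user-space C:

#include <stdlib.h>
#include <string.h>

struct stream_in { unsigned short ssn; };   /* hypothetical stand-in for the kernel struct */

/* Grow *arr from oldcnt to newcnt entries, zeroing only the new tail. */
static int grow_streams(struct stream_in **arr, unsigned int oldcnt, unsigned int newcnt)
{
	struct stream_in *p = realloc(*arr, newcnt * sizeof(*p));

	if (!p)
		return -1;                  /* the original array is still valid */
	memset(p + oldcnt, 0, (newcnt - oldcnt) * sizeof(*p));
	*arr = p;
	return 0;
}

int main(void)
{
	struct stream_in *in = NULL;

	return grow_streams(&in, 0, 4);     /* realloc(NULL, ...) behaves like malloc() */
}

As in the handler above, failure simply falls through to the denial path without disturbing the streams already in use.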
616
617struct sctp_chunk *sctp_process_strreset_addstrm_in(
618 struct sctp_association *asoc,
619 union sctp_params param,
620 struct sctp_ulpevent **evp)
621{
622 struct sctp_strreset_addstrm *addstrm = param.v;
623 struct sctp_stream *stream = asoc->stream;
624 __u32 result = SCTP_STRRESET_DENIED;
625 struct sctp_stream_out *streamout;
626 struct sctp_chunk *chunk = NULL;
627 __u32 request_seq, outcnt;
628 __u16 out;
629
630 request_seq = ntohl(addstrm->request_seq);
631 if (request_seq > asoc->strreset_inseq) {
632 result = SCTP_STRRESET_ERR_BAD_SEQNO;
633 goto out;
634 } else if (request_seq == asoc->strreset_inseq) {
635 asoc->strreset_inseq++;
636 }
637
638 if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
639 goto out;
640
641 if (asoc->strreset_outstanding) {
642 result = SCTP_STRRESET_ERR_IN_PROGRESS;
643 goto out;
644 }
645
646 out = ntohs(addstrm->number_of_streams);
647 outcnt = stream->outcnt + out;
648 if (!out || outcnt > SCTP_MAX_STREAM)
649 goto out;
650
651 streamout = krealloc(stream->out, outcnt * sizeof(*streamout),
652 GFP_ATOMIC);
653 if (!streamout)
654 goto out;
655
656 memset(streamout + stream->outcnt, 0, out * sizeof(*streamout));
657 stream->out = streamout;
658
659 chunk = sctp_make_strreset_addstrm(asoc, out, 0);
660 if (!chunk)
661 goto out;
662
663 asoc->strreset_chunk = chunk;
664 asoc->strreset_outstanding = 1;
665 sctp_chunk_hold(asoc->strreset_chunk);
666
667 stream->outcnt = outcnt;
668
669 *evp = sctp_ulpevent_make_stream_change_event(asoc,
670 0, 0, ntohs(addstrm->number_of_streams), GFP_ATOMIC);
671
672out:
673 if (!chunk)
674 chunk = sctp_make_strreset_resp(asoc, result, request_seq);
675
676 return chunk;
677}
678
679struct sctp_chunk *sctp_process_strreset_resp(
680 struct sctp_association *asoc,
681 union sctp_params param,
682 struct sctp_ulpevent **evp)
683{
684 struct sctp_strreset_resp *resp = param.v;
685 struct sctp_stream *stream = asoc->stream;
686 struct sctp_transport *t;
687 __u16 i, nums, flags = 0;
688 sctp_paramhdr_t *req;
689 __u32 result;
690
691 req = sctp_chunk_lookup_strreset_param(asoc, resp->response_seq, 0);
692 if (!req)
693 return NULL;
694
695 result = ntohl(resp->result);
696 if (result != SCTP_STRRESET_PERFORMED) {
697 /* if in progress, do nothing but retransmit */
698 if (result == SCTP_STRRESET_IN_PROGRESS)
699 return NULL;
700 else if (result == SCTP_STRRESET_DENIED)
701 flags = SCTP_STREAM_RESET_DENIED;
702 else
703 flags = SCTP_STREAM_RESET_FAILED;
704 }
705
706 if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) {
707 struct sctp_strreset_outreq *outreq;
708 __u16 *str_p = NULL;
709
710 outreq = (struct sctp_strreset_outreq *)req;
711 nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) / 2;
712
713 if (result == SCTP_STRRESET_PERFORMED) {
714 if (nums) {
715 str_p = outreq->list_of_streams;
716 for (i = 0; i < nums; i++)
717 stream->out[ntohs(str_p[i])].ssn = 0;
718 } else {
719 for (i = 0; i < stream->outcnt; i++)
720 stream->out[i].ssn = 0;
721 }
722
723 flags = SCTP_STREAM_RESET_OUTGOING_SSN;
724 }
725
726 for (i = 0; i < stream->outcnt; i++)
727 stream->out[i].state = SCTP_STREAM_OPEN;
728
729 *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
730 nums, str_p, GFP_ATOMIC);
731 } else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) {
732 struct sctp_strreset_inreq *inreq;
733 __u16 *str_p = NULL;
734
735 /* if the result is performed, it's impossible for inreq */
736 if (result == SCTP_STRRESET_PERFORMED)
737 return NULL;
738
739 inreq = (struct sctp_strreset_inreq *)req;
740 nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / 2;
741
742 str_p = inreq->list_of_streams;
743 *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
744 nums, str_p, GFP_ATOMIC);
745 } else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) {
746 struct sctp_strreset_resptsn *resptsn;
747 __u32 stsn, rtsn;
748
749 /* check for resptsn, as sctp_verify_reconf didn't do it*/
750 if (ntohs(param.p->length) != sizeof(*resptsn))
751 return NULL;
752
753 resptsn = (struct sctp_strreset_resptsn *)resp;
754 stsn = ntohl(resptsn->senders_next_tsn);
755 rtsn = ntohl(resptsn->receivers_next_tsn);
756
757 if (result == SCTP_STRRESET_PERFORMED) {
758 __u32 mtsn = sctp_tsnmap_get_max_tsn_seen(
759 &asoc->peer.tsn_map);
760
761 sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn);
762 sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
763
764 sctp_tsnmap_init(&asoc->peer.tsn_map,
765 SCTP_TSN_MAP_INITIAL,
766 stsn, GFP_ATOMIC);
767
768 sctp_outq_free(&asoc->outqueue);
769
770 asoc->next_tsn = rtsn;
771 asoc->ctsn_ack_point = asoc->next_tsn - 1;
772 asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
773
774 for (i = 0; i < stream->outcnt; i++)
775 stream->out[i].ssn = 0;
776 for (i = 0; i < stream->incnt; i++)
777 stream->in[i].ssn = 0;
778 }
779
780 for (i = 0; i < stream->outcnt; i++)
781 stream->out[i].state = SCTP_STREAM_OPEN;
782
783 *evp = sctp_ulpevent_make_assoc_reset_event(asoc, flags,
784 stsn, rtsn, GFP_ATOMIC);
785 } else if (req->type == SCTP_PARAM_RESET_ADD_OUT_STREAMS) {
786 struct sctp_strreset_addstrm *addstrm;
787 __u16 number;
788
789 addstrm = (struct sctp_strreset_addstrm *)req;
790 nums = ntohs(addstrm->number_of_streams);
791 number = stream->outcnt - nums;
792
793 if (result == SCTP_STRRESET_PERFORMED)
794 for (i = number; i < stream->outcnt; i++)
795 stream->out[i].state = SCTP_STREAM_OPEN;
796 else
797 stream->outcnt = number;
798
799 *evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
800 0, nums, GFP_ATOMIC);
801 } else if (req->type == SCTP_PARAM_RESET_ADD_IN_STREAMS) {
802 struct sctp_strreset_addstrm *addstrm;
803
804 /* if the result is performed, it's impossible for addstrm in
805 * request.
806 */
807 if (result == SCTP_STRRESET_PERFORMED)
808 return NULL;
809
810 addstrm = (struct sctp_strreset_addstrm *)req;
811 nums = ntohs(addstrm->number_of_streams);
812
813 *evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
814 nums, 0, GFP_ATOMIC);
815 }
816
817 asoc->strreset_outstanding--;
818 asoc->strreset_outseq++;
819
820 /* remove everything for this reconf request */
821 if (!asoc->strreset_outstanding) {
822 t = asoc->strreset_chunk->transport;
823 if (del_timer(&t->reconf_timer))
824 sctp_transport_put(t);
825
826 sctp_chunk_put(asoc->strreset_chunk);
827 asoc->strreset_chunk = NULL;
828 }
829
830 return NULL;
831}
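
Several branches of sctp_process_strreset_resp() recover the stream list from the original request by dividing what is left of the parameter after its header by two, since every entry is a 16-bit stream identifier in network byte order. A small stand-alone sketch of that length-to-count step (the 4-byte header and the values are illustrative; the kernel subtracts the size of the specific request struct):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* illustrative parameter: type, length = 8, then two stream ids */
	uint8_t param[] = { 0x00, 0x0d, 0x00, 0x08,
			    0x00, 0x01, 0x00, 0x05 };   /* streams 1 and 5 */
	size_t hdrlen = 4;                              /* stand-in for sizeof(*outreq) etc. */
	uint16_t len_be, id_be;
	size_t nums, i;

	memcpy(&len_be, param + 2, sizeof(len_be));
	nums = (ntohs(len_be) - hdrlen) / 2;            /* each entry is a __be16 */

	for (i = 0; i < nums; i++) {
		memcpy(&id_be, param + hdrlen + 2 * i, sizeof(id_be));
		printf("stream %u\n", (unsigned int)ntohs(id_be));
	}
	return 0;
}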
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index daf8554fd42a..0e732f68c2bf 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -275,6 +275,13 @@ static struct ctl_table sctp_net_table[] = {
275 .proc_handler = proc_dointvec, 275 .proc_handler = proc_dointvec,
276 }, 276 },
277 { 277 {
278 .procname = "reconf_enable",
279 .data = &init_net.sctp.reconf_enable,
280 .maxlen = sizeof(int),
281 .mode = 0644,
282 .proc_handler = proc_dointvec,
283 },
284 {
278 .procname = "auth_enable", 285 .procname = "auth_enable",
279 .data = &init_net.sctp.auth_enable, 286 .data = &init_net.sctp.auth_enable,
280 .maxlen = sizeof(int), 287 .maxlen = sizeof(int),
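
Since the new entry lives in the existing sctp_net_table, the knob should appear per network namespace next to the other SCTP toggles, i.e. as net.sctp.reconf_enable (/proc/sys/net/sctp/reconf_enable); presumably, like auth_enable, it is consulted when new endpoints are created rather than changing the behaviour of already-established associations.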
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index c8881bc542a0..ec2b3e013c2f 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -883,6 +883,62 @@ struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
883 return event; 883 return event;
884} 884}
885 885
886struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event(
887 const struct sctp_association *asoc, __u16 flags, __u32 local_tsn,
888 __u32 remote_tsn, gfp_t gfp)
889{
890 struct sctp_assoc_reset_event *areset;
891 struct sctp_ulpevent *event;
892 struct sk_buff *skb;
893
894 event = sctp_ulpevent_new(sizeof(struct sctp_assoc_reset_event),
895 MSG_NOTIFICATION, gfp);
896 if (!event)
897 return NULL;
898
899 skb = sctp_event2skb(event);
900 areset = (struct sctp_assoc_reset_event *)
901 skb_put(skb, sizeof(struct sctp_assoc_reset_event));
902
903 areset->assocreset_type = SCTP_ASSOC_RESET_EVENT;
904 areset->assocreset_flags = flags;
905 areset->assocreset_length = sizeof(struct sctp_assoc_reset_event);
906 sctp_ulpevent_set_owner(event, asoc);
907 areset->assocreset_assoc_id = sctp_assoc2id(asoc);
908 areset->assocreset_local_tsn = local_tsn;
909 areset->assocreset_remote_tsn = remote_tsn;
910
911 return event;
912}
913
914struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event(
915 const struct sctp_association *asoc, __u16 flags,
916 __u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp)
917{
918 struct sctp_stream_change_event *schange;
919 struct sctp_ulpevent *event;
920 struct sk_buff *skb;
921
922 event = sctp_ulpevent_new(sizeof(struct sctp_stream_change_event),
923 MSG_NOTIFICATION, gfp);
924 if (!event)
925 return NULL;
926
927 skb = sctp_event2skb(event);
928 schange = (struct sctp_stream_change_event *)
929 skb_put(skb, sizeof(struct sctp_stream_change_event));
930
931 schange->strchange_type = SCTP_STREAM_CHANGE_EVENT;
932 schange->strchange_flags = flags;
933 schange->strchange_length = sizeof(struct sctp_stream_change_event);
934 sctp_ulpevent_set_owner(event, asoc);
935 schange->strchange_assoc_id = sctp_assoc2id(asoc);
936 schange->strchange_instrms = strchange_instrms;
937 schange->strchange_outstrms = strchange_outstrms;
938
939 return event;
940}
941
886/* Return the notification type, assuming this is a notification 942/* Return the notification type, assuming this is a notification
887 * event. 943 * event.
888 */ 944 */
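
From user space these two notifications arrive on the SCTP socket as MSG_NOTIFICATION messages whose leading 16 bits carry the event type. A purely illustrative consumer sketch follows; the struct below only mirrors the sctp_assoc_reset_event layout filled in above, the constant is a local stand-in for the real uapi value, and a real application would include <linux/sctp.h> and subscribe to the events first:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the uapi constant added by this series; use <linux/sctp.h> in real code. */
#define SCTP_ASSOC_RESET_EVENT_DEMO 0x000f

struct assoc_reset_event {        /* mirrors the event layout built above */
	uint16_t type;
	uint16_t flags;
	uint32_t length;
	int32_t  assoc_id;
	uint32_t local_tsn;
	uint32_t remote_tsn;
};

/* Would be called with a buffer returned by recvmsg() when MSG_NOTIFICATION is set. */
static void handle_notification(const void *buf, size_t len)
{
	uint16_t type;

	if (len < sizeof(type))
		return;
	memcpy(&type, buf, sizeof(type));           /* sn_type is the first field of every event */
	if (type == SCTP_ASSOC_RESET_EVENT_DEMO &&
	    len >= sizeof(struct assoc_reset_event)) {
		struct assoc_reset_event ev;

		memcpy(&ev, buf, sizeof(ev));
		printf("assoc %d reset: local tsn %u, remote tsn %u\n",
		       ev.assoc_id, ev.local_tsn, ev.remote_tsn);
	}
}

int main(void)
{
	struct assoc_reset_event ev = {
		.type = SCTP_ASSOC_RESET_EVENT_DEMO,
		.length = sizeof(ev),
		.assoc_id = 1,
		.local_tsn = 0x70000000,
		.remote_tsn = 0x10000000,
	};

	handle_notification(&ev, sizeof(ev));       /* fabricated event, for illustration only */
	return 0;
}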
diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c
index a91872a97742..9da2a3441b0a 100644
--- a/samples/bpf/map_perf_test_kern.c
+++ b/samples/bpf/map_perf_test_kern.c
@@ -65,6 +65,13 @@ struct bpf_map_def SEC("maps") lpm_trie_map_alloc = {
65 .map_flags = BPF_F_NO_PREALLOC, 65 .map_flags = BPF_F_NO_PREALLOC,
66}; 66};
67 67
68struct bpf_map_def SEC("maps") array_map = {
69 .type = BPF_MAP_TYPE_ARRAY,
70 .key_size = sizeof(u32),
71 .value_size = sizeof(long),
72 .max_entries = MAX_ENTRIES,
73};
74
68SEC("kprobe/sys_getuid") 75SEC("kprobe/sys_getuid")
69int stress_hmap(struct pt_regs *ctx) 76int stress_hmap(struct pt_regs *ctx)
70{ 77{
@@ -165,5 +172,31 @@ int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
165 return 0; 172 return 0;
166} 173}
167 174
175SEC("kprobe/sys_getpgid")
176int stress_hash_map_lookup(struct pt_regs *ctx)
177{
178 u32 key = 1, i;
179 long *value;
180
181#pragma clang loop unroll(full)
182 for (i = 0; i < 64; ++i)
183 value = bpf_map_lookup_elem(&hash_map, &key);
184
185 return 0;
186}
187
188SEC("kprobe/sys_getpgrp")
189int stress_array_map_lookup(struct pt_regs *ctx)
190{
191 u32 key = 1, i;
192 long *value;
193
194#pragma clang loop unroll(full)
195 for (i = 0; i < 64; ++i)
196 value = bpf_map_lookup_elem(&array_map, &key);
197
198 return 0;
199}
200
168char _license[] SEC("license") = "GPL"; 201char _license[] SEC("license") = "GPL";
169u32 _version SEC("version") = LINUX_VERSION_CODE; 202u32 _version SEC("version") = LINUX_VERSION_CODE;
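
The #pragma clang loop unroll(full) in both new programs is what keeps them loadable: the BPF verifier of this era rejects back-edges, so the 64 lookups have to be expanded into straight-line code. That factor of 64 reappears below in map_perf_test_user.c, where each getpgid/getpgrp syscall is counted as 64 lookups when the per-second rates are printed.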
diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c
index 680260a91f50..e29ff318a793 100644
--- a/samples/bpf/map_perf_test_user.c
+++ b/samples/bpf/map_perf_test_user.c
@@ -38,6 +38,8 @@ static __u64 time_get_ns(void)
38#define LRU_HASH_PREALLOC (1 << 4) 38#define LRU_HASH_PREALLOC (1 << 4)
39#define PERCPU_LRU_HASH_PREALLOC (1 << 5) 39#define PERCPU_LRU_HASH_PREALLOC (1 << 5)
40#define LPM_KMALLOC (1 << 6) 40#define LPM_KMALLOC (1 << 6)
41#define HASH_LOOKUP (1 << 7)
42#define ARRAY_LOOKUP (1 << 8)
41 43
42static int test_flags = ~0; 44static int test_flags = ~0;
43 45
@@ -125,6 +127,30 @@ static void test_lpm_kmalloc(int cpu)
125 cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); 127 cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
126} 128}
127 129
130static void test_hash_lookup(int cpu)
131{
132 __u64 start_time;
133 int i;
134
135 start_time = time_get_ns();
136 for (i = 0; i < MAX_CNT; i++)
137 syscall(__NR_getpgid, 0);
138 printf("%d:hash_lookup %lld lookups per sec\n",
139 cpu, MAX_CNT * 1000000000ll * 64 / (time_get_ns() - start_time));
140}
141
142static void test_array_lookup(int cpu)
143{
144 __u64 start_time;
145 int i;
146
147 start_time = time_get_ns();
148 for (i = 0; i < MAX_CNT; i++)
149 syscall(__NR_getpgrp, 0);
150 printf("%d:array_lookup %lld lookups per sec\n",
151 cpu, MAX_CNT * 1000000000ll * 64 / (time_get_ns() - start_time));
152}
153
128static void loop(int cpu) 154static void loop(int cpu)
129{ 155{
130 cpu_set_t cpuset; 156 cpu_set_t cpuset;
@@ -153,6 +179,12 @@ static void loop(int cpu)
153 179
154 if (test_flags & LPM_KMALLOC) 180 if (test_flags & LPM_KMALLOC)
155 test_lpm_kmalloc(cpu); 181 test_lpm_kmalloc(cpu);
182
183 if (test_flags & HASH_LOOKUP)
184 test_hash_lookup(cpu);
185
186 if (test_flags & ARRAY_LOOKUP)
187 test_array_lookup(cpu);
156} 188}
157 189
158static void run_perf_test(int tasks) 190static void run_perf_test(int tasks)
diff --git a/tools/hv/bondvf.sh b/tools/hv/bondvf.sh
index 4aa5369ffa4e..d85968cb1bf2 100755
--- a/tools/hv/bondvf.sh
+++ b/tools/hv/bondvf.sh
@@ -101,9 +101,25 @@ function create_bond_cfg_redhat {
101 echo BONDING_OPTS=\"mode=active-backup miimon=100 primary=$2\" >>$fn 101 echo BONDING_OPTS=\"mode=active-backup miimon=100 primary=$2\" >>$fn
102} 102}
103 103
104function del_eth_cfg_ubuntu {
105 local fn=$cfgdir/interfaces
106 local tmpfl=$(mktemp)
107
108 local nic_start='^[ \t]*(auto|iface|mapping|allow-.*)[ \t]+'$1
109 local nic_end='^[ \t]*(auto|iface|mapping|allow-.*|source)'
110
111 awk "/$nic_end/{x=0} x{next} /$nic_start/{x=1;next} 1" $fn >$tmpfl
112
113 cp $tmpfl $fn
114
115 rm $tmpfl
116}
117
104function create_eth_cfg_ubuntu { 118function create_eth_cfg_ubuntu {
105 local fn=$cfgdir/interfaces 119 local fn=$cfgdir/interfaces
106 120
121 del_eth_cfg_ubuntu $1
122
107 echo $'\n'auto $1 >>$fn 123 echo $'\n'auto $1 >>$fn
108 echo iface $1 inet manual >>$fn 124 echo iface $1 inet manual >>$fn
109 echo bond-master $2 >>$fn 125 echo bond-master $2 >>$fn
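
The awk program in del_eth_cfg_ubuntu is a small state machine: a line for the target NIC (nic_start) sets x and is skipped, every following line is skipped while x is set, and x is cleared again at the next stanza keyword (nic_end), with the trailing 1 printing everything else. The effect is that any existing stanza for the interface is stripped from /etc/network/interfaces before create_eth_cfg_ubuntu and create_bond_cfg_ubuntu append a fresh one, so re-running the script no longer accumulates duplicate entries.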
@@ -119,6 +135,8 @@ function create_eth_cfg_pri_ubuntu {
119function create_bond_cfg_ubuntu { 135function create_bond_cfg_ubuntu {
120 local fn=$cfgdir/interfaces 136 local fn=$cfgdir/interfaces
121 137
138 del_eth_cfg_ubuntu $1
139
122 echo $'\n'auto $1 >>$fn 140 echo $'\n'auto $1 >>$fn
123 echo iface $1 inet dhcp >>$fn 141 echo iface $1 inet dhcp >>$fn
124 echo bond-mode active-backup >>$fn 142 echo bond-mode active-backup >>$fn