summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-07-02 14:18:28 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-07-02 14:18:28 -0400
commit4e33d7d47943aaa84a5904472cf2f9c6d6b0a6ca (patch)
tree6eb46696758bee8da0e8b830db4bc6915128bf60 /drivers
parent021c91791a5e7e85c567452f1be3e4c2c6cb6063 (diff)
parente48e097996439cd73f36c89b98ba4175d84c9be6 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller: 1) Verify netlink attributes properly in nf_queue, from Eric Dumazet. 2) Need to bump memory lock rlimit for test_sockmap bpf test, from Yonghong Song. 3) Fix VLAN handling in lan78xx driver, from Dave Stevenson. 4) Fix uninitialized read in nf_log, from Jann Horn. 5) Fix raw command length parsing in mlx5, from Alex Vesker. 6) Cleanup loopback RDS connections upon netns deletion, from Sowmini Varadhan. 7) Fix regressions in FIB rule matching during create, from Jason A. Donenfeld and Roopa Prabhu. 8) Fix mpls ether type detection in nfp, from Pieter Jansen van Vuuren. 9) More bpfilter build fixes/adjustments from Masahiro Yamada. 10) Fix XDP_{TX,REDIRECT} flushing in various drivers, from Jesper Dangaard Brouer. 11) fib_tests.sh file permissions were broken, from Shuah Khan. 12) Make sure BH/preemption is disabled in data path of mac80211, from Denis Kenzior. 13) Don't ignore nla_parse_nested() return values in nl80211, from Johannes Berg. 14) Properly account sock objects to kmemcg, from Shakeel Butt. 15) Adjustments to setting bpf program permissions to read-only, from Daniel Borkmann. 16) TCP Fast Open key endianness was broken, it always took on the host endianness. Whoops. Explicitly make it little endian. From Yuchung Cheng. 17) Fix prefix route setting for link local addresses in ipv6, from David Ahern. 18) Potential Spectre v1 in zatm driver, from Gustavo A. R. Silva. 19) Various bpf sockmap fixes, from John Fastabend. 20) Use after free for GRO with ESP, from Sabrina Dubroca. 21) Passing bogus flags to crypto_alloc_shash() in ipv6 SR code, from Eric Biggers. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (87 commits) qede: Advertise software timestamp caps when PHC is not available. qed: Fix use of incorrect size in memcpy call. qed: Fix setting of incorrect eswitch mode. qed: Limit msix vectors in kdump kernel to the minimum required count. 
ipvlan: call dev_change_flags when ipvlan mode is reset ipv6: sr: fix passing wrong flags to crypto_alloc_shash() net: fix use-after-free in GRO with ESP tcp: prevent bogus FRTO undos with non-SACK flows bpf: sockhash, add release routine bpf: sockhash fix omitted bucket lock in sock_close bpf: sockmap, fix smap_list_map_remove when psock is in many maps bpf: sockmap, fix crash when ipv6 sock is added net: fib_rules: bring back rule_exists to match rule during add hv_netvsc: split sub-channel setup into async and sync net: use dev_change_tx_queue_len() for SIOCSIFTXQLEN atm: zatm: Fix potential Spectre v1 s390/qeth: consistently re-enable device features s390/qeth: don't clobber buffer on async TX completion s390/qeth: avoid using is_multicast_ether_addr_64bits on (u8 *)[6] s390/qeth: fix race when setting MAC address ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/atm/iphase.c2
-rw-r--r--drivers/atm/zatm.c2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/media/rc/bpf-lirc.c14
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c6
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c2
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c15
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c8
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c24
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c14
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c19
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c10
-rw-r--r--drivers/net/ethernet/sfc/farch.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/geneve.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/hyperv/netvsc.c37
-rw-r--r--drivers/net/hyperv/netvsc_drv.c17
-rw-r--r--drivers/net/hyperv/rndis_filter.c61
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c36
-rw-r--r--drivers/net/phy/dp83tc811.c2
-rw-r--r--drivers/net/usb/lan78xx.c37
-rw-r--r--drivers/net/usb/r8152.c3
-rw-r--r--drivers/net/virtio_net.c30
-rw-r--r--drivers/net/vxlan.c4
-rw-r--r--drivers/s390/net/qeth_core.h13
-rw-r--r--drivers/s390/net/qeth_core_main.c47
-rw-r--r--drivers/s390/net/qeth_l2_main.c24
-rw-r--r--drivers/s390/net/qeth_l3_main.c3
55 files changed, 414 insertions, 198 deletions
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index ff81a576347e..82532c299bb5 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1618,7 +1618,7 @@ static int rx_init(struct atm_dev *dev)
1618 skb_queue_head_init(&iadev->rx_dma_q); 1618 skb_queue_head_init(&iadev->rx_dma_q);
1619 iadev->rx_free_desc_qhead = NULL; 1619 iadev->rx_free_desc_qhead = NULL;
1620 1620
1621 iadev->rx_open = kcalloc(4, iadev->num_vc, GFP_KERNEL); 1621 iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
1622 if (!iadev->rx_open) { 1622 if (!iadev->rx_open) {
1623 printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n", 1623 printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
1624 dev->number); 1624 dev->number);
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index a8d2eb0ceb8d..2c288d1f42bb 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
1483 return -EFAULT; 1483 return -EFAULT;
1484 if (pool < 0 || pool > ZATM_LAST_POOL) 1484 if (pool < 0 || pool > ZATM_LAST_POOL)
1485 return -EINVAL; 1485 return -EINVAL;
1486 pool = array_index_nospec(pool,
1487 ZATM_LAST_POOL + 1);
1486 if (copy_from_user(&info, 1488 if (copy_from_user(&info,
1487 &((struct zatm_pool_req __user *) arg)->info, 1489 &((struct zatm_pool_req __user *) arg)->info,
1488 sizeof(info))) return -EFAULT; 1490 sizeof(info))) return -EFAULT;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e3e330f59c2c..b3ba9a222550 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -6113,7 +6113,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
6113 dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports), 6113 dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
6114 MLX5_CAP_GEN(mdev, num_vhca_ports)); 6114 MLX5_CAP_GEN(mdev, num_vhca_ports));
6115 6115
6116 if (MLX5_VPORT_MANAGER(mdev) && 6116 if (MLX5_ESWITCH_MANAGER(mdev) &&
6117 mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) { 6117 mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
6118 dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0); 6118 dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
6119 6119
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 40826bba06b6..fcfab6635f9c 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -207,29 +207,19 @@ void lirc_bpf_free(struct rc_dev *rcdev)
207 bpf_prog_array_free(rcdev->raw->progs); 207 bpf_prog_array_free(rcdev->raw->progs);
208} 208}
209 209
210int lirc_prog_attach(const union bpf_attr *attr) 210int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
211{ 211{
212 struct bpf_prog *prog;
213 struct rc_dev *rcdev; 212 struct rc_dev *rcdev;
214 int ret; 213 int ret;
215 214
216 if (attr->attach_flags) 215 if (attr->attach_flags)
217 return -EINVAL; 216 return -EINVAL;
218 217
219 prog = bpf_prog_get_type(attr->attach_bpf_fd,
220 BPF_PROG_TYPE_LIRC_MODE2);
221 if (IS_ERR(prog))
222 return PTR_ERR(prog);
223
224 rcdev = rc_dev_get_from_fd(attr->target_fd); 218 rcdev = rc_dev_get_from_fd(attr->target_fd);
225 if (IS_ERR(rcdev)) { 219 if (IS_ERR(rcdev))
226 bpf_prog_put(prog);
227 return PTR_ERR(rcdev); 220 return PTR_ERR(rcdev);
228 }
229 221
230 ret = lirc_bpf_attach(rcdev, prog); 222 ret = lirc_bpf_attach(rcdev, prog);
231 if (ret)
232 bpf_prog_put(prog);
233 223
234 put_device(&rcdev->dev); 224 put_device(&rcdev->dev);
235 225
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 567ee54504bc..5e5022fa1d04 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1897,13 +1897,19 @@ static int alx_resume(struct device *dev)
1897 struct pci_dev *pdev = to_pci_dev(dev); 1897 struct pci_dev *pdev = to_pci_dev(dev);
1898 struct alx_priv *alx = pci_get_drvdata(pdev); 1898 struct alx_priv *alx = pci_get_drvdata(pdev);
1899 struct alx_hw *hw = &alx->hw; 1899 struct alx_hw *hw = &alx->hw;
1900 int err;
1900 1901
1901 alx_reset_phy(hw); 1902 alx_reset_phy(hw);
1902 1903
1903 if (!netif_running(alx->dev)) 1904 if (!netif_running(alx->dev))
1904 return 0; 1905 return 0;
1905 netif_device_attach(alx->dev); 1906 netif_device_attach(alx->dev);
1906 return __alx_open(alx, true); 1907
1908 rtnl_lock();
1909 err = __alx_open(alx, true);
1910 rtnl_unlock();
1911
1912 return err;
1907} 1913}
1908 1914
1909static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); 1915static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index d847e1b9c37b..be1506169076 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1533,6 +1533,7 @@ struct bnx2x {
1533 struct link_vars link_vars; 1533 struct link_vars link_vars;
1534 u32 link_cnt; 1534 u32 link_cnt;
1535 struct bnx2x_link_report_data last_reported_link; 1535 struct bnx2x_link_report_data last_reported_link;
1536 bool force_link_down;
1536 1537
1537 struct mdio_if_info mdio; 1538 struct mdio_if_info mdio;
1538 1539
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8cd73ff5debc..af7b5a4d8ba0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1261,6 +1261,11 @@ void __bnx2x_link_report(struct bnx2x *bp)
1261{ 1261{
1262 struct bnx2x_link_report_data cur_data; 1262 struct bnx2x_link_report_data cur_data;
1263 1263
1264 if (bp->force_link_down) {
1265 bp->link_vars.link_up = 0;
1266 return;
1267 }
1268
1264 /* reread mf_cfg */ 1269 /* reread mf_cfg */
1265 if (IS_PF(bp) && !CHIP_IS_E1(bp)) 1270 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1266 bnx2x_read_mf_cfg(bp); 1271 bnx2x_read_mf_cfg(bp);
@@ -2817,6 +2822,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2817 bp->pending_max = 0; 2822 bp->pending_max = 0;
2818 } 2823 }
2819 2824
2825 bp->force_link_down = false;
2820 if (bp->port.pmf) { 2826 if (bp->port.pmf) {
2821 rc = bnx2x_initial_phy_init(bp, load_mode); 2827 rc = bnx2x_initial_phy_init(bp, load_mode);
2822 if (rc) 2828 if (rc)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 5b1ed240bf18..57348f2b49a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10279,6 +10279,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
10279 bp->sp_rtnl_state = 0; 10279 bp->sp_rtnl_state = 0;
10280 smp_mb(); 10280 smp_mb();
10281 10281
10282 /* Immediately indicate link as down */
10283 bp->link_vars.link_up = 0;
10284 bp->force_link_down = true;
10285 netif_carrier_off(bp->dev);
10286 BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
10287
10282 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); 10288 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10283 /* When ret value shows failure of allocation failure, 10289 /* When ret value shows failure of allocation failure,
10284 * the nic is rebooted again. If open still fails, a error 10290 * the nic is rebooted again. If open still fails, a error
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 30273a7717e2..4fd829b5e65d 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
660 id_tbl->max = size; 660 id_tbl->max = size;
661 id_tbl->next = next; 661 id_tbl->next = next;
662 spin_lock_init(&id_tbl->lock); 662 spin_lock_init(&id_tbl->lock);
663 id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL); 663 id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
664 if (!id_tbl->table) 664 if (!id_tbl->table)
665 return -ENOMEM; 665 return -ENOMEM;
666 666
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 3e93df5d4e3b..96cc03a6d942 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3726,6 +3726,8 @@ static int at91ether_init(struct platform_device *pdev)
3726 int err; 3726 int err;
3727 u32 reg; 3727 u32 reg;
3728 3728
3729 bp->queues[0].bp = bp;
3730
3729 dev->netdev_ops = &at91ether_netdev_ops; 3731 dev->netdev_ops = &at91ether_netdev_ops;
3730 dev->ethtool_ops = &macb_ethtool_ops; 3732 dev->ethtool_ops = &macb_ethtool_ops;
3731 3733
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 5f4e1ffa7b95..ab02057ac730 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
125/* Default alignment for start of data in an Rx FD */ 125/* Default alignment for start of data in an Rx FD */
126#define DPAA_FD_DATA_ALIGNMENT 16 126#define DPAA_FD_DATA_ALIGNMENT 16
127 127
128/* The DPAA requires 256 bytes reserved and mapped for the SGT */
129#define DPAA_SGT_SIZE 256
130
128/* Values for the L3R field of the FM Parse Results 131/* Values for the L3R field of the FM Parse Results
129 */ 132 */
130/* L3 Type field: First IP Present IPv4 */ 133/* L3 Type field: First IP Present IPv4 */
@@ -1617,8 +1620,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
1617 1620
1618 if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { 1621 if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
1619 nr_frags = skb_shinfo(skb)->nr_frags; 1622 nr_frags = skb_shinfo(skb)->nr_frags;
1620 dma_unmap_single(dev, addr, qm_fd_get_offset(fd) + 1623 dma_unmap_single(dev, addr,
1621 sizeof(struct qm_sg_entry) * (1 + nr_frags), 1624 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
1622 dma_dir); 1625 dma_dir);
1623 1626
1624 /* The sgt buffer has been allocated with netdev_alloc_frag(), 1627 /* The sgt buffer has been allocated with netdev_alloc_frag(),
@@ -1903,8 +1906,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
1903 void *sgt_buf; 1906 void *sgt_buf;
1904 1907
1905 /* get a page frag to store the SGTable */ 1908 /* get a page frag to store the SGTable */
1906 sz = SKB_DATA_ALIGN(priv->tx_headroom + 1909 sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
1907 sizeof(struct qm_sg_entry) * (1 + nr_frags));
1908 sgt_buf = netdev_alloc_frag(sz); 1910 sgt_buf = netdev_alloc_frag(sz);
1909 if (unlikely(!sgt_buf)) { 1911 if (unlikely(!sgt_buf)) {
1910 netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n", 1912 netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
@@ -1972,9 +1974,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
1972 skbh = (struct sk_buff **)buffer_start; 1974 skbh = (struct sk_buff **)buffer_start;
1973 *skbh = skb; 1975 *skbh = skb;
1974 1976
1975 addr = dma_map_single(dev, buffer_start, priv->tx_headroom + 1977 addr = dma_map_single(dev, buffer_start,
1976 sizeof(struct qm_sg_entry) * (1 + nr_frags), 1978 priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
1977 dma_dir);
1978 if (unlikely(dma_mapping_error(dev, addr))) { 1979 if (unlikely(dma_mapping_error(dev, addr))) {
1979 dev_err(dev, "DMA mapping failed"); 1980 dev_err(dev, "DMA mapping failed");
1980 err = -EINVAL; 1981 err = -EINVAL;
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index ce6e24c74978..ecbf6187e13a 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -324,6 +324,10 @@ struct fman_port_qmi_regs {
324#define HWP_HXS_PHE_REPORT 0x00000800 324#define HWP_HXS_PHE_REPORT 0x00000800
325#define HWP_HXS_PCAC_PSTAT 0x00000100 325#define HWP_HXS_PCAC_PSTAT 0x00000100
326#define HWP_HXS_PCAC_PSTOP 0x00000001 326#define HWP_HXS_PCAC_PSTOP 0x00000001
327#define HWP_HXS_TCP_OFFSET 0xA
328#define HWP_HXS_UDP_OFFSET 0xB
329#define HWP_HXS_SH_PAD_REM 0x80000000
330
327struct fman_port_hwp_regs { 331struct fman_port_hwp_regs {
328 struct { 332 struct {
329 u32 ssa; /* Soft Sequence Attachment */ 333 u32 ssa; /* Soft Sequence Attachment */
@@ -728,6 +732,10 @@ static void init_hwp(struct fman_port *port)
728 iowrite32be(0xffffffff, &regs->pmda[i].lcv); 732 iowrite32be(0xffffffff, &regs->pmda[i].lcv);
729 } 733 }
730 734
735 /* Short packet padding removal from checksum calculation */
736 iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_TCP_OFFSET].ssa);
737 iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_UDP_OFFSET].ssa);
738
731 start_port_hwp(port); 739 start_port_hwp(port);
732} 740}
733 741
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index e2e5cdc7119c..4c0f7eda1166 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -439,6 +439,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
439{ 439{
440 struct hinic_rq *rq = rxq->rq; 440 struct hinic_rq *rq = rxq->rq;
441 441
442 irq_set_affinity_hint(rq->irq, NULL);
442 free_irq(rq->irq, rxq); 443 free_irq(rq->irq, rxq);
443 rx_del_napi(rxq); 444 rx_del_napi(rxq);
444} 445}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index ed6dbcfd4e96..b151ae316546 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2199,9 +2199,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
2199 return true; 2199 return true;
2200} 2200}
2201 2201
2202#define I40E_XDP_PASS 0 2202#define I40E_XDP_PASS 0
2203#define I40E_XDP_CONSUMED 1 2203#define I40E_XDP_CONSUMED BIT(0)
2204#define I40E_XDP_TX 2 2204#define I40E_XDP_TX BIT(1)
2205#define I40E_XDP_REDIR BIT(2)
2205 2206
2206static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, 2207static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
2207 struct i40e_ring *xdp_ring); 2208 struct i40e_ring *xdp_ring);
@@ -2248,7 +2249,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
2248 break; 2249 break;
2249 case XDP_REDIRECT: 2250 case XDP_REDIRECT:
2250 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); 2251 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2251 result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED; 2252 result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
2252 break; 2253 break;
2253 default: 2254 default:
2254 bpf_warn_invalid_xdp_action(act); 2255 bpf_warn_invalid_xdp_action(act);
@@ -2311,7 +2312,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2311 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 2312 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2312 struct sk_buff *skb = rx_ring->skb; 2313 struct sk_buff *skb = rx_ring->skb;
2313 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); 2314 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2314 bool failure = false, xdp_xmit = false; 2315 unsigned int xdp_xmit = 0;
2316 bool failure = false;
2315 struct xdp_buff xdp; 2317 struct xdp_buff xdp;
2316 2318
2317 xdp.rxq = &rx_ring->xdp_rxq; 2319 xdp.rxq = &rx_ring->xdp_rxq;
@@ -2372,8 +2374,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2372 } 2374 }
2373 2375
2374 if (IS_ERR(skb)) { 2376 if (IS_ERR(skb)) {
2375 if (PTR_ERR(skb) == -I40E_XDP_TX) { 2377 unsigned int xdp_res = -PTR_ERR(skb);
2376 xdp_xmit = true; 2378
2379 if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
2380 xdp_xmit |= xdp_res;
2377 i40e_rx_buffer_flip(rx_ring, rx_buffer, size); 2381 i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
2378 } else { 2382 } else {
2379 rx_buffer->pagecnt_bias++; 2383 rx_buffer->pagecnt_bias++;
@@ -2427,12 +2431,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2427 total_rx_packets++; 2431 total_rx_packets++;
2428 } 2432 }
2429 2433
2430 if (xdp_xmit) { 2434 if (xdp_xmit & I40E_XDP_REDIR)
2435 xdp_do_flush_map();
2436
2437 if (xdp_xmit & I40E_XDP_TX) {
2431 struct i40e_ring *xdp_ring = 2438 struct i40e_ring *xdp_ring =
2432 rx_ring->vsi->xdp_rings[rx_ring->queue_index]; 2439 rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2433 2440
2434 i40e_xdp_ring_update_tail(xdp_ring); 2441 i40e_xdp_ring_update_tail(xdp_ring);
2435 xdp_do_flush_map();
2436 } 2442 }
2437 2443
2438 rx_ring->skb = skb; 2444 rx_ring->skb = skb;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3e87dbbc9024..62e57b05a0ae 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2186,9 +2186,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
2186 return skb; 2186 return skb;
2187} 2187}
2188 2188
2189#define IXGBE_XDP_PASS 0 2189#define IXGBE_XDP_PASS 0
2190#define IXGBE_XDP_CONSUMED 1 2190#define IXGBE_XDP_CONSUMED BIT(0)
2191#define IXGBE_XDP_TX 2 2191#define IXGBE_XDP_TX BIT(1)
2192#define IXGBE_XDP_REDIR BIT(2)
2192 2193
2193static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, 2194static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
2194 struct xdp_frame *xdpf); 2195 struct xdp_frame *xdpf);
@@ -2225,7 +2226,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
2225 case XDP_REDIRECT: 2226 case XDP_REDIRECT:
2226 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); 2227 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
2227 if (!err) 2228 if (!err)
2228 result = IXGBE_XDP_TX; 2229 result = IXGBE_XDP_REDIR;
2229 else 2230 else
2230 result = IXGBE_XDP_CONSUMED; 2231 result = IXGBE_XDP_CONSUMED;
2231 break; 2232 break;
@@ -2285,7 +2286,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2285 unsigned int mss = 0; 2286 unsigned int mss = 0;
2286#endif /* IXGBE_FCOE */ 2287#endif /* IXGBE_FCOE */
2287 u16 cleaned_count = ixgbe_desc_unused(rx_ring); 2288 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2288 bool xdp_xmit = false; 2289 unsigned int xdp_xmit = 0;
2289 struct xdp_buff xdp; 2290 struct xdp_buff xdp;
2290 2291
2291 xdp.rxq = &rx_ring->xdp_rxq; 2292 xdp.rxq = &rx_ring->xdp_rxq;
@@ -2328,8 +2329,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2328 } 2329 }
2329 2330
2330 if (IS_ERR(skb)) { 2331 if (IS_ERR(skb)) {
2331 if (PTR_ERR(skb) == -IXGBE_XDP_TX) { 2332 unsigned int xdp_res = -PTR_ERR(skb);
2332 xdp_xmit = true; 2333
2334 if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
2335 xdp_xmit |= xdp_res;
2333 ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size); 2336 ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
2334 } else { 2337 } else {
2335 rx_buffer->pagecnt_bias++; 2338 rx_buffer->pagecnt_bias++;
@@ -2401,7 +2404,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2401 total_rx_packets++; 2404 total_rx_packets++;
2402 } 2405 }
2403 2406
2404 if (xdp_xmit) { 2407 if (xdp_xmit & IXGBE_XDP_REDIR)
2408 xdp_do_flush_map();
2409
2410 if (xdp_xmit & IXGBE_XDP_TX) {
2405 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; 2411 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
2406 2412
2407 /* Force memory writes to complete before letting h/w 2413 /* Force memory writes to complete before letting h/w
@@ -2409,8 +2415,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2409 */ 2415 */
2410 wmb(); 2416 wmb();
2411 writel(ring->next_to_use, ring->tail); 2417 writel(ring->next_to_use, ring->tail);
2412
2413 xdp_do_flush_map();
2414 } 2418 }
2415 2419
2416 u64_stats_update_begin(&rx_ring->syncp); 2420 u64_stats_update_begin(&rx_ring->syncp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 487388aed98f..384c1fa49081 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -807,6 +807,7 @@ static void cmd_work_handler(struct work_struct *work)
807 unsigned long flags; 807 unsigned long flags;
808 bool poll_cmd = ent->polling; 808 bool poll_cmd = ent->polling;
809 int alloc_ret; 809 int alloc_ret;
810 int cmd_mode;
810 811
811 sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; 812 sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
812 down(sem); 813 down(sem);
@@ -853,6 +854,7 @@ static void cmd_work_handler(struct work_struct *work)
853 set_signature(ent, !cmd->checksum_disabled); 854 set_signature(ent, !cmd->checksum_disabled);
854 dump_command(dev, ent, 1); 855 dump_command(dev, ent, 1);
855 ent->ts1 = ktime_get_ns(); 856 ent->ts1 = ktime_get_ns();
857 cmd_mode = cmd->mode;
856 858
857 if (ent->callback) 859 if (ent->callback)
858 schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); 860 schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
@@ -877,7 +879,7 @@ static void cmd_work_handler(struct work_struct *work)
877 iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); 879 iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
878 mmiowb(); 880 mmiowb();
879 /* if not in polling don't use ent after this point */ 881 /* if not in polling don't use ent after this point */
880 if (cmd->mode == CMD_MODE_POLLING || poll_cmd) { 882 if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
881 poll_timeout(ent); 883 poll_timeout(ent);
882 /* make sure we read the descriptor after ownership is SW */ 884 /* make sure we read the descriptor after ownership is SW */
883 rmb(); 885 rmb();
@@ -1276,7 +1278,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
1276{ 1278{
1277 struct mlx5_core_dev *dev = filp->private_data; 1279 struct mlx5_core_dev *dev = filp->private_data;
1278 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1280 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1279 char outlen_str[8]; 1281 char outlen_str[8] = {0};
1280 int outlen; 1282 int outlen;
1281 void *ptr; 1283 void *ptr;
1282 int err; 1284 int err;
@@ -1291,8 +1293,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
1291 if (copy_from_user(outlen_str, buf, count)) 1293 if (copy_from_user(outlen_str, buf, count))
1292 return -EFAULT; 1294 return -EFAULT;
1293 1295
1294 outlen_str[7] = 0;
1295
1296 err = sscanf(outlen_str, "%d", &outlen); 1296 err = sscanf(outlen_str, "%d", &outlen);
1297 if (err < 0) 1297 if (err < 0)
1298 return err; 1298 return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 56c1b6f5593e..dae4156a710d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2846,7 +2846,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
2846 mlx5e_activate_channels(&priv->channels); 2846 mlx5e_activate_channels(&priv->channels);
2847 netif_tx_start_all_queues(priv->netdev); 2847 netif_tx_start_all_queues(priv->netdev);
2848 2848
2849 if (MLX5_VPORT_MANAGER(priv->mdev)) 2849 if (MLX5_ESWITCH_MANAGER(priv->mdev))
2850 mlx5e_add_sqs_fwd_rules(priv); 2850 mlx5e_add_sqs_fwd_rules(priv);
2851 2851
2852 mlx5e_wait_channels_min_rx_wqes(&priv->channels); 2852 mlx5e_wait_channels_min_rx_wqes(&priv->channels);
@@ -2857,7 +2857,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
2857{ 2857{
2858 mlx5e_redirect_rqts_to_drop(priv); 2858 mlx5e_redirect_rqts_to_drop(priv);
2859 2859
2860 if (MLX5_VPORT_MANAGER(priv->mdev)) 2860 if (MLX5_ESWITCH_MANAGER(priv->mdev))
2861 mlx5e_remove_sqs_fwd_rules(priv); 2861 mlx5e_remove_sqs_fwd_rules(priv);
2862 2862
2863 /* FIXME: This is a W/A only for tx timeout watch dog false alarm when 2863 /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
@@ -4597,7 +4597,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
4597 mlx5e_set_netdev_dev_addr(netdev); 4597 mlx5e_set_netdev_dev_addr(netdev);
4598 4598
4599#if IS_ENABLED(CONFIG_MLX5_ESWITCH) 4599#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
4600 if (MLX5_VPORT_MANAGER(mdev)) 4600 if (MLX5_ESWITCH_MANAGER(mdev))
4601 netdev->switchdev_ops = &mlx5e_switchdev_ops; 4601 netdev->switchdev_ops = &mlx5e_switchdev_ops;
4602#endif 4602#endif
4603 4603
@@ -4753,7 +4753,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
4753 4753
4754 mlx5e_enable_async_events(priv); 4754 mlx5e_enable_async_events(priv);
4755 4755
4756 if (MLX5_VPORT_MANAGER(priv->mdev)) 4756 if (MLX5_ESWITCH_MANAGER(priv->mdev))
4757 mlx5e_register_vport_reps(priv); 4757 mlx5e_register_vport_reps(priv);
4758 4758
4759 if (netdev->reg_state != NETREG_REGISTERED) 4759 if (netdev->reg_state != NETREG_REGISTERED)
@@ -4788,7 +4788,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
4788 4788
4789 queue_work(priv->wq, &priv->set_rx_mode_work); 4789 queue_work(priv->wq, &priv->set_rx_mode_work);
4790 4790
4791 if (MLX5_VPORT_MANAGER(priv->mdev)) 4791 if (MLX5_ESWITCH_MANAGER(priv->mdev))
4792 mlx5e_unregister_vport_reps(priv); 4792 mlx5e_unregister_vport_reps(priv);
4793 4793
4794 mlx5e_disable_async_events(priv); 4794 mlx5e_disable_async_events(priv);
@@ -4972,7 +4972,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
4972 return NULL; 4972 return NULL;
4973 4973
4974#ifdef CONFIG_MLX5_ESWITCH 4974#ifdef CONFIG_MLX5_ESWITCH
4975 if (MLX5_VPORT_MANAGER(mdev)) { 4975 if (MLX5_ESWITCH_MANAGER(mdev)) {
4976 rpriv = mlx5e_alloc_nic_rep_priv(mdev); 4976 rpriv = mlx5e_alloc_nic_rep_priv(mdev);
4977 if (!rpriv) { 4977 if (!rpriv) {
4978 mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n"); 4978 mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 57987f6546e8..2b8040a3cdbd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -823,7 +823,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
823 struct mlx5e_rep_priv *rpriv = priv->ppriv; 823 struct mlx5e_rep_priv *rpriv = priv->ppriv;
824 struct mlx5_eswitch_rep *rep; 824 struct mlx5_eswitch_rep *rep;
825 825
826 if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) 826 if (!MLX5_ESWITCH_MANAGER(priv->mdev))
827 return false; 827 return false;
828 828
829 rep = rpriv->rep; 829 rep = rpriv->rep;
@@ -837,8 +837,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
837static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv) 837static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
838{ 838{
839 struct mlx5e_rep_priv *rpriv = priv->ppriv; 839 struct mlx5e_rep_priv *rpriv = priv->ppriv;
840 struct mlx5_eswitch_rep *rep = rpriv->rep; 840 struct mlx5_eswitch_rep *rep;
841 841
842 if (!MLX5_ESWITCH_MANAGER(priv->mdev))
843 return false;
844
845 rep = rpriv->rep;
842 if (rep && rep->vport != FDB_UPLINK_VPORT) 846 if (rep && rep->vport != FDB_UPLINK_VPORT)
843 return true; 847 return true;
844 848
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index f63dfbcd29fe..b79d74860a30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1594,17 +1594,15 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
1594} 1594}
1595 1595
1596/* Public E-Switch API */ 1596/* Public E-Switch API */
1597#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev)) 1597#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
1598
1598 1599
1599int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) 1600int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
1600{ 1601{
1601 int err; 1602 int err;
1602 int i, enabled_events; 1603 int i, enabled_events;
1603 1604
1604 if (!ESW_ALLOWED(esw)) 1605 if (!ESW_ALLOWED(esw) ||
1605 return 0;
1606
1607 if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
1608 !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { 1606 !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
1609 esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); 1607 esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
1610 return -EOPNOTSUPP; 1608 return -EOPNOTSUPP;
@@ -1806,7 +1804,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
1806 u64 node_guid; 1804 u64 node_guid;
1807 int err = 0; 1805 int err = 0;
1808 1806
1809 if (!ESW_ALLOWED(esw)) 1807 if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
1810 return -EPERM; 1808 return -EPERM;
1811 if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac)) 1809 if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
1812 return -EINVAL; 1810 return -EINVAL;
@@ -1883,7 +1881,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
1883{ 1881{
1884 struct mlx5_vport *evport; 1882 struct mlx5_vport *evport;
1885 1883
1886 if (!ESW_ALLOWED(esw)) 1884 if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
1887 return -EPERM; 1885 return -EPERM;
1888 if (!LEGAL_VPORT(esw, vport)) 1886 if (!LEGAL_VPORT(esw, vport))
1889 return -EINVAL; 1887 return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index cecd201f0b73..91f1209886ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1079,8 +1079,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
1079 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 1079 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1080 return -EOPNOTSUPP; 1080 return -EOPNOTSUPP;
1081 1081
1082 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 1082 if(!MLX5_ESWITCH_MANAGER(dev))
1083 return -EOPNOTSUPP; 1083 return -EPERM;
1084 1084
1085 if (dev->priv.eswitch->mode == SRIOV_NONE) 1085 if (dev->priv.eswitch->mode == SRIOV_NONE)
1086 return -EOPNOTSUPP; 1086 return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 49a75d31185e..f1a86cea86a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -32,6 +32,7 @@
32 32
33#include <linux/mutex.h> 33#include <linux/mutex.h>
34#include <linux/mlx5/driver.h> 34#include <linux/mlx5/driver.h>
35#include <linux/mlx5/eswitch.h>
35 36
36#include "mlx5_core.h" 37#include "mlx5_core.h"
37#include "fs_core.h" 38#include "fs_core.h"
@@ -2652,7 +2653,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
2652 goto err; 2653 goto err;
2653 } 2654 }
2654 2655
2655 if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { 2656 if (MLX5_ESWITCH_MANAGER(dev)) {
2656 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) { 2657 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
2657 err = init_fdb_root_ns(steering); 2658 err = init_fdb_root_ns(steering);
2658 if (err) 2659 if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index afd9f4fa22f4..41ad24f0de2c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -32,6 +32,7 @@
32 32
33#include <linux/mlx5/driver.h> 33#include <linux/mlx5/driver.h>
34#include <linux/mlx5/cmd.h> 34#include <linux/mlx5/cmd.h>
35#include <linux/mlx5/eswitch.h>
35#include <linux/module.h> 36#include <linux/module.h>
36#include "mlx5_core.h" 37#include "mlx5_core.h"
37#include "../../mlxfw/mlxfw.h" 38#include "../../mlxfw/mlxfw.h"
@@ -159,13 +160,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
159 } 160 }
160 161
161 if (MLX5_CAP_GEN(dev, vport_group_manager) && 162 if (MLX5_CAP_GEN(dev, vport_group_manager) &&
162 MLX5_CAP_GEN(dev, eswitch_flow_table)) { 163 MLX5_ESWITCH_MANAGER(dev)) {
163 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE); 164 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
164 if (err) 165 if (err)
165 return err; 166 return err;
166 } 167 }
167 168
168 if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { 169 if (MLX5_ESWITCH_MANAGER(dev)) {
169 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH); 170 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
170 if (err) 171 if (err)
171 return err; 172 return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index 7cb67122e8b5..98359559c77e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -33,6 +33,7 @@
33#include <linux/etherdevice.h> 33#include <linux/etherdevice.h>
34#include <linux/mlx5/driver.h> 34#include <linux/mlx5/driver.h>
35#include <linux/mlx5/mlx5_ifc.h> 35#include <linux/mlx5/mlx5_ifc.h>
36#include <linux/mlx5/eswitch.h>
36#include "mlx5_core.h" 37#include "mlx5_core.h"
37#include "lib/mpfs.h" 38#include "lib/mpfs.h"
38 39
@@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev)
98 int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); 99 int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
99 struct mlx5_mpfs *mpfs; 100 struct mlx5_mpfs *mpfs;
100 101
101 if (!MLX5_VPORT_MANAGER(dev)) 102 if (!MLX5_ESWITCH_MANAGER(dev))
102 return 0; 103 return 0;
103 104
104 mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL); 105 mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL);
@@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev)
122{ 123{
123 struct mlx5_mpfs *mpfs = dev->priv.mpfs; 124 struct mlx5_mpfs *mpfs = dev->priv.mpfs;
124 125
125 if (!MLX5_VPORT_MANAGER(dev)) 126 if (!MLX5_ESWITCH_MANAGER(dev))
126 return; 127 return;
127 128
128 WARN_ON(!hlist_empty(mpfs->hash)); 129 WARN_ON(!hlist_empty(mpfs->hash));
@@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
137 u32 index; 138 u32 index;
138 int err; 139 int err;
139 140
140 if (!MLX5_VPORT_MANAGER(dev)) 141 if (!MLX5_ESWITCH_MANAGER(dev))
141 return 0; 142 return 0;
142 143
143 mutex_lock(&mpfs->lock); 144 mutex_lock(&mpfs->lock);
@@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
179 int err = 0; 180 int err = 0;
180 u32 index; 181 u32 index;
181 182
182 if (!MLX5_VPORT_MANAGER(dev)) 183 if (!MLX5_ESWITCH_MANAGER(dev))
183 return 0; 184 return 0;
184 185
185 mutex_lock(&mpfs->lock); 186 mutex_lock(&mpfs->lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index fa9d0760dd36..31a9cbd85689 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -701,7 +701,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
701static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, 701static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
702 int inlen) 702 int inlen)
703{ 703{
704 u32 out[MLX5_ST_SZ_DW(qtct_reg)]; 704 u32 out[MLX5_ST_SZ_DW(qetc_reg)];
705 705
706 if (!MLX5_CAP_GEN(mdev, ets)) 706 if (!MLX5_CAP_GEN(mdev, ets))
707 return -EOPNOTSUPP; 707 return -EOPNOTSUPP;
@@ -713,7 +713,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
713static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out, 713static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
714 int outlen) 714 int outlen)
715{ 715{
716 u32 in[MLX5_ST_SZ_DW(qtct_reg)]; 716 u32 in[MLX5_ST_SZ_DW(qetc_reg)];
717 717
718 if (!MLX5_CAP_GEN(mdev, ets)) 718 if (!MLX5_CAP_GEN(mdev, ets))
719 return -EOPNOTSUPP; 719 return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 2a8b529ce6dd..a0674962f02c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
88 return -EBUSY; 88 return -EBUSY;
89 } 89 }
90 90
91 if (!MLX5_ESWITCH_MANAGER(dev))
92 goto enable_vfs_hca;
93
91 err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY); 94 err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
92 if (err) { 95 if (err) {
93 mlx5_core_warn(dev, 96 mlx5_core_warn(dev,
@@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
95 return err; 98 return err;
96 } 99 }
97 100
101enable_vfs_hca:
98 for (vf = 0; vf < num_vfs; vf++) { 102 for (vf = 0; vf < num_vfs; vf++) {
99 err = mlx5_core_enable_hca(dev, vf + 1); 103 err = mlx5_core_enable_hca(dev, vf + 1);
100 if (err) { 104 if (err) {
@@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
140 } 144 }
141 145
142out: 146out:
143 mlx5_eswitch_disable_sriov(dev->priv.eswitch); 147 if (MLX5_ESWITCH_MANAGER(dev))
148 mlx5_eswitch_disable_sriov(dev->priv.eswitch);
144 149
145 if (mlx5_wait_for_vf_pages(dev)) 150 if (mlx5_wait_for_vf_pages(dev))
146 mlx5_core_warn(dev, "timeout reclaiming VFs pages\n"); 151 mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 719cecb182c6..7eecd5b07bb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -549,8 +549,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
549 return -EINVAL; 549 return -EINVAL;
550 if (!MLX5_CAP_GEN(mdev, vport_group_manager)) 550 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
551 return -EACCES; 551 return -EACCES;
552 if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
553 return -EOPNOTSUPP;
554 552
555 in = kvzalloc(inlen, GFP_KERNEL); 553 in = kvzalloc(inlen, GFP_KERNEL);
556 if (!in) 554 if (!in)
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index fcdfb8e7fdea..40216d56dddc 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -81,10 +81,10 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
81 81
82 ret = nfp_net_bpf_offload(nn, prog, running, extack); 82 ret = nfp_net_bpf_offload(nn, prog, running, extack);
83 /* Stop offload if replace not possible */ 83 /* Stop offload if replace not possible */
84 if (ret && prog) 84 if (ret)
85 nfp_bpf_xdp_offload(app, nn, NULL, extack); 85 return ret;
86 86
87 nn->dp.bpf_offload_xdp = prog && !ret; 87 nn->dp.bpf_offload_xdp = !!prog;
88 return ret; 88 return ret;
89} 89}
90 90
@@ -202,6 +202,9 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev,
202 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 202 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
203 return -EOPNOTSUPP; 203 return -EOPNOTSUPP;
204 204
205 if (tcf_block_shared(f->block))
206 return -EOPNOTSUPP;
207
205 switch (f->command) { 208 switch (f->command) {
206 case TC_BLOCK_BIND: 209 case TC_BLOCK_BIND:
207 return tcf_block_cb_register(f->block, 210 return tcf_block_cb_register(f->block,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 91935405f586..84f7a5dbea9d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -123,6 +123,20 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
123 NFP_FLOWER_MASK_MPLS_Q; 123 NFP_FLOWER_MASK_MPLS_Q;
124 124
125 frame->mpls_lse = cpu_to_be32(t_mpls); 125 frame->mpls_lse = cpu_to_be32(t_mpls);
126 } else if (dissector_uses_key(flow->dissector,
127 FLOW_DISSECTOR_KEY_BASIC)) {
128 /* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
129 * bit, which indicates an mpls ether type but without any
130 * mpls fields.
131 */
132 struct flow_dissector_key_basic *key_basic;
133
134 key_basic = skb_flow_dissector_target(flow->dissector,
135 FLOW_DISSECTOR_KEY_BASIC,
136 flow->key);
137 if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
138 key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC))
139 frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
126 } 140 }
127} 141}
128 142
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index c42e64f32333..525057bee0ed 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -264,6 +264,14 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
264 case cpu_to_be16(ETH_P_ARP): 264 case cpu_to_be16(ETH_P_ARP):
265 return -EOPNOTSUPP; 265 return -EOPNOTSUPP;
266 266
267 case cpu_to_be16(ETH_P_MPLS_UC):
268 case cpu_to_be16(ETH_P_MPLS_MC):
269 if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
270 key_layer |= NFP_FLOWER_LAYER_MAC;
271 key_size += sizeof(struct nfp_flower_mac_mpls);
272 }
273 break;
274
267 /* Will be included in layer 2. */ 275 /* Will be included in layer 2. */
268 case cpu_to_be16(ETH_P_8021Q): 276 case cpu_to_be16(ETH_P_8021Q):
269 break; 277 break;
@@ -623,6 +631,9 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
623 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 631 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
624 return -EOPNOTSUPP; 632 return -EOPNOTSUPP;
625 633
634 if (tcf_block_shared(f->block))
635 return -EOPNOTSUPP;
636
626 switch (f->command) { 637 switch (f->command) {
627 case TC_BLOCK_BIND: 638 case TC_BLOCK_BIND:
628 return tcf_block_cb_register(f->block, 639 return tcf_block_cb_register(f->block,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
index cd34097b79f1..37a6d7822a38 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
@@ -232,7 +232,7 @@ struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
232 err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res), 232 err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
233 nfp_resource_address(state->res), 233 nfp_resource_address(state->res),
234 fwinf, sizeof(*fwinf)); 234 fwinf, sizeof(*fwinf));
235 if (err < sizeof(*fwinf)) 235 if (err < (int)sizeof(*fwinf))
236 goto err_release; 236 goto err_release;
237 237
238 if (!nffw_res_flg_init_get(fwinf)) 238 if (!nffw_res_flg_init_get(fwinf))
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index f0b01385d5cb..e0680ce91328 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -709,9 +709,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
709 p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE]; 709 p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
710 710
711 memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id, 711 memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
712 ARRAY_SIZE(p_local->local_chassis_id)); 712 sizeof(p_local->local_chassis_id));
713 memcpy(params->lldp_local.local_port_id, p_local->local_port_id, 713 memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
714 ARRAY_SIZE(p_local->local_port_id)); 714 sizeof(p_local->local_port_id));
715} 715}
716 716
717static void 717static void
@@ -723,9 +723,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
723 p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE]; 723 p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
724 724
725 memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id, 725 memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
726 ARRAY_SIZE(p_remote->peer_chassis_id)); 726 sizeof(p_remote->peer_chassis_id));
727 memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id, 727 memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
728 ARRAY_SIZE(p_remote->peer_port_id)); 728 sizeof(p_remote->peer_port_id));
729} 729}
730 730
731static int 731static int
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 329781cda77f..e5249b4741d0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1804,7 +1804,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
1804 DP_INFO(p_hwfn, "Failed to update driver state\n"); 1804 DP_INFO(p_hwfn, "Failed to update driver state\n");
1805 1805
1806 rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, 1806 rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
1807 QED_OV_ESWITCH_VEB); 1807 QED_OV_ESWITCH_NONE);
1808 if (rc) 1808 if (rc)
1809 DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); 1809 DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
1810 } 1810 }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 5c10fd7210c3..0cbc74d6ca8b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -789,6 +789,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
789 /* We want a minimum of one slowpath and one fastpath vector per hwfn */ 789 /* We want a minimum of one slowpath and one fastpath vector per hwfn */
790 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; 790 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
791 791
792 if (is_kdump_kernel()) {
793 DP_INFO(cdev,
794 "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
795 cdev->int_params.in.min_msix_cnt);
796 cdev->int_params.in.num_vectors =
797 cdev->int_params.in.min_msix_cnt;
798 }
799
792 rc = qed_set_int_mode(cdev, false); 800 rc = qed_set_int_mode(cdev, false);
793 if (rc) { 801 if (rc) {
794 DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); 802 DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index f01bf52bc381..fd59cf45f4be 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -4513,6 +4513,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
4513static int qed_sriov_enable(struct qed_dev *cdev, int num) 4513static int qed_sriov_enable(struct qed_dev *cdev, int num)
4514{ 4514{
4515 struct qed_iov_vf_init_params params; 4515 struct qed_iov_vf_init_params params;
4516 struct qed_hwfn *hwfn;
4517 struct qed_ptt *ptt;
4516 int i, j, rc; 4518 int i, j, rc;
4517 4519
4518 if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { 4520 if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
@@ -4525,8 +4527,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
4525 4527
4526 /* Initialize HW for VF access */ 4528 /* Initialize HW for VF access */
4527 for_each_hwfn(cdev, j) { 4529 for_each_hwfn(cdev, j) {
4528 struct qed_hwfn *hwfn = &cdev->hwfns[j]; 4530 hwfn = &cdev->hwfns[j];
4529 struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 4531 ptt = qed_ptt_acquire(hwfn);
4530 4532
4531 /* Make sure not to use more than 16 queues per VF */ 4533 /* Make sure not to use more than 16 queues per VF */
4532 params.num_queues = min_t(int, 4534 params.num_queues = min_t(int,
@@ -4562,6 +4564,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
4562 goto err; 4564 goto err;
4563 } 4565 }
4564 4566
4567 hwfn = QED_LEADING_HWFN(cdev);
4568 ptt = qed_ptt_acquire(hwfn);
4569 if (!ptt) {
4570 DP_ERR(hwfn, "Failed to acquire ptt\n");
4571 rc = -EBUSY;
4572 goto err;
4573 }
4574
4575 rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
4576 if (rc)
4577 DP_INFO(cdev, "Failed to update eswitch mode\n");
4578 qed_ptt_release(hwfn, ptt);
4579
4565 return num; 4580 return num;
4566 4581
4567err: 4582err:
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 02adb513f475..013ff567283c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
337{ 337{
338 struct qede_ptp *ptp = edev->ptp; 338 struct qede_ptp *ptp = edev->ptp;
339 339
340 if (!ptp) 340 if (!ptp) {
341 return -EIO; 341 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
342 SOF_TIMESTAMPING_RX_SOFTWARE |
343 SOF_TIMESTAMPING_SOFTWARE;
344 info->phc_index = -1;
345
346 return 0;
347 }
342 348
343 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 349 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
344 SOF_TIMESTAMPING_RX_SOFTWARE | 350 SOF_TIMESTAMPING_RX_SOFTWARE |
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 8edf20967c82..e045a5d6b938 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2794,6 +2794,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
2794 if (!state) 2794 if (!state)
2795 return -ENOMEM; 2795 return -ENOMEM;
2796 efx->filter_state = state; 2796 efx->filter_state = state;
2797 init_rwsem(&state->lock);
2797 2798
2798 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; 2799 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2799 table->id = EFX_FARCH_FILTER_TABLE_RX_IP; 2800 table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index d37f17ca62fe..65bc3556bd8f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -407,6 +407,16 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
407 } 407 }
408} 408}
409 409
410static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
411{
412 u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
413
414 value &= ~DMA_RBSZ_MASK;
415 value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
416
417 writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
418}
419
410const struct stmmac_dma_ops dwmac4_dma_ops = { 420const struct stmmac_dma_ops dwmac4_dma_ops = {
411 .reset = dwmac4_dma_reset, 421 .reset = dwmac4_dma_reset,
412 .init = dwmac4_dma_init, 422 .init = dwmac4_dma_init,
@@ -431,6 +441,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
431 .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, 441 .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
432 .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, 442 .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
433 .enable_tso = dwmac4_enable_tso, 443 .enable_tso = dwmac4_enable_tso,
444 .set_bfsize = dwmac4_set_bfsize,
434}; 445};
435 446
436const struct stmmac_dma_ops dwmac410_dma_ops = { 447const struct stmmac_dma_ops dwmac410_dma_ops = {
@@ -457,4 +468,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
457 .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, 468 .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
458 .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, 469 .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
459 .enable_tso = dwmac4_enable_tso, 470 .enable_tso = dwmac4_enable_tso,
471 .set_bfsize = dwmac4_set_bfsize,
460}; 472};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index c63c1fe3f26b..22a4a6dbb1a4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -120,6 +120,8 @@
120 120
121/* DMA Rx Channel X Control register defines */ 121/* DMA Rx Channel X Control register defines */
122#define DMA_CONTROL_SR BIT(0) 122#define DMA_CONTROL_SR BIT(0)
123#define DMA_RBSZ_MASK GENMASK(14, 1)
124#define DMA_RBSZ_SHIFT 1
123 125
124/* Interrupt status per channel */ 126/* Interrupt status per channel */
125#define DMA_CHAN_STATUS_REB GENMASK(21, 19) 127#define DMA_CHAN_STATUS_REB GENMASK(21, 19)
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index e44e7b26ce82..fe8b536b13f8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -183,6 +183,7 @@ struct stmmac_dma_ops {
183 void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); 183 void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
184 void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); 184 void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
185 void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); 185 void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
186 void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
186}; 187};
187 188
188#define stmmac_reset(__priv, __args...) \ 189#define stmmac_reset(__priv, __args...) \
@@ -235,6 +236,8 @@ struct stmmac_dma_ops {
235 stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args) 236 stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args)
236#define stmmac_enable_tso(__priv, __args...) \ 237#define stmmac_enable_tso(__priv, __args...) \
237 stmmac_do_void_callback(__priv, dma, enable_tso, __args) 238 stmmac_do_void_callback(__priv, dma, enable_tso, __args)
239#define stmmac_set_dma_bfsize(__priv, __args...) \
240 stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
238 241
239struct mac_device_info; 242struct mac_device_info;
240struct net_device; 243struct net_device;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index cba46b62a1cd..60f59abab009 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1804,6 +1804,8 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1804 1804
1805 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, 1805 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1806 rxfifosz, qmode); 1806 rxfifosz, qmode);
1807 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1808 chan);
1807 } 1809 }
1808 1810
1809 for (chan = 0; chan < tx_channels_count; chan++) { 1811 for (chan = 0; chan < tx_channels_count; chan++) {
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 750eaa53bf0c..ada33c2d9ac2 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -476,7 +476,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
476out_unlock: 476out_unlock:
477 rcu_read_unlock(); 477 rcu_read_unlock();
478out: 478out:
479 NAPI_GRO_CB(skb)->flush |= flush; 479 skb_gro_flush_final(skb, pp, flush);
480 480
481 return pp; 481 return pp;
482} 482}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 1a924b867b07..4b6e308199d2 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -210,7 +210,7 @@ int netvsc_recv_callback(struct net_device *net,
210void netvsc_channel_cb(void *context); 210void netvsc_channel_cb(void *context);
211int netvsc_poll(struct napi_struct *napi, int budget); 211int netvsc_poll(struct napi_struct *napi, int budget);
212 212
213void rndis_set_subchannel(struct work_struct *w); 213int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
214int rndis_filter_open(struct netvsc_device *nvdev); 214int rndis_filter_open(struct netvsc_device *nvdev);
215int rndis_filter_close(struct netvsc_device *nvdev); 215int rndis_filter_close(struct netvsc_device *nvdev);
216struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, 216struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 5d5bd513847f..8e9d0ee1572b 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -65,6 +65,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
65 VM_PKT_DATA_INBAND, 0); 65 VM_PKT_DATA_INBAND, 0);
66} 66}
67 67
68/* Worker to setup sub channels on initial setup
69 * Initial hotplug event occurs in softirq context
70 * and can't wait for channels.
71 */
72static void netvsc_subchan_work(struct work_struct *w)
73{
74 struct netvsc_device *nvdev =
75 container_of(w, struct netvsc_device, subchan_work);
76 struct rndis_device *rdev;
77 int i, ret;
78
79 /* Avoid deadlock with device removal already under RTNL */
80 if (!rtnl_trylock()) {
81 schedule_work(w);
82 return;
83 }
84
85 rdev = nvdev->extension;
86 if (rdev) {
87 ret = rndis_set_subchannel(rdev->ndev, nvdev);
88 if (ret == 0) {
89 netif_device_attach(rdev->ndev);
90 } else {
91 /* fallback to only primary channel */
92 for (i = 1; i < nvdev->num_chn; i++)
93 netif_napi_del(&nvdev->chan_table[i].napi);
94
95 nvdev->max_chn = 1;
96 nvdev->num_chn = 1;
97 }
98 }
99
100 rtnl_unlock();
101}
102
68static struct netvsc_device *alloc_net_device(void) 103static struct netvsc_device *alloc_net_device(void)
69{ 104{
70 struct netvsc_device *net_device; 105 struct netvsc_device *net_device;
@@ -81,7 +116,7 @@ static struct netvsc_device *alloc_net_device(void)
81 116
82 init_completion(&net_device->channel_init_wait); 117 init_completion(&net_device->channel_init_wait);
83 init_waitqueue_head(&net_device->subchan_open); 118 init_waitqueue_head(&net_device->subchan_open);
84 INIT_WORK(&net_device->subchan_work, rndis_set_subchannel); 119 INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
85 120
86 return net_device; 121 return net_device;
87} 122}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index fe2256bf1d13..dd1d6e115145 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -905,8 +905,20 @@ static int netvsc_attach(struct net_device *ndev,
905 if (IS_ERR(nvdev)) 905 if (IS_ERR(nvdev))
906 return PTR_ERR(nvdev); 906 return PTR_ERR(nvdev);
907 907
908 /* Note: enable and attach happen when sub-channels setup */ 908 if (nvdev->num_chn > 1) {
909 ret = rndis_set_subchannel(ndev, nvdev);
910
911 /* if unavailable, just proceed with one queue */
912 if (ret) {
913 nvdev->max_chn = 1;
914 nvdev->num_chn = 1;
915 }
916 }
917
918 /* In any case device is now ready */
919 netif_device_attach(ndev);
909 920
921 /* Note: enable and attach happen when sub-channels setup */
910 netif_carrier_off(ndev); 922 netif_carrier_off(ndev);
911 923
912 if (netif_running(ndev)) { 924 if (netif_running(ndev)) {
@@ -2089,6 +2101,9 @@ static int netvsc_probe(struct hv_device *dev,
2089 2101
2090 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); 2102 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
2091 2103
2104 if (nvdev->num_chn > 1)
2105 schedule_work(&nvdev->subchan_work);
2106
2092 /* hw_features computed in rndis_netdev_set_hwcaps() */ 2107 /* hw_features computed in rndis_netdev_set_hwcaps() */
2093 net->features = net->hw_features | 2108 net->features = net->hw_features |
2094 NETIF_F_HIGHDMA | NETIF_F_SG | 2109 NETIF_F_HIGHDMA | NETIF_F_SG |
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 5428bb261102..9b4e3c3787e5 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1062,29 +1062,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
1062 * This breaks overlap of processing the host message for the 1062 * This breaks overlap of processing the host message for the
1063 * new primary channel with the initialization of sub-channels. 1063 * new primary channel with the initialization of sub-channels.
1064 */ 1064 */
1065void rndis_set_subchannel(struct work_struct *w) 1065int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
1066{ 1066{
1067 struct netvsc_device *nvdev
1068 = container_of(w, struct netvsc_device, subchan_work);
1069 struct nvsp_message *init_packet = &nvdev->channel_init_pkt; 1067 struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
1070 struct net_device_context *ndev_ctx; 1068 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1071 struct rndis_device *rdev; 1069 struct hv_device *hv_dev = ndev_ctx->device_ctx;
1072 struct net_device *ndev; 1070 struct rndis_device *rdev = nvdev->extension;
1073 struct hv_device *hv_dev;
1074 int i, ret; 1071 int i, ret;
1075 1072
1076 if (!rtnl_trylock()) { 1073 ASSERT_RTNL();
1077 schedule_work(w);
1078 return;
1079 }
1080
1081 rdev = nvdev->extension;
1082 if (!rdev)
1083 goto unlock; /* device was removed */
1084
1085 ndev = rdev->ndev;
1086 ndev_ctx = netdev_priv(ndev);
1087 hv_dev = ndev_ctx->device_ctx;
1088 1074
1089 memset(init_packet, 0, sizeof(struct nvsp_message)); 1075 memset(init_packet, 0, sizeof(struct nvsp_message));
1090 init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL; 1076 init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
@@ -1100,13 +1086,13 @@ void rndis_set_subchannel(struct work_struct *w)
1100 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 1086 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1101 if (ret) { 1087 if (ret) {
1102 netdev_err(ndev, "sub channel allocate send failed: %d\n", ret); 1088 netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
1103 goto failed; 1089 return ret;
1104 } 1090 }
1105 1091
1106 wait_for_completion(&nvdev->channel_init_wait); 1092 wait_for_completion(&nvdev->channel_init_wait);
1107 if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { 1093 if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
1108 netdev_err(ndev, "sub channel request failed\n"); 1094 netdev_err(ndev, "sub channel request failed\n");
1109 goto failed; 1095 return -EIO;
1110 } 1096 }
1111 1097
1112 nvdev->num_chn = 1 + 1098 nvdev->num_chn = 1 +
@@ -1125,21 +1111,7 @@ void rndis_set_subchannel(struct work_struct *w)
1125 for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) 1111 for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1126 ndev_ctx->tx_table[i] = i % nvdev->num_chn; 1112 ndev_ctx->tx_table[i] = i % nvdev->num_chn;
1127 1113
1128 netif_device_attach(ndev); 1114 return 0;
1129 rtnl_unlock();
1130 return;
1131
1132failed:
1133 /* fallback to only primary channel */
1134 for (i = 1; i < nvdev->num_chn; i++)
1135 netif_napi_del(&nvdev->chan_table[i].napi);
1136
1137 nvdev->max_chn = 1;
1138 nvdev->num_chn = 1;
1139
1140 netif_device_attach(ndev);
1141unlock:
1142 rtnl_unlock();
1143} 1115}
1144 1116
1145static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, 1117static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
@@ -1360,21 +1332,12 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
1360 netif_napi_add(net, &net_device->chan_table[i].napi, 1332 netif_napi_add(net, &net_device->chan_table[i].napi,
1361 netvsc_poll, NAPI_POLL_WEIGHT); 1333 netvsc_poll, NAPI_POLL_WEIGHT);
1362 1334
1363 if (net_device->num_chn > 1) 1335 return net_device;
1364 schedule_work(&net_device->subchan_work);
1365 1336
1366out: 1337out:
1367 /* if unavailable, just proceed with one queue */ 1338 /* setting up multiple channels failed */
1368 if (ret) { 1339 net_device->max_chn = 1;
1369 net_device->max_chn = 1; 1340 net_device->num_chn = 1;
1370 net_device->num_chn = 1;
1371 }
1372
1373 /* No sub channels, device is ready */
1374 if (net_device->num_chn == 1)
1375 netif_device_attach(net);
1376
1377 return net_device;
1378 1341
1379err_dev_remv: 1342err_dev_remv:
1380 rndis_filter_device_remove(dev, net_device); 1343 rndis_filter_device_remove(dev, net_device);
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 23c1d6600241..4a949569ec4c 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -75,10 +75,23 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
75{ 75{
76 struct ipvl_dev *ipvlan; 76 struct ipvl_dev *ipvlan;
77 struct net_device *mdev = port->dev; 77 struct net_device *mdev = port->dev;
78 int err = 0; 78 unsigned int flags;
79 int err;
79 80
80 ASSERT_RTNL(); 81 ASSERT_RTNL();
81 if (port->mode != nval) { 82 if (port->mode != nval) {
83 list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
84 flags = ipvlan->dev->flags;
85 if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) {
86 err = dev_change_flags(ipvlan->dev,
87 flags | IFF_NOARP);
88 } else {
89 err = dev_change_flags(ipvlan->dev,
90 flags & ~IFF_NOARP);
91 }
92 if (unlikely(err))
93 goto fail;
94 }
82 if (nval == IPVLAN_MODE_L3S) { 95 if (nval == IPVLAN_MODE_L3S) {
83 /* New mode is L3S */ 96 /* New mode is L3S */
84 err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); 97 err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
@@ -86,21 +99,28 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
86 mdev->l3mdev_ops = &ipvl_l3mdev_ops; 99 mdev->l3mdev_ops = &ipvl_l3mdev_ops;
87 mdev->priv_flags |= IFF_L3MDEV_MASTER; 100 mdev->priv_flags |= IFF_L3MDEV_MASTER;
88 } else 101 } else
89 return err; 102 goto fail;
90 } else if (port->mode == IPVLAN_MODE_L3S) { 103 } else if (port->mode == IPVLAN_MODE_L3S) {
91 /* Old mode was L3S */ 104 /* Old mode was L3S */
92 mdev->priv_flags &= ~IFF_L3MDEV_MASTER; 105 mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
93 ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); 106 ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
94 mdev->l3mdev_ops = NULL; 107 mdev->l3mdev_ops = NULL;
95 } 108 }
96 list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
97 if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S)
98 ipvlan->dev->flags |= IFF_NOARP;
99 else
100 ipvlan->dev->flags &= ~IFF_NOARP;
101 }
102 port->mode = nval; 109 port->mode = nval;
103 } 110 }
111 return 0;
112
113fail:
114 /* Undo the flags changes that have been done so far. */
115 list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) {
116 flags = ipvlan->dev->flags;
117 if (port->mode == IPVLAN_MODE_L3 ||
118 port->mode == IPVLAN_MODE_L3S)
119 dev_change_flags(ipvlan->dev, flags | IFF_NOARP);
120 else
121 dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP);
122 }
123
104 return err; 124 return err;
105} 125}
106 126
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index 081d99aa3985..49ac678eb2dc 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -222,7 +222,7 @@ static int dp83811_config_intr(struct phy_device *phydev)
222 if (err < 0) 222 if (err < 0)
223 return err; 223 return err;
224 224
225 err = phy_write(phydev, MII_DP83811_INT_STAT1, 0); 225 err = phy_write(phydev, MII_DP83811_INT_STAT2, 0);
226 } 226 }
227 227
228 return err; 228 return err;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 8dff87ec6d99..2e4130746c40 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -64,6 +64,7 @@
64#define DEFAULT_RX_CSUM_ENABLE (true) 64#define DEFAULT_RX_CSUM_ENABLE (true)
65#define DEFAULT_TSO_CSUM_ENABLE (true) 65#define DEFAULT_TSO_CSUM_ENABLE (true)
66#define DEFAULT_VLAN_FILTER_ENABLE (true) 66#define DEFAULT_VLAN_FILTER_ENABLE (true)
67#define DEFAULT_VLAN_RX_OFFLOAD (true)
67#define TX_OVERHEAD (8) 68#define TX_OVERHEAD (8)
68#define RXW_PADDING 2 69#define RXW_PADDING 2
69 70
@@ -2298,7 +2299,7 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2298 if ((ll_mtu % dev->maxpacket) == 0) 2299 if ((ll_mtu % dev->maxpacket) == 0)
2299 return -EDOM; 2300 return -EDOM;
2300 2301
2301 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); 2302 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2302 2303
2303 netdev->mtu = new_mtu; 2304 netdev->mtu = new_mtu;
2304 2305
@@ -2364,6 +2365,11 @@ static int lan78xx_set_features(struct net_device *netdev,
2364 } 2365 }
2365 2366
2366 if (features & NETIF_F_HW_VLAN_CTAG_RX) 2367 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2368 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2369 else
2370 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2371
2372 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2367 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_; 2373 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2368 else 2374 else
2369 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_; 2375 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
@@ -2587,7 +2593,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
2587 buf |= FCT_TX_CTL_EN_; 2593 buf |= FCT_TX_CTL_EN_;
2588 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf); 2594 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2589 2595
2590 ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); 2596 ret = lan78xx_set_rx_max_frame_length(dev,
2597 dev->net->mtu + VLAN_ETH_HLEN);
2591 2598
2592 ret = lan78xx_read_reg(dev, MAC_RX, &buf); 2599 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2593 buf |= MAC_RX_RXEN_; 2600 buf |= MAC_RX_RXEN_;
@@ -2975,6 +2982,12 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2975 if (DEFAULT_TSO_CSUM_ENABLE) 2982 if (DEFAULT_TSO_CSUM_ENABLE)
2976 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG; 2983 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2977 2984
2985 if (DEFAULT_VLAN_RX_OFFLOAD)
2986 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
2987
2988 if (DEFAULT_VLAN_FILTER_ENABLE)
2989 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2990
2978 dev->net->hw_features = dev->net->features; 2991 dev->net->hw_features = dev->net->features;
2979 2992
2980 ret = lan78xx_setup_irq_domain(dev); 2993 ret = lan78xx_setup_irq_domain(dev);
@@ -3039,8 +3052,13 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3039 struct sk_buff *skb, 3052 struct sk_buff *skb,
3040 u32 rx_cmd_a, u32 rx_cmd_b) 3053 u32 rx_cmd_a, u32 rx_cmd_b)
3041{ 3054{
3055 /* HW Checksum offload appears to be flawed if used when not stripping
3056 * VLAN headers. Drop back to S/W checksums under these conditions.
3057 */
3042 if (!(dev->net->features & NETIF_F_RXCSUM) || 3058 if (!(dev->net->features & NETIF_F_RXCSUM) ||
3043 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) { 3059 unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3060 ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3061 !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3044 skb->ip_summed = CHECKSUM_NONE; 3062 skb->ip_summed = CHECKSUM_NONE;
3045 } else { 3063 } else {
3046 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_)); 3064 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
@@ -3048,6 +3066,16 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3048 } 3066 }
3049} 3067}
3050 3068
3069static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3070 struct sk_buff *skb,
3071 u32 rx_cmd_a, u32 rx_cmd_b)
3072{
3073 if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3074 (rx_cmd_a & RX_CMD_A_FVTG_))
3075 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3076 (rx_cmd_b & 0xffff));
3077}
3078
3051static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb) 3079static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3052{ 3080{
3053 int status; 3081 int status;
@@ -3112,6 +3140,8 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3112 if (skb->len == size) { 3140 if (skb->len == size) {
3113 lan78xx_rx_csum_offload(dev, skb, 3141 lan78xx_rx_csum_offload(dev, skb,
3114 rx_cmd_a, rx_cmd_b); 3142 rx_cmd_a, rx_cmd_b);
3143 lan78xx_rx_vlan_offload(dev, skb,
3144 rx_cmd_a, rx_cmd_b);
3115 3145
3116 skb_trim(skb, skb->len - 4); /* remove fcs */ 3146 skb_trim(skb, skb->len - 4); /* remove fcs */
3117 skb->truesize = size + sizeof(struct sk_buff); 3147 skb->truesize = size + sizeof(struct sk_buff);
@@ -3130,6 +3160,7 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3130 skb_set_tail_pointer(skb2, size); 3160 skb_set_tail_pointer(skb2, size);
3131 3161
3132 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b); 3162 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3163 lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3133 3164
3134 skb_trim(skb2, skb2->len - 4); /* remove fcs */ 3165 skb_trim(skb2, skb2->len - 4); /* remove fcs */
3135 skb2->truesize = size + sizeof(struct sk_buff); 3166 skb2->truesize = size + sizeof(struct sk_buff);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 86f7196f9d91..2a58607a6aea 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3962,7 +3962,8 @@ static int rtl8152_close(struct net_device *netdev)
3962#ifdef CONFIG_PM_SLEEP 3962#ifdef CONFIG_PM_SLEEP
3963 unregister_pm_notifier(&tp->pm_notifier); 3963 unregister_pm_notifier(&tp->pm_notifier);
3964#endif 3964#endif
3965 napi_disable(&tp->napi); 3965 if (!test_bit(RTL8152_UNPLUG, &tp->flags))
3966 napi_disable(&tp->napi);
3966 clear_bit(WORK_ENABLE, &tp->flags); 3967 clear_bit(WORK_ENABLE, &tp->flags);
3967 usb_kill_urb(tp->intr_urb); 3968 usb_kill_urb(tp->intr_urb);
3968 cancel_delayed_work_sync(&tp->schedule); 3969 cancel_delayed_work_sync(&tp->schedule);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b6c9a2af3732..53085c63277b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -53,6 +53,10 @@ module_param(napi_tx, bool, 0644);
53/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ 53/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
54#define VIRTIO_XDP_HEADROOM 256 54#define VIRTIO_XDP_HEADROOM 256
55 55
56/* Separating two types of XDP xmit */
57#define VIRTIO_XDP_TX BIT(0)
58#define VIRTIO_XDP_REDIR BIT(1)
59
56/* RX packet size EWMA. The average packet size is used to determine the packet 60/* RX packet size EWMA. The average packet size is used to determine the packet
57 * buffer size when refilling RX rings. As the entire RX ring may be refilled 61 * buffer size when refilling RX rings. As the entire RX ring may be refilled
58 * at once, the weight is chosen so that the EWMA will be insensitive to short- 62 * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -582,7 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
582 struct receive_queue *rq, 586 struct receive_queue *rq,
583 void *buf, void *ctx, 587 void *buf, void *ctx,
584 unsigned int len, 588 unsigned int len,
585 bool *xdp_xmit) 589 unsigned int *xdp_xmit)
586{ 590{
587 struct sk_buff *skb; 591 struct sk_buff *skb;
588 struct bpf_prog *xdp_prog; 592 struct bpf_prog *xdp_prog;
@@ -654,14 +658,14 @@ static struct sk_buff *receive_small(struct net_device *dev,
654 trace_xdp_exception(vi->dev, xdp_prog, act); 658 trace_xdp_exception(vi->dev, xdp_prog, act);
655 goto err_xdp; 659 goto err_xdp;
656 } 660 }
657 *xdp_xmit = true; 661 *xdp_xmit |= VIRTIO_XDP_TX;
658 rcu_read_unlock(); 662 rcu_read_unlock();
659 goto xdp_xmit; 663 goto xdp_xmit;
660 case XDP_REDIRECT: 664 case XDP_REDIRECT:
661 err = xdp_do_redirect(dev, &xdp, xdp_prog); 665 err = xdp_do_redirect(dev, &xdp, xdp_prog);
662 if (err) 666 if (err)
663 goto err_xdp; 667 goto err_xdp;
664 *xdp_xmit = true; 668 *xdp_xmit |= VIRTIO_XDP_REDIR;
665 rcu_read_unlock(); 669 rcu_read_unlock();
666 goto xdp_xmit; 670 goto xdp_xmit;
667 default: 671 default:
@@ -723,7 +727,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
723 void *buf, 727 void *buf,
724 void *ctx, 728 void *ctx,
725 unsigned int len, 729 unsigned int len,
726 bool *xdp_xmit) 730 unsigned int *xdp_xmit)
727{ 731{
728 struct virtio_net_hdr_mrg_rxbuf *hdr = buf; 732 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
729 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); 733 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -818,7 +822,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
818 put_page(xdp_page); 822 put_page(xdp_page);
819 goto err_xdp; 823 goto err_xdp;
820 } 824 }
821 *xdp_xmit = true; 825 *xdp_xmit |= VIRTIO_XDP_TX;
822 if (unlikely(xdp_page != page)) 826 if (unlikely(xdp_page != page))
823 put_page(page); 827 put_page(page);
824 rcu_read_unlock(); 828 rcu_read_unlock();
@@ -830,7 +834,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
830 put_page(xdp_page); 834 put_page(xdp_page);
831 goto err_xdp; 835 goto err_xdp;
832 } 836 }
833 *xdp_xmit = true; 837 *xdp_xmit |= VIRTIO_XDP_REDIR;
834 if (unlikely(xdp_page != page)) 838 if (unlikely(xdp_page != page))
835 put_page(page); 839 put_page(page);
836 rcu_read_unlock(); 840 rcu_read_unlock();
@@ -939,7 +943,8 @@ xdp_xmit:
939} 943}
940 944
941static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, 945static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
942 void *buf, unsigned int len, void **ctx, bool *xdp_xmit) 946 void *buf, unsigned int len, void **ctx,
947 unsigned int *xdp_xmit)
943{ 948{
944 struct net_device *dev = vi->dev; 949 struct net_device *dev = vi->dev;
945 struct sk_buff *skb; 950 struct sk_buff *skb;
@@ -1232,7 +1237,8 @@ static void refill_work(struct work_struct *work)
1232 } 1237 }
1233} 1238}
1234 1239
1235static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit) 1240static int virtnet_receive(struct receive_queue *rq, int budget,
1241 unsigned int *xdp_xmit)
1236{ 1242{
1237 struct virtnet_info *vi = rq->vq->vdev->priv; 1243 struct virtnet_info *vi = rq->vq->vdev->priv;
1238 unsigned int len, received = 0, bytes = 0; 1244 unsigned int len, received = 0, bytes = 0;
@@ -1321,7 +1327,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
1321 struct virtnet_info *vi = rq->vq->vdev->priv; 1327 struct virtnet_info *vi = rq->vq->vdev->priv;
1322 struct send_queue *sq; 1328 struct send_queue *sq;
1323 unsigned int received, qp; 1329 unsigned int received, qp;
1324 bool xdp_xmit = false; 1330 unsigned int xdp_xmit = 0;
1325 1331
1326 virtnet_poll_cleantx(rq); 1332 virtnet_poll_cleantx(rq);
1327 1333
@@ -1331,12 +1337,14 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
1331 if (received < budget) 1337 if (received < budget)
1332 virtqueue_napi_complete(napi, rq->vq, received); 1338 virtqueue_napi_complete(napi, rq->vq, received);
1333 1339
1334 if (xdp_xmit) { 1340 if (xdp_xmit & VIRTIO_XDP_REDIR)
1341 xdp_do_flush_map();
1342
1343 if (xdp_xmit & VIRTIO_XDP_TX) {
1335 qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + 1344 qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
1336 smp_processor_id(); 1345 smp_processor_id();
1337 sq = &vi->sq[qp]; 1346 sq = &vi->sq[qp];
1338 virtqueue_kick(sq->vq); 1347 virtqueue_kick(sq->vq);
1339 xdp_do_flush_map();
1340 } 1348 }
1341 1349
1342 return received; 1350 return received;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index aee0e60471f1..f6bb1d54d4bd 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -623,9 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
623 flush = 0; 623 flush = 0;
624 624
625out: 625out:
626 skb_gro_remcsum_cleanup(skb, &grc); 626 skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
627 skb->remcsum_offload = 0;
628 NAPI_GRO_CB(skb)->flush |= flush;
629 627
630 return pp; 628 return pp;
631} 629}
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 2a5fec55bf60..a246a618f9a4 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -829,6 +829,17 @@ struct qeth_trap_id {
829/*some helper functions*/ 829/*some helper functions*/
830#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") 830#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
831 831
832static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
833 unsigned int elements)
834{
835 unsigned int i;
836
837 for (i = 0; i < elements; i++)
838 memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element));
839 buf->element[14].sflags = 0;
840 buf->element[15].sflags = 0;
841}
842
832/** 843/**
833 * qeth_get_elements_for_range() - find number of SBALEs to cover range. 844 * qeth_get_elements_for_range() - find number of SBALEs to cover range.
834 * @start: Start of the address range. 845 * @start: Start of the address range.
@@ -1029,7 +1040,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
1029 __u16, __u16, 1040 __u16, __u16,
1030 enum qeth_prot_versions); 1041 enum qeth_prot_versions);
1031int qeth_set_features(struct net_device *, netdev_features_t); 1042int qeth_set_features(struct net_device *, netdev_features_t);
1032void qeth_recover_features(struct net_device *dev); 1043void qeth_enable_hw_features(struct net_device *dev);
1033netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); 1044netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
1034netdev_features_t qeth_features_check(struct sk_buff *skb, 1045netdev_features_t qeth_features_check(struct sk_buff *skb,
1035 struct net_device *dev, 1046 struct net_device *dev,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 8e1474f1ffac..d01ac29fd986 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -73,9 +73,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
73 struct qeth_qdio_out_buffer *buf, 73 struct qeth_qdio_out_buffer *buf,
74 enum iucv_tx_notify notification); 74 enum iucv_tx_notify notification);
75static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); 75static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
76static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
77 struct qeth_qdio_out_buffer *buf,
78 enum qeth_qdio_buffer_states newbufstate);
79static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); 76static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
80 77
81struct workqueue_struct *qeth_wq; 78struct workqueue_struct *qeth_wq;
@@ -489,6 +486,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
489 struct qaob *aob; 486 struct qaob *aob;
490 struct qeth_qdio_out_buffer *buffer; 487 struct qeth_qdio_out_buffer *buffer;
491 enum iucv_tx_notify notification; 488 enum iucv_tx_notify notification;
489 unsigned int i;
492 490
493 aob = (struct qaob *) phys_to_virt(phys_aob_addr); 491 aob = (struct qaob *) phys_to_virt(phys_aob_addr);
494 QETH_CARD_TEXT(card, 5, "haob"); 492 QETH_CARD_TEXT(card, 5, "haob");
@@ -513,10 +511,18 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
513 qeth_notify_skbs(buffer->q, buffer, notification); 511 qeth_notify_skbs(buffer->q, buffer, notification);
514 512
515 buffer->aob = NULL; 513 buffer->aob = NULL;
516 qeth_clear_output_buffer(buffer->q, buffer, 514 /* Free dangling allocations. The attached skbs are handled by
517 QETH_QDIO_BUF_HANDLED_DELAYED); 515 * qeth_cleanup_handled_pending().
516 */
517 for (i = 0;
518 i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
519 i++) {
520 if (aob->sba[i] && buffer->is_header[i])
521 kmem_cache_free(qeth_core_header_cache,
522 (void *) aob->sba[i]);
523 }
524 atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
518 525
519 /* from here on: do not touch buffer anymore */
520 qdio_release_aob(aob); 526 qdio_release_aob(aob);
521} 527}
522 528
@@ -3759,6 +3765,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3759 QETH_CARD_TEXT(queue->card, 5, "aob"); 3765 QETH_CARD_TEXT(queue->card, 5, "aob");
3760 QETH_CARD_TEXT_(queue->card, 5, "%lx", 3766 QETH_CARD_TEXT_(queue->card, 5, "%lx",
3761 virt_to_phys(buffer->aob)); 3767 virt_to_phys(buffer->aob));
3768
3769 /* prepare the queue slot for re-use: */
3770 qeth_scrub_qdio_buffer(buffer->buffer,
3771 QETH_MAX_BUFFER_ELEMENTS(card));
3762 if (qeth_init_qdio_out_buf(queue, bidx)) { 3772 if (qeth_init_qdio_out_buf(queue, bidx)) {
3763 QETH_CARD_TEXT(card, 2, "outofbuf"); 3773 QETH_CARD_TEXT(card, 2, "outofbuf");
3764 qeth_schedule_recovery(card); 3774 qeth_schedule_recovery(card);
@@ -4834,7 +4844,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
4834 goto out; 4844 goto out;
4835 } 4845 }
4836 4846
4837 ccw_device_get_id(CARD_RDEV(card), &id); 4847 ccw_device_get_id(CARD_DDEV(card), &id);
4838 request->resp_buf_len = sizeof(*response); 4848 request->resp_buf_len = sizeof(*response);
4839 request->resp_version = DIAG26C_VERSION2; 4849 request->resp_version = DIAG26C_VERSION2;
4840 request->op_code = DIAG26C_GET_MAC; 4850 request->op_code = DIAG26C_GET_MAC;
@@ -6459,28 +6469,27 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
6459#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \ 6469#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
6460 NETIF_F_IPV6_CSUM) 6470 NETIF_F_IPV6_CSUM)
6461/** 6471/**
6462 * qeth_recover_features() - Restore device features after recovery 6472 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
6463 * @dev: the recovering net_device 6473 * @dev: a net_device
6464 *
6465 * Caller must hold rtnl lock.
6466 */ 6474 */
6467void qeth_recover_features(struct net_device *dev) 6475void qeth_enable_hw_features(struct net_device *dev)
6468{ 6476{
6469 netdev_features_t features = dev->features;
6470 struct qeth_card *card = dev->ml_priv; 6477 struct qeth_card *card = dev->ml_priv;
6478 netdev_features_t features;
6471 6479
6480 rtnl_lock();
6481 features = dev->features;
6472 /* force-off any feature that needs an IPA sequence. 6482 /* force-off any feature that needs an IPA sequence.
6473 * netdev_update_features() will restart them. 6483 * netdev_update_features() will restart them.
6474 */ 6484 */
6475 dev->features &= ~QETH_HW_FEATURES; 6485 dev->features &= ~QETH_HW_FEATURES;
6476 netdev_update_features(dev); 6486 netdev_update_features(dev);
6477 6487 if (features != dev->features)
6478 if (features == dev->features) 6488 dev_warn(&card->gdev->dev,
6479 return; 6489 "Device recovery failed to restore all offload features\n");
6480 dev_warn(&card->gdev->dev, 6490 rtnl_unlock();
6481 "Device recovery failed to restore all offload features\n");
6482} 6491}
6483EXPORT_SYMBOL_GPL(qeth_recover_features); 6492EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
6484 6493
6485int qeth_set_features(struct net_device *dev, netdev_features_t features) 6494int qeth_set_features(struct net_device *dev, netdev_features_t features)
6486{ 6495{
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index a7cb37da6a21..2487f0aeb165 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -140,7 +140,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
140 140
141static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) 141static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
142{ 142{
143 enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? 143 enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
144 IPA_CMD_SETGMAC : IPA_CMD_SETVMAC; 144 IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
145 int rc; 145 int rc;
146 146
@@ -157,7 +157,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
157 157
158static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac) 158static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
159{ 159{
160 enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? 160 enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
161 IPA_CMD_DELGMAC : IPA_CMD_DELVMAC; 161 IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
162 int rc; 162 int rc;
163 163
@@ -501,27 +501,34 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
501 return -ERESTARTSYS; 501 return -ERESTARTSYS;
502 } 502 }
503 503
504 /* avoid racing against concurrent state change: */
505 if (!mutex_trylock(&card->conf_mutex))
506 return -EAGAIN;
507
504 if (!qeth_card_hw_is_reachable(card)) { 508 if (!qeth_card_hw_is_reachable(card)) {
505 ether_addr_copy(dev->dev_addr, addr->sa_data); 509 ether_addr_copy(dev->dev_addr, addr->sa_data);
506 return 0; 510 goto out_unlock;
507 } 511 }
508 512
509 /* don't register the same address twice */ 513 /* don't register the same address twice */
510 if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) && 514 if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
511 (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) 515 (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
512 return 0; 516 goto out_unlock;
513 517
514 /* add the new address, switch over, drop the old */ 518 /* add the new address, switch over, drop the old */
515 rc = qeth_l2_send_setmac(card, addr->sa_data); 519 rc = qeth_l2_send_setmac(card, addr->sa_data);
516 if (rc) 520 if (rc)
517 return rc; 521 goto out_unlock;
518 ether_addr_copy(old_addr, dev->dev_addr); 522 ether_addr_copy(old_addr, dev->dev_addr);
519 ether_addr_copy(dev->dev_addr, addr->sa_data); 523 ether_addr_copy(dev->dev_addr, addr->sa_data);
520 524
521 if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED) 525 if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
522 qeth_l2_remove_mac(card, old_addr); 526 qeth_l2_remove_mac(card, old_addr);
523 card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; 527 card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
524 return 0; 528
529out_unlock:
530 mutex_unlock(&card->conf_mutex);
531 return rc;
525} 532}
526 533
527static void qeth_promisc_to_bridge(struct qeth_card *card) 534static void qeth_promisc_to_bridge(struct qeth_card *card)
@@ -1112,6 +1119,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
1112 netif_carrier_off(card->dev); 1119 netif_carrier_off(card->dev);
1113 1120
1114 qeth_set_allowed_threads(card, 0xffffffff, 0); 1121 qeth_set_allowed_threads(card, 0xffffffff, 0);
1122
1123 qeth_enable_hw_features(card->dev);
1115 if (recover_flag == CARD_STATE_RECOVER) { 1124 if (recover_flag == CARD_STATE_RECOVER) {
1116 if (recovery_mode && 1125 if (recovery_mode &&
1117 card->info.type != QETH_CARD_TYPE_OSN) { 1126 card->info.type != QETH_CARD_TYPE_OSN) {
@@ -1123,9 +1132,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
1123 } 1132 }
1124 /* this also sets saved unicast addresses */ 1133 /* this also sets saved unicast addresses */
1125 qeth_l2_set_rx_mode(card->dev); 1134 qeth_l2_set_rx_mode(card->dev);
1126 rtnl_lock();
1127 qeth_recover_features(card->dev);
1128 rtnl_unlock();
1129 } 1135 }
1130 /* let user_space know that device is online */ 1136 /* let user_space know that device is online */
1131 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 1137 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e7fa479adf47..5905dc63e256 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2662,6 +2662,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
2662 netif_carrier_on(card->dev); 2662 netif_carrier_on(card->dev);
2663 else 2663 else
2664 netif_carrier_off(card->dev); 2664 netif_carrier_off(card->dev);
2665
2666 qeth_enable_hw_features(card->dev);
2665 if (recover_flag == CARD_STATE_RECOVER) { 2667 if (recover_flag == CARD_STATE_RECOVER) {
2666 rtnl_lock(); 2668 rtnl_lock();
2667 if (recovery_mode) 2669 if (recovery_mode)
@@ -2669,7 +2671,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
2669 else 2671 else
2670 dev_open(card->dev); 2672 dev_open(card->dev);
2671 qeth_l3_set_rx_mode(card->dev); 2673 qeth_l3_set_rx_mode(card->dev);
2672 qeth_recover_features(card->dev);
2673 rtnl_unlock(); 2674 rtnl_unlock();
2674 } 2675 }
2675 qeth_trace_features(card); 2676 qeth_trace_features(card);