aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-06-08 20:41:04 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-06-08 20:41:04 -0400
commit5879ae5fd052a63d5ac0684320cb7df3e83da7de (patch)
tree935348f2c8abace531f11d691ea28a763f1e7ef2
parent056537c686e1eb8331ece43dff252903a7911dd0 (diff)
parentbbbf2df0039d31c6a0a9708ce4fe220a54bd5379 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller: 1) Fix stack allocation in s390 BPF JIT, from Michael Holzheu. 2) Disable LRO on openvswitch paths, from Jiri Benc. 3) UDP early demux doesn't handle multicast group membership properly, fix from Shawn Bohrer. 4) Fix TX queue hang due to incorrect handling of mixed sized fragments and linearization in i40e driver, from Anjali Singhai Jain. 5) Cannot use disable_irq() in timer handler of AMD xgbe driver, from Thomas Lendacky. 6) be2net driver improperly assumes pci_alloc_consistent() gives zero'd out memory, use dma_zalloc_coherent(). From Sriharsha Basavapatna. 7) Fix use-after-free in MPLS and ipv6, from Robert Shearman. 8) Missing netif_napi_del() calls in cleanup paths of b44 driver, from Hauke Mehrtens. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: net: replace last open coded skb_orphan_frags with function call net: bcmgenet: power on MII block for all MII modes ipv6: Fix protocol resubmission ipv6: fix possible use after free of dev stats b44: call netif_napi_del() bridge: disable softirqs around br_fdb_update to avoid lockup Revert "bridge: use _bh spinlock variant for br_fdb_update to avoid lockup" mpls: fix possible use after free of device be2net: Replace dma/pci_alloc_coherent() calls with dma_zalloc_coherent() bridge: use _bh spinlock variant for br_fdb_update to avoid lockup amd-xgbe: Use disable_irq_nosync from within timer function rhashtable: add missing import <linux/export.h> i40e: Make sure to be in VEB mode if SRIOV is enabled at probe i40e: start up in VEPA mode by default i40e/i40evf: Fix mixed size frags and linearization ipv4/udp: Verify multicast group is ours in upd_v4_early_demux() openvswitch: disable LRO s390/bpf: fix bpf frame pointer setup s390/bpf: fix stack allocation
-rw-r--r--arch/s390/net/bpf_jit.h4
-rw-r--r--arch/s390/net/bpf_jit_comp.c11
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c2
-rw-r--r--drivers/net/ethernet/broadcom/b44.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c12
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c87
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c18
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c25
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c10
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c25
-rw-r--r--lib/rhashtable.c1
-rw-r--r--net/bridge/br_fdb.c2
-rw-r--r--net/core/dev.c11
-rw-r--r--net/ipv4/udp.c18
-rw-r--r--net/ipv6/addrconf_core.c11
-rw-r--r--net/ipv6/ip6_input.c8
-rw-r--r--net/mpls/af_mpls.c2
-rw-r--r--net/mpls/internal.h1
-rw-r--r--net/openvswitch/vport-netdev.c1
23 files changed, 196 insertions, 115 deletions
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
index ba8593a515ba..de156ba3bd71 100644
--- a/arch/s390/net/bpf_jit.h
+++ b/arch/s390/net/bpf_jit.h
@@ -48,7 +48,9 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
48 * We get 160 bytes stack space from calling function, but only use 48 * We get 160 bytes stack space from calling function, but only use
49 * 11 * 8 byte (old backchain + r15 - r6) for storing registers. 49 * 11 * 8 byte (old backchain + r15 - r6) for storing registers.
50 */ 50 */
51#define STK_OFF (MAX_BPF_STACK + 8 + 4 + 4 + (160 - 11 * 8)) 51#define STK_SPACE (MAX_BPF_STACK + 8 + 4 + 4 + 160)
52#define STK_160_UNUSED (160 - 11 * 8)
53#define STK_OFF (STK_SPACE - STK_160_UNUSED)
52#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */ 54#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */
53#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */ 55#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */
54 56
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 20c146d1251a..55423d8be580 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -384,13 +384,16 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
384 } 384 }
385 /* Setup stack and backchain */ 385 /* Setup stack and backchain */
386 if (jit->seen & SEEN_STACK) { 386 if (jit->seen & SEEN_STACK) {
387 /* lgr %bfp,%r15 (BPF frame pointer) */ 387 if (jit->seen & SEEN_FUNC)
388 EMIT4(0xb9040000, BPF_REG_FP, REG_15); 388 /* lgr %w1,%r15 (backchain) */
389 EMIT4(0xb9040000, REG_W1, REG_15);
390 /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
391 EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
389 /* aghi %r15,-STK_OFF */ 392 /* aghi %r15,-STK_OFF */
390 EMIT4_IMM(0xa70b0000, REG_15, -STK_OFF); 393 EMIT4_IMM(0xa70b0000, REG_15, -STK_OFF);
391 if (jit->seen & SEEN_FUNC) 394 if (jit->seen & SEEN_FUNC)
392 /* stg %bfp,152(%r15) (backchain) */ 395 /* stg %w1,152(%r15) (backchain) */
393 EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_FP, REG_0, 396 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
394 REG_15, 152); 397 REG_15, 152);
395 } 398 }
396 /* 399 /*
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index db84ddcfec84..9fd6c69a8bac 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -423,7 +423,7 @@ static void xgbe_tx_timer(unsigned long data)
423 if (napi_schedule_prep(napi)) { 423 if (napi_schedule_prep(napi)) {
424 /* Disable Tx and Rx interrupts */ 424 /* Disable Tx and Rx interrupts */
425 if (pdata->per_channel_irq) 425 if (pdata->per_channel_irq)
426 disable_irq(channel->dma_irq); 426 disable_irq_nosync(channel->dma_irq);
427 else 427 else
428 xgbe_disable_rx_tx_ints(pdata); 428 xgbe_disable_rx_tx_ints(pdata);
429 429
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 77363d680532..a3b1c07ae0af 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2464,6 +2464,7 @@ err_out_powerdown:
2464 ssb_bus_may_powerdown(sdev->bus); 2464 ssb_bus_may_powerdown(sdev->bus);
2465 2465
2466err_out_free_dev: 2466err_out_free_dev:
2467 netif_napi_del(&bp->napi);
2467 free_netdev(dev); 2468 free_netdev(dev);
2468 2469
2469out: 2470out:
@@ -2480,6 +2481,7 @@ static void b44_remove_one(struct ssb_device *sdev)
2480 b44_unregister_phy_one(bp); 2481 b44_unregister_phy_one(bp);
2481 ssb_device_disable(sdev, 0); 2482 ssb_device_disable(sdev, 0);
2482 ssb_bus_may_powerdown(sdev->bus); 2483 ssb_bus_may_powerdown(sdev->bus);
2484 netif_napi_del(&bp->napi);
2483 free_netdev(dev); 2485 free_netdev(dev);
2484 ssb_pcihost_set_power_state(sdev, PCI_D3hot); 2486 ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2485 ssb_set_drvdata(sdev, NULL); 2487 ssb_set_drvdata(sdev, NULL);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index e7651b3c6c57..420949cc55aa 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -299,9 +299,6 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
299 phy_name = "external RGMII (no delay)"; 299 phy_name = "external RGMII (no delay)";
300 else 300 else
301 phy_name = "external RGMII (TX delay)"; 301 phy_name = "external RGMII (TX delay)";
302 reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
303 reg |= RGMII_MODE_EN | id_mode_dis;
304 bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
305 bcmgenet_sys_writel(priv, 302 bcmgenet_sys_writel(priv,
306 PORT_MODE_EXT_GPHY, SYS_PORT_CTRL); 303 PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
307 break; 304 break;
@@ -310,6 +307,15 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
310 return -EINVAL; 307 return -EINVAL;
311 } 308 }
312 309
310 /* This is an external PHY (xMII), so we need to enable the RGMII
311 * block for the interface to work
312 */
313 if (priv->ext_phy) {
314 reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
315 reg |= RGMII_MODE_EN | id_mode_dis;
316 bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
317 }
318
313 if (init) 319 if (init)
314 dev_info(kdev, "configuring instance for %s\n", phy_name); 320 dev_info(kdev, "configuring instance for %s\n", phy_name);
315 321
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index fb140faeafb1..c5e1d0ac75f9 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1720,9 +1720,9 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1720 total_size = buf_len; 1720 total_size = buf_len;
1721 1721
1722 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; 1722 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1723 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev, 1723 get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
1724 get_fat_cmd.size, 1724 get_fat_cmd.size,
1725 &get_fat_cmd.dma); 1725 &get_fat_cmd.dma, GFP_ATOMIC);
1726 if (!get_fat_cmd.va) { 1726 if (!get_fat_cmd.va) {
1727 dev_err(&adapter->pdev->dev, 1727 dev_err(&adapter->pdev->dev,
1728 "Memory allocation failure while reading FAT data\n"); 1728 "Memory allocation failure while reading FAT data\n");
@@ -1767,8 +1767,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1767 log_offset += buf_size; 1767 log_offset += buf_size;
1768 } 1768 }
1769err: 1769err:
1770 pci_free_consistent(adapter->pdev, get_fat_cmd.size, 1770 dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
1771 get_fat_cmd.va, get_fat_cmd.dma); 1771 get_fat_cmd.va, get_fat_cmd.dma);
1772 spin_unlock_bh(&adapter->mcc_lock); 1772 spin_unlock_bh(&adapter->mcc_lock);
1773 return status; 1773 return status;
1774} 1774}
@@ -2215,12 +2215,12 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2215 return -EINVAL; 2215 return -EINVAL;
2216 2216
2217 cmd.size = sizeof(struct be_cmd_resp_port_type); 2217 cmd.size = sizeof(struct be_cmd_resp_port_type);
2218 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 2218 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2219 GFP_ATOMIC);
2219 if (!cmd.va) { 2220 if (!cmd.va) {
2220 dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); 2221 dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
2221 return -ENOMEM; 2222 return -ENOMEM;
2222 } 2223 }
2223 memset(cmd.va, 0, cmd.size);
2224 2224
2225 spin_lock_bh(&adapter->mcc_lock); 2225 spin_lock_bh(&adapter->mcc_lock);
2226 2226
@@ -2245,7 +2245,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2245 } 2245 }
2246err: 2246err:
2247 spin_unlock_bh(&adapter->mcc_lock); 2247 spin_unlock_bh(&adapter->mcc_lock);
2248 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 2248 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2249 return status; 2249 return status;
2250} 2250}
2251 2251
@@ -2720,7 +2720,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
2720 goto err; 2720 goto err;
2721 } 2721 }
2722 cmd.size = sizeof(struct be_cmd_req_get_phy_info); 2722 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2723 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 2723 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2724 GFP_ATOMIC);
2724 if (!cmd.va) { 2725 if (!cmd.va) {
2725 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 2726 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2726 status = -ENOMEM; 2727 status = -ENOMEM;
@@ -2754,7 +2755,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
2754 BE_SUPPORTED_SPEED_1GBPS; 2755 BE_SUPPORTED_SPEED_1GBPS;
2755 } 2756 }
2756 } 2757 }
2757 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 2758 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2758err: 2759err:
2759 spin_unlock_bh(&adapter->mcc_lock); 2760 spin_unlock_bh(&adapter->mcc_lock);
2760 return status; 2761 return status;
@@ -2805,8 +2806,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2805 2806
2806 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); 2807 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2807 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); 2808 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2808 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, 2809 attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
2809 &attribs_cmd.dma); 2810 attribs_cmd.size,
2811 &attribs_cmd.dma, GFP_ATOMIC);
2810 if (!attribs_cmd.va) { 2812 if (!attribs_cmd.va) {
2811 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 2813 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
2812 status = -ENOMEM; 2814 status = -ENOMEM;
@@ -2833,8 +2835,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2833err: 2835err:
2834 mutex_unlock(&adapter->mbox_lock); 2836 mutex_unlock(&adapter->mbox_lock);
2835 if (attribs_cmd.va) 2837 if (attribs_cmd.va)
2836 pci_free_consistent(adapter->pdev, attribs_cmd.size, 2838 dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
2837 attribs_cmd.va, attribs_cmd.dma); 2839 attribs_cmd.va, attribs_cmd.dma);
2838 return status; 2840 return status;
2839} 2841}
2840 2842
@@ -2972,9 +2974,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2972 2974
2973 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); 2975 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2974 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); 2976 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2975 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev, 2977 get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
2976 get_mac_list_cmd.size, 2978 get_mac_list_cmd.size,
2977 &get_mac_list_cmd.dma); 2979 &get_mac_list_cmd.dma,
2980 GFP_ATOMIC);
2978 2981
2979 if (!get_mac_list_cmd.va) { 2982 if (!get_mac_list_cmd.va) {
2980 dev_err(&adapter->pdev->dev, 2983 dev_err(&adapter->pdev->dev,
@@ -3047,8 +3050,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
3047 3050
3048out: 3051out:
3049 spin_unlock_bh(&adapter->mcc_lock); 3052 spin_unlock_bh(&adapter->mcc_lock);
3050 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size, 3053 dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
3051 get_mac_list_cmd.va, get_mac_list_cmd.dma); 3054 get_mac_list_cmd.va, get_mac_list_cmd.dma);
3052 return status; 3055 return status;
3053} 3056}
3054 3057
@@ -3101,8 +3104,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
3101 3104
3102 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3105 memset(&cmd, 0, sizeof(struct be_dma_mem));
3103 cmd.size = sizeof(struct be_cmd_req_set_mac_list); 3106 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
3104 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, 3107 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3105 &cmd.dma, GFP_KERNEL); 3108 GFP_KERNEL);
3106 if (!cmd.va) 3109 if (!cmd.va)
3107 return -ENOMEM; 3110 return -ENOMEM;
3108 3111
@@ -3291,7 +3294,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3291 3294
3292 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3295 memset(&cmd, 0, sizeof(struct be_dma_mem));
3293 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); 3296 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
3294 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3297 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3298 GFP_ATOMIC);
3295 if (!cmd.va) { 3299 if (!cmd.va) {
3296 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 3300 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
3297 status = -ENOMEM; 3301 status = -ENOMEM;
@@ -3326,7 +3330,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3326err: 3330err:
3327 mutex_unlock(&adapter->mbox_lock); 3331 mutex_unlock(&adapter->mbox_lock);
3328 if (cmd.va) 3332 if (cmd.va)
3329 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3333 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3334 cmd.dma);
3330 return status; 3335 return status;
3331 3336
3332} 3337}
@@ -3340,8 +3345,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
3340 3345
3341 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 3346 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3342 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 3347 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3343 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, 3348 extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
3344 &extfat_cmd.dma); 3349 extfat_cmd.size, &extfat_cmd.dma,
3350 GFP_ATOMIC);
3345 if (!extfat_cmd.va) 3351 if (!extfat_cmd.va)
3346 return -ENOMEM; 3352 return -ENOMEM;
3347 3353
@@ -3363,8 +3369,8 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
3363 3369
3364 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); 3370 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
3365err: 3371err:
3366 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, 3372 dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
3367 extfat_cmd.dma); 3373 extfat_cmd.dma);
3368 return status; 3374 return status;
3369} 3375}
3370 3376
@@ -3377,8 +3383,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
3377 3383
3378 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 3384 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3379 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 3385 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3380 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, 3386 extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
3381 &extfat_cmd.dma); 3387 extfat_cmd.size, &extfat_cmd.dma,
3388 GFP_ATOMIC);
3382 3389
3383 if (!extfat_cmd.va) { 3390 if (!extfat_cmd.va) {
3384 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", 3391 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
@@ -3396,8 +3403,8 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
3396 level = cfgs->module[0].trace_lvl[j].dbg_lvl; 3403 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3397 } 3404 }
3398 } 3405 }
3399 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, 3406 dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
3400 extfat_cmd.dma); 3407 extfat_cmd.dma);
3401err: 3408err:
3402 return level; 3409 return level;
3403} 3410}
@@ -3595,7 +3602,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
3595 3602
3596 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3603 memset(&cmd, 0, sizeof(struct be_dma_mem));
3597 cmd.size = sizeof(struct be_cmd_resp_get_func_config); 3604 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
3598 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3605 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3606 GFP_ATOMIC);
3599 if (!cmd.va) { 3607 if (!cmd.va) {
3600 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 3608 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3601 status = -ENOMEM; 3609 status = -ENOMEM;
@@ -3635,7 +3643,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
3635err: 3643err:
3636 mutex_unlock(&adapter->mbox_lock); 3644 mutex_unlock(&adapter->mbox_lock);
3637 if (cmd.va) 3645 if (cmd.va)
3638 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3646 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3647 cmd.dma);
3639 return status; 3648 return status;
3640} 3649}
3641 3650
@@ -3656,7 +3665,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
3656 3665
3657 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3666 memset(&cmd, 0, sizeof(struct be_dma_mem));
3658 cmd.size = sizeof(struct be_cmd_resp_get_profile_config); 3667 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3659 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3668 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3669 GFP_ATOMIC);
3660 if (!cmd.va) 3670 if (!cmd.va)
3661 return -ENOMEM; 3671 return -ENOMEM;
3662 3672
@@ -3702,7 +3712,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
3702 res->vf_if_cap_flags = vf_res->cap_flags; 3712 res->vf_if_cap_flags = vf_res->cap_flags;
3703err: 3713err:
3704 if (cmd.va) 3714 if (cmd.va)
3705 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3715 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3716 cmd.dma);
3706 return status; 3717 return status;
3707} 3718}
3708 3719
@@ -3717,7 +3728,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
3717 3728
3718 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3729 memset(&cmd, 0, sizeof(struct be_dma_mem));
3719 cmd.size = sizeof(struct be_cmd_req_set_profile_config); 3730 cmd.size = sizeof(struct be_cmd_req_set_profile_config);
3720 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3731 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3732 GFP_ATOMIC);
3721 if (!cmd.va) 3733 if (!cmd.va)
3722 return -ENOMEM; 3734 return -ENOMEM;
3723 3735
@@ -3733,7 +3745,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
3733 status = be_cmd_notify_wait(adapter, &wrb); 3745 status = be_cmd_notify_wait(adapter, &wrb);
3734 3746
3735 if (cmd.va) 3747 if (cmd.va)
3736 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3748 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3749 cmd.dma);
3737 return status; 3750 return status;
3738} 3751}
3739 3752
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index b765c24625bf..2835dee5dc39 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -264,8 +264,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
264 int status = 0; 264 int status = 0;
265 265
266 read_cmd.size = LANCER_READ_FILE_CHUNK; 266 read_cmd.size = LANCER_READ_FILE_CHUNK;
267 read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size, 267 read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
268 &read_cmd.dma); 268 &read_cmd.dma, GFP_ATOMIC);
269 269
270 if (!read_cmd.va) { 270 if (!read_cmd.va) {
271 dev_err(&adapter->pdev->dev, 271 dev_err(&adapter->pdev->dev,
@@ -289,8 +289,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
289 break; 289 break;
290 } 290 }
291 } 291 }
292 pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va, 292 dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va,
293 read_cmd.dma); 293 read_cmd.dma);
294 294
295 return status; 295 return status;
296} 296}
@@ -818,8 +818,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
818 }; 818 };
819 819
820 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); 820 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
821 ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size, 821 ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
822 &ddrdma_cmd.dma, GFP_KERNEL); 822 ddrdma_cmd.size, &ddrdma_cmd.dma,
823 GFP_KERNEL);
823 if (!ddrdma_cmd.va) 824 if (!ddrdma_cmd.va)
824 return -ENOMEM; 825 return -ENOMEM;
825 826
@@ -941,8 +942,9 @@ static int be_read_eeprom(struct net_device *netdev,
941 942
942 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); 943 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
943 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); 944 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
944 eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size, 945 eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
945 &eeprom_cmd.dma, GFP_KERNEL); 946 eeprom_cmd.size, &eeprom_cmd.dma,
947 GFP_KERNEL);
946 948
947 if (!eeprom_cmd.va) 949 if (!eeprom_cmd.va)
948 return -ENOMEM; 950 return -ENOMEM;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6f9ffb9026cd..e43cc8a73ea7 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4605,8 +4605,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
4605 4605
4606 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) 4606 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4607 + LANCER_FW_DOWNLOAD_CHUNK; 4607 + LANCER_FW_DOWNLOAD_CHUNK;
4608 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, 4608 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
4609 &flash_cmd.dma, GFP_KERNEL); 4609 &flash_cmd.dma, GFP_KERNEL);
4610 if (!flash_cmd.va) 4610 if (!flash_cmd.va)
4611 return -ENOMEM; 4611 return -ENOMEM;
4612 4612
@@ -4739,8 +4739,8 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4739 } 4739 }
4740 4740
4741 flash_cmd.size = sizeof(struct be_cmd_write_flashrom); 4741 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4742 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, 4742 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4743 GFP_KERNEL); 4743 GFP_KERNEL);
4744 if (!flash_cmd.va) 4744 if (!flash_cmd.va)
4745 return -ENOMEM; 4745 return -ENOMEM;
4746 4746
@@ -5291,16 +5291,15 @@ static int be_drv_init(struct be_adapter *adapter)
5291 int status = 0; 5291 int status = 0;
5292 5292
5293 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 5293 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
5294 mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size, 5294 mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
5295 &mbox_mem_alloc->dma, 5295 &mbox_mem_alloc->dma,
5296 GFP_KERNEL); 5296 GFP_KERNEL);
5297 if (!mbox_mem_alloc->va) 5297 if (!mbox_mem_alloc->va)
5298 return -ENOMEM; 5298 return -ENOMEM;
5299 5299
5300 mbox_mem_align->size = sizeof(struct be_mcc_mailbox); 5300 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
5301 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); 5301 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
5302 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); 5302 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
5303 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
5304 5303
5305 rx_filter->size = sizeof(struct be_cmd_req_rx_filter); 5304 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
5306 rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, 5305 rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 33c35d3b7420..5d47307121ab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -317,6 +317,7 @@ struct i40e_pf {
317#endif 317#endif
318#define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28) 318#define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28)
319#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29) 319#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29)
320#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40)
320 321
321 /* tracks features that get auto disabled by errors */ 322 /* tracks features that get auto disabled by errors */
322 u64 auto_disable_flags; 323 u64 auto_disable_flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 34170eabca7d..da0faf478af0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1021,6 +1021,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1021 goto command_write_done; 1021 goto command_write_done;
1022 } 1022 }
1023 1023
1024 /* By default we are in VEPA mode, if this is the first VF/VMDq
1025 * VSI to be added switch to VEB mode.
1026 */
1027 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1028 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1029 i40e_do_reset_safe(pf,
1030 BIT_ULL(__I40E_PF_RESET_REQUESTED));
1031 }
1032
1024 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0); 1033 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
1025 if (vsi) 1034 if (vsi)
1026 dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n", 1035 dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a54c14491e3b..5b5bea159bd5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -6097,6 +6097,10 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
6097 if (ret) 6097 if (ret)
6098 goto end_reconstitute; 6098 goto end_reconstitute;
6099 6099
6100 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
6101 veb->bridge_mode = BRIDGE_MODE_VEB;
6102 else
6103 veb->bridge_mode = BRIDGE_MODE_VEPA;
6100 i40e_config_bridge_mode(veb); 6104 i40e_config_bridge_mode(veb);
6101 6105
6102 /* create the remaining VSIs attached to this VEB */ 6106 /* create the remaining VSIs attached to this VEB */
@@ -8031,7 +8035,12 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
8031 } else if (mode != veb->bridge_mode) { 8035 } else if (mode != veb->bridge_mode) {
8032 /* Existing HW bridge but different mode needs reset */ 8036 /* Existing HW bridge but different mode needs reset */
8033 veb->bridge_mode = mode; 8037 veb->bridge_mode = mode;
8034 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); 8038 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
8039 if (mode == BRIDGE_MODE_VEB)
8040 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
8041 else
8042 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8043 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8035 break; 8044 break;
8036 } 8045 }
8037 } 8046 }
@@ -8343,11 +8352,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
8343 ctxt.uplink_seid = vsi->uplink_seid; 8352 ctxt.uplink_seid = vsi->uplink_seid;
8344 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 8353 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
8345 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 8354 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8346 if (i40e_is_vsi_uplink_mode_veb(vsi)) { 8355 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
8356 (i40e_is_vsi_uplink_mode_veb(vsi))) {
8347 ctxt.info.valid_sections |= 8357 ctxt.info.valid_sections |=
8348 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 8358 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8349 ctxt.info.switch_id = 8359 ctxt.info.switch_id =
8350 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 8360 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8351 } 8361 }
8352 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 8362 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
8353 break; 8363 break;
@@ -8746,6 +8756,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
8746 __func__); 8756 __func__);
8747 return NULL; 8757 return NULL;
8748 } 8758 }
8759 /* We come up by default in VEPA mode if SRIOV is not
8760 * already enabled, in which case we can't force VEPA
8761 * mode.
8762 */
8763 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
8764 veb->bridge_mode = BRIDGE_MODE_VEPA;
8765 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8766 }
8749 i40e_config_bridge_mode(veb); 8767 i40e_config_bridge_mode(veb);
8750 } 8768 }
8751 for (i = 0; i < I40E_MAX_VEB && !veb; i++) { 8769 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
@@ -9856,6 +9874,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9856 goto err_switch_setup; 9874 goto err_switch_setup;
9857 } 9875 }
9858 9876
9877#ifdef CONFIG_PCI_IOV
9878 /* prep for VF support */
9879 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
9880 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
9881 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
9882 if (pci_num_vf(pdev))
9883 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
9884 }
9885#endif
9859 err = i40e_setup_pf_switch(pf, false); 9886 err = i40e_setup_pf_switch(pf, false);
9860 if (err) { 9887 if (err) {
9861 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); 9888 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 4bd3a80aba82..9d95042d5a0f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2410,14 +2410,12 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2410 * i40e_chk_linearize - Check if there are more than 8 fragments per packet 2410 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
2411 * @skb: send buffer 2411 * @skb: send buffer
2412 * @tx_flags: collected send information 2412 * @tx_flags: collected send information
2413 * @hdr_len: size of the packet header
2414 * 2413 *
2415 * Note: Our HW can't scatter-gather more than 8 fragments to build 2414 * Note: Our HW can't scatter-gather more than 8 fragments to build
2416 * a packet on the wire and so we need to figure out the cases where we 2415 * a packet on the wire and so we need to figure out the cases where we
2417 * need to linearize the skb. 2416 * need to linearize the skb.
2418 **/ 2417 **/
2419static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, 2418static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
2420 const u8 hdr_len)
2421{ 2419{
2422 struct skb_frag_struct *frag; 2420 struct skb_frag_struct *frag;
2423 bool linearize = false; 2421 bool linearize = false;
@@ -2429,7 +2427,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
2429 gso_segs = skb_shinfo(skb)->gso_segs; 2427 gso_segs = skb_shinfo(skb)->gso_segs;
2430 2428
2431 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { 2429 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
2432 u16 j = 1; 2430 u16 j = 0;
2433 2431
2434 if (num_frags < (I40E_MAX_BUFFER_TXD)) 2432 if (num_frags < (I40E_MAX_BUFFER_TXD))
2435 goto linearize_chk_done; 2433 goto linearize_chk_done;
@@ -2440,21 +2438,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
2440 goto linearize_chk_done; 2438 goto linearize_chk_done;
2441 } 2439 }
2442 frag = &skb_shinfo(skb)->frags[0]; 2440 frag = &skb_shinfo(skb)->frags[0];
2443 size = hdr_len;
2444 /* we might still have more fragments per segment */ 2441 /* we might still have more fragments per segment */
2445 do { 2442 do {
2446 size += skb_frag_size(frag); 2443 size += skb_frag_size(frag);
2447 frag++; j++; 2444 frag++; j++;
2445 if ((size >= skb_shinfo(skb)->gso_size) &&
2446 (j < I40E_MAX_BUFFER_TXD)) {
2447 size = (size % skb_shinfo(skb)->gso_size);
2448 j = (size) ? 1 : 0;
2449 }
2448 if (j == I40E_MAX_BUFFER_TXD) { 2450 if (j == I40E_MAX_BUFFER_TXD) {
2449 if (size < skb_shinfo(skb)->gso_size) { 2451 linearize = true;
2450 linearize = true; 2452 break;
2451 break;
2452 }
2453 j = 1;
2454 size -= skb_shinfo(skb)->gso_size;
2455 if (size)
2456 j++;
2457 size += hdr_len;
2458 } 2453 }
2459 num_frags--; 2454 num_frags--;
2460 } while (num_frags); 2455 } while (num_frags);
@@ -2724,7 +2719,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2724 if (tsyn) 2719 if (tsyn)
2725 tx_flags |= I40E_TX_FLAGS_TSYN; 2720 tx_flags |= I40E_TX_FLAGS_TSYN;
2726 2721
2727 if (i40e_chk_linearize(skb, tx_flags, hdr_len)) 2722 if (i40e_chk_linearize(skb, tx_flags))
2728 if (skb_linearize(skb)) 2723 if (skb_linearize(skb))
2729 goto out_drop; 2724 goto out_drop;
2730 2725
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 78d1c4ff565e..4e9376da0518 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1018,11 +1018,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1018{ 1018{
1019 struct i40e_pf *pf = pci_get_drvdata(pdev); 1019 struct i40e_pf *pf = pci_get_drvdata(pdev);
1020 1020
1021 if (num_vfs) 1021 if (num_vfs) {
1022 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1023 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1024 i40e_do_reset_safe(pf,
1025 BIT_ULL(__I40E_PF_RESET_REQUESTED));
1026 }
1022 return i40e_pci_sriov_enable(pdev, num_vfs); 1027 return i40e_pci_sriov_enable(pdev, num_vfs);
1028 }
1023 1029
1024 if (!pci_vfs_assigned(pf->pdev)) { 1030 if (!pci_vfs_assigned(pf->pdev)) {
1025 i40e_free_vfs(pf); 1031 i40e_free_vfs(pf);
1032 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1033 i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
1026 } else { 1034 } else {
1027 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); 1035 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1028 return -EINVAL; 1036 return -EINVAL;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index b077e02a0cc7..458fbb421090 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1619,14 +1619,12 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1619 * i40e_chk_linearize - Check if there are more than 8 fragments per packet 1619 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
1620 * @skb: send buffer 1620 * @skb: send buffer
1621 * @tx_flags: collected send information 1621 * @tx_flags: collected send information
1622 * @hdr_len: size of the packet header
1623 * 1622 *
1624 * Note: Our HW can't scatter-gather more than 8 fragments to build 1623 * Note: Our HW can't scatter-gather more than 8 fragments to build
1625 * a packet on the wire and so we need to figure out the cases where we 1624 * a packet on the wire and so we need to figure out the cases where we
1626 * need to linearize the skb. 1625 * need to linearize the skb.
1627 **/ 1626 **/
1628static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, 1627static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
1629 const u8 hdr_len)
1630{ 1628{
1631 struct skb_frag_struct *frag; 1629 struct skb_frag_struct *frag;
1632 bool linearize = false; 1630 bool linearize = false;
@@ -1638,7 +1636,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
1638 gso_segs = skb_shinfo(skb)->gso_segs; 1636 gso_segs = skb_shinfo(skb)->gso_segs;
1639 1637
1640 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { 1638 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
1641 u16 j = 1; 1639 u16 j = 0;
1642 1640
1643 if (num_frags < (I40E_MAX_BUFFER_TXD)) 1641 if (num_frags < (I40E_MAX_BUFFER_TXD))
1644 goto linearize_chk_done; 1642 goto linearize_chk_done;
@@ -1649,21 +1647,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
1649 goto linearize_chk_done; 1647 goto linearize_chk_done;
1650 } 1648 }
1651 frag = &skb_shinfo(skb)->frags[0]; 1649 frag = &skb_shinfo(skb)->frags[0];
1652 size = hdr_len;
1653 /* we might still have more fragments per segment */ 1650 /* we might still have more fragments per segment */
1654 do { 1651 do {
1655 size += skb_frag_size(frag); 1652 size += skb_frag_size(frag);
1656 frag++; j++; 1653 frag++; j++;
1654 if ((size >= skb_shinfo(skb)->gso_size) &&
1655 (j < I40E_MAX_BUFFER_TXD)) {
1656 size = (size % skb_shinfo(skb)->gso_size);
1657 j = (size) ? 1 : 0;
1658 }
1657 if (j == I40E_MAX_BUFFER_TXD) { 1659 if (j == I40E_MAX_BUFFER_TXD) {
1658 if (size < skb_shinfo(skb)->gso_size) { 1660 linearize = true;
1659 linearize = true; 1661 break;
1660 break;
1661 }
1662 j = 1;
1663 size -= skb_shinfo(skb)->gso_size;
1664 if (size)
1665 j++;
1666 size += hdr_len;
1667 } 1662 }
1668 num_frags--; 1663 num_frags--;
1669 } while (num_frags); 1664 } while (num_frags);
@@ -1950,7 +1945,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
1950 else if (tso) 1945 else if (tso)
1951 tx_flags |= I40E_TX_FLAGS_TSO; 1946 tx_flags |= I40E_TX_FLAGS_TSO;
1952 1947
1953 if (i40e_chk_linearize(skb, tx_flags, hdr_len)) 1948 if (i40e_chk_linearize(skb, tx_flags))
1954 if (skb_linearize(skb)) 1949 if (skb_linearize(skb))
1955 goto out_drop; 1950 goto out_drop;
1956 1951
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 4396434e4715..8609378e6505 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -26,6 +26,7 @@
26#include <linux/random.h> 26#include <linux/random.h>
27#include <linux/rhashtable.h> 27#include <linux/rhashtable.h>
28#include <linux/err.h> 28#include <linux/err.h>
29#include <linux/export.h>
29 30
30#define HASH_DEFAULT_SIZE 64UL 31#define HASH_DEFAULT_SIZE 64UL
31#define HASH_MIN_SIZE 4U 32#define HASH_MIN_SIZE 4U
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index e0670d7054f9..659fb96672e4 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -796,9 +796,11 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
796 int err = 0; 796 int err = 0;
797 797
798 if (ndm->ndm_flags & NTF_USE) { 798 if (ndm->ndm_flags & NTF_USE) {
799 local_bh_disable();
799 rcu_read_lock(); 800 rcu_read_lock();
800 br_fdb_update(p->br, p, addr, vid, true); 801 br_fdb_update(p->br, p, addr, vid, true);
801 rcu_read_unlock(); 802 rcu_read_unlock();
803 local_bh_enable();
802 } else { 804 } else {
803 spin_lock_bh(&p->br->hash_lock); 805 spin_lock_bh(&p->br->hash_lock);
804 err = fdb_add_entry(p, addr, ndm->ndm_state, 806 err = fdb_add_entry(p, addr, ndm->ndm_state,
diff --git a/net/core/dev.c b/net/core/dev.c
index 2c1c67fad64d..aa82f9ab6a36 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1718,15 +1718,8 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable);
1718 1718
1719int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 1719int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1720{ 1720{
1721 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 1721 if (skb_orphan_frags(skb, GFP_ATOMIC) ||
1722 if (skb_copy_ubufs(skb, GFP_ATOMIC)) { 1722 unlikely(!is_skb_forwardable(dev, skb))) {
1723 atomic_long_inc(&dev->rx_dropped);
1724 kfree_skb(skb);
1725 return NET_RX_DROP;
1726 }
1727 }
1728
1729 if (unlikely(!is_skb_forwardable(dev, skb))) {
1730 atomic_long_inc(&dev->rx_dropped); 1723 atomic_long_inc(&dev->rx_dropped);
1731 kfree_skb(skb); 1724 kfree_skb(skb);
1732 return NET_RX_DROP; 1725 return NET_RX_DROP;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1c92ea67baef..83aa604f9273 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -90,6 +90,7 @@
90#include <linux/socket.h> 90#include <linux/socket.h>
91#include <linux/sockios.h> 91#include <linux/sockios.h>
92#include <linux/igmp.h> 92#include <linux/igmp.h>
93#include <linux/inetdevice.h>
93#include <linux/in.h> 94#include <linux/in.h>
94#include <linux/errno.h> 95#include <linux/errno.h>
95#include <linux/timer.h> 96#include <linux/timer.h>
@@ -1960,6 +1961,7 @@ void udp_v4_early_demux(struct sk_buff *skb)
1960 struct sock *sk; 1961 struct sock *sk;
1961 struct dst_entry *dst; 1962 struct dst_entry *dst;
1962 int dif = skb->dev->ifindex; 1963 int dif = skb->dev->ifindex;
1964 int ours;
1963 1965
1964 /* validate the packet */ 1966 /* validate the packet */
1965 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) 1967 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
@@ -1969,14 +1971,24 @@ void udp_v4_early_demux(struct sk_buff *skb)
1969 uh = udp_hdr(skb); 1971 uh = udp_hdr(skb);
1970 1972
1971 if (skb->pkt_type == PACKET_BROADCAST || 1973 if (skb->pkt_type == PACKET_BROADCAST ||
1972 skb->pkt_type == PACKET_MULTICAST) 1974 skb->pkt_type == PACKET_MULTICAST) {
1975 struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
1976
1977 if (!in_dev)
1978 return;
1979
1980 ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
1981 iph->protocol);
1982 if (!ours)
1983 return;
1973 sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, 1984 sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
1974 uh->source, iph->saddr, dif); 1985 uh->source, iph->saddr, dif);
1975 else if (skb->pkt_type == PACKET_HOST) 1986 } else if (skb->pkt_type == PACKET_HOST) {
1976 sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr, 1987 sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
1977 uh->source, iph->saddr, dif); 1988 uh->source, iph->saddr, dif);
1978 else 1989 } else {
1979 return; 1990 return;
1991 }
1980 1992
1981 if (!sk) 1993 if (!sk)
1982 return; 1994 return;
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index d873ceea86e6..ca09bf49ac68 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -133,6 +133,14 @@ static void snmp6_free_dev(struct inet6_dev *idev)
133 free_percpu(idev->stats.ipv6); 133 free_percpu(idev->stats.ipv6);
134} 134}
135 135
136static void in6_dev_finish_destroy_rcu(struct rcu_head *head)
137{
138 struct inet6_dev *idev = container_of(head, struct inet6_dev, rcu);
139
140 snmp6_free_dev(idev);
141 kfree(idev);
142}
143
136/* Nobody refers to this device, we may destroy it. */ 144/* Nobody refers to this device, we may destroy it. */
137 145
138void in6_dev_finish_destroy(struct inet6_dev *idev) 146void in6_dev_finish_destroy(struct inet6_dev *idev)
@@ -151,7 +159,6 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
151 pr_warn("Freeing alive inet6 device %p\n", idev); 159 pr_warn("Freeing alive inet6 device %p\n", idev);
152 return; 160 return;
153 } 161 }
154 snmp6_free_dev(idev); 162 call_rcu(&idev->rcu, in6_dev_finish_destroy_rcu);
155 kfree_rcu(idev, rcu);
156} 163}
157EXPORT_SYMBOL(in6_dev_finish_destroy); 164EXPORT_SYMBOL(in6_dev_finish_destroy);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index f2e464eba5ef..41a73da371a9 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -212,13 +212,13 @@ static int ip6_input_finish(struct sock *sk, struct sk_buff *skb)
212 */ 212 */
213 213
214 rcu_read_lock(); 214 rcu_read_lock();
215resubmit:
216 idev = ip6_dst_idev(skb_dst(skb)); 215 idev = ip6_dst_idev(skb_dst(skb));
217 if (!pskb_pull(skb, skb_transport_offset(skb))) 216 if (!pskb_pull(skb, skb_transport_offset(skb)))
218 goto discard; 217 goto discard;
219 nhoff = IP6CB(skb)->nhoff; 218 nhoff = IP6CB(skb)->nhoff;
220 nexthdr = skb_network_header(skb)[nhoff]; 219 nexthdr = skb_network_header(skb)[nhoff];
221 220
221resubmit:
222 raw = raw6_local_deliver(skb, nexthdr); 222 raw = raw6_local_deliver(skb, nexthdr);
223 ipprot = rcu_dereference(inet6_protos[nexthdr]); 223 ipprot = rcu_dereference(inet6_protos[nexthdr]);
224 if (ipprot) { 224 if (ipprot) {
@@ -246,10 +246,12 @@ resubmit:
246 goto discard; 246 goto discard;
247 247
248 ret = ipprot->handler(skb); 248 ret = ipprot->handler(skb);
249 if (ret > 0) 249 if (ret < 0) {
250 nexthdr = -ret;
250 goto resubmit; 251 goto resubmit;
251 else if (ret == 0) 252 } else if (ret == 0) {
252 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS); 253 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
254 }
253 } else { 255 } else {
254 if (!raw) { 256 if (!raw) {
255 if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { 257 if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 7b3f732269e4..bff427f31924 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -541,7 +541,7 @@ static void mpls_ifdown(struct net_device *dev)
541 541
542 RCU_INIT_POINTER(dev->mpls_ptr, NULL); 542 RCU_INIT_POINTER(dev->mpls_ptr, NULL);
543 543
544 kfree(mdev); 544 kfree_rcu(mdev, rcu);
545} 545}
546 546
547static int mpls_dev_notify(struct notifier_block *this, unsigned long event, 547static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index b064c345042c..8cabeb5a1cb9 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -16,6 +16,7 @@ struct mpls_dev {
16 int input_enabled; 16 int input_enabled;
17 17
18 struct ctl_table_header *sysctl; 18 struct ctl_table_header *sysctl;
19 struct rcu_head rcu;
19}; 20};
20 21
21struct sk_buff; 22struct sk_buff;
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 4776282c6417..33e6d6e2908f 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -125,6 +125,7 @@ static struct vport *netdev_create(const struct vport_parms *parms)
125 if (err) 125 if (err)
126 goto error_master_upper_dev_unlink; 126 goto error_master_upper_dev_unlink;
127 127
128 dev_disable_lro(netdev_vport->dev);
128 dev_set_promiscuity(netdev_vport->dev, 1); 129 dev_set_promiscuity(netdev_vport->dev, 1);
129 netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH; 130 netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
130 rtnl_unlock(); 131 rtnl_unlock();