author		Linus Torvalds <torvalds@linux-foundation.org>	2016-07-23 02:44:31 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-23 02:44:31 -0400
commit		107df03203bb66de56e2caec3bde6d22b55480c5 (patch)
tree		cff42c091a4a9f43203bbb85c9cf526857470a8f
parent		88083e9845612826dfd44a5215647b4f6567317c (diff)
parent		f8e7718cc0445587fe8530fc2d240d9aac2c9072 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix memory leak in nftables, from Liping Zhang.

 2) Need to check result of vlan_insert_tag() in batman-adv otherwise we
    risk NULL skb derefs, from Sven Eckelmann.

 3) Check for dev_alloc_skb() failures in cfg80211, from Gregory
    Greenman.

 4) Handle properly when we have ppp_unregister_channel() happening in
    parallel with ppp_connect_channel(), from WANG Cong.

 5) Fix DCCP deadlock, from Eric Dumazet.

 6) Bail out properly in UDP if sk_filter() truncates the packet to be
    smaller than even the space that the protocol headers need.  From
    Michal Kubecek.

 7) Similarly for rose, dccp, and sctp, from Willem de Bruijn.

 8) Make TCP challenge ACKs less predictable, from Eric Dumazet.

 9) Fix infinite loop in bgmac_dma_tx_add(), from Florian Fainelli.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (65 commits)
  packet: propagate sock_cmsg_send() error
  net/mlx5e: Fix del vxlan port command buffer memset
  packet: fix second argument of sock_tx_timestamp()
  net: switchdev: change ageing_time type to clock_t
  Update maintainer for EHEA driver.
  net/mlx4_en: Add resilience in low memory systems
  net/mlx4_en: Move filters cleanup to a proper location
  sctp: load transport header after sk_filter
  net/sched/sch_htb: clamp xstats tokens to fit into 32-bit int
  net: cavium: liquidio: Avoid dma_unmap_single on uninitialized ndata
  net: nb8800: Fix SKB leak in nb8800_receive()
  et131x: Fix logical vs bitwise check in et131x_tx_timeout()
  vlan: use a valid default mtu value for vlan over macsec
  net: bgmac: Fix infinite loop in bgmac_dma_tx_add()
  mlxsw: spectrum: Prevent invalid ingress buffer mapping
  mlxsw: spectrum: Prevent overwrite of DCB capability fields
  mlxsw: spectrum: Don't emit errors when PFC is disabled
  mlxsw: spectrum: Indicate support for autonegotiation
  mlxsw: spectrum: Force link training according to admin state
  r8152: add MODULE_VERSION
  ...
-rw-r--r--	MAINTAINERS	2
-rw-r--r--	drivers/net/bonding/bond_netlink.c	6
-rw-r--r--	drivers/net/ethernet/agere/et131x.c	2
-rw-r--r--	drivers/net/ethernet/aurora/nb8800.c	1
-rw-r--r--	drivers/net/ethernet/broadcom/bgmac.c	2
-rw-r--r--	drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c	2
-rw-r--r--	drivers/net/ethernet/cavium/liquidio/lio_main.c	9
-rw-r--r--	drivers/net/ethernet/ethoc.c	16
-rw-r--r--	drivers/net/ethernet/ezchip/nps_enet.c	1
-rw-r--r--	drivers/net/ethernet/ibm/ibmvnic.c	227
-rw-r--r--	drivers/net/ethernet/ibm/ibmvnic.h	2
-rw-r--r--	drivers/net/ethernet/intel/i40e/i40e_main.c	48
-rw-r--r--	drivers/net/ethernet/intel/i40e/i40e_txrx.c	30
-rw-r--r--	drivers/net/ethernet/intel/i40evf/i40e_txrx.c	30
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_main.c	2
-rw-r--r--	drivers/net/ethernet/marvell/mvneta.c	2
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_ethtool.c	54
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_netdev.c	110
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_rx.c	3
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/mlx4_en.h	9
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_main.c	13
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/vxlan.c	4
-rw-r--r--	drivers/net/ethernet/mellanox/mlxsw/reg.h	17
-rw-r--r--	drivers/net/ethernet/mellanox/mlxsw/spectrum.c	28
-rw-r--r--	drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c	2
-rw-r--r--	drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c	8
-rw-r--r--	drivers/net/ppp/ppp_generic.c	5
-rw-r--r--	drivers/net/usb/r8152.c	85
-rw-r--r--	include/linux/filter.h	6
-rw-r--r--	include/linux/netdevice.h	7
-rw-r--r--	include/net/netfilter/nf_conntrack.h	8
-rw-r--r--	include/net/sock.h	8
-rw-r--r--	include/net/switchdev.h	2
-rw-r--r--	net/8021q/vlan_dev.c	10
-rw-r--r--	net/8021q/vlan_netlink.c	7
-rw-r--r--	net/batman-adv/bridge_loop_avoidance.c	116
-rw-r--r--	net/batman-adv/distributed-arp-table.c	10
-rw-r--r--	net/batman-adv/originator.c	15
-rw-r--r--	net/batman-adv/routing.c	52
-rw-r--r--	net/batman-adv/send.c	4
-rw-r--r--	net/batman-adv/types.h	6
-rw-r--r--	net/core/filter.c	10
-rw-r--r--	net/core/sock.c	11
-rw-r--r--	net/dccp/ipv4.c	12
-rw-r--r--	net/dccp/ipv6.c	2
-rw-r--r--	net/ipv4/fib_semantics.c	6
-rw-r--r--	net/ipv4/tcp_input.c	54
-rw-r--r--	net/ipv4/udp.c	2
-rw-r--r--	net/ipv6/udp.c	2
-rw-r--r--	net/netfilter/ipvs/ip_vs_sync.c	6
-rw-r--r--	net/netfilter/nf_conntrack_core.c	8
-rw-r--r--	net/netfilter/nf_tables_api.c	4
-rw-r--r--	net/netfilter/nft_ct.c	6
-rw-r--r--	net/netfilter/nft_meta.c	2
-rw-r--r--	net/packet/af_packet.c	10
-rw-r--r--	net/rose/rose_in.c	3
-rw-r--r--	net/sched/sch_htb.c	6
-rw-r--r--	net/sctp/input.c	5
-rw-r--r--	net/tipc/bearer.c	15
-rw-r--r--	net/tipc/bearer.h	1
-rw-r--r--	net/tipc/link.c	9
-rw-r--r--	net/tipc/node.c	15
-rw-r--r--	net/wireless/nl80211.c	8
-rw-r--r--	net/wireless/util.c	2
64 files changed, 850 insertions(+), 320 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 4a728291f568..8c20323d1277 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4476,7 +4476,7 @@ S: Orphan
 F:	fs/efs/
 
 EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M:	Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
+M:	Douglas Miller <dougmill@linux.vnet.ibm.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/ibm/ehea/
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index db760e84119f..b8df0f5e8c25 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -446,7 +446,11 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
 	if (err < 0)
 		return err;
 
-	return register_netdevice(bond_dev);
+	err = register_netdevice(bond_dev);
+
+	netif_carrier_off(bond_dev);
+
+	return err;
 }
 
 static size_t bond_get_size(const struct net_device *bond_dev)
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 30defe6c81f2..821d86c38ab2 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3851,7 +3851,7 @@ static void et131x_tx_timeout(struct net_device *netdev)
 	unsigned long flags;
 
 	/* If the device is closed, ignore the timeout */
-	if (~(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
+	if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
 		return;
 
 	/* Any nonrecoverable hardware error?
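The fix above is a single character, but the semantics differ completely: `~` is bitwise NOT, so `~(flags & BIT)` is nonzero (hence true) for essentially any input, while `!` tests for zero. A standalone sketch of the two tests (plain C, not driver code; the flag value is an assumed placeholder):

#include <stdio.h>

#define FMP_ADAPTER_INTERRUPT_IN_USE 0x4u	/* placeholder value */

int main(void)
{
	unsigned int flags = FMP_ADAPTER_INTERRUPT_IN_USE;	/* device open */

	/* Bitwise NOT: ~(0x4) == 0xfffffffb, which is truthy, so the
	 * buggy test treats an open device as closed and returns early. */
	if (~(flags & FMP_ADAPTER_INTERRUPT_IN_USE))
		printf("bitwise test: timeout ignored (wrong)\n");

	/* Logical NOT: !(0x4) == 0, so the fixed test only bails out
	 * when the flag really is clear. */
	if (!(flags & FMP_ADAPTER_INTERRUPT_IN_USE))
		printf("logical test: timeout ignored\n");
	else
		printf("logical test: timeout handled (right)\n");
	return 0;
}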
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 08a23e6b60e9..1a3555d03a96 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -259,6 +259,7 @@ static void nb8800_receive(struct net_device *dev, unsigned int i,
 	if (err) {
 		netdev_err(dev, "rx buffer allocation failed\n");
 		dev->stats.rx_dropped++;
+		dev_kfree_skb(skb);
 		return;
 	}
 
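The one-line addition plugs a leak: on rx buffer allocation failure the function returned without releasing the skb it already owned. The rule the fix restores, as a plain-C analogy (malloc/free stand in for skb handling; names are invented for the demo):

#include <stdio.h>
#include <stdlib.h>

/* Every early return on an error path must release whatever the
 * function already owns -- here, the buffer acquired before the
 * failing allocation (the skb in nb8800_receive()). */
static void receive(void)
{
	char *skb = malloc(64);		/* stands in for the dequeued skb */
	char *rxb = malloc(128);	/* the replacement rx buffer */

	if (!rxb) {
		fprintf(stderr, "rx buffer allocation failed\n");
		free(skb);		/* the line the patch adds */
		return;
	}
	/* ... hand skb up the stack ... */
	free(rxb);
	free(skb);
}

int main(void)
{
	receive();
	return 0;
}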
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index a6333d38ecc0..25bbae5928d4 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -231,7 +231,7 @@ err_dma:
 	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
 			 DMA_TO_DEVICE);
 
-	while (i > 0) {
+	while (i-- > 0) {
 		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
 		struct bgmac_slot_info *slot = &ring->slots[index];
 		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
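`while (i > 0)` never terminates here because nothing in the loop body decrements `i`; the post-decrement in the condition both counts down and yields the previous value, so the unwind visits each previously mapped slot exactly once. A minimal demonstration (illustrative only):

#include <stdio.h>

/* Unwind i previously-mapped slots.  With "while (i > 0)" and no
 * decrement in the body this would spin forever; "while (i-- > 0)"
 * visits i-1 down to 0 and then stops. */
static void unwind(int i)
{
	while (i-- > 0)
		printf("unmapping slot %d\n", i);
}

int main(void)
{
	unwind(3);	/* prints slots 2, 1, 0 */
	return 0;
}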
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index a38cb047b540..1b0ae4a72e9e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1591,7 +1591,7 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
 {
 	struct bnxt *bp = netdev_priv(dev);
 	u16 start = eeprom->offset, length = eeprom->len;
-	int rc;
+	int rc = 0;
 
 	memset(data, 0, eeprom->len);
 
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 8de79ae63231..0e7e7da8d201 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2821,7 +2821,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 	if (!g) {
 		netif_info(lio, tx_err, lio->netdev,
 			   "Transmit scatter gather: glist null!\n");
-		goto lio_xmit_failed;
+		goto lio_xmit_dma_failed;
 	}
 
 	cmdsetup.s.gather = 1;
@@ -2892,7 +2892,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 	else
 		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
 	if (status == IQ_SEND_FAILED)
-		goto lio_xmit_failed;
+		goto lio_xmit_dma_failed;
 
 	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
 
@@ -2906,12 +2906,13 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	return NETDEV_TX_OK;
 
+lio_xmit_dma_failed:
+	dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
+			 ndata.datasize, DMA_TO_DEVICE);
 lio_xmit_failed:
 	stats->tx_dropped++;
 	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
 		   iq_no, stats->tx_dropped);
-	dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
-			 ndata.datasize, DMA_TO_DEVICE);
 	recv_buffer_free(skb);
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 4edb98c3c6c7..4466a1187110 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -860,6 +860,11 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int entry;
 	void *dest;
 
+	if (skb_put_padto(skb, ETHOC_ZLEN)) {
+		dev->stats.tx_errors++;
+		goto out_no_free;
+	}
+
 	if (unlikely(skb->len > ETHOC_BUFSIZ)) {
 		dev->stats.tx_errors++;
 		goto out;
@@ -894,6 +899,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb_tx_timestamp(skb);
 out:
 	dev_kfree_skb(skb);
+out_no_free:
 	return NETDEV_TX_OK;
 }
 
@@ -1086,7 +1092,7 @@ static int ethoc_probe(struct platform_device *pdev)
 	if (!priv->iobase) {
 		dev_err(&pdev->dev, "cannot remap I/O memory space\n");
 		ret = -ENXIO;
-		goto error;
+		goto free;
 	}
 
 	if (netdev->mem_end) {
@@ -1095,7 +1101,7 @@ static int ethoc_probe(struct platform_device *pdev)
 		if (!priv->membase) {
 			dev_err(&pdev->dev, "cannot remap memory space\n");
 			ret = -ENXIO;
-			goto error;
+			goto free;
 		}
 	} else {
 		/* Allocate buffer memory */
@@ -1106,7 +1112,7 @@ static int ethoc_probe(struct platform_device *pdev)
 			dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
 				buffer_size);
 			ret = -ENOMEM;
-			goto error;
+			goto free;
 		}
 		netdev->mem_end = netdev->mem_start + buffer_size;
 		priv->dma_alloc = buffer_size;
@@ -1120,7 +1126,7 @@ static int ethoc_probe(struct platform_device *pdev)
 		     128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
 	if (num_bd < 4) {
 		ret = -ENODEV;
-		goto error;
+		goto free;
 	}
 	priv->num_bd = num_bd;
 	/* num_tx must be a power of two */
@@ -1133,7 +1139,7 @@ static int ethoc_probe(struct platform_device *pdev)
 	priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL);
 	if (!priv->vma) {
 		ret = -ENOMEM;
-		goto error;
+		goto free;
 	}
 
 	/* Allow the platform setup code to pass in a MAC address. */
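The xmit hunk at the top of this file pads undersized frames before they reach the hardware; skb_put_padto() extends the buffer to the minimum length and zero-fills the tail, and on failure it has already consumed the skb, hence the new out_no_free exit. A userspace sketch of the same padding idea (ETHOC_ZLEN's real value isn't shown in this diff; 64 below is an assumed stand-in):

#include <stdio.h>
#include <string.h>

#define ETHOC_ZLEN 64	/* assumed minimum frame length for the demo */

/* Pad a frame buffer to the minimum transmit length, zero-filling
 * the tail -- roughly what skb_put_padto() does for an skb. */
static size_t pad_frame(unsigned char *buf, size_t len, size_t cap)
{
	if (len >= ETHOC_ZLEN)
		return len;
	if (cap < ETHOC_ZLEN)
		return 0;	/* caller must drop the frame */
	memset(buf + len, 0, ETHOC_ZLEN - len);
	return ETHOC_ZLEN;
}

int main(void)
{
	unsigned char frame[128] = "short payload";

	printf("padded length: %zu\n", pad_frame(frame, 13, sizeof(frame)));
	return 0;
}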
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 06f031715b57..9b7a3f5a2818 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -285,6 +285,7 @@ static void nps_enet_hw_reset(struct net_device *ndev)
 	ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT;
 	nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
 	usleep_range(10, 20);
+	ge_rst_value = 0;
 	nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
 
 	/* Tx fifo reset sequence */
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index ecdb6854a898..88f3c85fb04a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -75,6 +75,7 @@
 #include <linux/uaccess.h>
 #include <asm/firmware.h>
 #include <linux/seq_file.h>
+#include <linux/workqueue.h>
 
 #include "ibmvnic.h"
 
@@ -89,6 +90,7 @@ MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
 static int ibmvnic_remove(struct vio_dev *);
 static void release_sub_crqs(struct ibmvnic_adapter *);
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
@@ -469,7 +471,8 @@ static int ibmvnic_open(struct net_device *netdev)
 	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
 	ibmvnic_send_crq(adapter, &crq);
 
-	netif_start_queue(netdev);
+	netif_tx_start_all_queues(netdev);
+
 	return 0;
 
 bounce_map_failed:
@@ -519,7 +522,7 @@ static int ibmvnic_close(struct net_device *netdev)
 	for (i = 0; i < adapter->req_rx_queues; i++)
 		napi_disable(&adapter->napi[i]);
 
-	netif_stop_queue(netdev);
+	netif_tx_stop_all_queues(netdev);
 
 	if (adapter->bounce_buffer) {
 		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
@@ -1212,12 +1215,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 		goto reg_failed;
 	}
 
-	scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
-	if (scrq->irq == NO_IRQ) {
-		dev_err(dev, "Error mapping irq\n");
-		goto map_irq_failed;
-	}
-
 	scrq->adapter = adapter;
 	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
 	scrq->cur = 0;
@@ -1230,12 +1227,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 
 	return scrq;
 
-map_irq_failed:
-	do {
-		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
-					adapter->vdev->unit_address,
-					scrq->crq_num);
-	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 reg_failed:
 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
 			 DMA_BIDIRECTIONAL);
@@ -1256,6 +1247,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
 		if (adapter->tx_scrq[i]) {
 			free_irq(adapter->tx_scrq[i]->irq,
 				 adapter->tx_scrq[i]);
+			irq_dispose_mapping(adapter->tx_scrq[i]->irq);
 			release_sub_crq_queue(adapter,
 					      adapter->tx_scrq[i]);
 		}
@@ -1267,6 +1259,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
 		if (adapter->rx_scrq[i]) {
 			free_irq(adapter->rx_scrq[i]->irq,
 				 adapter->rx_scrq[i]);
+			irq_dispose_mapping(adapter->rx_scrq[i]->irq);
 			release_sub_crq_queue(adapter,
 					      adapter->rx_scrq[i]);
 		}
@@ -1276,6 +1269,29 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
 	adapter->requested_caps = 0;
 }
 
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
+{
+	int i;
+
+	if (adapter->tx_scrq) {
+		for (i = 0; i < adapter->req_tx_queues; i++)
+			if (adapter->tx_scrq[i])
+				release_sub_crq_queue(adapter,
+						      adapter->tx_scrq[i]);
+		adapter->tx_scrq = NULL;
+	}
+
+	if (adapter->rx_scrq) {
+		for (i = 0; i < adapter->req_rx_queues; i++)
+			if (adapter->rx_scrq[i])
+				release_sub_crq_queue(adapter,
+						      adapter->rx_scrq[i]);
+		adapter->rx_scrq = NULL;
+	}
+
+	adapter->requested_caps = 0;
+}
+
 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
 			    struct ibmvnic_sub_crq_queue *scrq)
 {
@@ -1395,6 +1411,66 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
 	return IRQ_HANDLED;
 }
 
+static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
+{
+	struct device *dev = &adapter->vdev->dev;
+	struct ibmvnic_sub_crq_queue *scrq;
+	int i = 0, j = 0;
+	int rc = 0;
+
+	for (i = 0; i < adapter->req_tx_queues; i++) {
+		scrq = adapter->tx_scrq[i];
+		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+
+		if (scrq->irq == NO_IRQ) {
+			rc = -EINVAL;
+			dev_err(dev, "Error mapping irq\n");
+			goto req_tx_irq_failed;
+		}
+
+		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
+				 0, "ibmvnic_tx", scrq);
+
+		if (rc) {
+			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
+				scrq->irq, rc);
+			irq_dispose_mapping(scrq->irq);
+			goto req_rx_irq_failed;
+		}
+	}
+
+	for (i = 0; i < adapter->req_rx_queues; i++) {
+		scrq = adapter->rx_scrq[i];
+		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+		if (scrq->irq == NO_IRQ) {
+			rc = -EINVAL;
+			dev_err(dev, "Error mapping irq\n");
+			goto req_rx_irq_failed;
+		}
+		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
+				 0, "ibmvnic_rx", scrq);
+		if (rc) {
+			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
+				scrq->irq, rc);
+			irq_dispose_mapping(scrq->irq);
+			goto req_rx_irq_failed;
+		}
+	}
+	return rc;
+
+req_rx_irq_failed:
+	for (j = 0; j < i; j++)
+		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
+	irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+	i = adapter->req_tx_queues;
+req_tx_irq_failed:
+	for (j = 0; j < i; j++)
+		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
+	irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+	release_sub_crqs_no_irqs(adapter);
+	return rc;
+}
+
 static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
 {
 	struct device *dev = &adapter->vdev->dev;
@@ -1403,8 +1479,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
 	union ibmvnic_crq crq;
 	int total_queues;
 	int more = 0;
-	int i, j;
-	int rc;
+	int i;
 
 	if (!retry) {
 		/* Sub-CRQ entries are 32 byte long */
@@ -1483,13 +1558,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
 	for (i = 0; i < adapter->req_tx_queues; i++) {
 		adapter->tx_scrq[i] = allqueues[i];
 		adapter->tx_scrq[i]->pool_index = i;
-		rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
-				 0, "ibmvnic_tx", adapter->tx_scrq[i]);
-		if (rc) {
-			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
-				adapter->tx_scrq[i]->irq, rc);
-			goto req_tx_irq_failed;
-		}
 	}
 
 	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
@@ -1500,13 +1568,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
 	for (i = 0; i < adapter->req_rx_queues; i++) {
 		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
 		adapter->rx_scrq[i]->scrq_num = i;
-		rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
-				 0, "ibmvnic_rx", adapter->rx_scrq[i]);
-		if (rc) {
-			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
-				adapter->rx_scrq[i]->irq, rc);
-			goto req_rx_irq_failed;
-		}
 	}
 
 	memset(&crq, 0, sizeof(crq));
@@ -1559,15 +1620,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
 
 	return;
 
-req_rx_irq_failed:
-	for (j = 0; j < i; j++)
-		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
-	i = adapter->req_tx_queues;
-req_tx_irq_failed:
-	for (j = 0; j < i; j++)
-		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
-	kfree(adapter->rx_scrq);
-	adapter->rx_scrq = NULL;
 rx_failed:
 	kfree(adapter->tx_scrq);
 	adapter->tx_scrq = NULL;
@@ -2348,9 +2400,9 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
 					 *req_value,
 					 (long int)be32_to_cpu(crq->request_capability_rsp.
 							       number), name);
-		release_sub_crqs(adapter);
+		release_sub_crqs_no_irqs(adapter);
 		*req_value = be32_to_cpu(crq->request_capability_rsp.number);
-		complete(&adapter->init_done);
+		init_sub_crqs(adapter, 1);
 		return;
 	default:
 		dev_err(dev, "Error %d in request cap rsp\n",
@@ -2659,7 +2711,7 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
 
 out:
 	if (atomic_read(&adapter->running_cap_queries) == 0)
-		complete(&adapter->init_done);
+		init_sub_crqs(adapter, 0);
 	/* We're done querying the capabilities, initialize sub-crqs */
 }
 
@@ -3202,8 +3254,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
 		dev_info(dev, "Partner initialized\n");
 		/* Send back a response */
 		rc = ibmvnic_send_crq_init_complete(adapter);
-		if (rc == 0)
-			send_version_xchg(adapter);
+		if (!rc)
+			schedule_work(&adapter->vnic_crq_init);
 		else
 			dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
 		break;
@@ -3555,8 +3607,63 @@ static const struct file_operations ibmvnic_dump_ops = {
 	.release	= single_release,
 };
 
+static void handle_crq_init_rsp(struct work_struct *work)
+{
+	struct ibmvnic_adapter *adapter = container_of(work,
+						       struct ibmvnic_adapter,
+						       vnic_crq_init);
+	struct device *dev = &adapter->vdev->dev;
+	struct net_device *netdev = adapter->netdev;
+	unsigned long timeout = msecs_to_jiffies(30000);
+	int rc;
+
+	send_version_xchg(adapter);
+	reinit_completion(&adapter->init_done);
+	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+		dev_err(dev, "Passive init timeout\n");
+		goto task_failed;
+	}
+
+	do {
+		if (adapter->renegotiate) {
+			adapter->renegotiate = false;
+			release_sub_crqs_no_irqs(adapter);
+			send_cap_queries(adapter);
+
+			reinit_completion(&adapter->init_done);
+			if (!wait_for_completion_timeout(&adapter->init_done,
+							 timeout)) {
+				dev_err(dev, "Passive init timeout\n");
+				goto task_failed;
+			}
+		}
+	} while (adapter->renegotiate);
+	rc = init_sub_crq_irqs(adapter);
+
+	if (rc)
+		goto task_failed;
+
+	netdev->real_num_tx_queues = adapter->req_tx_queues;
+
+	rc = register_netdev(netdev);
+	if (rc) {
+		dev_err(dev,
+			"failed to register netdev rc=%d\n", rc);
+		goto register_failed;
+	}
+	dev_info(dev, "ibmvnic registered\n");
+
+	return;
+
+register_failed:
+	release_sub_crqs(adapter);
+task_failed:
+	dev_err(dev, "Passive initialization was not successful\n");
+}
+
 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
+	unsigned long timeout = msecs_to_jiffies(30000);
 	struct ibmvnic_adapter *adapter;
 	struct net_device *netdev;
 	unsigned char *mac_addr_p;
@@ -3593,6 +3700,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
 	SET_NETDEV_DEV(netdev, &dev->dev);
 
+	INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
+
 	spin_lock_init(&adapter->stats_lock);
 
 	rc = ibmvnic_init_crq_queue(adapter);
@@ -3635,30 +3744,26 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	ibmvnic_send_crq_init(adapter);
 
 	init_completion(&adapter->init_done);
-	wait_for_completion(&adapter->init_done);
+	if (!wait_for_completion_timeout(&adapter->init_done, timeout))
+		return 0;
 
 	do {
-		adapter->renegotiate = false;
-
-		init_sub_crqs(adapter, 0);
-		reinit_completion(&adapter->init_done);
-		wait_for_completion(&adapter->init_done);
-
 		if (adapter->renegotiate) {
-			release_sub_crqs(adapter);
+			adapter->renegotiate = false;
+			release_sub_crqs_no_irqs(adapter);
 			send_cap_queries(adapter);
 
 			reinit_completion(&adapter->init_done);
-			wait_for_completion(&adapter->init_done);
+			if (!wait_for_completion_timeout(&adapter->init_done,
+							 timeout))
+				return 0;
 		}
 	} while (adapter->renegotiate);
 
-	/* if init_sub_crqs is partially successful, retry */
-	while (!adapter->tx_scrq || !adapter->rx_scrq) {
-		init_sub_crqs(adapter, 1);
-
-		reinit_completion(&adapter->init_done);
-		wait_for_completion(&adapter->init_done);
+	rc = init_sub_crq_irqs(adapter);
+	if (rc) {
+		dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
+		goto free_debugfs;
 	}
 
 	netdev->real_num_tx_queues = adapter->req_tx_queues;
@@ -3666,12 +3771,14 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	rc = register_netdev(netdev);
 	if (rc) {
 		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
-		goto free_debugfs;
+		goto free_sub_crqs;
 	}
 	dev_info(&dev->dev, "ibmvnic registered\n");
 
 	return 0;
 
+free_sub_crqs:
+	release_sub_crqs(adapter);
 free_debugfs:
 	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
 		debugfs_remove_recursive(adapter->debugfs_dir);
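The ibmvnic changes above defer the long initialization handshake out of the CRQ interrupt path into a work item: the interrupt handler now only calls schedule_work(), and handle_crq_init_rsp() does the blocking waits in process context. A toy model of that deferral (the one-slot "workqueue" and stub names are inventions for the demo, not the kernel API):

#include <stdio.h>

typedef void (*work_fn)(void);

static work_fn pending_work;		/* one-slot stand-in for a workqueue */

static void handle_crq_init_rsp(void)
{
	/* long-running init; may sleep (wait_for_completion_timeout, etc.) */
	printf("running deferred init in process context\n");
}

static void schedule_work_stub(work_fn fn)
{
	pending_work = fn;		/* fast, never sleeps */
}

static void irq_handler(void)
{
	/* was: the whole init sequence inline, which cannot sleep here */
	schedule_work_stub(handle_crq_init_rsp);
}

int main(void)
{
	irq_handler();			/* interrupt context: just queues */
	if (pending_work)
		pending_work();		/* worker: process context */
	return 0;
}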
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 0b66a506a4e4..e82898fd518e 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1045,4 +1045,6 @@ struct ibmvnic_adapter {
 	u64 opt_rxba_entries_per_subcrq;
 	__be64 tx_rx_desc_req;
 	u8 map_id;
+
+	struct work_struct vnic_crq_init;
 };
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 5ea22008d721..501f15d9f4d6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1344,6 +1344,13 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
 	if (!vsi || !macaddr)
 		return NULL;
 
+	/* Do not allow broadcast filter to be added since broadcast filter
+	 * is added as part of add VSI for any newly created VSI except
+	 * FDIR VSI
+	 */
+	if (is_broadcast_ether_addr(macaddr))
+		return NULL;
+
 	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
 	if (!f) {
 		f = kzalloc(sizeof(*f), GFP_ATOMIC);
@@ -2151,18 +2158,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 					 aq_ret, pf->hw.aq.asq_last_status);
 			}
 		}
-		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
-						   vsi->seid,
-						   cur_promisc, NULL);
-		if (aq_ret) {
-			retval = i40e_aq_rc_to_posix(aq_ret,
-						     pf->hw.aq.asq_last_status);
-			dev_info(&pf->pdev->dev,
-				 "set brdcast promisc failed, err %s, aq_err %s\n",
-				 i40e_stat_str(&pf->hw, aq_ret),
-				 i40e_aq_str(&pf->hw,
-					     pf->hw.aq.asq_last_status));
-		}
 	}
 out:
 	/* if something went wrong then set the changed flag so we try again */
@@ -7726,10 +7721,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
  * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
  * @vsi: the VSI being configured
  * @v_idx: index of the vector in the vsi struct
+ * @cpu: cpu to be used on affinity_mask
  *
  * We allocate one q_vector. If allocation fails we return -ENOMEM.
  **/
-static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
 {
 	struct i40e_q_vector *q_vector;
 
@@ -7740,7 +7736,8 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
 
 	q_vector->vsi = vsi;
 	q_vector->v_idx = v_idx;
-	cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+	cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+
 	if (vsi->netdev)
 		netif_napi_add(vsi->netdev, &q_vector->napi,
 			       i40e_napi_poll, NAPI_POLL_WEIGHT);
@@ -7764,8 +7761,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
 {
 	struct i40e_pf *pf = vsi->back;
-	int v_idx, num_q_vectors;
-	int err;
+	int err, v_idx, num_q_vectors, current_cpu;
 
 	/* if not MSIX, give the one vector only to the LAN VSI */
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -7775,10 +7771,15 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
 	else
 		return -EINVAL;
 
+	current_cpu = cpumask_first(cpu_online_mask);
+
 	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
-		err = i40e_vsi_alloc_q_vector(vsi, v_idx);
+		err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
 		if (err)
 			goto err_out;
+		current_cpu = cpumask_next(current_cpu, cpu_online_mask);
+		if (unlikely(current_cpu >= nr_cpu_ids))
+			current_cpu = cpumask_first(cpu_online_mask);
 	}
 
 	return 0;
@@ -9224,6 +9225,7 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
 static int i40e_add_vsi(struct i40e_vsi *vsi)
 {
 	int ret = -ENODEV;
+	i40e_status aq_ret = 0;
 	u8 laa_macaddr[ETH_ALEN];
 	bool found_laa_mac_filter = false;
 	struct i40e_pf *pf = vsi->back;
@@ -9413,6 +9415,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
 		vsi->seid = ctxt.seid;
 		vsi->id = ctxt.vsi_number;
 	}
+	/* Except FDIR VSI, for all othet VSI set the broadcast filter */
+	if (vsi->type != I40E_VSI_FDIR) {
+		aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
+		if (aq_ret) {
+			ret = i40e_aq_rc_to_posix(aq_ret,
+						  hw->aq.asq_last_status);
+			dev_info(&pf->pdev->dev,
+				 "set brdcast promisc failed, err %s, aq_err %s\n",
+				 i40e_stat_str(hw, aq_ret),
+				 i40e_aq_str(hw, hw->aq.asq_last_status));
+		}
+	}
 
 	spin_lock_bh(&vsi->mac_filter_list_lock);
 	/* If macvlan filters already exist, force them to get loaded */
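The q_vector hunks in this file also switch interrupt affinity from "vector N pins to CPU N" to a round-robin walk over the online CPUs, which stays valid when there are more vectors than CPUs. A toy model of the assignment (NR_ONLINE and the helper are assumptions for the demo, not the kernel cpumask API):

#include <stdio.h>

#define NR_ONLINE 4			/* assumed online CPU count */

static int next_online(int cpu)
{
	return (cpu + 1) % NR_ONLINE;	/* cpumask_next + wrap-around */
}

int main(void)
{
	int cpu = 0;			/* cpumask_first(cpu_online_mask) */
	int v_idx;

	for (v_idx = 0; v_idx < 10; v_idx++) {
		printf("vector %d -> cpu %d\n", v_idx, cpu);
		cpu = next_online(cpu);
	}
	return 0;
}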
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 55f151fca1dc..a8868e1bf832 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1280,8 +1280,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 			    union i40e_rx_desc *rx_desc)
 {
 	struct i40e_rx_ptype_decoded decoded;
-	bool ipv4, ipv6, tunnel = false;
 	u32 rx_error, rx_status;
+	bool ipv4, ipv6;
 	u8 ptype;
 	u64 qword;
 
@@ -1336,19 +1336,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
 		return;
 
-	/* The hardware supported by this driver does not validate outer
-	 * checksums for tunneled VXLAN or GENEVE frames.  I don't agree
-	 * with it but the specification states that you "MAY validate", it
-	 * doesn't make it a hard requirement so if we have validated the
-	 * inner checksum report CHECKSUM_UNNECESSARY.
+	/* If there is an outer header present that might contain a checksum
+	 * we need to bump the checksum level by 1 to reflect the fact that
+	 * we are indicating we validated the inner checksum.
 	 */
-	if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
-				  I40E_RX_PTYPE_INNER_PROT_UDP |
-				  I40E_RX_PTYPE_INNER_PROT_SCTP))
-		tunnel = true;
-
-	skb->ip_summed = CHECKSUM_UNNECESSARY;
-	skb->csum_level = tunnel ? 1 : 0;
+	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+		skb->csum_level = 1;
+
+	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
+	switch (decoded.inner_prot) {
+	case I40E_RX_PTYPE_INNER_PROT_TCP:
+	case I40E_RX_PTYPE_INNER_PROT_UDP:
+	case I40E_RX_PTYPE_INNER_PROT_SCTP:
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		/* fall though */
+	default:
+		break;
+	}
 
 	return;
 
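The rework separates two decisions the old code conflated: csum_level records whether the verdict covers an inner (tunneled) checksum, and CHECKSUM_UNNECESSARY is only claimed for protocols the hardware actually validates. A toy model of that logic (enum and function names are made up for the demo):

#include <stdio.h>

enum inner_prot { PROT_NONE, PROT_TCP, PROT_UDP, PROT_SCTP, PROT_OTHER };

/* csum_level counts how many checksums the "unnecessary" verdict
 * covers; the verdict itself is only given for TCP/UDP/SCTP. */
static void rx_checksum(int tunneled, enum inner_prot prot)
{
	int csum_level = tunneled ? 1 : 0;

	switch (prot) {
	case PROT_TCP:
	case PROT_UDP:
	case PROT_SCTP:
		printf("CHECKSUM_UNNECESSARY, csum_level=%d\n", csum_level);
		break;
	default:
		printf("CHECKSUM_NONE, stack verifies\n");
		break;
	}
}

int main(void)
{
	rx_checksum(1, PROT_TCP);	/* tunnel-encapsulated TCP */
	rx_checksum(0, PROT_OTHER);	/* unknown inner protocol */
	return 0;
}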
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index be99189da925..79d99cd91b24 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -752,8 +752,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 			    union i40e_rx_desc *rx_desc)
 {
 	struct i40e_rx_ptype_decoded decoded;
-	bool ipv4, ipv6, tunnel = false;
 	u32 rx_error, rx_status;
+	bool ipv4, ipv6;
 	u8 ptype;
 	u64 qword;
 
@@ -808,19 +808,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
 		return;
 
-	/* The hardware supported by this driver does not validate outer
-	 * checksums for tunneled VXLAN or GENEVE frames.  I don't agree
-	 * with it but the specification states that you "MAY validate", it
-	 * doesn't make it a hard requirement so if we have validated the
-	 * inner checksum report CHECKSUM_UNNECESSARY.
+	/* If there is an outer header present that might contain a checksum
+	 * we need to bump the checksum level by 1 to reflect the fact that
+	 * we are indicating we validated the inner checksum.
 	 */
-	if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
-				  I40E_RX_PTYPE_INNER_PROT_UDP |
-				  I40E_RX_PTYPE_INNER_PROT_SCTP))
-		tunnel = true;
-
-	skb->ip_summed = CHECKSUM_UNNECESSARY;
-	skb->csum_level = tunnel ? 1 : 0;
+	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+		skb->csum_level = 1;
+
+	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
+	switch (decoded.inner_prot) {
+	case I40E_RX_PTYPE_INNER_PROT_TCP:
+	case I40E_RX_PTYPE_INNER_PROT_UDP:
+	case I40E_RX_PTYPE_INNER_PROT_SCTP:
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		/* fall though */
+	default:
+		break;
+	}
 
 	return;
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 088c47cf27d9..8bebd862a54c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2887,7 +2887,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
 		ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
 
-	return 0;
+	return min(work_done, budget - 1);
 }
 
 /**
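The NAPI contract is that a poll function signals completion by returning strictly less than its budget; capping the return at budget - 1 keeps that signal correct even when a full budget's worth of packets was cleaned before completing. A toy model of the return-value calculation (not kernel code; the core's reaction is summarized in the comment):

#include <stdio.h>

/* The core treats a poll() return value below the budget as
 * "finished, re-arm interrupts"; returning the full budget keeps
 * the context on the poll list. */
static int min_int(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	int budget = 64;
	int work_done = 64;	/* cleaned a full budget, then completed */

	printf("poll returns %d (< budget %d, so the core re-arms irqs)\n",
	       min_int(work_done, budget - 1), budget);
	return 0;
}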
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d5d263bda333..f92018b13d28 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -244,7 +244,7 @@
 /* Various constants */
 
 /* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS		1
+#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
 #define MVNETA_RX_COAL_PKTS		32
 #define MVNETA_RX_COAL_USEC		100
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index fc95affaf76b..44cf16d01f42 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1042,6 +1042,8 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_port_profile new_prof;
+	struct mlx4_en_priv *tmp;
 	u32 rx_size, tx_size;
 	int port_up = 0;
 	int err = 0;
@@ -1061,22 +1063,25 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 	    tx_size == priv->tx_ring[0]->size)
 		return 0;
 
+	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
 	mutex_lock(&mdev->state_lock);
+	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+	new_prof.tx_ring_size = tx_size;
+	new_prof.rx_ring_size = rx_size;
+	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+	if (err)
+		goto out;
+
 	if (priv->port_up) {
 		port_up = 1;
 		mlx4_en_stop_port(dev, 1);
 	}
 
-	mlx4_en_free_resources(priv);
-
-	priv->prof->tx_ring_size = tx_size;
-	priv->prof->rx_ring_size = rx_size;
+	mlx4_en_safe_replace_resources(priv, tmp);
 
-	err = mlx4_en_alloc_resources(priv);
-	if (err) {
-		en_err(priv, "Failed reallocating port resources\n");
-		goto out;
-	}
 	if (port_up) {
 		err = mlx4_en_start_port(dev);
 		if (err)
@@ -1084,8 +1089,8 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 	}
 
 	err = mlx4_en_moderation_update(priv);
-
 out:
+	kfree(tmp);
 	mutex_unlock(&mdev->state_lock);
 	return err;
 }
@@ -1714,6 +1719,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_port_profile new_prof;
+	struct mlx4_en_priv *tmp;
 	int port_up = 0;
 	int err = 0;
 
@@ -1723,23 +1730,26 @@ static int mlx4_en_set_channels(struct net_device *dev,
 	    !channel->tx_count || !channel->rx_count)
 		return -EINVAL;
 
+	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
 	mutex_lock(&mdev->state_lock);
+	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+	new_prof.num_tx_rings_p_up = channel->tx_count;
+	new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
+	new_prof.rx_ring_num = channel->rx_count;
+
+	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+	if (err)
+		goto out;
+
 	if (priv->port_up) {
 		port_up = 1;
 		mlx4_en_stop_port(dev, 1);
 	}
 
-	mlx4_en_free_resources(priv);
-
-	priv->num_tx_rings_p_up = channel->tx_count;
-	priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
-	priv->rx_ring_num = channel->rx_count;
-
-	err = mlx4_en_alloc_resources(priv);
-	if (err) {
-		en_err(priv, "Failed reallocating port resources\n");
-		goto out;
-	}
+	mlx4_en_safe_replace_resources(priv, tmp);
 
 	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
 	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
@@ -1757,8 +1767,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
 	}
 
 	err = mlx4_en_moderation_update(priv);
-
 out:
+	kfree(tmp);
 	mutex_unlock(&mdev->state_lock);
 	return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 0c0dfd6cdca6..8359e9e51b3b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1954,7 +1954,7 @@ static int mlx4_en_close(struct net_device *dev)
 	return 0;
 }
 
-void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 {
 	int i;
 
@@ -1979,7 +1979,7 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 
 }
 
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_port_profile *prof = priv->prof;
 	int i;
@@ -2044,6 +2044,77 @@ static void mlx4_en_shutdown(struct net_device *dev)
 	rtnl_unlock();
 }
 
+static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
+			     struct mlx4_en_priv *src,
+			     struct mlx4_en_port_profile *prof)
+{
+	memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
+	       sizeof(dst->hwtstamp_config));
+	dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
+	dst->tx_ring_num = prof->tx_ring_num;
+	dst->rx_ring_num = prof->rx_ring_num;
+	dst->flags = prof->flags;
+	dst->mdev = src->mdev;
+	dst->port = src->port;
+	dst->dev = src->dev;
+	dst->prof = prof;
+	dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+					 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
+
+	dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
+			       GFP_KERNEL);
+	if (!dst->tx_ring)
+		return -ENOMEM;
+
+	dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
+			     GFP_KERNEL);
+	if (!dst->tx_cq) {
+		kfree(dst->tx_ring);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
+				struct mlx4_en_priv *src)
+{
+	memcpy(dst->rx_ring, src->rx_ring,
+	       sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
+	memcpy(dst->rx_cq, src->rx_cq,
+	       sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
+	memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
+	       sizeof(dst->hwtstamp_config));
+	dst->tx_ring_num = src->tx_ring_num;
+	dst->rx_ring_num = src->rx_ring_num;
+	dst->tx_ring = src->tx_ring;
+	dst->tx_cq = src->tx_cq;
+	memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
+}
+
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+				struct mlx4_en_priv *tmp,
+				struct mlx4_en_port_profile *prof)
+{
+	mlx4_en_copy_priv(tmp, priv, prof);
+
+	if (mlx4_en_alloc_resources(tmp)) {
+		en_warn(priv,
+			"%s: Resource allocation failed, using previous configuration\n",
+			__func__);
+		kfree(tmp->tx_ring);
+		kfree(tmp->tx_cq);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+				    struct mlx4_en_priv *tmp)
+{
+	mlx4_en_free_resources(priv);
+	mlx4_en_update_priv(priv, tmp);
+}
+
 void mlx4_en_destroy_netdev(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -2080,6 +2151,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 	mdev->upper[priv->port] = NULL;
 	mutex_unlock(&mdev->state_lock);
 
+#ifdef CONFIG_RFS_ACCEL
+	mlx4_en_cleanup_filters(priv);
+#endif
+
 	mlx4_en_free_resources(priv);
 
 	kfree(priv->tx_ring);
@@ -3124,6 +3199,8 @@ int mlx4_en_reset_config(struct net_device *dev,
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_port_profile new_prof;
+	struct mlx4_en_priv *tmp;
 	int port_up = 0;
 	int err = 0;
 
@@ -3140,19 +3217,29 @@ int mlx4_en_reset_config(struct net_device *dev,
 		return -EINVAL;
 	}
 
+	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
 	mutex_lock(&mdev->state_lock);
+
+	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+	memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
+
+	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+	if (err)
+		goto out;
+
 	if (priv->port_up) {
 		port_up = 1;
 		mlx4_en_stop_port(dev, 1);
 	}
 
-	mlx4_en_free_resources(priv);
-
 	en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
-		ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+		ts_config.rx_filter,
+		!!(features & NETIF_F_HW_VLAN_CTAG_RX));
 
-	priv->hwtstamp_config.tx_type = ts_config.tx_type;
-	priv->hwtstamp_config.rx_filter = ts_config.rx_filter;
+	mlx4_en_safe_replace_resources(priv, tmp);
 
 	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
 		if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -3186,11 +3273,6 @@ int mlx4_en_reset_config(struct net_device *dev,
 		dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
 	}
 
-	err = mlx4_en_alloc_resources(priv);
-	if (err) {
-		en_err(priv, "Failed reallocating port resources\n");
-		goto out;
-	}
 	if (port_up) {
 		err = mlx4_en_start_port(dev);
 		if (err)
@@ -3199,6 +3281,8 @@ int mlx4_en_reset_config(struct net_device *dev,
 
 out:
 	mutex_unlock(&mdev->state_lock);
-	netdev_features_change(dev);
+	kfree(tmp);
+	if (!err)
+		netdev_features_change(dev);
 	return err;
 }
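The mlx4_en changes in this file all adopt one pattern: build the new configuration in a scratch priv first (mlx4_en_try_alloc_resources), and only swap it in (mlx4_en_safe_replace_resources) once every allocation has succeeded, so a failure under memory pressure leaves the port running on its old resources. A generic, self-contained sketch of that pattern (struct cfg and the helpers are illustrative, not the mlx4 API):

#include <stdio.h>
#include <stdlib.h>

struct cfg {
	size_t ring_size;
	int *ring;
};

/* Allocate the new configuration into a scratch object; on failure
 * the current configuration is untouched. */
static int try_alloc(struct cfg *tmp, size_t new_size)
{
	tmp->ring = calloc(new_size, sizeof(*tmp->ring));
	if (!tmp->ring)
		return -1;
	tmp->ring_size = new_size;
	return 0;
}

/* Only after try_alloc() succeeded: drop the old resources and
 * install the new ones. */
static void safe_replace(struct cfg *cur, struct cfg *tmp)
{
	free(cur->ring);
	*cur = *tmp;
}

int main(void)
{
	struct cfg cur = { 0 }, tmp = { 0 };

	if (try_alloc(&tmp, 4) == 0)
		safe_replace(&cur, &tmp);
	printf("ring size now %zu\n", cur.ring_size);
	free(cur.ring);
	return 0;
}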
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index c1b3a9c8cf3b..99b5407f2278 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -514,9 +514,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
 	ring->rx_info = NULL;
 	kfree(ring);
 	*pring = NULL;
-#ifdef CONFIG_RFS_ACCEL
-	mlx4_en_cleanup_filters(priv);
-#endif
 }
 
 void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 467d47ed2c39..13d297ee34bb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -353,12 +353,14 @@ struct mlx4_en_port_profile {
 	u32 rx_ring_num;
 	u32 tx_ring_size;
 	u32 rx_ring_size;
+	u8 num_tx_rings_p_up;
 	u8 rx_pause;
 	u8 rx_ppp;
 	u8 tx_pause;
 	u8 tx_ppp;
 	int rss_rings;
 	int inline_thold;
+	struct hwtstamp_config hwtstamp_config;
 };
 
 struct mlx4_en_profile {
@@ -623,8 +625,11 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
 			       u8 rx_ppp, u8 rx_pause,
 			       u8 tx_ppp, u8 tx_pause);
 
-void mlx4_en_free_resources(struct mlx4_en_priv *priv);
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+				struct mlx4_en_priv *tmp,
+				struct mlx4_en_port_profile *prof);
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+				    struct mlx4_en_priv *tmp);
 
 int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
 		      int entries, int ring, enum cq_type mode, int node);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 7a0dca29c642..5a4d88c2cdb2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1348,6 +1348,11 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv)
1348 goto err_close_channels; 1348 goto err_close_channels;
1349 } 1349 }
1350 1350
1351 /* FIXME: This is a W/A for tx timeout watchdog false alarms when
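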
1352 * polling for inactive tx queues.
1353 */
1354 netif_tx_start_all_queues(priv->netdev);
1355
1351 kfree(cparam); 1356 kfree(cparam);
1352 return 0; 1357 return 0;
1353 1358
@@ -1367,6 +1372,12 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv)
1367{ 1372{
1368 int i; 1373 int i;
1369 1374
1375 /* FIXME: This is a W/A only for tx timeout watchdog false alarms when
1376 * polling for inactive tx queues.
1377 */
1378 netif_tx_stop_all_queues(priv->netdev);
1379 netif_tx_disable(priv->netdev);
1380
1370 for (i = 0; i < priv->params.num_channels; i++) 1381 for (i = 0; i < priv->params.num_channels; i++)
1371 mlx5e_close_channel(priv->channel[i]); 1382 mlx5e_close_channel(priv->channel[i]);
1372 1383
@@ -2656,7 +2667,7 @@ static void mlx5e_tx_timeout(struct net_device *dev)
2656 for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) { 2667 for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
2657 struct mlx5e_sq *sq = priv->txq_to_sq_map[i]; 2668 struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
2658 2669
2659 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i))) 2670 if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
2660 continue; 2671 continue;
2661 sched_work = true; 2672 sched_work = true;
2662 set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); 2673 set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index 05de77267d58..e25a73ed2981 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -72,8 +72,8 @@ static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
72 u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)]; 72 u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
73 u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)]; 73 u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];
74 74
75 memset(&in, 0, sizeof(in)); 75 memset(in, 0, sizeof(in));
76 memset(&out, 0, sizeof(out)); 76 memset(out, 0, sizeof(out));
77 77
78 MLX5_SET(delete_vxlan_udp_dport_in, in, opcode, 78 MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
79 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT); 79 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
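
For a local array like the u32 in[...] above, &in and in denote the same address and sizeof(in) is the same either way, so as far as this hunk shows, the old code already zeroed the correct bytes; dropping the & removes the pointer-to-array vs. element-pointer type mismatch that static checkers flag. A small compile-and-run illustration of that equivalence (my reading of the hunk shown here, not a claim about the rest of the upstream commit):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    uint32_t in[4] = {1, 2, 3, 4};

    /* &in has type uint32_t (*)[4]; in decays to uint32_t *.
     * Both point at the same first byte, and sizeof(in) is 16
     * either way, so both memset forms clear the same bytes;
     * the fix is about type hygiene, not behavior. */
    assert((void *)&in == (void *)in);
    memset(in, 0, sizeof(in));
    assert(in[0] == 0 && in[3] == 0);
    return 0;
}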
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 1977e7a5c530..57d48da709fb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -2718,7 +2718,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
2718 * Configures the switch priority to buffer table. 2718 * Configures the switch priority to buffer table.
2719 */ 2719 */
2720#define MLXSW_REG_PPTB_ID 0x500B 2720#define MLXSW_REG_PPTB_ID 0x500B
2721#define MLXSW_REG_PPTB_LEN 0x0C 2721#define MLXSW_REG_PPTB_LEN 0x10
2722 2722
2723static const struct mlxsw_reg_info mlxsw_reg_pptb = { 2723static const struct mlxsw_reg_info mlxsw_reg_pptb = {
2724 .id = MLXSW_REG_PPTB_ID, 2724 .id = MLXSW_REG_PPTB_ID,
@@ -2784,6 +2784,13 @@ MLXSW_ITEM32(reg, pptb, pm_msb, 0x08, 24, 8);
2784 */ 2784 */
2785MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4); 2785MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4);
2786 2786
2787/* reg_pptb_prio_to_buff_msb
2788 * Mapping of switch priority <i+8> to one of the allocated receive port
2789 * buffers.
2790 * Access: RW
2791 */
2792MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4);
2793
2787#define MLXSW_REG_PPTB_ALL_PRIO 0xFF 2794#define MLXSW_REG_PPTB_ALL_PRIO 0xFF
2788 2795
2789static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port) 2796static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
@@ -2792,6 +2799,14 @@ static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
2792 mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM); 2799 mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
2793 mlxsw_reg_pptb_local_port_set(payload, local_port); 2800 mlxsw_reg_pptb_local_port_set(payload, local_port);
2794 mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO); 2801 mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
2802 mlxsw_reg_pptb_pm_msb_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
2803}
2804
2805static inline void mlxsw_reg_pptb_prio_to_buff_pack(char *payload, u8 prio,
2806 u8 buff)
2807{
2808 mlxsw_reg_pptb_prio_to_buff_set(payload, prio, buff);
2809 mlxsw_reg_pptb_prio_to_buff_msb_set(payload, prio, buff);
2795} 2810}
2796 2811
2797/* PBMC - Port Buffer Management Control Register 2812/* PBMC - Port Buffer Management Control Register
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 660429ebfbe1..374080027b2f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -171,23 +171,6 @@ static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
171 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 171 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
172} 172}
173 173
174static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
175 bool *p_is_up)
176{
177 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
178 char paos_pl[MLXSW_REG_PAOS_LEN];
179 u8 oper_status;
180 int err;
181
182 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
183 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
184 if (err)
185 return err;
186 oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
187 *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
188 return 0;
189}
190
191static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 174static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
192 unsigned char *addr) 175 unsigned char *addr)
193{ 176{
@@ -1434,7 +1417,8 @@ static int mlxsw_sp_port_get_settings(struct net_device *dev,
1434 1417
1435 cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) | 1418 cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
1436 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) | 1419 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
1437 SUPPORTED_Pause | SUPPORTED_Asym_Pause; 1420 SUPPORTED_Pause | SUPPORTED_Asym_Pause |
1421 SUPPORTED_Autoneg;
1438 cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin); 1422 cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
1439 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), 1423 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
1440 eth_proto_oper, cmd); 1424 eth_proto_oper, cmd);
@@ -1493,7 +1477,6 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
1493 u32 eth_proto_new; 1477 u32 eth_proto_new;
1494 u32 eth_proto_cap; 1478 u32 eth_proto_cap;
1495 u32 eth_proto_admin; 1479 u32 eth_proto_admin;
1496 bool is_up;
1497 int err; 1480 int err;
1498 1481
1499 speed = ethtool_cmd_speed(cmd); 1482 speed = ethtool_cmd_speed(cmd);
@@ -1525,12 +1508,7 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
1525 return err; 1508 return err;
1526 } 1509 }
1527 1510
1528 err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up); 1511 if (!netif_running(dev))
1529 if (err) {
1530 netdev_err(dev, "Failed to get oper status");
1531 return err;
1532 }
1533 if (!is_up)
1534 return 0; 1512 return 0;
1535 1513
1536 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1514 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index a3720a0fad7d..074cdda7b6f3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -194,7 +194,7 @@ static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
194 194
195 mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port); 195 mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
196 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 196 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
197 mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, 0); 197 mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
198 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb), 198 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
199 pptb_pl); 199 pptb_pl);
200} 200}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index 0b323661c0b6..01cfb7512827 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -103,7 +103,8 @@ static int mlxsw_sp_port_pg_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
103 103
104 mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port); 104 mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
105 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 105 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
106 mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, prio_tc[i]); 106 mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, prio_tc[i]);
107
107 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb), 108 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
108 pptb_pl); 109 pptb_pl);
109} 110}
@@ -249,6 +250,7 @@ static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
249 return err; 250 return err;
250 251
251 memcpy(mlxsw_sp_port->dcb.ets, ets, sizeof(*ets)); 252 memcpy(mlxsw_sp_port->dcb.ets, ets, sizeof(*ets));
253 mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
252 254
253 return 0; 255 return 0;
254} 256}
@@ -351,7 +353,8 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
351 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 353 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
352 int err; 354 int err;
353 355
354 if (mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) { 356 if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) &&
357 pfc->pfc_en) {
355 netdev_err(dev, "PAUSE frames already enabled on port\n"); 358 netdev_err(dev, "PAUSE frames already enabled on port\n");
356 return -EINVAL; 359 return -EINVAL;
357 } 360 }
@@ -371,6 +374,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
371 } 374 }
372 375
373 memcpy(mlxsw_sp_port->dcb.pfc, pfc, sizeof(*pfc)); 376 memcpy(mlxsw_sp_port->dcb.pfc, pfc, sizeof(*pfc));
377 mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
374 378
375 return 0; 379 return 0;
376 380
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 8dedafa1a95d..a30ee427efab 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2601,8 +2601,6 @@ ppp_unregister_channel(struct ppp_channel *chan)
2601 spin_lock_bh(&pn->all_channels_lock); 2601 spin_lock_bh(&pn->all_channels_lock);
2602 list_del(&pch->list); 2602 list_del(&pch->list);
2603 spin_unlock_bh(&pn->all_channels_lock); 2603 spin_unlock_bh(&pn->all_channels_lock);
2604 put_net(pch->chan_net);
2605 pch->chan_net = NULL;
2606 2604
2607 pch->file.dead = 1; 2605 pch->file.dead = 1;
2608 wake_up_interruptible(&pch->file.rwait); 2606 wake_up_interruptible(&pch->file.rwait);
@@ -3136,6 +3134,9 @@ ppp_disconnect_channel(struct channel *pch)
3136 */ 3134 */
3137static void ppp_destroy_channel(struct channel *pch) 3135static void ppp_destroy_channel(struct channel *pch)
3138{ 3136{
3137 put_net(pch->chan_net);
3138 pch->chan_net = NULL;
3139
3139 atomic_dec(&channel_count); 3140 atomic_dec(&channel_count);
3140 3141
3141 if (!pch->file.dead) { 3142 if (!pch->file.dead) {
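
The two ppp_generic.c hunks move put_net() out of ppp_unregister_channel() and into ppp_destroy_channel(), i.e. out of unregistration and into the point where the last reference to the channel dies. That closes the window in which a concurrent ppp_connect_channel() still holds the channel and dereferences pch->chan_net after it was dropped. A standalone sketch of the general rule, drop references owned by an object in its release path rather than at unregister time; the types and names below are invented for the sketch:

#include <stdlib.h>

struct net { int refcnt; };
struct channel { struct net *chan_net; int refcnt; };

static void put_net(struct net *net)
{
    if (--net->refcnt == 0)
        free(net);
}

/* Runs only once the last channel reference is gone, so nobody can
 * still be looking at chan_net when we drop it. */
static void channel_release(struct channel *ch)
{
    put_net(ch->chan_net);
    ch->chan_net = NULL;
    free(ch);
}

static void channel_put(struct channel *ch)
{
    if (--ch->refcnt == 0)
        channel_release(ch);
}

void channel_unregister(struct channel *ch)
{
    /* unlink from lists here; do NOT touch chan_net: a concurrent
     * connect may hold its own channel reference and still use it */
    channel_put(ch);
}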
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0da72d39b4f9..e9654a685381 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -26,6 +26,7 @@
26#include <linux/mdio.h> 26#include <linux/mdio.h>
27#include <linux/usb/cdc.h> 27#include <linux/usb/cdc.h>
28#include <linux/suspend.h> 28#include <linux/suspend.h>
29#include <linux/acpi.h>
29 30
30/* Information for net-next */ 31/* Information for net-next */
31#define NETNEXT_VERSION "08" 32#define NETNEXT_VERSION "08"
@@ -460,6 +461,11 @@
460/* SRAM_IMPEDANCE */ 461/* SRAM_IMPEDANCE */
461#define RX_DRIVING_MASK 0x6000 462#define RX_DRIVING_MASK 0x6000
462 463
464/* MAC PASSTHRU */
465#define AD_MASK 0xfee0
466#define EFUSE 0xcfdb
467#define PASS_THRU_MASK 0x1
468
463enum rtl_register_content { 469enum rtl_register_content {
464 _1000bps = 0x10, 470 _1000bps = 0x10,
465 _100bps = 0x08, 471 _100bps = 0x08,
@@ -1036,6 +1042,65 @@ out1:
1036 return ret; 1042 return ret;
1037} 1043}
1038 1044
1045/* Devices containing RTL8153-AD can support a persistent
1046 * host-system-provided MAC address.
1047 * Examples of this are the Dell TB15 and Dell WD15 docks.
1048 */
1049static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
1050{
1051 acpi_status status;
1052 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1053 union acpi_object *obj;
1054 int ret = -EINVAL;
1055 u32 ocp_data;
1056 unsigned char buf[6];
1057
1058 /* test for -AD variant of RTL8153 */
1059 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
1060 if ((ocp_data & AD_MASK) != 0x1000)
1061 return -ENODEV;
1062
1063 /* test for MAC address pass-through bit */
1064 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
1065 if ((ocp_data & PASS_THRU_MASK) != 1)
1066 return -ENODEV;
1067
1068 /* returns _AUXMAC_#AABBCCDDEEFF# */
1069 status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
1070 obj = (union acpi_object *)buffer.pointer;
1071 if (!ACPI_SUCCESS(status))
1072 return -ENODEV;
1073 if (obj->type != ACPI_TYPE_BUFFER || obj->string.length != 0x17) {
1074 netif_warn(tp, probe, tp->netdev,
1075 "Invalid buffer when reading pass-thru MAC addr: "
1076 "(%d, %d)\n",
1077 obj->type, obj->string.length);
1078 goto amacout;
1079 }
1080 if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 ||
1081 strncmp(obj->string.pointer + 0x15, "#", 1) != 0) {
1082 netif_warn(tp, probe, tp->netdev,
1083 "Invalid header when reading pass-thru MAC addr\n");
1084 goto amacout;
1085 }
1086 ret = hex2bin(buf, obj->string.pointer + 9, 6);
1087 if (!(ret == 0 && is_valid_ether_addr(buf))) {
1088 netif_warn(tp, probe, tp->netdev,
1089 "Invalid MAC when reading pass-thru MAC addr: "
1090 "%d, %pM\n", ret, buf);
1091 ret = -EINVAL;
1092 goto amacout;
1093 }
1094 memcpy(sa->sa_data, buf, 6);
1095 ether_addr_copy(tp->netdev->dev_addr, sa->sa_data);
1096 netif_info(tp, probe, tp->netdev,
1097 "Using pass-thru MAC addr %pM\n", sa->sa_data);
1098
1099amacout:
1100 kfree(obj);
1101 return ret;
1102}
1103
1039static int set_ethernet_addr(struct r8152 *tp) 1104static int set_ethernet_addr(struct r8152 *tp)
1040{ 1105{
1041 struct net_device *dev = tp->netdev; 1106 struct net_device *dev = tp->netdev;
@@ -1044,8 +1109,15 @@ static int set_ethernet_addr(struct r8152 *tp)
1044 1109
1045 if (tp->version == RTL_VER_01) 1110 if (tp->version == RTL_VER_01)
1046 ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data); 1111 ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
1047 else 1112 else {
1048 ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data); 1113 /* if this is not an RTL8153-AD, no eFuse MAC pass-thru bit is set,
1114 * or the system doesn't provide a valid _SB.AMAC, this is
1115 * expected to be non-zero
1116 */
1117 ret = vendor_mac_passthru_addr_read(tp, &sa);
1118 if (ret < 0)
1119 ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
1120 }
1049 1121
1050 if (ret < 0) { 1122 if (ret < 0) {
1051 netif_err(tp, probe, dev, "Get ether addr fail\n"); 1123 netif_err(tp, probe, dev, "Get ether addr fail\n");
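
vendor_mac_passthru_addr_read() above expects \_SB.AMAC to hand back a 0x17-byte ACPI buffer holding "_AUXMAC_#AABBCCDDEEFF#" plus its terminator: a 9-byte prefix, 12 hex digits, and a closing '#' at offset 0x15. A userspace re-creation of the same validation and hex conversion, with a made-up address; hexval() stands in for the kernel's hex2bin():

#include <stdio.h>
#include <string.h>

static int hexval(char c)
{
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'A' && c <= 'F') return c - 'A' + 10;
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    return -1;
}

/* parse "_AUXMAC_#AABBCCDDEEFF#" (22 chars + NUL = 0x17 bytes) */
static int parse_auxmac(const char *s, size_t len, unsigned char mac[6])
{
    int i, hi, lo;

    if (len != 0x17 || strncmp(s, "_AUXMAC_#", 9) != 0 || s[0x15] != '#')
        return -1;
    for (i = 0; i < 6; i++) {
        hi = hexval(s[9 + 2 * i]);
        lo = hexval(s[10 + 2 * i]);
        if (hi < 0 || lo < 0)
            return -1;
        mac[i] = (unsigned char)(hi << 4 | lo);
    }
    return 0;
}

int main(void)
{
    static const char buf[] = "_AUXMAC_#00249B1A2B3C#"; /* made-up value */
    unsigned char mac[6];

    if (parse_auxmac(buf, sizeof(buf), mac) == 0)
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    return 0;
}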
@@ -2296,10 +2368,6 @@ static u32 __rtl_get_wol(struct r8152 *tp)
2296 u32 ocp_data; 2368 u32 ocp_data;
2297 u32 wolopts = 0; 2369 u32 wolopts = 0;
2298 2370
2299 ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5);
2300 if (!(ocp_data & LAN_WAKE_EN))
2301 return 0;
2302
2303 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); 2371 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
2304 if (ocp_data & LINK_ON_WAKE_EN) 2372 if (ocp_data & LINK_ON_WAKE_EN)
2305 wolopts |= WAKE_PHY; 2373 wolopts |= WAKE_PHY;
@@ -2332,15 +2400,13 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
2332 ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); 2400 ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
2333 2401
2334 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5); 2402 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
2335 ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN | LAN_WAKE_EN); 2403 ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN);
2336 if (wolopts & WAKE_UCAST) 2404 if (wolopts & WAKE_UCAST)
2337 ocp_data |= UWF_EN; 2405 ocp_data |= UWF_EN;
2338 if (wolopts & WAKE_BCAST) 2406 if (wolopts & WAKE_BCAST)
2339 ocp_data |= BWF_EN; 2407 ocp_data |= BWF_EN;
2340 if (wolopts & WAKE_MCAST) 2408 if (wolopts & WAKE_MCAST)
2341 ocp_data |= MWF_EN; 2409 ocp_data |= MWF_EN;
2342 if (wolopts & WAKE_ANY)
2343 ocp_data |= LAN_WAKE_EN;
2344 ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data); 2410 ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
2345 2411
2346 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); 2412 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
@@ -4359,3 +4425,4 @@ module_usb_driver(rtl8152_driver);
4359MODULE_AUTHOR(DRIVER_AUTHOR); 4425MODULE_AUTHOR(DRIVER_AUTHOR);
4360MODULE_DESCRIPTION(DRIVER_DESC); 4426MODULE_DESCRIPTION(DRIVER_DESC);
4361MODULE_LICENSE("GPL"); 4427MODULE_LICENSE("GPL");
4428MODULE_VERSION(DRIVER_VERSION);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 6fc31ef1da2d..8f74f3d61894 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -467,7 +467,11 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
467} 467}
468#endif /* CONFIG_DEBUG_SET_MODULE_RONX */ 468#endif /* CONFIG_DEBUG_SET_MODULE_RONX */
469 469
470int sk_filter(struct sock *sk, struct sk_buff *skb); 470int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
471static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
472{
473 return sk_filter_trim_cap(sk, skb, 1);
474}
471 475
472struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); 476struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
473void bpf_prog_free(struct bpf_prog *fp); 477void bpf_prog_free(struct bpf_prog *fp);
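
sk_filter() becomes a thin wrapper passing cap = 1, preserving the old "trim to whatever nonzero length the program returned" behavior, while protocols that must keep their headers intact pass a larger floor: the dccp hunks later in this patch use dh->dccph_doff * 4, and udp re-checks the header length after filtering. The core then trims to max(cap, pkt_len). A tiny standalone model of that clamp:

#include <assert.h>

/* model of the trim decision in sk_filter_trim_cap():
 * pkt_len == 0 means drop; otherwise trim to max(cap, pkt_len),
 * so the packet can never be cut below `cap` bytes. */
static int trim_len(unsigned int pkt_len, unsigned int cap)
{
    if (pkt_len == 0)
        return -1;                       /* -EPERM: toss the packet */
    return pkt_len > cap ? (int)pkt_len : (int)cap;
}

int main(void)
{
    assert(trim_len(0, 1) == -1);     /* filter said drop */
    assert(trim_len(1, 16) == 16);    /* can't trim below a 16-byte header */
    assert(trim_len(1500, 16) == 1500);
    return 0;
}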
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f45929ce8157..da4b33bea982 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4145,6 +4145,13 @@ static inline void netif_keep_dst(struct net_device *dev)
4145 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); 4145 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
4146} 4146}
4147 4147
4148/* return true if dev can't cope with MTU-sized frames that need vlan tag insertion */
4149static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
4150{
4151 /* TODO: reserve and use an additional IFF bit, if we get more users */
4152 return dev->priv_flags & IFF_MACSEC;
4153}
4154
4148extern struct pernet_operations __net_initdata loopback_net_ops; 4155extern struct pernet_operations __net_initdata loopback_net_ops;
4149 4156
4150/* Logging, debugging and troubleshooting/diagnostic helpers. */ 4157/* Logging, debugging and troubleshooting/diagnostic helpers. */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index dd78bea227c8..b6083c34ef0d 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -284,6 +284,14 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
284 return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK; 284 return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
285} 285}
286 286
287/* jiffies until ct expires, 0 if already expired */
288static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
289{
290 long timeout = (long)ct->timeout.expires - (long)jiffies;
291
292 return timeout > 0 ? timeout : 0;
293}
294
287struct kernel_param; 295struct kernel_param;
288 296
289int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp); 297int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
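
nf_ct_expires() clamps the jiffies delta at zero so callers never see a negative remaining time, and the signed subtraction makes the comparison correct even across jiffies wraparound. A standalone sketch of the same arithmetic:

#include <assert.h>

typedef unsigned long ulong;

/* same idea as nf_ct_expires(): signed difference, so a timer that
 * already fired (expires <= now) reports 0, even across wraparound */
static ulong remaining(ulong expires, ulong now)
{
    long timeout = (long)expires - (long)now;

    return timeout > 0 ? (ulong)timeout : 0;
}

int main(void)
{
    assert(remaining(1100, 1000) == 100);    /* 100 ticks left */
    assert(remaining(900, 1000) == 0);       /* already expired */
    assert(remaining(10, (ulong)-10) == 20); /* wrapped: still 20 left */
    return 0;
}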
diff --git a/include/net/sock.h b/include/net/sock.h
index 649d2a8c17fc..ff5be7e8ddea 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1576,7 +1576,13 @@ static inline void sock_put(struct sock *sk)
1576 */ 1576 */
1577void sock_gen_put(struct sock *sk); 1577void sock_gen_put(struct sock *sk);
1578 1578
1579int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested); 1579int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
1580 unsigned int trim_cap);
1581static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
1582 const int nested)
1583{
1584 return __sk_receive_skb(sk, skb, nested, 1);
1585}
1580 1586
1581static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) 1587static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
1582{ 1588{
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 985619a59323..1d8e158241da 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -60,7 +60,7 @@ struct switchdev_attr {
60 struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */ 60 struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
61 u8 stp_state; /* PORT_STP_STATE */ 61 u8 stp_state; /* PORT_STP_STATE */
62 unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */ 62 unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */
63 u32 ageing_time; /* BRIDGE_AGEING_TIME */ 63 clock_t ageing_time; /* BRIDGE_AGEING_TIME */
64 bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */ 64 bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */
65 } u; 65 } u;
66}; 66};
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 86ae75b77390..516b0e73263c 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -146,10 +146,12 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
146 146
147static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) 147static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
148{ 148{
149 /* TODO: gotta make sure the underlying layer can handle it, 149 struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
150 * maybe an IFF_VLAN_CAPABLE flag for devices? 150 unsigned int max_mtu = real_dev->mtu;
151 */ 151
152 if (vlan_dev_priv(dev)->real_dev->mtu < new_mtu) 152 if (netif_reduces_vlan_mtu(real_dev))
153 max_mtu -= VLAN_HLEN;
154 if (max_mtu < new_mtu)
153 return -ERANGE; 155 return -ERANGE;
154 156
155 dev->mtu = new_mtu; 157 dev->mtu = new_mtu;
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index c92b52f37d38..1270207f3d7c 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -118,6 +118,7 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
118{ 118{
119 struct vlan_dev_priv *vlan = vlan_dev_priv(dev); 119 struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
120 struct net_device *real_dev; 120 struct net_device *real_dev;
121 unsigned int max_mtu;
121 __be16 proto; 122 __be16 proto;
122 int err; 123 int err;
123 124
@@ -144,9 +145,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
144 if (err < 0) 145 if (err < 0)
145 return err; 146 return err;
146 147
148 max_mtu = netif_reduces_vlan_mtu(real_dev) ? real_dev->mtu - VLAN_HLEN :
149 real_dev->mtu;
147 if (!tb[IFLA_MTU]) 150 if (!tb[IFLA_MTU])
148 dev->mtu = real_dev->mtu; 151 dev->mtu = max_mtu;
149 else if (dev->mtu > real_dev->mtu) 152 else if (dev->mtu > max_mtu)
150 return -EINVAL; 153 return -EINVAL;
151 154
152 err = vlan_changelink(dev, tb, data); 155 err = vlan_changelink(dev, tb, data);
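
Both 8021q call sites now derive the vlan's ceiling from the lower device: if the real device sets IFF_MACSEC, its MTU cannot absorb the 4-byte 802.1Q tag, so the vlan MTU is capped at real_dev->mtu - VLAN_HLEN. A standalone sketch of the check, with a bool field standing in for netif_reduces_vlan_mtu():

#include <assert.h>
#include <stdbool.h>

#define VLAN_HLEN 4

struct net_device { unsigned int mtu; bool reduces_vlan_mtu; };

/* mirrors the vlan_dev_change_mtu() logic added above */
static unsigned int vlan_max_mtu(const struct net_device *real_dev)
{
    unsigned int max_mtu = real_dev->mtu;

    if (real_dev->reduces_vlan_mtu)     /* e.g. macsec */
        max_mtu -= VLAN_HLEN;
    return max_mtu;
}

int main(void)
{
    struct net_device eth    = { .mtu = 1500, .reduces_vlan_mtu = false };
    struct net_device macsec = { .mtu = 1500, .reduces_vlan_mtu = true };

    assert(vlan_max_mtu(&eth) == 1500);
    assert(vlan_max_mtu(&macsec) == 1496); /* room kept for the tag */
    return 0;
}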
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 748a9ead7ce5..825a5cdf4382 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -177,10 +177,21 @@ static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
177static void batadv_claim_release(struct kref *ref) 177static void batadv_claim_release(struct kref *ref)
178{ 178{
179 struct batadv_bla_claim *claim; 179 struct batadv_bla_claim *claim;
180 struct batadv_bla_backbone_gw *old_backbone_gw;
180 181
181 claim = container_of(ref, struct batadv_bla_claim, refcount); 182 claim = container_of(ref, struct batadv_bla_claim, refcount);
182 183
183 batadv_backbone_gw_put(claim->backbone_gw); 184 spin_lock_bh(&claim->backbone_lock);
185 old_backbone_gw = claim->backbone_gw;
186 claim->backbone_gw = NULL;
187 spin_unlock_bh(&claim->backbone_lock);
188
189 spin_lock_bh(&old_backbone_gw->crc_lock);
190 old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
191 spin_unlock_bh(&old_backbone_gw->crc_lock);
192
193 batadv_backbone_gw_put(old_backbone_gw);
194
184 kfree_rcu(claim, rcu); 195 kfree_rcu(claim, rcu);
185} 196}
186 197
@@ -418,9 +429,12 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
418 break; 429 break;
419 } 430 }
420 431
421 if (vid & BATADV_VLAN_HAS_TAG) 432 if (vid & BATADV_VLAN_HAS_TAG) {
422 skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), 433 skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
423 vid & VLAN_VID_MASK); 434 vid & VLAN_VID_MASK);
435 if (!skb)
436 goto out;
437 }
424 438
425 skb_reset_mac_header(skb); 439 skb_reset_mac_header(skb);
426 skb->protocol = eth_type_trans(skb, soft_iface); 440 skb->protocol = eth_type_trans(skb, soft_iface);
@@ -674,8 +688,10 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
674 const u8 *mac, const unsigned short vid, 688 const u8 *mac, const unsigned short vid,
675 struct batadv_bla_backbone_gw *backbone_gw) 689 struct batadv_bla_backbone_gw *backbone_gw)
676{ 690{
691 struct batadv_bla_backbone_gw *old_backbone_gw;
677 struct batadv_bla_claim *claim; 692 struct batadv_bla_claim *claim;
678 struct batadv_bla_claim search_claim; 693 struct batadv_bla_claim search_claim;
694 bool remove_crc = false;
679 int hash_added; 695 int hash_added;
680 696
681 ether_addr_copy(search_claim.addr, mac); 697 ether_addr_copy(search_claim.addr, mac);
@@ -689,8 +705,10 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
689 return; 705 return;
690 706
691 ether_addr_copy(claim->addr, mac); 707 ether_addr_copy(claim->addr, mac);
708 spin_lock_init(&claim->backbone_lock);
692 claim->vid = vid; 709 claim->vid = vid;
693 claim->lasttime = jiffies; 710 claim->lasttime = jiffies;
711 kref_get(&backbone_gw->refcount);
694 claim->backbone_gw = backbone_gw; 712 claim->backbone_gw = backbone_gw;
695 713
696 kref_init(&claim->refcount); 714 kref_init(&claim->refcount);
@@ -718,15 +736,26 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
718 "bla_add_claim(): changing ownership for %pM, vid %d\n", 736 "bla_add_claim(): changing ownership for %pM, vid %d\n",
719 mac, BATADV_PRINT_VID(vid)); 737 mac, BATADV_PRINT_VID(vid));
720 738
721 spin_lock_bh(&claim->backbone_gw->crc_lock); 739 remove_crc = true;
722 claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
723 spin_unlock_bh(&claim->backbone_gw->crc_lock);
724 batadv_backbone_gw_put(claim->backbone_gw);
725 } 740 }
726 /* set (new) backbone gw */ 741
742 /* replace backbone_gw atomically and adjust reference counters */
743 spin_lock_bh(&claim->backbone_lock);
744 old_backbone_gw = claim->backbone_gw;
727 kref_get(&backbone_gw->refcount); 745 kref_get(&backbone_gw->refcount);
728 claim->backbone_gw = backbone_gw; 746 claim->backbone_gw = backbone_gw;
747 spin_unlock_bh(&claim->backbone_lock);
729 748
749 if (remove_crc) {
750 /* remove claim address from old backbone_gw */
751 spin_lock_bh(&old_backbone_gw->crc_lock);
752 old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
753 spin_unlock_bh(&old_backbone_gw->crc_lock);
754 }
755
756 batadv_backbone_gw_put(old_backbone_gw);
757
758 /* add claim address to new backbone_gw */
730 spin_lock_bh(&backbone_gw->crc_lock); 759 spin_lock_bh(&backbone_gw->crc_lock);
731 backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); 760 backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
732 spin_unlock_bh(&backbone_gw->crc_lock); 761 spin_unlock_bh(&backbone_gw->crc_lock);
@@ -737,6 +766,26 @@ claim_free_ref:
737} 766}
738 767
739/** 768/**
769 * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
770 * claim
771 * @claim: claim whose backbone_gw should be returned
772 *
773 * Return: valid reference to claim::backbone_gw
774 */
775static struct batadv_bla_backbone_gw *
776batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
777{
778 struct batadv_bla_backbone_gw *backbone_gw;
779
780 spin_lock_bh(&claim->backbone_lock);
781 backbone_gw = claim->backbone_gw;
782 kref_get(&backbone_gw->refcount);
783 spin_unlock_bh(&claim->backbone_lock);
784
785 return backbone_gw;
786}
787
788/**
740 * batadv_bla_del_claim - delete a claim from the claim hash 789 * batadv_bla_del_claim - delete a claim from the claim hash
741 * @bat_priv: the bat priv with all the soft interface information 790 * @bat_priv: the bat priv with all the soft interface information
742 * @mac: mac address of the claim to be removed 791 * @mac: mac address of the claim to be removed
@@ -760,10 +809,6 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
760 batadv_choose_claim, claim); 809 batadv_choose_claim, claim);
761 batadv_claim_put(claim); /* reference from the hash is gone */ 810 batadv_claim_put(claim); /* reference from the hash is gone */
762 811
763 spin_lock_bh(&claim->backbone_gw->crc_lock);
764 claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
765 spin_unlock_bh(&claim->backbone_gw->crc_lock);
766
767 /* don't need the reference from hash_find() anymore */ 812 /* don't need the reference from hash_find() anymore */
768 batadv_claim_put(claim); 813 batadv_claim_put(claim);
769} 814}
@@ -1216,6 +1261,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
1216 struct batadv_hard_iface *primary_if, 1261 struct batadv_hard_iface *primary_if,
1217 int now) 1262 int now)
1218{ 1263{
1264 struct batadv_bla_backbone_gw *backbone_gw;
1219 struct batadv_bla_claim *claim; 1265 struct batadv_bla_claim *claim;
1220 struct hlist_head *head; 1266 struct hlist_head *head;
1221 struct batadv_hashtable *hash; 1267 struct batadv_hashtable *hash;
@@ -1230,14 +1276,17 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
1230 1276
1231 rcu_read_lock(); 1277 rcu_read_lock();
1232 hlist_for_each_entry_rcu(claim, head, hash_entry) { 1278 hlist_for_each_entry_rcu(claim, head, hash_entry) {
1279 backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
1233 if (now) 1280 if (now)
1234 goto purge_now; 1281 goto purge_now;
1235 if (!batadv_compare_eth(claim->backbone_gw->orig, 1282
1283 if (!batadv_compare_eth(backbone_gw->orig,
1236 primary_if->net_dev->dev_addr)) 1284 primary_if->net_dev->dev_addr))
1237 continue; 1285 goto skip;
1286
1238 if (!batadv_has_timed_out(claim->lasttime, 1287 if (!batadv_has_timed_out(claim->lasttime,
1239 BATADV_BLA_CLAIM_TIMEOUT)) 1288 BATADV_BLA_CLAIM_TIMEOUT))
1240 continue; 1289 goto skip;
1241 1290
1242 batadv_dbg(BATADV_DBG_BLA, bat_priv, 1291 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1243 "bla_purge_claims(): %pM, vid %d, time out\n", 1292 "bla_purge_claims(): %pM, vid %d, time out\n",
@@ -1245,8 +1294,10 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
1245 1294
1246purge_now: 1295purge_now:
1247 batadv_handle_unclaim(bat_priv, primary_if, 1296 batadv_handle_unclaim(bat_priv, primary_if,
1248 claim->backbone_gw->orig, 1297 backbone_gw->orig,
1249 claim->addr, claim->vid); 1298 claim->addr, claim->vid);
1299skip:
1300 batadv_backbone_gw_put(backbone_gw);
1250 } 1301 }
1251 rcu_read_unlock(); 1302 rcu_read_unlock();
1252 } 1303 }
@@ -1757,9 +1808,11 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
1757bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, 1808bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
1758 unsigned short vid, bool is_bcast) 1809 unsigned short vid, bool is_bcast)
1759{ 1810{
1811 struct batadv_bla_backbone_gw *backbone_gw;
1760 struct ethhdr *ethhdr; 1812 struct ethhdr *ethhdr;
1761 struct batadv_bla_claim search_claim, *claim = NULL; 1813 struct batadv_bla_claim search_claim, *claim = NULL;
1762 struct batadv_hard_iface *primary_if; 1814 struct batadv_hard_iface *primary_if;
1815 bool own_claim;
1763 bool ret; 1816 bool ret;
1764 1817
1765 ethhdr = eth_hdr(skb); 1818 ethhdr = eth_hdr(skb);
@@ -1794,8 +1847,12 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
1794 } 1847 }
1795 1848
1796 /* if it is our own claim ... */ 1849 /* if it is our own claim ... */
1797 if (batadv_compare_eth(claim->backbone_gw->orig, 1850 backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
1798 primary_if->net_dev->dev_addr)) { 1851 own_claim = batadv_compare_eth(backbone_gw->orig,
1852 primary_if->net_dev->dev_addr);
1853 batadv_backbone_gw_put(backbone_gw);
1854
1855 if (own_claim) {
1799 /* ... allow it in any case */ 1856 /* ... allow it in any case */
1800 claim->lasttime = jiffies; 1857 claim->lasttime = jiffies;
1801 goto allow; 1858 goto allow;
@@ -1859,7 +1916,9 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
1859{ 1916{
1860 struct ethhdr *ethhdr; 1917 struct ethhdr *ethhdr;
1861 struct batadv_bla_claim search_claim, *claim = NULL; 1918 struct batadv_bla_claim search_claim, *claim = NULL;
1919 struct batadv_bla_backbone_gw *backbone_gw;
1862 struct batadv_hard_iface *primary_if; 1920 struct batadv_hard_iface *primary_if;
1921 bool client_roamed;
1863 bool ret = false; 1922 bool ret = false;
1864 1923
1865 primary_if = batadv_primary_if_get_selected(bat_priv); 1924 primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -1889,8 +1948,12 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
1889 goto allow; 1948 goto allow;
1890 1949
1891 /* check if we are responsible. */ 1950 /* check if we are responsible. */
1892 if (batadv_compare_eth(claim->backbone_gw->orig, 1951 backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
1893 primary_if->net_dev->dev_addr)) { 1952 client_roamed = batadv_compare_eth(backbone_gw->orig,
1953 primary_if->net_dev->dev_addr);
1954 batadv_backbone_gw_put(backbone_gw);
1955
1956 if (client_roamed) {
1894 /* if yes, the client has roamed and we have 1957 /* if yes, the client has roamed and we have
1895 * to unclaim it. 1958 * to unclaim it.
1896 */ 1959 */
@@ -1938,6 +2001,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1938 struct net_device *net_dev = (struct net_device *)seq->private; 2001 struct net_device *net_dev = (struct net_device *)seq->private;
1939 struct batadv_priv *bat_priv = netdev_priv(net_dev); 2002 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1940 struct batadv_hashtable *hash = bat_priv->bla.claim_hash; 2003 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
2004 struct batadv_bla_backbone_gw *backbone_gw;
1941 struct batadv_bla_claim *claim; 2005 struct batadv_bla_claim *claim;
1942 struct batadv_hard_iface *primary_if; 2006 struct batadv_hard_iface *primary_if;
1943 struct hlist_head *head; 2007 struct hlist_head *head;
@@ -1962,17 +2026,21 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1962 2026
1963 rcu_read_lock(); 2027 rcu_read_lock();
1964 hlist_for_each_entry_rcu(claim, head, hash_entry) { 2028 hlist_for_each_entry_rcu(claim, head, hash_entry) {
1965 is_own = batadv_compare_eth(claim->backbone_gw->orig, 2029 backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
2030
2031 is_own = batadv_compare_eth(backbone_gw->orig,
1966 primary_addr); 2032 primary_addr);
1967 2033
1968 spin_lock_bh(&claim->backbone_gw->crc_lock); 2034 spin_lock_bh(&backbone_gw->crc_lock);
1969 backbone_crc = claim->backbone_gw->crc; 2035 backbone_crc = backbone_gw->crc;
1970 spin_unlock_bh(&claim->backbone_gw->crc_lock); 2036 spin_unlock_bh(&backbone_gw->crc_lock);
1971 seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n", 2037 seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
1972 claim->addr, BATADV_PRINT_VID(claim->vid), 2038 claim->addr, BATADV_PRINT_VID(claim->vid),
1973 claim->backbone_gw->orig, 2039 backbone_gw->orig,
1974 (is_own ? 'x' : ' '), 2040 (is_own ? 'x' : ' '),
1975 backbone_crc); 2041 backbone_crc);
2042
2043 batadv_backbone_gw_put(backbone_gw);
1976 } 2044 }
1977 rcu_read_unlock(); 2045 rcu_read_unlock();
1978 } 2046 }
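
The helper introduced here, batadv_bla_claim_get_backbone_gw(), is the standard way to stabilize a mutable pointer field: take the spinlock that protects the field, kref_get() the object it points to, drop the lock, and work with the private reference; writers swap the pointer under the same lock and release the old reference only afterwards. A simplified standalone sketch with a pthread mutex standing in for the backbone_lock and plain atomics for the kref:

#include <pthread.h>
#include <stdlib.h>

struct gw { int refcnt; };                /* refcnt plays the kref role */
struct claim {
    pthread_mutex_t lock;                 /* plays the backbone_lock role */
    struct gw *backbone_gw;
};

static void gw_get(struct gw *gw)
{
    __atomic_add_fetch(&gw->refcnt, 1, __ATOMIC_RELAXED);
}

static void gw_put(struct gw *gw)
{
    if (__atomic_sub_fetch(&gw->refcnt, 1, __ATOMIC_ACQ_REL) == 0)
        free(gw);
}

/* reader: never dereferences claim->backbone_gw outside the lock */
struct gw *claim_get_gw(struct claim *c)
{
    struct gw *gw;

    pthread_mutex_lock(&c->lock);
    gw = c->backbone_gw;
    gw_get(gw);                           /* own reference survives unlock */
    pthread_mutex_unlock(&c->lock);
    return gw;
}

/* writer: publish the new pointer first, drop the old ref last */
void claim_set_gw(struct claim *c, struct gw *new_gw)
{
    struct gw *old;

    gw_get(new_gw);
    pthread_mutex_lock(&c->lock);
    old = c->backbone_gw;
    c->backbone_gw = new_gw;
    pthread_mutex_unlock(&c->lock);
    gw_put(old);                          /* may free, so outside the lock */
}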
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 278800a99c69..aee3b3991471 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1009,9 +1009,12 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
1009 if (!skb_new) 1009 if (!skb_new)
1010 goto out; 1010 goto out;
1011 1011
1012 if (vid & BATADV_VLAN_HAS_TAG) 1012 if (vid & BATADV_VLAN_HAS_TAG) {
1013 skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q), 1013 skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
1014 vid & VLAN_VID_MASK); 1014 vid & VLAN_VID_MASK);
1015 if (!skb_new)
1016 goto out;
1017 }
1015 1018
1016 skb_reset_mac_header(skb_new); 1019 skb_reset_mac_header(skb_new);
1017 skb_new->protocol = eth_type_trans(skb_new, 1020 skb_new->protocol = eth_type_trans(skb_new,
@@ -1089,9 +1092,12 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
1089 */ 1092 */
1090 skb_reset_mac_header(skb_new); 1093 skb_reset_mac_header(skb_new);
1091 1094
1092 if (vid & BATADV_VLAN_HAS_TAG) 1095 if (vid & BATADV_VLAN_HAS_TAG) {
1093 skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q), 1096 skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
1094 vid & VLAN_VID_MASK); 1097 vid & VLAN_VID_MASK);
1098 if (!skb_new)
1099 goto out;
1100 }
1095 1101
1096 /* To preserve backwards compatibility, the node has to choose the outgoing 1102 /* To preserve backwards compatibility, the node has to choose the outgoing
1097 * format based on the incoming request packet type. The assumption is 1103 * format based on the incoming request packet type. The assumption is
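
Both batman-adv hunks add the same missing check: vlan_insert_tag() consumes the skb on failure, so by the time NULL comes back the buffer is already freed; touching the old pointer would be a use-after-free and a later kfree_skb() a double free. The only valid recovery is to take the returned pointer and bail. A toy model of that consume-on-failure contract (insert_tag() is an invented stand-in):

#include <stdlib.h>

struct skb { int len; };

/* models vlan_insert_tag(): on failure the skb is freed and NULL is
 * returned, so the caller's old pointer is dead either way */
static struct skb *insert_tag(struct skb *skb, int fail)
{
    if (fail) {
        free(skb);
        return NULL;
    }
    skb->len += 4;           /* the 802.1Q tag */
    return skb;
}

void xmit(struct skb *skb, int tagged, int fail)
{
    if (tagged) {
        skb = insert_tag(skb, fail);
        if (!skb)
            return;          /* already freed inside insert_tag() */
    }
    free(skb);               /* normal consume/hand-off path */
}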
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 7f51bc2c06eb..ab8c4f9738fe 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -765,6 +765,8 @@ static void batadv_orig_node_release(struct kref *ref)
765 struct batadv_neigh_node *neigh_node; 765 struct batadv_neigh_node *neigh_node;
766 struct batadv_orig_node *orig_node; 766 struct batadv_orig_node *orig_node;
767 struct batadv_orig_ifinfo *orig_ifinfo; 767 struct batadv_orig_ifinfo *orig_ifinfo;
768 struct batadv_orig_node_vlan *vlan;
769 struct batadv_orig_ifinfo *last_candidate;
768 770
769 orig_node = container_of(ref, struct batadv_orig_node, refcount); 771 orig_node = container_of(ref, struct batadv_orig_node, refcount);
770 772
@@ -782,8 +784,21 @@ static void batadv_orig_node_release(struct kref *ref)
782 hlist_del_rcu(&orig_ifinfo->list); 784 hlist_del_rcu(&orig_ifinfo->list);
783 batadv_orig_ifinfo_put(orig_ifinfo); 785 batadv_orig_ifinfo_put(orig_ifinfo);
784 } 786 }
787
788 last_candidate = orig_node->last_bonding_candidate;
789 orig_node->last_bonding_candidate = NULL;
785 spin_unlock_bh(&orig_node->neigh_list_lock); 790 spin_unlock_bh(&orig_node->neigh_list_lock);
786 791
792 if (last_candidate)
793 batadv_orig_ifinfo_put(last_candidate);
794
795 spin_lock_bh(&orig_node->vlan_list_lock);
796 hlist_for_each_entry_safe(vlan, node_tmp, &orig_node->vlan_list, list) {
797 hlist_del_rcu(&vlan->list);
798 batadv_orig_node_vlan_put(vlan);
799 }
800 spin_unlock_bh(&orig_node->vlan_list_lock);
801
787 /* Free nc_nodes */ 802 /* Free nc_nodes */
788 batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL); 803 batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
789 804
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 6c2901a86230..bfac086b4d01 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -456,6 +456,29 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
456} 456}
457 457
458/** 458/**
459 * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node
460 * @orig_node: originator node whose bonding candidates should be replaced
461 * @new_candidate: new bonding candidate or NULL
462 */
463static void
464batadv_last_bonding_replace(struct batadv_orig_node *orig_node,
465 struct batadv_orig_ifinfo *new_candidate)
466{
467 struct batadv_orig_ifinfo *old_candidate;
468
469 spin_lock_bh(&orig_node->neigh_list_lock);
470 old_candidate = orig_node->last_bonding_candidate;
471
472 if (new_candidate)
473 kref_get(&new_candidate->refcount);
474 orig_node->last_bonding_candidate = new_candidate;
475 spin_unlock_bh(&orig_node->neigh_list_lock);
476
477 if (old_candidate)
478 batadv_orig_ifinfo_put(old_candidate);
479}
480
481/**
459 * batadv_find_router - find a suitable router for this originator 482 * batadv_find_router - find a suitable router for this originator
460 * @bat_priv: the bat priv with all the soft interface information 483 * @bat_priv: the bat priv with all the soft interface information
461 * @orig_node: the destination node 484 * @orig_node: the destination node
@@ -562,10 +585,6 @@ next:
562 } 585 }
563 rcu_read_unlock(); 586 rcu_read_unlock();
564 587
565 /* last_bonding_candidate is reset below, remove the old reference. */
566 if (orig_node->last_bonding_candidate)
567 batadv_orig_ifinfo_put(orig_node->last_bonding_candidate);
568
569 /* After finding candidates, handle the three cases: 588 /* After finding candidates, handle the three cases:
570 * 1) there is a next candidate, use that 589 * 1) there is a next candidate, use that
571 * 2) there is no next candidate, use the first of the list 590 * 2) there is no next candidate, use the first of the list
@@ -574,21 +593,28 @@ next:
574 if (next_candidate) { 593 if (next_candidate) {
575 batadv_neigh_node_put(router); 594 batadv_neigh_node_put(router);
576 595
577 /* remove references to first candidate, we don't need it. */ 596 kref_get(&next_candidate_router->refcount);
578 if (first_candidate) {
579 batadv_neigh_node_put(first_candidate_router);
580 batadv_orig_ifinfo_put(first_candidate);
581 }
582 router = next_candidate_router; 597 router = next_candidate_router;
583 orig_node->last_bonding_candidate = next_candidate; 598 batadv_last_bonding_replace(orig_node, next_candidate);
584 } else if (first_candidate) { 599 } else if (first_candidate) {
585 batadv_neigh_node_put(router); 600 batadv_neigh_node_put(router);
586 601
587 /* refcounting has already been done in the loop above. */ 602 kref_get(&first_candidate_router->refcount);
588 router = first_candidate_router; 603 router = first_candidate_router;
589 orig_node->last_bonding_candidate = first_candidate; 604 batadv_last_bonding_replace(orig_node, first_candidate);
590 } else { 605 } else {
591 orig_node->last_bonding_candidate = NULL; 606 batadv_last_bonding_replace(orig_node, NULL);
607 }
608
609 /* cleanup of candidates */
610 if (first_candidate) {
611 batadv_neigh_node_put(first_candidate_router);
612 batadv_orig_ifinfo_put(first_candidate);
613 }
614
615 if (next_candidate) {
616 batadv_neigh_node_put(next_candidate_router);
617 batadv_orig_ifinfo_put(next_candidate);
592 } 618 }
593 619
594 return router; 620 return router;
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index f2f125684ed9..010397650fa5 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -424,8 +424,8 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
424 struct batadv_orig_node *orig_node; 424 struct batadv_orig_node *orig_node;
425 425
426 orig_node = batadv_gw_get_selected_orig(bat_priv); 426 orig_node = batadv_gw_get_selected_orig(bat_priv);
427 return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0, 427 return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
428 orig_node, vid); 428 BATADV_P_DATA, orig_node, vid);
429} 429}
430 430
431void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface) 431void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index ba846b078af8..74d865a4df46 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -330,7 +330,9 @@ struct batadv_orig_node {
330 DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE); 330 DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
331 u32 last_bcast_seqno; 331 u32 last_bcast_seqno;
332 struct hlist_head neigh_list; 332 struct hlist_head neigh_list;
333 /* neigh_list_lock protects: neigh_list and router */ 333 /* neigh_list_lock protects: neigh_list, ifinfo_list,
334 * last_bonding_candidate and router
335 */
334 spinlock_t neigh_list_lock; 336 spinlock_t neigh_list_lock;
335 struct hlist_node hash_entry; 337 struct hlist_node hash_entry;
336 struct batadv_priv *bat_priv; 338 struct batadv_priv *bat_priv;
@@ -1042,6 +1044,7 @@ struct batadv_bla_backbone_gw {
1042 * @addr: mac address of claimed non-mesh client 1044 * @addr: mac address of claimed non-mesh client
1043 * @vid: vlan id this client was detected on 1045 * @vid: vlan id this client was detected on
1044 * @backbone_gw: pointer to backbone gw claiming this client 1046 * @backbone_gw: pointer to backbone gw claiming this client
1047 * @backbone_lock: lock protecting backbone_gw pointer
1045 * @lasttime: last time we heard of claim (locals only) 1048 * @lasttime: last time we heard of claim (locals only)
1046 * @hash_entry: hlist node for batadv_priv_bla::claim_hash 1049 * @hash_entry: hlist node for batadv_priv_bla::claim_hash
1047 * @refcount: number of contexts the object is used 1050 * @refcount: number of contexts the object is used
@@ -1051,6 +1054,7 @@ struct batadv_bla_claim {
1051 u8 addr[ETH_ALEN]; 1054 u8 addr[ETH_ALEN];
1052 unsigned short vid; 1055 unsigned short vid;
1053 struct batadv_bla_backbone_gw *backbone_gw; 1056 struct batadv_bla_backbone_gw *backbone_gw;
1057 spinlock_t backbone_lock; /* protects backbone_gw */
1054 unsigned long lasttime; 1058 unsigned long lasttime;
1055 struct hlist_node hash_entry; 1059 struct hlist_node hash_entry;
1056 struct rcu_head rcu; 1060 struct rcu_head rcu;
diff --git a/net/core/filter.c b/net/core/filter.c
index c4b330c85c02..e759d90e8cef 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -53,9 +53,10 @@
53#include <net/sock_reuseport.h> 53#include <net/sock_reuseport.h>
54 54
55/** 55/**
56 * sk_filter - run a packet through a socket filter 56 * sk_filter_trim_cap - run a packet through a socket filter
57 * @sk: sock associated with &sk_buff 57 * @sk: sock associated with &sk_buff
58 * @skb: buffer to filter 58 * @skb: buffer to filter
59 * @cap: limit on how short the eBPF program may trim the packet
59 * 60 *
60 * Run the eBPF program and then cut skb->data to the correct size returned by 61 * Run the eBPF program and then cut skb->data to the correct size returned by
61 * the program. If pkt_len is 0 we toss the packet. If skb->len is smaller 62 * the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
@@ -64,7 +65,7 @@
64 * be accepted or -EPERM if the packet should be tossed. 65 * be accepted or -EPERM if the packet should be tossed.
65 * 66 *
66 */ 67 */
67int sk_filter(struct sock *sk, struct sk_buff *skb) 68int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
68{ 69{
69 int err; 70 int err;
70 struct sk_filter *filter; 71 struct sk_filter *filter;
@@ -85,14 +86,13 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
85 filter = rcu_dereference(sk->sk_filter); 86 filter = rcu_dereference(sk->sk_filter);
86 if (filter) { 87 if (filter) {
87 unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb); 88 unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
88 89 err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
89 err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
90 } 90 }
91 rcu_read_unlock(); 91 rcu_read_unlock();
92 92
93 return err; 93 return err;
94} 94}
95EXPORT_SYMBOL(sk_filter); 95EXPORT_SYMBOL(sk_filter_trim_cap);
96 96
97static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) 97static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
98{ 98{
diff --git a/net/core/sock.c b/net/core/sock.c
index 08bf97eceeb3..25dab8b60223 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -452,11 +452,12 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
452} 452}
453EXPORT_SYMBOL(sock_queue_rcv_skb); 453EXPORT_SYMBOL(sock_queue_rcv_skb);
454 454
455int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) 455int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
456 const int nested, unsigned int trim_cap)
456{ 457{
457 int rc = NET_RX_SUCCESS; 458 int rc = NET_RX_SUCCESS;
458 459
459 if (sk_filter(sk, skb)) 460 if (sk_filter_trim_cap(sk, skb, trim_cap))
460 goto discard_and_relse; 461 goto discard_and_relse;
461 462
462 skb->dev = NULL; 463 skb->dev = NULL;
@@ -492,7 +493,7 @@ discard_and_relse:
492 kfree_skb(skb); 493 kfree_skb(skb);
493 goto out; 494 goto out;
494} 495}
495EXPORT_SYMBOL(sk_receive_skb); 496EXPORT_SYMBOL(__sk_receive_skb);
496 497
497struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) 498struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
498{ 499{
@@ -1938,6 +1939,10 @@ int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
1938 sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK; 1939 sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
1939 sockc->tsflags |= tsflags; 1940 sockc->tsflags |= tsflags;
1940 break; 1941 break;
1942 /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
1943 case SCM_RIGHTS:
1944 case SCM_CREDENTIALS:
1945 break;
1941 default: 1946 default:
1942 return -EINVAL; 1947 return -EINVAL;
1943 } 1948 }
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 5c7e413a3ae4..345a3aeb8c7e 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -462,7 +462,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
462 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); 462 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
463 rt = ip_route_output_flow(net, &fl4, sk); 463 rt = ip_route_output_flow(net, &fl4, sk);
464 if (IS_ERR(rt)) { 464 if (IS_ERR(rt)) {
465 __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); 465 IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
466 return NULL; 466 return NULL;
467 } 467 }
468 468
@@ -527,17 +527,19 @@ static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
527 rxiph->daddr); 527 rxiph->daddr);
528 skb_dst_set(skb, dst_clone(dst)); 528 skb_dst_set(skb, dst_clone(dst));
529 529
530 local_bh_disable();
530 bh_lock_sock(ctl_sk); 531 bh_lock_sock(ctl_sk);
531 err = ip_build_and_send_pkt(skb, ctl_sk, 532 err = ip_build_and_send_pkt(skb, ctl_sk,
532 rxiph->daddr, rxiph->saddr, NULL); 533 rxiph->daddr, rxiph->saddr, NULL);
533 bh_unlock_sock(ctl_sk); 534 bh_unlock_sock(ctl_sk);
534 535
535 if (net_xmit_eval(err) == 0) { 536 if (net_xmit_eval(err) == 0) {
536 DCCP_INC_STATS(DCCP_MIB_OUTSEGS); 537 __DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
537 DCCP_INC_STATS(DCCP_MIB_OUTRSTS); 538 __DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
538 } 539 }
540 local_bh_enable();
539out: 541out:
540 dst_release(dst); 542 dst_release(dst);
541} 543}
542 544
543static void dccp_v4_reqsk_destructor(struct request_sock *req) 545static void dccp_v4_reqsk_destructor(struct request_sock *req)
@@ -866,7 +868,7 @@ lookup:
866 goto discard_and_relse; 868 goto discard_and_relse;
867 nf_reset(skb); 869 nf_reset(skb);
868 870
869 return sk_receive_skb(sk, skb, 1); 871 return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4);
870 872
871no_dccp_socket: 873no_dccp_socket:
872 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 874 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index d176f4e66369..3ff137d9471d 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -732,7 +732,7 @@ lookup:
732 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 732 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
733 goto discard_and_relse; 733 goto discard_and_relse;
734 734
735 return sk_receive_skb(sk, skb, 1) ? -1 : 0; 735 return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0;
736 736
737no_dccp_socket: 737no_dccp_socket:
738 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 738 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index d09173bf9500..539fa264e67d 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -479,6 +479,9 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
479 if (!rtnh_ok(rtnh, remaining)) 479 if (!rtnh_ok(rtnh, remaining))
480 return -EINVAL; 480 return -EINVAL;
481 481
482 if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
483 return -EINVAL;
484
482 nexthop_nh->nh_flags = 485 nexthop_nh->nh_flags =
483 (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags; 486 (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
484 nexthop_nh->nh_oif = rtnh->rtnh_ifindex; 487 nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
@@ -1003,6 +1006,9 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
1003 if (fib_props[cfg->fc_type].scope > cfg->fc_scope) 1006 if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
1004 goto err_inval; 1007 goto err_inval;
1005 1008
1009 if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
1010 goto err_inval;
1011
1006#ifdef CONFIG_IP_ROUTE_MULTIPATH 1012#ifdef CONFIG_IP_ROUTE_MULTIPATH
1007 if (cfg->fc_mp) { 1013 if (cfg->fc_mp) {
1008 nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len); 1014 nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d6c8f4cd0800..42bf89aaf6a5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -87,7 +87,7 @@ int sysctl_tcp_adv_win_scale __read_mostly = 1;
87EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); 87EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
88 88
89/* rfc5961 challenge ack rate limiting */ 89/* rfc5961 challenge ack rate limiting */
90int sysctl_tcp_challenge_ack_limit = 100; 90int sysctl_tcp_challenge_ack_limit = 1000;
91 91
92int sysctl_tcp_stdurg __read_mostly; 92int sysctl_tcp_stdurg __read_mostly;
93int sysctl_tcp_rfc1337 __read_mostly; 93int sysctl_tcp_rfc1337 __read_mostly;
@@ -3421,6 +3421,23 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
3421 return flag; 3421 return flag;
3422} 3422}
3423 3423
3424static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
3425 u32 *last_oow_ack_time)
3426{
3427 if (*last_oow_ack_time) {
3428 s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
3429
3430 if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
3431 NET_INC_STATS(net, mib_idx);
3432 return true; /* rate-limited: don't send yet! */
3433 }
3434 }
3435
3436 *last_oow_ack_time = tcp_time_stamp;
3437
3438 return false; /* not rate-limited: go ahead, send dupack now! */
3439}
3440
3424/* Return true if we're currently rate-limiting out-of-window ACKs and 3441/* Return true if we're currently rate-limiting out-of-window ACKs and
3425 * thus shouldn't send a dupack right now. We rate-limit dupacks in 3442 * thus shouldn't send a dupack right now. We rate-limit dupacks in
3426 * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS 3443 * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
@@ -3434,21 +3451,9 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
         /* Data packets without SYNs are not likely part of an ACK loop. */
         if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
             !tcp_hdr(skb)->syn)
-                goto not_rate_limited;
-
-        if (*last_oow_ack_time) {
-                s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
-
-                if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
-                        NET_INC_STATS(net, mib_idx);
-                        return true;  /* rate-limited: don't send yet! */
-                }
-        }
-
-        *last_oow_ack_time = tcp_time_stamp;
+                return false;
 
-not_rate_limited:
-        return false;  /* not rate-limited: go ahead, send dupack now! */
+        return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time);
 }
 
 /* RFC 5961 7 [ACK Throttling] */
@@ -3458,21 +3463,26 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
         static u32 challenge_timestamp;
         static unsigned int challenge_count;
         struct tcp_sock *tp = tcp_sk(sk);
-        u32 now;
+        u32 count, now;
 
         /* First check our per-socket dupack rate limit. */
-        if (tcp_oow_rate_limited(sock_net(sk), skb,
-                                 LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
-                                 &tp->last_oow_ack_time))
+        if (__tcp_oow_rate_limited(sock_net(sk),
+                                   LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
+                                   &tp->last_oow_ack_time))
                 return;
 
-        /* Then check the check host-wide RFC 5961 rate limit. */
+        /* Then check host-wide RFC 5961 rate limit. */
         now = jiffies / HZ;
         if (now != challenge_timestamp) {
+                u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
+
                 challenge_timestamp = now;
-                challenge_count = 0;
+                WRITE_ONCE(challenge_count, half +
+                           prandom_u32_max(sysctl_tcp_challenge_ack_limit));
         }
-        if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+        count = READ_ONCE(challenge_count);
+        if (count > 0) {
+                WRITE_ONCE(challenge_count, count - 1);
                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
                 tcp_send_ack(sk);
         }
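
The challenge ACK change above replaces a predictable per-second counter with a randomized token budget, which is what makes the host-wide limit hard to probe from off-path. A minimal userspace sketch of that logic, with rand() % limit standing in for prandom_u32_max() and all names illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static unsigned int limit = 1000;   /* models sysctl_tcp_challenge_ack_limit */
static unsigned int budget;

/* Once per second: draw a fresh budget uniformly from [limit/2, limit/2 + limit). */
static void refill_budget(void)
{
        unsigned int half = (limit + 1) >> 1;

        budget = half + (unsigned int)rand() % limit;
}

/* Spend one token per challenge ACK; refuse once the budget is exhausted. */
static int may_send_challenge_ack(void)
{
        if (budget == 0)
                return 0;
        budget--;
        return 1;
}

int main(void)
{
        int i, sent = 0;

        srand((unsigned int)time(NULL));
        refill_budget();                 /* once per second in the real code */
        for (i = 0; i < 2000; i++)
                sent += may_send_challenge_ack();
        printf("challenge ACKs sent this interval: %d of 2000 requested\n", sent);
        return 0;
}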
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ca5e8ea29538..4aed8fc23d32 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1583,6 +1583,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
         if (sk_filter(sk, skb))
                 goto drop;
+        if (unlikely(skb->len < sizeof(struct udphdr)))
+                goto drop;
 
         udp_csum_pull_header(skb);
         if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 005dc82c2138..acc09705618b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -620,6 +620,8 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
         if (sk_filter(sk, skb))
                 goto drop;
+        if (unlikely(skb->len < sizeof(struct udphdr)))
+                goto drop;
 
         udp_csum_pull_header(skb);
         if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
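
Both UDP paths now re-validate the packet length after sk_filter(), since an attached filter may legally truncate the skb below the 8-byte UDP header. A self-contained sketch of the pattern, with run_filter() as a hypothetical stand-in for the filter:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define UDP_HDR_LEN 8   /* sizeof(struct udphdr) */

/* Hypothetical stand-in for a BPF filter that trims the packet. */
static size_t run_filter(size_t len)
{
        return len < 4 ? len : 4;
}

static bool queue_packet(size_t len)
{
        len = run_filter(len);
        if (len < UDP_HDR_LEN)  /* header no longer intact: drop */
                return false;
        /* udp_csum_pull_header() and queueing would follow here */
        return true;
}

int main(void)
{
        printf("64-byte packet queued after filter: %s\n",
               queue_packet(64) ? "yes" : "no");
        return 0;
}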
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 803001a45aa1..1b07578bedf3 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1545,7 +1545,8 @@ error:
 /*
  *      Set up receiving multicast socket over UDP
  */
-static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id)
+static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
+                                        int ifindex)
 {
         /* multicast addr */
         union ipvs_sockaddr mcast_addr;
@@ -1566,6 +1567,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id)
         set_sock_size(sock->sk, 0, result);
 
         get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
+        sock->sk->sk_bound_dev_if = ifindex;
         result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
         if (result < 0) {
                 pr_err("Error binding to the multicast addr\n");
@@ -1868,7 +1870,7 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
         if (state == IP_VS_STATE_MASTER)
                 sock = make_send_sock(ipvs, id);
         else
-                sock = make_receive_sock(ipvs, id);
+                sock = make_receive_sock(ipvs, id, dev->ifindex);
         if (IS_ERR(sock)) {
                 result = PTR_ERR(sock);
                 goto outtinfo;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index f204274a9b6b..9f530adad10d 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -646,6 +646,7 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
 
         l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
         if (l4proto->allow_clash &&
+            !nfct_nat(ct) &&
             !nf_ct_is_dying(ct) &&
             atomic_inc_not_zero(&ct->ct_general.use)) {
                 nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct);
@@ -1601,8 +1602,15 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
         unsigned int nr_slots, i;
         size_t sz;
 
+        if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+                return NULL;
+
         BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
         nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
+
+        if (nr_slots > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+                return NULL;
+
         sz = nr_slots * sizeof(struct hlist_nulls_head);
         hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
                                         get_order(sz));
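
The conntrack change guards the hash-table sizing against integer overflow: the slot count is checked against UINT_MAX / slot_size both before and after rounding, because the multiplication itself can wrap and silently under-allocate. A userspace sketch of the same guard, assuming a calloc-backed table:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_table(unsigned int nr_slots, size_t slot_size)
{
        /* nr_slots * slot_size would wrap; reject before multiplying.
         * (calloc also checks internally; the explicit test mirrors the
         * kernel pattern, where the size feeds __get_free_pages().)
         */
        if (nr_slots > UINT_MAX / slot_size)
                return NULL;
        return calloc(nr_slots, slot_size);
}

int main(void)
{
        void *ok = alloc_table(1024, 8);
        void *bad = alloc_table(UINT_MAX, 8);   /* rejected instead of wrapping */

        printf("sane size: %s\n", ok ? "allocated" : "rejected");
        printf("overflowing size: %s\n", bad ? "allocated" : "rejected");
        free(ok);
        free(bad);
        return 0;
}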
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 2c881871db38..cf7c74599cbe 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1724,9 +1724,11 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
 
         err = nf_tables_newexpr(ctx, &info, expr);
         if (err < 0)
-                goto err2;
+                goto err3;
 
         return expr;
+err3:
+        kfree(expr);
 err2:
         module_put(info.ops->type->owner);
 err1:
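
The nftables fix restores the usual layered-unwind idiom: each later failure point jumps to a label that releases everything acquired so far, in reverse order; jumping to err2 skipped freeing expr. A compact illustration of the idiom, with resources modeled by malloc/free and all names hypothetical:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
        void *expr = NULL;
        void *owner_ref = malloc(16);   /* stands in for the module reference */

        if (!owner_ref)
                goto err1;

        expr = malloc(32);              /* stands in for the expr allocation */
        if (!expr)
                goto err2;

        if (1 /* pretend nf_tables_newexpr() failed */)
                goto err3;

        return 0;
err3:
        free(expr);                     /* the step the buggy path skipped */
err2:
        free(owner_ref);
err1:
        return -1;
}

int main(void)
{
        printf("setup() = %d\n", setup());
        return 0;
}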
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 137e308d5b24..81fbb450783e 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -54,7 +54,6 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
         const struct nf_conn_help *help;
         const struct nf_conntrack_tuple *tuple;
         const struct nf_conntrack_helper *helper;
-        long diff;
         unsigned int state;
 
         ct = nf_ct_get(pkt->skb, &ctinfo);
@@ -94,10 +93,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
                 return;
 #endif
         case NFT_CT_EXPIRATION:
-                diff = (long)jiffies - (long)ct->timeout.expires;
-                if (diff < 0)
-                        diff = 0;
-                *dest = jiffies_to_msecs(diff);
+                *dest = jiffies_to_msecs(nf_ct_expires(ct));
                 return;
         case NFT_CT_HELPER:
                 if (ct->master == NULL)
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 16c50b0dd426..f4bad9dc15c4 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -227,7 +227,7 @@ void nft_meta_set_eval(const struct nft_expr *expr,
                 skb->pkt_type = value;
                 break;
         case NFT_META_NFTRACE:
-                skb->nf_trace = 1;
+                skb->nf_trace = !!value;
                 break;
         default:
                 WARN_ON(1);
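
The nft_meta change matters because nf_trace is a one-bit field: always storing 1 made it impossible to clear the flag, and assigning an arbitrary integer would keep only its low bit. The !! idiom folds any value to exactly 0 or 1, as this small sketch shows:

#include <stdio.h>

struct flags {
        unsigned int nf_trace:1;        /* one-bit field, as in struct sk_buff */
};

int main(void)
{
        struct flags f = { 1 };

        f.nf_trace = !!0;               /* clear: 0 stays 0 */
        printf("after clearing: %u\n", f.nf_trace);
        f.nf_trace = !!42;              /* set: any nonzero value becomes 1 */
        printf("after setting:  %u\n", f.nf_trace);
        return 0;
}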
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 9f0983fa4d52..b43c4015b2f7 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1927,13 +1927,11 @@ retry:
                 goto out_unlock;
         }
 
-        sockc.tsflags = 0;
+        sockc.tsflags = sk->sk_tsflags;
         if (msg->msg_controllen) {
                 err = sock_cmsg_send(sk, msg, &sockc);
-                if (unlikely(err)) {
-                        err = -EINVAL;
+                if (unlikely(err))
                         goto out_unlock;
-                }
         }
 
         skb->protocol = proto;
@@ -2678,7 +2676,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
         dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
         }
 
-        sockc.tsflags = 0;
+        sockc.tsflags = po->sk.sk_tsflags;
         if (msg->msg_controllen) {
                 err = sock_cmsg_send(&po->sk, msg, &sockc);
                 if (unlikely(err))
@@ -2881,7 +2879,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
         if (unlikely(!(dev->flags & IFF_UP)))
                 goto out_unlock;
 
-        sockc.tsflags = 0;
+        sockc.tsflags = sk->sk_tsflags;
         sockc.mark = sk->sk_mark;
         if (msg->msg_controllen) {
                 err = sock_cmsg_send(sk, msg, &sockc);
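
All three af_packet send paths now seed sockc.tsflags from the socket-wide sk_tsflags instead of 0, so SO_TIMESTAMPING settings apply by default while a per-message control message can still override them. A sketch of the default-then-override pattern, with illustrative names:

#include <stdio.h>

struct sockcm {
        unsigned int tsflags;
};

static void init_sockcm(struct sockcm *sockc, unsigned int sk_tsflags,
                        int has_cmsg, unsigned int cmsg_tsflags)
{
        sockc->tsflags = sk_tsflags;            /* inherit socket-wide setup */
        if (has_cmsg)
                sockc->tsflags = cmsg_tsflags;  /* per-packet override */
}

int main(void)
{
        struct sockcm sockc;

        init_sockcm(&sockc, 0x5, 0, 0);
        printf("no cmsg:   tsflags = 0x%x\n", sockc.tsflags);
        init_sockcm(&sockc, 0x5, 1, 0x2);
        printf("with cmsg: tsflags = 0x%x\n", sockc.tsflags);
        return 0;
}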
diff --git a/net/rose/rose_in.c b/net/rose/rose_in.c
index 79c4abcfa6b4..0a6394754e81 100644
--- a/net/rose/rose_in.c
+++ b/net/rose/rose_in.c
@@ -164,7 +164,8 @@ static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int framety
         rose_frames_acked(sk, nr);
         if (ns == rose->vr) {
                 rose_start_idletimer(sk);
-                if (sock_queue_rcv_skb(sk, skb) == 0) {
+                if (sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) == 0 &&
+                    __sock_queue_rcv_skb(sk, skb) == 0) {
                         rose->vr = (rose->vr + 1) % ROSE_MODULUS;
                         queued = 1;
                 } else {
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 62f9d8100c6e..052f84d6cc23 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1140,8 +1140,10 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 
         if (!cl->level && cl->un.leaf.q)
                 qlen = cl->un.leaf.q->q.qlen;
-        cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
-        cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
+        cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
+                                    INT_MIN, INT_MAX);
+        cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
+                                     INT_MIN, INT_MAX);
 
         if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
             gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
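
The HTB fix addresses a plain narrowing bug: the 64-bit tick count is pinned to [INT_MIN, INT_MAX] before being stored in the 32-bit xstats field, where bare truncation could flip the sign. A standalone sketch of the same clamp:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static int clamp_to_int(int64_t v)
{
        if (v < INT_MIN)
                return INT_MIN;
        if (v > INT_MAX)
                return INT_MAX;
        return (int)v;
}

int main(void)
{
        int64_t tokens = 5000000000LL;          /* larger than INT_MAX */

        printf("truncated: %d\n", (int)tokens); /* implementation-defined */
        printf("clamped:   %d\n", clamp_to_int(tokens));
        return 0;
}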
diff --git a/net/sctp/input.c b/net/sctp/input.c
index a701527a9480..47cf4604d19c 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -112,7 +112,6 @@ int sctp_rcv(struct sk_buff *skb)
         struct sctp_ep_common *rcvr;
         struct sctp_transport *transport = NULL;
         struct sctp_chunk *chunk;
-        struct sctphdr *sh;
         union sctp_addr src;
         union sctp_addr dest;
         int family;
@@ -127,8 +126,6 @@ int sctp_rcv(struct sk_buff *skb)
         if (skb_linearize(skb))
                 goto discard_it;
 
-        sh = sctp_hdr(skb);
-
         /* Pull up the IP and SCTP headers. */
         __skb_pull(skb, skb_transport_offset(skb));
         if (skb->len < sizeof(struct sctphdr))
@@ -230,7 +227,7 @@ int sctp_rcv(struct sk_buff *skb)
         chunk->rcvr = rcvr;
 
         /* Remember the SCTP header. */
-        chunk->sctp_hdr = sh;
+        chunk->sctp_hdr = sctp_hdr(skb);
 
         /* Set the source and destination addresses of the incoming chunk. */
         sctp_init_addrs(chunk, &src, &dest);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index bf8f05c3eb82..a597708ae381 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -330,6 +330,21 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b)
         return 0;
 }
 
+/* tipc_bearer_reset_all - reset all links on all bearers
+ */
+void tipc_bearer_reset_all(struct net *net)
+{
+        struct tipc_net *tn = tipc_net(net);
+        struct tipc_bearer *b;
+        int i;
+
+        for (i = 0; i < MAX_BEARERS; i++) {
+                b = rcu_dereference_rtnl(tn->bearer_list[i]);
+                if (b)
+                        tipc_reset_bearer(net, b);
+        }
+}
+
 /**
  * bearer_disable
  *
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index f686e41b5abb..60e49c3be19c 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -198,6 +198,7 @@ void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest);
 void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest);
 struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name);
 struct tipc_media *tipc_media_find(const char *name);
+void tipc_bearer_reset_all(struct net *net);
 int tipc_bearer_setup(void);
 void tipc_bearer_cleanup(void);
 void tipc_bearer_stop(struct net *net);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 67b6ab9f4c8d..7d89f8713d49 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -349,6 +349,8 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
         u16 ack = snd_l->snd_nxt - 1;
 
         snd_l->ackers--;
+        rcv_l->bc_peer_is_up = true;
+        rcv_l->state = LINK_ESTABLISHED;
         tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
         tipc_link_reset(rcv_l);
         rcv_l->state = LINK_RESET;
@@ -1559,7 +1561,12 @@ void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
         if (!msg_peer_node_is_up(hdr))
                 return;
 
-        l->bc_peer_is_up = true;
+        /* Open when peer acknowledges our bcast init msg (pkt #1) */
+        if (msg_ack(hdr))
+                l->bc_peer_is_up = true;
+
+        if (!l->bc_peer_is_up)
+                return;
 
         /* Ignore if peers_snd_nxt goes beyond receive window */
         if (more(peers_snd_nxt, l->rcv_nxt + l->window))
diff --git a/net/tipc/node.c b/net/tipc/node.c
index e01e2c71b5a1..23d4761842a0 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1297,10 +1297,6 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
 
         rc = tipc_bcast_rcv(net, be->link, skb);
 
-        /* Broadcast link reset may happen at reassembly failure */
-        if (rc & TIPC_LINK_DOWN_EVT)
-                tipc_node_reset_links(n);
-
         /* Broadcast ACKs are sent on a unicast link */
         if (rc & TIPC_LINK_SND_BC_ACK) {
                 tipc_node_read_lock(n);
@@ -1320,6 +1316,17 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
                 spin_unlock_bh(&be->inputq2.lock);
                 tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2);
         }
+
+        if (rc & TIPC_LINK_DOWN_EVT) {
+                /* Reception reassembly failure => reset all links to peer */
+                if (!tipc_link_is_up(be->link))
+                        tipc_node_reset_links(n);
+
+                /* Retransmission failure => reset all links to all peers */
+                if (!tipc_link_is_up(tipc_bc_sndlink(net)))
+                        tipc_bearer_reset_all(net);
+        }
+
         tipc_node_put(n);
 }
 
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d7599014055d..7d72283901a3 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3487,16 +3487,16 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
                 params.smps_mode = NL80211_SMPS_OFF;
         }
 
+        params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
+        if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
+                return -EOPNOTSUPP;
+
         if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
                 params.acl = parse_acl_data(&rdev->wiphy, info);
                 if (IS_ERR(params.acl))
                         return PTR_ERR(params.acl);
         }
 
-        params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
-        if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
-                return -EOPNOTSUPP;
-
         wdev_lock(wdev);
         err = rdev_start_ap(rdev, dev, &params);
         if (!err) {
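
The nl80211 reorder is presumably about the error path: parse_acl_data() allocates params.acl, so bailing out with -EOPNOTSUPP after it would leak the ACL, while doing the cheap PBSS capability check first keeps that path allocation-free. A simplified model of the ordering, with all names illustrative:

#include <stdio.h>
#include <stdlib.h>

static int start_ap(int pbss_requested, int has_60ghz, int has_acl)
{
        void *acl = NULL;

        if (pbss_requested && !has_60ghz)   /* cheap check before allocating */
                return -1;

        if (has_acl) {
                acl = malloc(64);           /* models parse_acl_data() */
                if (!acl)
                        return -1;
        }

        /* ... would start the AP here ... */
        free(acl);
        return 0;
}

int main(void)
{
        printf("pbss without 60GHz band: %d\n", start_ap(1, 0, 1));
        printf("normal start:            %d\n", start_ap(0, 0, 1));
        return 0;
}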
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 2443ee30ba5b..b7d1592bd5b8 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -721,6 +721,8 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
          * alignment since sizeof(struct ethhdr) is 14.
          */
         frame = dev_alloc_skb(hlen + sizeof(struct ethhdr) + 2 + cur_len);
+        if (!frame)
+                return NULL;
 
         skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
         skb_copy_bits(skb, offset, skb_put(frame, cur_len), cur_len);