-rw-r--r--  Documentation/devicetree/bindings/net/davinci_emac.txt | 2
-rw-r--r--  Documentation/networking/packet_mmap.txt | 10
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 6
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 5
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 41
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 4
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 8
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 26
-rw-r--r--  drivers/net/macvtap.c | 2
-rw-r--r--  drivers/net/tun.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 17
-rw-r--r--  drivers/net/xen-netback/netback.c | 236
-rw-r--r--  include/linux/ipv6.h | 1
-rw-r--r--  include/net/ipv6.h | 3
-rw-r--r--  include/net/sock.h | 6
-rw-r--r--  net/bridge/br_private.h | 10
-rw-r--r--  net/bridge/br_stp_bpdu.c | 2
-rw-r--r--  net/core/skbuff.c | 1
-rw-r--r--  net/ipv4/tcp_memcontrol.c | 7
-rw-r--r--  net/packet/af_packet.c | 65
-rw-r--r--  net/sched/act_api.c | 26
-rw-r--r--  net/sched/act_csum.c | 2
-rw-r--r--  net/sched/act_gact.c | 2
-rw-r--r--  net/sched/act_ipt.c | 4
-rw-r--r--  net/sched/act_mirred.c | 2
-rw-r--r--  net/sched/act_nat.c | 2
-rw-r--r--  net/sched/act_pedit.c | 2
-rw-r--r--  net/sched/act_police.c | 1
-rw-r--r--  net/sched/act_simple.c | 1
-rw-r--r--  net/sched/act_skbedit.c | 1
-rw-r--r--  net/sctp/transport.c | 2
37 files changed, 314 insertions, 208 deletions
diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt
index 48b259e29e87..bad381faf036 100644
--- a/Documentation/devicetree/bindings/net/davinci_emac.txt
+++ b/Documentation/devicetree/bindings/net/davinci_emac.txt
@@ -4,7 +4,7 @@ This file provides information, what the device node
 for the davinci_emac interface contains.
 
 Required properties:
-- compatible: "ti,davinci-dm6467-emac";
+- compatible: "ti,davinci-dm6467-emac" or "ti,am3517-emac"
 - reg: Offset and length of the register set for the device
 - ti,davinci-ctrl-reg-offset: offset to control register
 - ti,davinci-ctrl-mod-reg-offset: offset to control module register
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index c01223628a87..8e48e3b14227 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -123,6 +123,16 @@ Transmission process is similar to capture as shown below.
 [shutdown] close() --------> destruction of the transmission socket and
                              deallocation of all associated resources.
 
+Socket creation and destruction is also straight forward, and is done
+the same way as in capturing described in the previous paragraph:
+
+    int fd = socket(PF_PACKET, mode, 0);
+
+The protocol can optionally be 0 in case we only want to transmit
+via this socket, which avoids an expensive call to packet_rcv().
+In this case, you also need to bind(2) the TX_RING with sll_protocol = 0
+set. Otherwise, htons(ETH_P_ALL) or any other protocol, for example.
+
 Binding the socket to your network interface is mandatory (with zero copy) to
 know the header size of frames used in the circular buffer.
 
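The paragraph added above can be exercised from userspace with a minimal
sketch along the following lines (illustration only, not part of the patch).
Protocol 0 is passed at socket() time and sll_protocol is left at 0 in the
bind(), so received frames never reach this socket. The interface name
"eth0", the SOCK_RAW mode and the bare-bones error handling are assumptions
made only for the example; PF_PACKET sockets also require CAP_NET_RAW.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_packet.h>
    #include <net/if.h>                 /* if_nametoindex() */

    int main(void)
    {
            struct sockaddr_ll ll;
            int fd;

            /* protocol 0: transmit-only, avoids the packet_rcv() cost */
            fd = socket(PF_PACKET, SOCK_RAW, 0);
            if (fd < 0) {
                    perror("socket");
                    exit(1);
            }

            memset(&ll, 0, sizeof(ll));
            ll.sll_family   = AF_PACKET;
            ll.sll_protocol = 0;        /* keep the RX path disabled */
            ll.sll_ifindex  = if_nametoindex("eth0");

            if (bind(fd, (struct sockaddr *)&ll, sizeof(ll)) < 0) {
                    perror("bind");
                    exit(1);
            }

            /* PACKET_TX_RING setup and sendto() would go here. */

            close(fd);
            return 0;
    }

A socket opened with htons(ETH_P_ALL) instead would additionally receive a
copy of every frame seen on the interface, which is the overhead the new
documentation text recommends avoiding for transmit-only users.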
diff --git a/MAINTAINERS b/MAINTAINERS
index b9ea9321adde..63ae89617fc5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4450,10 +4450,8 @@ M: Bruce Allan <bruce.w.allan@intel.com>
 M:	Carolyn Wyborny <carolyn.wyborny@intel.com>
 M:	Don Skidmore <donald.c.skidmore@intel.com>
 M:	Greg Rose <gregory.v.rose@intel.com>
-M:	Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
 M:	Alex Duyck <alexander.h.duyck@intel.com>
 M:	John Ronciak <john.ronciak@intel.com>
-M:	Tushar Dave <tushar.n.dave@intel.com>
 L:	e1000-devel@lists.sourceforge.net
 W:	http://www.intel.com/support/feedback.htm
 W:	http://e1000.sourceforge.net/
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 36eab0c4fb33..398e299ee1bd 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4199,9 +4199,9 @@ static int bond_check_params(struct bond_params *params)
 		    (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
 		/* not complete check, but should be good enough to
 		   catch mistakes */
-		__be32 ip = in_aton(arp_ip_target[i]);
-		if (!isdigit(arp_ip_target[i][0]) || ip == 0 ||
-		    ip == htonl(INADDR_BROADCAST)) {
+		__be32 ip;
+		if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
+		    IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) {
 			pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
 				   arp_ip_target[i]);
 			arp_interval = 0;
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index c778a26fb4da..e46467683e82 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1634,12 +1634,12 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
 					      char *buf)
 {
 	struct bonding *bond = to_bond(d);
-	int packets_per_slave = bond->params.packets_per_slave;
+	unsigned int packets_per_slave = bond->params.packets_per_slave;
 
 	if (packets_per_slave > 1)
 		packets_per_slave = reciprocal_value(packets_per_slave);
 
-	return sprintf(buf, "%d\n", packets_per_slave);
+	return sprintf(buf, "%u\n", packets_per_slave);
 }
 
 static ssize_t bonding_store_packets_per_slave(struct device *d,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 0216d592d0ce..2e46c28fc601 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -3114,6 +3114,11 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
 {
 	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
 
+	if (!IS_SRIOV(bp)) {
+		BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
+		return -EINVAL;
+	}
+
 	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
 	   num_vfs_param, BNX2X_NR_VIRTFN(bp));
 
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 3e2162121601..dc88782185f2 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -64,6 +64,9 @@
 #define SLIPORT_ERROR_NO_RESOURCE1	0x2
 #define SLIPORT_ERROR_NO_RESOURCE2	0x9
 
+#define SLIPORT_ERROR_FW_RESET1		0x2
+#define SLIPORT_ERROR_FW_RESET2		0x0
+
 /********* Memory BAR register ************/
 #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET	0xfc
 /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index fee64bf10446..0fde69d5cb6a 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2464,8 +2464,16 @@ void be_detect_error(struct be_adapter *adapter)
 	 */
 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
 		adapter->hw_error = true;
-		dev_err(&adapter->pdev->dev,
-			"Error detected in the card\n");
+		/* Do not log error messages if its a FW reset */
+		if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
+		    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
+			dev_info(&adapter->pdev->dev,
+				 "Firmware update in progress\n");
+			return;
+		} else {
+			dev_err(&adapter->pdev->dev,
+				"Error detected in the card\n");
+		}
 	}
 
 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
@@ -2932,28 +2940,35 @@ static void be_cancel_worker(struct be_adapter *adapter)
 	}
 }
 
-static int be_clear(struct be_adapter *adapter)
+static void be_mac_clear(struct be_adapter *adapter)
 {
 	int i;
 
+	if (adapter->pmac_id) {
+		for (i = 0; i < (adapter->uc_macs + 1); i++)
+			be_cmd_pmac_del(adapter, adapter->if_handle,
+					adapter->pmac_id[i], 0);
+		adapter->uc_macs = 0;
+
+		kfree(adapter->pmac_id);
+		adapter->pmac_id = NULL;
+	}
+}
+
+static int be_clear(struct be_adapter *adapter)
+{
 	be_cancel_worker(adapter);
 
 	if (sriov_enabled(adapter))
 		be_vf_clear(adapter);
 
 	/* delete the primary mac along with the uc-mac list */
-	for (i = 0; i < (adapter->uc_macs + 1); i++)
-		be_cmd_pmac_del(adapter, adapter->if_handle,
-				adapter->pmac_id[i], 0);
-	adapter->uc_macs = 0;
+	be_mac_clear(adapter);
 
 	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
 
 	be_clear_queues(adapter);
 
-	kfree(adapter->pmac_id);
-	adapter->pmac_id = NULL;
-
 	be_msix_disable(adapter);
 	return 0;
 }
@@ -3812,6 +3827,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
 	}
 
 	if (change_status == LANCER_FW_RESET_NEEDED) {
+		dev_info(&adapter->pdev->dev,
+			 "Resetting adapter to activate new FW\n");
 		status = lancer_physdev_ctrl(adapter,
 					     PHYSDEV_CONTROL_FW_RESET_MASK);
 		if (status) {
@@ -4363,13 +4380,13 @@ static int lancer_recover_func(struct be_adapter *adapter)
 		goto err;
 	}
 
-	dev_err(dev, "Error recovery successful\n");
+	dev_err(dev, "Adapter recovery successful\n");
 	return 0;
 err:
 	if (status == -EAGAIN)
 		dev_err(dev, "Waiting for resource provisioning\n");
 	else
-		dev_err(dev, "Error recovery failed\n");
+		dev_err(dev, "Adapter recovery failed\n");
 
 	return status;
 }
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b8e232b4ea2d..d5f0d72e5e33 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1378,7 +1378,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 
 		dev_kfree_skb_any(skb);
 		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
-				 rx_desc->data_size, DMA_FROM_DEVICE);
+				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
 	}
 
 	if (rx_done)
@@ -1424,7 +1424,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 		}
 
 		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
-				 rx_desc->data_size, DMA_FROM_DEVICE);
+				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
 
 		rx_bytes = rx_desc->data_size -
 			   (ETH_FCS_LEN + MVNETA_MH_SIZE);
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index d8bdaf6d82d2..493a1125f54f 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -5149,8 +5149,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
-	int result;
-	memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));
+	int result, count;
+
+	count = nv_get_sset_count(dev, ETH_SS_TEST);
+	memset(buffer, 0, count * sizeof(u64));
 
 	if (!nv_link_test(dev)) {
 		test->flags |= ETH_TEST_FL_FAILED;
@@ -5194,7 +5196,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 		return;
 	}
 
-	if (!nv_loopback_test(dev)) {
+	if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) {
 		test->flags |= ETH_TEST_FL_FAILED;
 		buffer[3] = 1;
 	}
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 0c9c4e895595..03517478e589 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
  */
 #define DRV_NAME	"qlge"
 #define DRV_STRING	"QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION	"1.00.00.33"
+#define DRV_VERSION	"1.00.00.34"
 
 #define WQ_ADDR_ALIGN	0x3	/* 4 byte alignment */
 
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 0780e039b271..8dee1beb9854 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -181,6 +181,7 @@ static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
 };
 #define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
 #define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
+#define QLGE_RCV_MAC_ERR_STATS	7
 
 static int ql_update_ring_coalescing(struct ql_adapter *qdev)
 {
@@ -280,6 +281,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
 		iter++;
 	}
 
+	/* Update receive mac error statistics */
+	iter += QLGE_RCV_MAC_ERR_STATS;
+
 	/*
 	 * Get Per-priority TX pause frame counter statistics.
 	 */
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index a245dc18d769..449f506d2e8f 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2376,14 +2376,6 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
 					    netdev_features_t features)
 {
 	int err;
-	/*
-	 * Since there is no support for separate rx/tx vlan accel
-	 * enable/disable make sure tx flag is always in same state as rx.
-	 */
-	if (features & NETIF_F_HW_VLAN_CTAG_RX)
-		features |= NETIF_F_HW_VLAN_CTAG_TX;
-	else
-		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 
 	/* Update the behavior of vlan accel in the adapter */
 	err = qlge_update_hw_vlan_features(ndev, features);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 41ba974bf37c..cd9b164a0434 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -61,6 +61,7 @@
 #include <linux/davinci_emac.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/of_net.h>
 
@@ -1752,10 +1753,14 @@ static const struct net_device_ops emac_netdev_ops = {
 #endif
 };
 
+static const struct of_device_id davinci_emac_of_match[];
+
 static struct emac_platform_data *
 davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
 {
 	struct device_node *np;
+	const struct of_device_id *match;
+	const struct emac_platform_data *auxdata;
 	struct emac_platform_data *pdata = NULL;
 	const u8 *mac_addr;
 
@@ -1793,7 +1798,20 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
 
 	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
 	if (!priv->phy_node)
-		pdata->phy_id = "";
+		pdata->phy_id = NULL;
+
+	auxdata = pdev->dev.platform_data;
+	if (auxdata) {
+		pdata->interrupt_enable = auxdata->interrupt_enable;
+		pdata->interrupt_disable = auxdata->interrupt_disable;
+	}
+
+	match = of_match_device(davinci_emac_of_match, &pdev->dev);
+	if (match && match->data) {
+		auxdata = match->data;
+		pdata->version = auxdata->version;
+		pdata->hw_ram_addr = auxdata->hw_ram_addr;
+	}
 
 	pdev->dev.platform_data = pdata;
 
@@ -2020,8 +2038,14 @@ static const struct dev_pm_ops davinci_emac_pm_ops = {
 };
 
 #if IS_ENABLED(CONFIG_OF)
+static const struct emac_platform_data am3517_emac_data = {
+	.version		= EMAC_VERSION_2,
+	.hw_ram_addr		= 0x01e20000,
+};
+
 static const struct of_device_id davinci_emac_of_match[] = {
 	{.compatible = "ti,davinci-dm6467-emac", },
+	{.compatible = "ti,am3517-emac", .data = &am3517_emac_data, },
 	{},
 };
 MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 4c6f84c79e95..4a34bcb6549f 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -872,6 +872,8 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
 
 	ret = macvtap_do_read(q, iv, len, file->f_flags & O_NONBLOCK);
 	ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */
+	if (ret > 0)
+		iocb->ki_pos = ret;
 out:
 	return ret;
 }
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 6b0b2a057a47..3c5a8d8cde50 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1355,6 +1355,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
 	ret = tun_do_read(tun, tfile, iv, len,
 			  file->f_flags & O_NONBLOCK);
 	ret = min_t(ssize_t, ret, len);
+	if (ret > 0)
+		iocb->ki_pos = ret;
 out:
 	tun_put(tun);
 	return ret;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5c24288814db..56c2229d28fd 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -425,10 +425,10 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
 		pr_debug("%s: short packet %i\n", dev->name, len);
 		dev->stats.rx_length_errors++;
-		if (vi->big_packets)
-			give_pages(rq, buf);
-		else if (vi->mergeable_rx_bufs)
+		if (vi->mergeable_rx_bufs)
 			put_page(virt_to_head_page(buf));
+		else if (vi->big_packets)
+			give_pages(rq, buf);
 		else
 			dev_kfree_skb(buf);
 		return;
@@ -1366,6 +1366,11 @@ static void virtnet_config_changed(struct virtio_device *vdev)
 
 static void virtnet_free_queues(struct virtnet_info *vi)
 {
+	int i;
+
+	for (i = 0; i < vi->max_queue_pairs; i++)
+		netif_napi_del(&vi->rq[i].napi);
+
 	kfree(vi->rq);
 	kfree(vi->sq);
 }
@@ -1395,10 +1400,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
 		struct virtqueue *vq = vi->rq[i].vq;
 
 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
-			if (vi->big_packets)
-				give_pages(&vi->rq[i], buf);
-			else if (vi->mergeable_rx_bufs)
+			if (vi->mergeable_rx_bufs)
 				put_page(virt_to_head_page(buf));
+			else if (vi->big_packets)
+				give_pages(&vi->rq[i], buf);
 			else
 				dev_kfree_skb(buf);
 			--vi->rq[i].num;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 64f0e0d18b81..acf13920e6d1 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1149,49 +1149,72 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
 	return 0;
 }
 
-static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len)
+static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len,
+				  unsigned int max)
 {
-	if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) {
-		/* If we need to pullup then pullup to the max, so we
-		 * won't need to do it again.
-		 */
-		int target = min_t(int, skb->len, MAX_TCP_HEADER);
-		__pskb_pull_tail(skb, target - skb_headlen(skb));
-	}
+	if (skb_headlen(skb) >= len)
+		return 0;
+
+	/* If we need to pullup then pullup to the max, so we
+	 * won't need to do it again.
+	 */
+	if (max > skb->len)
+		max = skb->len;
+
+	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
+		return -ENOMEM;
+
+	if (skb_headlen(skb) < len)
+		return -EPROTO;
+
+	return 0;
 }
 
+/* This value should be large enough to cover a tagged ethernet header plus
+ * maximally sized IP and TCP or UDP headers.
+ */
+#define MAX_IP_HDR_LEN 128
+
 static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
 			     int recalculate_partial_csum)
 {
-	struct iphdr *iph = (void *)skb->data;
-	unsigned int header_size;
 	unsigned int off;
-	int err = -EPROTO;
+	bool fragment;
+	int err;
+
+	fragment = false;
+
+	err = maybe_pull_tail(skb,
+			      sizeof(struct iphdr),
+			      MAX_IP_HDR_LEN);
+	if (err < 0)
+		goto out;
 
-	off = sizeof(struct iphdr);
+	if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
+		fragment = true;
 
-	header_size = skb->network_header + off + MAX_IPOPTLEN;
-	maybe_pull_tail(skb, header_size);
+	off = ip_hdrlen(skb);
 
-	off = iph->ihl * 4;
+	err = -EPROTO;
 
-	switch (iph->protocol) {
+	switch (ip_hdr(skb)->protocol) {
 	case IPPROTO_TCP:
 		if (!skb_partial_csum_set(skb, off,
 					  offsetof(struct tcphdr, check)))
 			goto out;
 
 		if (recalculate_partial_csum) {
-			struct tcphdr *tcph = tcp_hdr(skb);
-
-			header_size = skb->network_header +
-				off +
-				sizeof(struct tcphdr);
-			maybe_pull_tail(skb, header_size);
-
-			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-							 skb->len - off,
-							 IPPROTO_TCP, 0);
+			err = maybe_pull_tail(skb,
+					      off + sizeof(struct tcphdr),
+					      MAX_IP_HDR_LEN);
+			if (err < 0)
+				goto out;
+
+			tcp_hdr(skb)->check =
+				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+						   ip_hdr(skb)->daddr,
+						   skb->len - off,
+						   IPPROTO_TCP, 0);
 		}
 		break;
 	case IPPROTO_UDP:
@@ -1200,24 +1223,20 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
 			goto out;
 
 		if (recalculate_partial_csum) {
-			struct udphdr *udph = udp_hdr(skb);
-
-			header_size = skb->network_header +
-				off +
-				sizeof(struct udphdr);
-			maybe_pull_tail(skb, header_size);
-
-			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-							 skb->len - off,
-							 IPPROTO_UDP, 0);
+			err = maybe_pull_tail(skb,
+					      off + sizeof(struct udphdr),
+					      MAX_IP_HDR_LEN);
+			if (err < 0)
+				goto out;
+
+			udp_hdr(skb)->check =
+				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+						   ip_hdr(skb)->daddr,
+						   skb->len - off,
+						   IPPROTO_UDP, 0);
 		}
 		break;
 	default:
-		if (net_ratelimit())
-			netdev_err(vif->dev,
-				   "Attempting to checksum a non-TCP/UDP packet, "
-				   "dropping a protocol %d packet\n",
-				   iph->protocol);
 		goto out;
 	}
 
@@ -1227,75 +1246,99 @@ out:
 	return err;
 }
 
+/* This value should be large enough to cover a tagged ethernet header plus
+ * an IPv6 header, all options, and a maximal TCP or UDP header.
+ */
+#define MAX_IPV6_HDR_LEN 256
+
+#define OPT_HDR(type, skb, off) \
+	(type *)(skb_network_header(skb) + (off))
+
 static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
 			       int recalculate_partial_csum)
 {
-	int err = -EPROTO;
-	struct ipv6hdr *ipv6h = (void *)skb->data;
+	int err;
 	u8 nexthdr;
-	unsigned int header_size;
 	unsigned int off;
+	unsigned int len;
 	bool fragment;
 	bool done;
 
+	fragment = false;
 	done = false;
 
 	off = sizeof(struct ipv6hdr);
 
-	header_size = skb->network_header + off;
-	maybe_pull_tail(skb, header_size);
+	err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
+	if (err < 0)
+		goto out;
 
-	nexthdr = ipv6h->nexthdr;
+	nexthdr = ipv6_hdr(skb)->nexthdr;
 
-	while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) &&
-	       !done) {
+	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
+	while (off <= len && !done) {
 		switch (nexthdr) {
 		case IPPROTO_DSTOPTS:
 		case IPPROTO_HOPOPTS:
 		case IPPROTO_ROUTING: {
-			struct ipv6_opt_hdr *hp = (void *)(skb->data + off);
+			struct ipv6_opt_hdr *hp;
 
-			header_size = skb->network_header +
-				off +
-				sizeof(struct ipv6_opt_hdr);
-			maybe_pull_tail(skb, header_size);
+			err = maybe_pull_tail(skb,
+					      off +
+					      sizeof(struct ipv6_opt_hdr),
+					      MAX_IPV6_HDR_LEN);
+			if (err < 0)
+				goto out;
 
+			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
 			nexthdr = hp->nexthdr;
 			off += ipv6_optlen(hp);
 			break;
 		}
 		case IPPROTO_AH: {
-			struct ip_auth_hdr *hp = (void *)(skb->data + off);
+			struct ip_auth_hdr *hp;
+
+			err = maybe_pull_tail(skb,
+					      off +
+					      sizeof(struct ip_auth_hdr),
+					      MAX_IPV6_HDR_LEN);
+			if (err < 0)
+				goto out;
+
+			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
+			nexthdr = hp->nexthdr;
+			off += ipv6_authlen(hp);
+			break;
+		}
+		case IPPROTO_FRAGMENT: {
+			struct frag_hdr *hp;
 
-			header_size = skb->network_header +
-				off +
-				sizeof(struct ip_auth_hdr);
-			maybe_pull_tail(skb, header_size);
+			err = maybe_pull_tail(skb,
+					      off +
+					      sizeof(struct frag_hdr),
+					      MAX_IPV6_HDR_LEN);
+			if (err < 0)
+				goto out;
+
+			hp = OPT_HDR(struct frag_hdr, skb, off);
+
+			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
+				fragment = true;
 
 			nexthdr = hp->nexthdr;
-			off += (hp->hdrlen+2)<<2;
+			off += sizeof(struct frag_hdr);
 			break;
 		}
-		case IPPROTO_FRAGMENT:
-			fragment = true;
-			/* fall through */
 		default:
 			done = true;
 			break;
 		}
 	}
 
-	if (!done) {
-		if (net_ratelimit())
-			netdev_err(vif->dev, "Failed to parse packet header\n");
-		goto out;
-	}
+	err = -EPROTO;
 
-	if (fragment) {
-		if (net_ratelimit())
-			netdev_err(vif->dev, "Packet is a fragment!\n");
+	if (!done || fragment)
 		goto out;
-	}
 
 	switch (nexthdr) {
 	case IPPROTO_TCP:
@@ -1304,17 +1347,17 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
 			goto out;
 
 		if (recalculate_partial_csum) {
-			struct tcphdr *tcph = tcp_hdr(skb);
-
-			header_size = skb->network_header +
-				off +
-				sizeof(struct tcphdr);
-			maybe_pull_tail(skb, header_size);
-
-			tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
-						       &ipv6h->daddr,
-						       skb->len - off,
-						       IPPROTO_TCP, 0);
+			err = maybe_pull_tail(skb,
+					      off + sizeof(struct tcphdr),
+					      MAX_IPV6_HDR_LEN);
+			if (err < 0)
+				goto out;
+
+			tcp_hdr(skb)->check =
+				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						 &ipv6_hdr(skb)->daddr,
+						 skb->len - off,
+						 IPPROTO_TCP, 0);
 		}
 		break;
 	case IPPROTO_UDP:
@@ -1323,25 +1366,20 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
 			goto out;
 
 		if (recalculate_partial_csum) {
-			struct udphdr *udph = udp_hdr(skb);
-
-			header_size = skb->network_header +
-				off +
-				sizeof(struct udphdr);
-			maybe_pull_tail(skb, header_size);
-
-			udph->check = ~csum_ipv6_magic(&ipv6h->saddr,
-						       &ipv6h->daddr,
-						       skb->len - off,
-						       IPPROTO_UDP, 0);
+			err = maybe_pull_tail(skb,
+					      off + sizeof(struct udphdr),
+					      MAX_IPV6_HDR_LEN);
+			if (err < 0)
+				goto out;
+
+			udp_hdr(skb)->check =
+				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						 &ipv6_hdr(skb)->daddr,
+						 skb->len - off,
+						 IPPROTO_UDP, 0);
 		}
 		break;
 	default:
-		if (net_ratelimit())
-			netdev_err(vif->dev,
-				   "Attempting to checksum a non-TCP/UDP packet, "
-				   "dropping a protocol %d packet\n",
-				   nexthdr);
 		goto out;
 	}
 
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 5d89d1b808a6..c56c350324e4 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -4,6 +4,7 @@
 #include <uapi/linux/ipv6.h>
 
 #define ipv6_optlen(p)  (((p)->hdrlen+1) << 3)
+#define ipv6_authlen(p) (((p)->hdrlen+2) << 2)
 /*
  * This structure contains configuration options per IPv6 link.
  */
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index eb198acaac1d..488316e339a1 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -110,7 +110,8 @@ struct frag_hdr {
 	__be32	identification;
 };
 
 #define	IP6_MF		0x0001
+#define	IP6_OFFSET	0xFFF8
 
 #include <net/sock.h>
 
diff --git a/include/net/sock.h b/include/net/sock.h
index e3a18ff0c38b..2ef3c3eca47a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1035,7 +1035,6 @@ enum cg_proto_flags {
 };
 
 struct cg_proto {
-	void			(*enter_memory_pressure)(struct sock *sk);
 	struct res_counter	memory_allocated;	/* Current allocated memory. */
 	struct percpu_counter	sockets_allocated;	/* Current number of sockets. */
 	int			memory_pressure;
@@ -1155,8 +1154,7 @@ static inline void sk_leave_memory_pressure(struct sock *sk)
 		struct proto *prot = sk->sk_prot;
 
 		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-			if (cg_proto->memory_pressure)
-				cg_proto->memory_pressure = 0;
+			cg_proto->memory_pressure = 0;
 	}
 
 }
@@ -1171,7 +1169,7 @@ static inline void sk_enter_memory_pressure(struct sock *sk)
 		struct proto *prot = sk->sk_prot;
 
 		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-			cg_proto->enter_memory_pressure(sk);
+			cg_proto->memory_pressure = 1;
 	}
 
 	sk->sk_prot->enter_memory_pressure(sk);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 229d820bdf0b..045d56eaeca2 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -426,6 +426,16 @@ netdev_features_t br_features_recompute(struct net_bridge *br,
 int br_handle_frame_finish(struct sk_buff *skb);
 rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
 
+static inline bool br_rx_handler_check_rcu(const struct net_device *dev)
+{
+	return rcu_dereference(dev->rx_handler) == br_handle_frame;
+}
+
+static inline struct net_bridge_port *br_port_get_check_rcu(const struct net_device *dev)
+{
+	return br_rx_handler_check_rcu(dev) ? br_port_get_rcu(dev) : NULL;
+}
+
 /* br_ioctl.c */
 int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd,
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 8660ea3be705..bdb459d21ad8 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -153,7 +153,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
 	if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
 		goto err;
 
-	p = br_port_get_rcu(dev);
+	p = br_port_get_check_rcu(dev);
 	if (!p)
 		goto err;
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2718fed53d8c..06e72d3cdf60 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3584,6 +3584,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
 	skb->tstamp.tv64 = 0;
 	skb->pkt_type = PACKET_HOST;
 	skb->skb_iif = 0;
+	skb->local_df = 0;
 	skb_dst_drop(skb);
 	skb->mark = 0;
 	secpath_reset(skb);
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 269a89ecd2f4..f7e522c558ba 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -6,13 +6,6 @@
 #include <linux/memcontrol.h>
 #include <linux/module.h>
 
-static void memcg_tcp_enter_memory_pressure(struct sock *sk)
-{
-	if (sk->sk_cgrp->memory_pressure)
-		sk->sk_cgrp->memory_pressure = 1;
-}
-EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
-
 int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
 	/*
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c235da940019..e4171dd98590 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -237,6 +237,30 @@ struct packet_skb_cb {
 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
 static void __fanout_link(struct sock *sk, struct packet_sock *po);
 
+static struct net_device *packet_cached_dev_get(struct packet_sock *po)
+{
+	struct net_device *dev;
+
+	rcu_read_lock();
+	dev = rcu_dereference(po->cached_dev);
+	if (likely(dev))
+		dev_hold(dev);
+	rcu_read_unlock();
+
+	return dev;
+}
+
+static void packet_cached_dev_assign(struct packet_sock *po,
+				     struct net_device *dev)
+{
+	rcu_assign_pointer(po->cached_dev, dev);
+}
+
+static void packet_cached_dev_reset(struct packet_sock *po)
+{
+	RCU_INIT_POINTER(po->cached_dev, NULL);
+}
+
 /* register_prot_hook must be invoked with the po->bind_lock held,
  * or from a context in which asynchronous accesses to the packet
  * socket is not possible (packet_create()).
@@ -246,12 +270,10 @@ static void register_prot_hook(struct sock *sk)
 	struct packet_sock *po = pkt_sk(sk);
 
 	if (!po->running) {
-		if (po->fanout) {
+		if (po->fanout)
 			__fanout_link(sk, po);
-		} else {
+		else
 			dev_add_pack(&po->prot_hook);
-			rcu_assign_pointer(po->cached_dev, po->prot_hook.dev);
-		}
 
 		sock_hold(sk);
 		po->running = 1;
@@ -270,12 +292,11 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
 	struct packet_sock *po = pkt_sk(sk);
 
 	po->running = 0;
-	if (po->fanout) {
+
+	if (po->fanout)
 		__fanout_unlink(sk, po);
-	} else {
+	else
 		__dev_remove_pack(&po->prot_hook);
-		RCU_INIT_POINTER(po->cached_dev, NULL);
-	}
 
 	__sock_put(sk);
 
@@ -2061,19 +2082,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
 	return tp_len;
 }
 
-static struct net_device *packet_cached_dev_get(struct packet_sock *po)
-{
-	struct net_device *dev;
-
-	rcu_read_lock();
-	dev = rcu_dereference(po->cached_dev);
-	if (dev)
-		dev_hold(dev);
-	rcu_read_unlock();
-
-	return dev;
-}
-
 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 {
 	struct sk_buff *skb;
@@ -2090,7 +2098,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 
 	mutex_lock(&po->pg_vec_lock);
 
-	if (saddr == NULL) {
+	if (likely(saddr == NULL)) {
 		dev	= packet_cached_dev_get(po);
 		proto	= po->num;
 		addr	= NULL;
@@ -2244,7 +2252,7 @@ static int packet_snd(struct socket *sock,
 	 * Get and verify the address.
 	 */
 
-	if (saddr == NULL) {
+	if (likely(saddr == NULL)) {
 		dev	= packet_cached_dev_get(po);
 		proto	= po->num;
 		addr	= NULL;
@@ -2453,6 +2461,8 @@ static int packet_release(struct socket *sock)
 
 	spin_lock(&po->bind_lock);
 	unregister_prot_hook(sk, false);
+	packet_cached_dev_reset(po);
+
 	if (po->prot_hook.dev) {
 		dev_put(po->prot_hook.dev);
 		po->prot_hook.dev = NULL;
@@ -2508,14 +2518,17 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
 
 	spin_lock(&po->bind_lock);
 	unregister_prot_hook(sk, true);
+
 	po->num = protocol;
 	po->prot_hook.type = protocol;
 	if (po->prot_hook.dev)
 		dev_put(po->prot_hook.dev);
-	po->prot_hook.dev = dev;
 
+	po->prot_hook.dev = dev;
 	po->ifindex = dev ? dev->ifindex : 0;
 
+	packet_cached_dev_assign(po, dev);
+
 	if (protocol == 0)
 		goto out_unlock;
 
@@ -2628,7 +2641,8 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
 	po = pkt_sk(sk);
 	sk->sk_family = PF_PACKET;
 	po->num = proto;
-	RCU_INIT_POINTER(po->cached_dev, NULL);
+
+	packet_cached_dev_reset(po);
 
 	sk->sk_destruct = packet_sock_destruct;
 	sk_refcnt_debug_inc(sk);
@@ -3339,6 +3353,7 @@ static int packet_notifier(struct notifier_block *this,
 				sk->sk_error_report(sk);
 			}
 			if (msg == NETDEV_UNREGISTER) {
+				packet_cached_dev_reset(po);
 				po->ifindex = -1;
 				if (po->prot_hook.dev)
 					dev_put(po->prot_hook.dev);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index fd7072827a40..69cb848e8345 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -270,6 +270,16 @@ int tcf_register_action(struct tc_action_ops *act)
 {
 	struct tc_action_ops *a, **ap;
 
+	/* Must supply act, dump, cleanup and init */
+	if (!act->act || !act->dump || !act->cleanup || !act->init)
+		return -EINVAL;
+
+	/* Supply defaults */
+	if (!act->lookup)
+		act->lookup = tcf_hash_search;
+	if (!act->walk)
+		act->walk = tcf_generic_walker;
+
 	write_lock(&act_mod_lock);
 	for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) {
 		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
@@ -381,7 +391,7 @@ int tcf_action_exec(struct sk_buff *skb, const struct tc_action *act,
 	}
 	while ((a = act) != NULL) {
 repeat:
-		if (a->ops && a->ops->act) {
+		if (a->ops) {
 			ret = a->ops->act(skb, a, res);
 			if (TC_MUNGED & skb->tc_verd) {
 				/* copied already, allow trampling */
@@ -405,7 +415,7 @@ void tcf_action_destroy(struct tc_action *act, int bind)
 	struct tc_action *a;
 
 	for (a = act; a; a = act) {
-		if (a->ops && a->ops->cleanup) {
+		if (a->ops) {
 			if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
 				module_put(a->ops->owner);
 			act = act->next;
@@ -424,7 +434,7 @@ tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 {
 	int err = -EINVAL;
 
-	if (a->ops == NULL || a->ops->dump == NULL)
+	if (a->ops == NULL)
 		return err;
 	return a->ops->dump(skb, a, bind, ref);
 }
@@ -436,7 +446,7 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 	unsigned char *b = skb_tail_pointer(skb);
 	struct nlattr *nest;
 
-	if (a->ops == NULL || a->ops->dump == NULL)
+	if (a->ops == NULL)
 		return err;
 
 	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
@@ -723,8 +733,6 @@ tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid)
 	a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
 	if (a->ops == NULL)
 		goto err_free;
-	if (a->ops->lookup == NULL)
-		goto err_mod;
 	err = -ENOENT;
 	if (a->ops->lookup(a, index) == 0)
 		goto err_mod;
@@ -1084,12 +1092,6 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
 	memset(&a, 0, sizeof(struct tc_action));
 	a.ops = a_o;
 
-	if (a_o->walk == NULL) {
-		WARN(1, "tc_dump_action: %s !capable of dumping table\n",
-		     a_o->kind);
-		goto out_module_put;
-	}
-
 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
 			cb->nlh->nlmsg_type, sizeof(*t), 0);
 	if (!nlh)
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 3a4c0caa1f7d..5c5edf56adbd 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -585,9 +585,7 @@ static struct tc_action_ops act_csum_ops = {
 	.act		= tcf_csum,
 	.dump		= tcf_csum_dump,
 	.cleanup	= tcf_csum_cleanup,
-	.lookup		= tcf_hash_search,
 	.init		= tcf_csum_init,
-	.walk		= tcf_generic_walker
 };
 
 MODULE_DESCRIPTION("Checksum updating actions");
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index fd2b3cff5fa2..5645a4d32abd 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -206,9 +206,7 @@ static struct tc_action_ops act_gact_ops = {
 	.act		= tcf_gact,
 	.dump		= tcf_gact_dump,
 	.cleanup	= tcf_gact_cleanup,
-	.lookup		= tcf_hash_search,
 	.init		= tcf_gact_init,
-	.walk		= tcf_generic_walker
 };
 
 MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 60d88b6b9560..882a89762f77 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -298,9 +298,7 @@ static struct tc_action_ops act_ipt_ops = {
 	.act		= tcf_ipt,
 	.dump		= tcf_ipt_dump,
 	.cleanup	= tcf_ipt_cleanup,
-	.lookup		= tcf_hash_search,
 	.init		= tcf_ipt_init,
-	.walk		= tcf_generic_walker
 };
 
 static struct tc_action_ops act_xt_ops = {
@@ -312,9 +310,7 @@ static struct tc_action_ops act_xt_ops = {
 	.act		= tcf_ipt,
 	.dump		= tcf_ipt_dump,
 	.cleanup	= tcf_ipt_cleanup,
-	.lookup		= tcf_hash_search,
 	.init		= tcf_ipt_init,
-	.walk		= tcf_generic_walker
 };
 
 MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 977c10e0631b..252378121ce7 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -271,9 +271,7 @@ static struct tc_action_ops act_mirred_ops = {
 	.act		= tcf_mirred,
 	.dump		= tcf_mirred_dump,
 	.cleanup	= tcf_mirred_cleanup,
-	.lookup		= tcf_hash_search,
 	.init		= tcf_mirred_init,
-	.walk		= tcf_generic_walker
 };
 
 MODULE_AUTHOR("Jamal Hadi Salim(2002)");
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 876f0ef29694..6a15ace00241 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -308,9 +308,7 @@ static struct tc_action_ops act_nat_ops = {
 	.act		= tcf_nat,
 	.dump		= tcf_nat_dump,
 	.cleanup	= tcf_nat_cleanup,
-	.lookup		= tcf_hash_search,
 	.init		= tcf_nat_init,
-	.walk		= tcf_generic_walker
 };
 
 MODULE_DESCRIPTION("Stateless NAT actions");
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 7ed78c9e505c..03b67674169c 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -243,9 +243,7 @@ static struct tc_action_ops act_pedit_ops = {
 	.act		= tcf_pedit,
 	.dump		= tcf_pedit_dump,
 	.cleanup	= tcf_pedit_cleanup,
-	.lookup		= tcf_hash_search,
 	.init		= tcf_pedit_init,
-	.walk		= tcf_generic_walker
 };
 
 MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 272d8e924cf6..16a62c36928a 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -407,7 +407,6 @@ static struct tc_action_ops act_police_ops = {
 	.act		= tcf_act_police,
 	.dump		= tcf_act_police_dump,
 	.cleanup	= tcf_act_police_cleanup,
-	.lookup		= tcf_hash_search,
 	.init		= tcf_act_police_locate,
 	.walk		= tcf_act_police_walker
 };
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 7725eb4ab756..31157d3e729c 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -201,7 +201,6 @@ static struct tc_action_ops act_simp_ops = {
 	.dump		= tcf_simp_dump,
 	.cleanup	= tcf_simp_cleanup,
 	.init		= tcf_simp_init,
-	.walk		= tcf_generic_walker,
 };
 
 MODULE_AUTHOR("Jamal Hadi Salim(2005)");
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 9119b73527c9..cf20add1c3ff 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -202,7 +202,6 @@ static struct tc_action_ops act_skbedit_ops = {
 	.dump		= tcf_skbedit_dump,
 	.cleanup	= tcf_skbedit_cleanup,
 	.init		= tcf_skbedit_init,
-	.walk		= tcf_generic_walker,
 };
 
 MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 1d4f3ac29baf..d0810dc5f079 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -572,7 +572,7 @@ void sctp_transport_burst_limited(struct sctp_transport *t)
 	u32 old_cwnd = t->cwnd;
 	u32 max_burst_bytes;
 
-	if (t->burst_limited)
+	if (t->burst_limited || asoc->max_burst == 0)
 		return;
 
 	max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);